diff --git a/go.mod b/go.mod index 6541c95a7..5ba9b47ff 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.4 // indirect + github.com/klauspost/compress v1.18.6 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/mistifyio/go-zfs/v4 v4.0.0 // indirect github.com/moby/sys/capability v0.4.0 // indirect @@ -71,10 +71,10 @@ require ( go.opentelemetry.io/otel v1.42.0 // indirect go.opentelemetry.io/otel/metric v1.42.0 // indirect go.opentelemetry.io/otel/trace v1.42.0 // indirect - go.podman.io/storage v1.62.1-0.20260310180906-9819c3739308 // indirect + go.podman.io/storage v1.62.1-0.20260430194920-3ceb1b29d72d // indirect golang.org/x/mod v0.33.0 // indirect golang.org/x/sync v0.20.0 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/sys v0.43.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 9e55c8b59..0718a8c38 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= -github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/compress v1.18.6 h1:2jupLlAwFm95+YDR+NwD2MEfFO9d4z4Prjl1XXDjuao= +github.com/klauspost/compress v1.18.6/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= 
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -167,8 +167,8 @@ go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4Len go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.podman.io/image/v5 v5.39.2-0.20260306124909-d48bc74146d6 h1:aEDmWRMDzDhCvkmGcbooyUr8iN0m9HxzvWvMb1fUrr4= go.podman.io/image/v5 v5.39.2-0.20260306124909-d48bc74146d6/go.mod h1:7TiwfxX95KG1bg4MdM6+ImDtw1Iw1ktlTiC8XCTgNwQ= -go.podman.io/storage v1.62.1-0.20260310180906-9819c3739308 h1:ItVOnUmApDtfnqEehnD5HO6hircHr5ud7lCJmNf+5Mk= -go.podman.io/storage v1.62.1-0.20260310180906-9819c3739308/go.mod h1:B83Ad8mtO0GZs7rEwb66f0Ed5G57NyKI/iJZHoJrpUE= +go.podman.io/storage v1.62.1-0.20260430194920-3ceb1b29d72d h1:JHvCWd1irnPVwjP2srHfLEs+tpHLqNq81g9XTLXmQ4g= +go.podman.io/storage v1.62.1-0.20260430194920-3ceb1b29d72d/go.mod h1:c2/RBYEPTpqJ4BQfI3nDZB5zUKXuhX1nSGgrunZV2TM= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -192,8 +192,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys 
v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes index 402433593..57aa6487c 100644 --- a/vendor/github.com/klauspost/compress/.gitattributes +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -1,2 +1,3 @@ * -text *.bin -text -diff +*.md text eol=lf diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4528059ca..804a20181 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -31,6 +31,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2d" binary: s2d @@ -57,6 +60,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2sx" binary: s2sx @@ -84,6 +90,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm archives: - @@ -91,7 +100,7 @@ archives: name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows - format: zip + formats: ['zip'] files: - unpack/* - s2/LICENSE diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 5125c1f26..fb023f2cf 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -1,693 +1,700 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. 
-* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped/zstd HTTP requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. - -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# package usage - -Use `go get github.com/klauspost/compress@latest` to add it to your project. - -This package will support the current Go version and 2 versions back. - -* Use the `nounsafe` tag to disable all use of the "unsafe" package. -* Use the `noasm` tag to disable all assembly across packages. - -Use the links above for more information on each. 
- -# changelog -* Jan 16th, 2026 [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3) - * Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102). - -* Dec 1st, 2025 - [1.18.2](https://github.com/klauspost/compress/releases/tag/v1.18.2) - * flate: Fix invalid encoding on level 9 with single value input in https://github.com/klauspost/compress/pull/1115 - * flate: reduce stateless allocations by @RXamzin in https://github.com/klauspost/compress/pull/1106 - -* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) - RETRACTED - * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 - * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 - * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 - * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 - * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 - * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 - * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 - * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 - -* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) - * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 - * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 - * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 - * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 - * s2: Improve small block compression speed w/o asm 
https://github.com/klauspost/compress/pull/1048 - * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 - * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 - -
- See changes to v1.17.x - -* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) - * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 - * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 - * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 - * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 - -* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) - * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 - * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 - * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 - * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 - * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 - -* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) - * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 - * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 - * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 - * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 - -* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) - * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 - * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 - -* Feb 21st, 2024 - 
[1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) - * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 - * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 - -* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) - * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 - * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 - -* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) - * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 - * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 - * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 - * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 - * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 -https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 - -* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) - * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 - * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 - * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 - * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 - * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 - -* Nov 15th, 2023 - 
[v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) - * fse: Fix max header size https://github.com/klauspost/compress/pull/881 - * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 - * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 - -* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) - * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 - -* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 - * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 - -* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) - * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 - * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 - * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 - * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 - * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 - * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - -
-
- See changes to v1.16.x - - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - -* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) - * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 - * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 - * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 - * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 - * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 - * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 - -* Mar 13, 
2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) - * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 - * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 - * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 - * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 - * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 - -* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) - * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 - * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 - * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 - * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 - * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 - * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 -
- -
- See changes to v1.15.x - -* Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 - * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 - * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 - * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 - -* Jan 3rd, 2023 (v1.15.14) - - * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 - * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 - * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 - * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 - -* Dec 11, 2022 (v1.15.13) - * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 - * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 
https://github.com/klauspost/compress/pull/708 - -* Oct 26, 2022 (v1.15.12) - - * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 - * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 - -* Sept 26, 2022 (v1.15.11) - - * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 - * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 - * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 - * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream 
recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. 
- -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. - -Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, it is recommended to testing when upgrading. - -
- -
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. 
[#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
- -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. 
[#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. 
[#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. 
[#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. 
[#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features -* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. 
-* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. -* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Less allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) - -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. 
[#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: fix 32 bit builds. -* June 17, 2019: Easier use in modules (less dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). -* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). 
-* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. 
-* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. -* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
- -# deflate usage - -The packages are drop-in replacements for standard library [deflate](https://godoc.org/github.com/klauspost/compress/flate), [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip), and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). Simply replace the import path to use them: - -Typical speed is about 2x of the standard library packages. - -| old import | new import | Documentation | -|------------------|---------------------------------------|-------------------------------------------------------------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages implement the same API as the standard library, so you can use the original godoc documentation: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer. stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers consider using -the stateless compression described below. 
- -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -To disable all assembly add `-tags=noasm`. This works across all packages. - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It will do compression but without maintaining any state between Write calls. - -This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -```go - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle. - -Compression is almost always worse than the fastest compression level -and each write will allocate (a little) memory. - - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. 
-* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. -* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. -* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. -* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. -* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. - - - - +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped/zstd HTTP requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# package usage + +Use `go get github.com/klauspost/compress@latest` to add it to your project. + +This package will support the current Go version and 2 versions back. + +* Use the `nounsafe` tag to disable all use of the "unsafe" package. +* Use the `noasm` tag to disable all assembly across packages. + +Use the links above for more information on each. + +# changelog + +* Feb 9th, 2026 [1.18.4](https://github.com/klauspost/compress/releases/tag/v1.18.4) + * gzhttp: Add zstandard to server handler wrapper https://github.com/klauspost/compress/pull/1121 + * zstd: Add ResetWithOptions to encoder/decoder https://github.com/klauspost/compress/pull/1122 + * gzhttp: preserve qvalue when extra parameters follow in Accept-Encoding by @analytically in https://github.com/klauspost/compress/pull/1116 + +* Jan 16th, 2026 [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3) + * Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102). 
+ +* Dec 1st, 2025 - [1.18.2](https://github.com/klauspost/compress/releases/tag/v1.18.2) + * flate: Fix invalid encoding on level 9 with single value input in https://github.com/klauspost/compress/pull/1115 + * flate: reduce stateless allocations by @RXamzin in https://github.com/klauspost/compress/pull/1106 + +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) - RETRACTED + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + +* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) + * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 + * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 + * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 + * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 + * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048 + * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 + * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 + +
+ See changes to v1.17.x + +* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) + * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 + * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 + * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 + * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 + +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - 
[1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - 
[v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+
+ See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 
2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
+ +
+ See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 
https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream 
recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. 
+ +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
+ +
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. 
[#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. 
[#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. 
[#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. 
[#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. 
[#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. 
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. 
[#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). +* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). 
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. 
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. +* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +The packages are drop-in replacements for standard library [deflate](https://godoc.org/github.com/klauspost/compress/flate), [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip), and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). Simply replace the import path to use them: + +Typical speed is about 2x of the standard library packages. + +| old import | new import | Documentation | +|------------------|---------------------------------------|-------------------------------------------------------------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages implement the same API as the standard library, so you can use the original godoc documentation: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compression described below. 
+ +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +To disable all assembly add `-tags=noasm`. This works across all packages. + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +```go + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. + +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. + + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. 
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. +* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. +* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. + + + + + diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 5f901bd0f..4b312dea3 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -407,8 +407,8 @@ func histogramSplit(b []byte, h []uint16) { for i, t := range x { v0 := &h[t] v1 := &h[y[i]] - v3 := &h[w[i]] v2 := &h[z[i]] + v3 := &h[w[i]] *v0++ *v1++ *v2++ diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go index 1b7a2cbd7..e62caf711 100644 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -1,5 +1,4 @@ //go:build !amd64 -// +build !amd64 package flate diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md index ea7324da6..27d8ed56f 100644 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -1,79 +1,79 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. 
- -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. 
Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. -All compression functions are currently only running on the calling goroutine so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k speed are typically 200MB/s per core for compression and -around 300MB/s decompression speed. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. 
Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. 
+ +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. 
+ +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md index 8b6e5c663..26d5101b3 100644 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -1,89 +1,89 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), -a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU -(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. 
-This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - -This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. - -This ensures that most functionality is well tested. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | -| `(error)` | An internal error occurred. | - - -As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. 
Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. - -Do however note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. - -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -This will initialize the decoding tables. -You can supply the complete block to `ReadTable` and it will return the data part of the block -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. 
- -For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. + +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +## News + +This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. + +This ensures that most functionality is well tested. 
+ +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. | + + +As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. 
+ +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. +You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. + +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. 
+ +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index 99ddd4af9..2d6ef64be 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // This file contains the specialisation of Decoder.Decompress4X // and Decoder.Decompress1X that use an asm implementation of thir main loops. diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go index 908c17de6..610392322 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // This file contains a generic implementation of Decoder.Decompress4X. 
package huff0 diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go index e802579c4..b97f9056f 100644 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package cpuinfo diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index fd35ea148..0e33aea44 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -78,6 +78,7 @@ func (b *blockEnc) initNewEncode() { b.recentOffsets = [3]uint32{1, 4, 8} b.litEnc.Reuse = huff0.ReusePolicyNone b.coders.setPrev(nil, nil, nil) + b.dictLitEnc = nil } // reset will reset the block for a new encode, but in the same stream, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index c1192ec38..c4de134a7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -21,7 +21,7 @@ type fastBase struct { crc *xxhash.Digest tmp [8]byte blk *blockEnc - lastDictID uint32 + lastDict *dict lowMem bool } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c1581cfcb..851799322 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -479,10 +479,13 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if 
len(e.dictTable) != len(e.table) { e.dictTable = make([]prevEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -510,13 +513,14 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -538,8 +542,8 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id } + e.lastDict = d // Reset table to initial state copy(e.longTable[:], e.dictLongTable) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 85dcd28c3..3305f0924 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -1102,10 +1102,13 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -1133,14 +1136,15 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id e.allDirty = true } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table 
+ if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1162,9 +1166,9 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id e.allDirty = true } + e.lastDict = d // Reset table to initial state { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index cf8cad00d..2fb6da112 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1040,15 +1040,18 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { // ResetDict will reset and set a dictionary if not nil func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { allDirty := e.allDirty + dictChanged := d != e.lastDict e.fastEncoderDict.Reset(d, singleBlock) if d == nil { return } // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]tableEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1065,7 +1068,6 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id allDirty = true } // Reset table to initial state diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 9180a3a58..5e104f1a4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -805,9 +805,11 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } // Init or copy dict table - if len(e.dictTable) != 
len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || d != e.lastDict { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 @@ -827,7 +829,7 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id + e.lastDict = d e.allDirty = true } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 19e730acc..0f2a00a00 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -138,11 +138,18 @@ func (e *Encoder) Reset(w io.Writer) { func (e *Encoder) ResetWithOptions(w io.Writer, opts ...EOption) error { e.o.resetOpt = true defer func() { e.o.resetOpt = false }() + hadDict := e.o.dict != nil for _, o := range opts { if err := o(&e.o); err != nil { return err } } + hasDict := e.o.dict != nil + if hadDict != hasDict { + // Dict presence changed — encoder type must be recreated. 
+ e.state.encoder = nil + e.init = sync.Once{} + } e.Reset(w) return nil } @@ -448,6 +455,12 @@ func (e *Encoder) Close() error { if s.encoder == nil { return nil } + if s.w == nil { + if len(s.filling) == 0 && !s.headerWritten && !s.eofWritten && s.nInput == 0 { + return nil + } + return errors.New("zstd: encoder has no writer") + } err := e.nextBlock(true) if err != nil { if errors.Is(s.err, ErrEncoderClosed) { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 8e0f5cac7..e217be0a1 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -42,6 +42,7 @@ func (o *encoderOptions) setDefault() { level: SpeedDefault, allLitEntropy: false, lowMem: false, + fullZero: true, } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go index d04a829b0..b8c8607b5 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 8adfebb02..2138f8091 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 0be16cefc..9576426e6 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go 
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,5 +1,4 @@ //go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go index f41932b7a..1ed18927f 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index bea1779e9..379746c96 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. 
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 1f8c3cec2..18c3703dd 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 7cec2197c..516cd9b07 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/go.podman.io/storage/check.go b/vendor/go.podman.io/storage/check.go index 3fb648d8c..8187f9ab9 100644 --- a/vendor/go.podman.io/storage/check.go +++ b/vendor/go.podman.io/storage/check.go @@ -2,6 +2,7 @@ package storage import ( "archive/tar" + "cmp" "errors" "fmt" "io" @@ -9,7 +10,6 @@ import ( "path" "path/filepath" "slices" - "sort" "strings" "sync" "time" @@ -292,14 +292,13 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) { reader := io.TeeReader(diff, counter) var wg sync.WaitGroup var archiveErr error - wg.Add(1) - go func(layerID string, diffReader io.Reader) { + wg.Go(func() { // Read the diff, one item at a time. 
- tr := tar.NewReader(diffReader) + tr := tar.NewReader(reader) hdr, err := tr.Next() for err == nil { diffHeadersByLayerMutex.Lock() - diffHeadersByLayer[layerID] = append(diffHeadersByLayer[layerID], hdr) + diffHeadersByLayer[id] = append(diffHeadersByLayer[id], hdr) diffHeadersByLayerMutex.Unlock() hdr, err = tr.Next() } @@ -307,16 +306,15 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) { archiveErr = err } // consume any trailer after the EOF marker - if _, err := io.Copy(io.Discard, diffReader); err != nil { - err = fmt.Errorf("layer %s: consume any trailer after the EOF marker: %w", layerID, err) + if _, err := io.Copy(io.Discard, reader); err != nil { + err = fmt.Errorf("layer %s: consume any trailer after the EOF marker: %w", id, err) if isReadWrite { - report.Layers[layerID] = append(report.Layers[layerID], err) + report.Layers[id] = append(report.Layers[id], err) } else { - report.ROLayers[layerID] = append(report.ROLayers[layerID], err) + report.ROLayers[id] = append(report.ROLayers[id], err) } } - wg.Done() - }(id, reader) + }) wg.Wait() diff.Close() if archiveErr != nil { @@ -690,25 +688,40 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) { return CheckReport{}, err } - // If the driver can tell us about which layers it knows about, we should have previously - // examined all of them. Any that we didn't are probably just wasted space. - // Note: if the driver doesn't support enumerating layers, it returns ErrNotSupported. 
- if err := s.startUsingGraphDriver(); err != nil { - return CheckReport{}, err - } - defer s.stopUsingGraphDriver() - layerList, err := s.graphDriver.ListLayers() - if err != nil && !errors.Is(err, drivers.ErrNotSupported) { - return CheckReport{}, err - } - if !errors.Is(err, drivers.ErrNotSupported) { - for i, id := range layerList { - if _, known := referencedLayers[id]; !known { - err := fmt.Errorf("layer %s: %w", id, ErrLayerUnaccounted) - report.Layers[id] = append(report.Layers[id], err) + if _, err := readPrimaryLayerStore(s, func(store rwLayerStore) (struct{}, error) { + // If the driver can tell us about which layers it knows about, we should have + // corresponding metadata records. + // Any layers without them are probably just wasted space. + // Note: if the driver doesn't support enumerating layers, it returns ErrNotSupported. + driverLayers, err := s.graphDriver.ListLayers() + if err != nil && !errors.Is(err, drivers.ErrNotSupported) { + return struct{}{}, err + } + if !errors.Is(err, drivers.ErrNotSupported) { + // Update the list of layers known to the layerStore, something + // might have been added recently. 
+ currentLayers, err := store.Layers() + if err != nil { + return struct{}{}, err + } + for i := range currentLayers { + id := currentLayers[i].ID + if _, known := referencedLayers[id]; !known { + referencedLayers[id] = false + } + } + + for i, id := range driverLayers { + if _, known := referencedLayers[id]; !known { + err := fmt.Errorf("layer %s: %w", id, ErrLayerUnaccounted) + report.Layers[id] = append(report.Layers[id], err) + } + report.layerOrder[id] = i + 1 } - report.layerOrder[id] = i + 1 } + return struct{}{}, nil + }); err != nil { + return CheckReport{}, err } return report, nil @@ -776,23 +789,22 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error { return errors.Is(err, ErrLayerUnaccounted) }) } - sort.Slice(layersToDelete, func(i, j int) bool { + slices.SortFunc(layersToDelete, func(a, b string) int { // we've not heard of either of them, so remove them in the order the driver suggested - if isUnaccounted(report.Layers[layersToDelete[i]]) && - isUnaccounted(report.Layers[layersToDelete[j]]) && - report.layerOrder[layersToDelete[i]] != 0 && report.layerOrder[layersToDelete[j]] != 0 { - return report.layerOrder[layersToDelete[i]] < report.layerOrder[layersToDelete[j]] + if isUnaccounted(report.Layers[a]) && isUnaccounted(report.Layers[b]) && + report.layerOrder[a] != 0 && report.layerOrder[b] != 0 { + return cmp.Compare(report.layerOrder[a], report.layerOrder[b]) } // always delete the one we've heard of first - if isUnaccounted(report.Layers[layersToDelete[i]]) && !isUnaccounted(report.Layers[layersToDelete[j]]) { - return false + if isUnaccounted(report.Layers[a]) && !isUnaccounted(report.Layers[b]) { + return 1 } // always delete the one we've heard of first - if !isUnaccounted(report.Layers[layersToDelete[i]]) && isUnaccounted(report.Layers[layersToDelete[j]]) { - return true + if !isUnaccounted(report.Layers[a]) && isUnaccounted(report.Layers[b]) { + return -1 } // we've heard of both of them; the one that's on the end 
of a longer chain goes first - return depth(layersToDelete[i]) > depth(layersToDelete[j]) // closer-to-a-notional-base layers get removed later + return -cmp.Compare(depth(a), depth(b)) // closer-to-a-notional-base layers get removed later }) // Now delete the layers that haven't been removed along with images. for _, id := range layersToDelete { @@ -821,6 +833,7 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error { } if err = s.DeleteLayer(id); err != nil { err = fmt.Errorf("deleting layer %s: %w", id, err) + } else { logrus.Debugf("deleted layer %s", id) } } @@ -1049,25 +1062,25 @@ func (c *checkDirectory) headers(hdrs []*tar.Header) { // before content when they both appear in the same directory, per // https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts // and that hard links appear after other types of entries - sort.SliceStable(hdrs, func(i, j int) bool { - if hdrs[i].Typeflag != tar.TypeLink && hdrs[j].Typeflag == tar.TypeLink { - return true + slices.SortStableFunc(hdrs, func(a, b *tar.Header) int { + if a.Typeflag != tar.TypeLink && b.Typeflag == tar.TypeLink { + return -1 } - if hdrs[i].Typeflag == tar.TypeLink && hdrs[j].Typeflag != tar.TypeLink { - return false + if a.Typeflag == tar.TypeLink && b.Typeflag != tar.TypeLink { + return 1 } - idir, ifile := path.Split(hdrs[i].Name) - jdir, jfile := path.Split(hdrs[j].Name) - if idir != jdir { - return hdrs[i].Name < hdrs[j].Name + adir, afile := path.Split(a.Name) + bdir, bfile := path.Split(b.Name) + if adir != bdir { + return cmp.Compare(a.Name, b.Name) } - if ifile == archive.WhiteoutOpaqueDir { - return true + if afile == archive.WhiteoutOpaqueDir { + return -1 } - if strings.HasPrefix(ifile, archive.WhiteoutPrefix) && !strings.HasPrefix(jfile, archive.WhiteoutPrefix) { - return true + if strings.HasPrefix(afile, archive.WhiteoutPrefix) && !strings.HasPrefix(bfile, archive.WhiteoutPrefix) { + return -1 } - return false + return 0 }) for _, hdr := range 
hdrs { c.header(hdr) @@ -1147,14 +1160,14 @@ func compareCheckSubdirectory(path string, a, b *checkDirectory, idmap *idtools. // compareCheckDirectory walks two directory trees and returns a sorted list of differences func compareCheckDirectory(a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string { diff := compareCheckSubdirectory("", a, b, idmap, ignore) - sort.Slice(diff, func(i, j int) bool { - if strings.Compare(diff[i][1:], diff[j][1:]) < 0 { - return true + slices.SortFunc(diff, func(a, b string) int { + if a[1:] < b[1:] { + return -1 } - if diff[i][0] == '-' { - return true + if a[0] == '-' { + return -1 } - return false + return 1 }) return diff } diff --git a/vendor/go.podman.io/storage/containers.go b/vendor/go.podman.io/storage/containers.go index 5c1045377..ebdd19c59 100644 --- a/vendor/go.podman.io/storage/containers.go +++ b/vendor/go.podman.io/storage/containers.go @@ -74,7 +74,7 @@ type Container struct { // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value // is set before using it. - Created time.Time `json:"created,omitempty"` + Created time.Time `json:"created"` // UIDMap and GIDMap are used for setting up a container's root // filesystem for use inside of a user namespace where UID mapping is diff --git a/vendor/go.podman.io/storage/deprecated.go b/vendor/go.podman.io/storage/deprecated.go deleted file mode 100644 index b1e8baf1c..000000000 --- a/vendor/go.podman.io/storage/deprecated.go +++ /dev/null @@ -1,213 +0,0 @@ -package storage - -import ( - "io" - "time" - - digest "github.com/opencontainers/go-digest" - drivers "go.podman.io/storage/drivers" - "go.podman.io/storage/pkg/archive" -) - -// The type definitions in this file exist ONLY to maintain formal API compatibility. -// DO NOT ADD ANY NEW METHODS TO THESE INTERFACES. 
- -// ROFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ROFileBasedStore interface { - Locker - Load() error - ReloadIfChanged() error -} - -// RWFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type RWFileBasedStore interface { - Save() error -} - -// FileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type FileBasedStore interface { - ROFileBasedStore - RWFileBasedStore -} - -// ROMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ROMetadataStore interface { - Metadata(id string) (string, error) -} - -// RWMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type RWMetadataStore interface { - SetMetadata(id, metadata string) error -} - -// MetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type MetadataStore interface { - ROMetadataStore - RWMetadataStore -} - -// ROBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. 
-// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ROBigDataStore interface { - BigData(id, key string) ([]byte, error) - BigDataSize(id, key string) (int64, error) - BigDataDigest(id, key string) (digest.Digest, error) - BigDataNames(id string) ([]string, error) -} - -// RWImageBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type RWImageBigDataStore interface { - SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error -} - -// ContainerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ContainerBigDataStore interface { - ROBigDataStore - SetBigData(id, key string, data []byte) error -} - -// ROLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ROLayerBigDataStore interface { - BigData(id, key string) (io.ReadCloser, error) - BigDataNames(id string) ([]string, error) -} - -// RWLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type RWLayerBigDataStore interface { - SetBigData(id, key string, data io.Reader) error -} - -// LayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. 
-// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type LayerBigDataStore interface { - ROLayerBigDataStore - RWLayerBigDataStore -} - -// FlaggableStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type FlaggableStore interface { - ClearFlag(id string, flag string) error - SetFlag(id string, flag string, value any) error -} - -// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ContainerStore interface { - FileBasedStore - MetadataStore - ContainerBigDataStore - FlaggableStore - Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) - SetNames(id string, names []string) error - AddNames(id string, names []string) error - RemoveNames(id string, names []string) error - Get(id string) (*Container, error) - Exists(id string) bool - Delete(id string) error - Wipe() error - Lookup(name string) (string, error) - Containers() ([]Container, error) -} - -// ROImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ROImageStore interface { - ROFileBasedStore - ROMetadataStore - ROBigDataStore - Exists(id string) bool - Get(id string) (*Image, error) - Lookup(name string) (string, error) - Images() ([]Image, error) - ByDigest(d digest.Digest) ([]*Image, error) -} - -// ImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. 
-// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ImageStore interface { - ROImageStore - RWFileBasedStore - RWMetadataStore - RWImageBigDataStore - FlaggableStore - Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) - SetNames(id string, names []string) error - AddNames(id string, names []string) error - RemoveNames(id string, names []string) error - Delete(id string) error - Wipe() error -} - -// ROLayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. -type ROLayerStore interface { - ROFileBasedStore - ROMetadataStore - ROLayerBigDataStore - Exists(id string) bool - Get(id string) (*Layer, error) - Status() ([][2]string, error) - Changes(from, to string) ([]archive.Change, error) - Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) - DiffSize(from, to string) (int64, error) - Size(name string) (int64, error) - Lookup(name string) (string, error) - LayersByCompressedDigest(d digest.Digest) ([]Layer, error) - LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) - Layers() ([]Layer, error) -} - -// LayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. -// -// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. 
-type LayerStore interface { - ROLayerStore - RWFileBasedStore - RWMetadataStore - FlaggableStore - RWLayerBigDataStore - Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) - CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any) (layer *Layer, err error) - Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any, diff io.Reader) (*Layer, int64, error) - SetNames(id string, names []string) error - AddNames(id string, names []string) error - RemoveNames(id string, names []string) error - Delete(id string) error - Wipe() error - Mount(id string, options drivers.MountOpts) (string, error) - Unmount(id string, force bool) (bool, error) - Mounted(id string) (int, error) - ParentOwners(id string) (uids, gids []int, err error) - ApplyDiff(to string, diff io.Reader) (int64, error) - DifferTarget(id string) (string, error) - LoadLocked() error - PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) -} diff --git a/vendor/go.podman.io/storage/drivers/btrfs/btrfs.go b/vendor/go.podman.io/storage/drivers/btrfs/btrfs.go index aba898ed5..7b80c9513 100644 --- a/vendor/go.podman.io/storage/drivers/btrfs/btrfs.go +++ b/vendor/go.podman.io/storage/drivers/btrfs/btrfs.go @@ -33,12 +33,12 @@ import ( "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" graphdriver "go.podman.io/storage/drivers" + "go.podman.io/storage/internal/driver" "go.podman.io/storage/internal/tempdir" "go.podman.io/storage/pkg/directory" "go.podman.io/storage/pkg/fileutils" "go.podman.io/storage/pkg/idtools" "go.podman.io/storage/pkg/mount" - "go.podman.io/storage/pkg/parsers" 
"go.podman.io/storage/pkg/system" "golang.org/x/sys/unix" ) @@ -97,23 +97,34 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { var options btrfsOptions userDiskQuota := false for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) + driver, key, val, err := driver.ParseDriverOption(option) if err != nil { return options, userDiskQuota, err } - key = strings.ToLower(key) + if driver != "" && driver != "btrfs" { + // do not parse options meant for another storage driver + continue + } + switch key { - case "btrfs.min_space": + case "min_space": minSpace, err := units.RAMInBytes(val) if err != nil { return options, userDiskQuota, err } userDiskQuota = true options.minSpace = uint64(minSpace) - case "btrfs.mountopt": + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return options, userDiskQuota, err + } + userDiskQuota = true + options.size = uint64(size) + case "mountopt": return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options") default: - return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option) + return options, userDiskQuota, fmt.Errorf("unknown option %q (%q)", key, option) } } return options, userDiskQuota, nil @@ -157,6 +168,12 @@ func (d *Driver) Cleanup() error { return mount.Unmount(d.home) } +// SyncMode returns the sync mode configured for the driver. +// Btrfs does not support sync mode configuration, always returns SyncModeNone. +func (d *Driver) SyncMode() graphdriver.SyncMode { + return graphdriver.SyncModeNone +} + func free(p *C.char) { C.free(unsafe.Pointer(p)) } @@ -466,11 +483,20 @@ func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idt // CreateReadWrite creates a layer that is writable for use as a container // file system. 
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) + return d.create(id, parent, opts, false) } // Create the filesystem with given id. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) error { + quota, err := d.parseStorageOpt(opts, readOnly) + if err != nil { + return err + } + quotas := d.quotasDir() subvolumes := d.subvolumesDir() if err := os.MkdirAll(subvolumes, 0o700); err != nil { @@ -497,24 +523,14 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { } } - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - if _, ok := storageOpt["size"]; ok { - driver := &Driver{} - if err := d.parseStorageOpt(storageOpt, driver); err != nil { - return err - } - - if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + if quota != nil { + if err := d.setStorageSize(path.Join(subvolumes, id), *quota); err != nil { return err } if err := os.MkdirAll(quotas, 0o700); err != nil { return err } - if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil { + if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(quota.size)), 0o644); err != nil { return err } } @@ -527,8 +543,27 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { return label.Relabel(path.Join(subvolumes, id), mountLabel, false) } -// Parse btrfs storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { +// layerQuota contains per-layer quota settings. +type layerQuota struct { + size uint64 +} + +// parseStorageOpt parses CreateOpts.StorageOpt. +// Returns a *layerQuota if a quota should be applied, nil otherwise. 
+func (d *Driver) parseStorageOpt(opts *graphdriver.CreateOpts, readOnly bool) (*layerQuota, error) { + var storageOpt map[string]string = nil // Iterating over a nil map is safe + if opts != nil { + storageOpt = opts.StorageOpt + } + + res := layerQuota{} + needQuota := false + + if !readOnly && d.options.size > 0 { + res.size = d.options.size + needQuota = true + } + // Read size to change the subvolume disk quota per container for key, val := range storageOpt { key := strings.ToLower(key) @@ -536,23 +571,27 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e case "size": size, err := units.RAMInBytes(val) if err != nil { - return err + return nil, err } - driver.options.size = uint64(size) + res.size = uint64(size) + needQuota = true default: - return fmt.Errorf("unknown option %s (%q)", key, storageOpt) + return nil, fmt.Errorf("unknown option %s (%q)", key, storageOpt) } } - return nil + if needQuota { + return &res, nil + } + return nil, nil } // Set btrfs storage size -func (d *Driver) setStorageSize(dir string, driver *Driver) error { - if driver.options.size <= 0 { - return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) +func (d *Driver) setStorageSize(dir string, quota layerQuota) error { + if quota.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(quota.size))) } - if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + if d.options.minSpace > 0 && quota.size < d.options.minSpace { return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) } @@ -560,7 +599,7 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error { return err } - if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + if err := subvolLimitQgroup(dir, quota.size); err != nil { return err } diff --git a/vendor/go.podman.io/storage/drivers/driver.go 
b/vendor/go.podman.io/storage/drivers/driver.go index 38706dc99..1ca50b646 100644 --- a/vendor/go.podman.io/storage/drivers/driver.go +++ b/vendor/go.podman.io/storage/drivers/driver.go @@ -27,6 +27,40 @@ const ( FsMagicUnsupported = FsMagic(0x00000000) ) +// SyncMode defines when filesystem synchronization occurs during layer creation. +type SyncMode int + +const ( + // SyncModeNone - no synchronization + SyncModeNone SyncMode = iota + // SyncModeFilesystem - use syncfs() before layer marked as present + SyncModeFilesystem +) + +// String returns the string representation of the sync mode +func (m SyncMode) String() string { + switch m { + case SyncModeNone: + return "none" + case SyncModeFilesystem: + return "filesystem" + default: + return "unknown" + } +} + +// ParseSyncMode converts a string to SyncMode +func ParseSyncMode(s string) (SyncMode, error) { + switch strings.ToLower(strings.TrimSpace(s)) { + case "", "none": + return SyncModeNone, nil + case "filesystem": + return SyncModeFilesystem, nil + default: + return SyncModeNone, fmt.Errorf("invalid sync mode: %q", s) + } +} + var ( // All registered drivers drivers map[string]InitFunc @@ -169,6 +203,8 @@ type ProtoDriver interface { AdditionalImageStores() []string // Dedup performs deduplication of the driver's storage. Dedup(DedupArgs) (DedupResult, error) + // SyncMode returns the sync mode configured for the driver. + SyncMode() SyncMode } // DiffDriver is the interface to use to implement graph diffs @@ -388,6 +424,7 @@ func init() { // MustRegister registers an InitFunc for the driver, or panics. // It is suitable for package’s init() sections. +// If you are adding a call to this, update also isKnownDriverName in storage/internal/opts/driver.go. 
func MustRegister(name string, initFunc InitFunc) { if err := Register(name, initFunc); err != nil { panic(fmt.Sprintf("failed to register containers/storage graph driver %q: %v", name, err)) @@ -395,6 +432,7 @@ func MustRegister(name string, initFunc InitFunc) { } // Register registers an InitFunc for the driver. +// If you are adding a call to this, update also isKnownDriverName in storage/internal/opts/driver.go. func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { return fmt.Errorf("name already registered %s", name) diff --git a/vendor/go.podman.io/storage/drivers/overlay/check.go b/vendor/go.podman.io/storage/drivers/overlay/check.go index 7caf50ea5..bd77ca019 100644 --- a/vendor/go.podman.io/storage/drivers/overlay/check.go +++ b/vendor/go.podman.io/storage/drivers/overlay/check.go @@ -240,11 +240,12 @@ func supportsIdmappedLowerLayers(home string) (bool, error) { upperDir := filepath.Join(layerDir, "upper") workDir := filepath.Join(layerDir, "work") - _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0) - _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0) - _ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0) - _ = idtools.MkdirAs(upperDir, 0o700, 0, 0) - _ = idtools.MkdirAs(workDir, 0o700, 0, 0) + idPair := idtools.IDPair{UID: 0, GID: 0} + _ = idtools.MkdirAndChown(mergedDir, 0o700, idPair) + _ = idtools.MkdirAndChown(lowerDir, 0o700, idPair) + _ = idtools.MkdirAndChown(lowerMappedDir, 0o700, idPair) + _ = idtools.MkdirAndChown(upperDir, 0o700, idPair) + _ = idtools.MkdirAndChown(workDir, 0o700, idPair) mapping := []idtools.IDMap{ { @@ -296,11 +297,12 @@ func supportsDataOnlyLayers(home string) (bool, error) { upperDir := filepath.Join(layerDir, "upper") workDir := filepath.Join(layerDir, "work") - _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0) - _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0) - _ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0) - _ = idtools.MkdirAs(upperDir, 0o700, 0, 0) - _ = idtools.MkdirAs(workDir, 0o700, 0, 0) + idPair := 
idtools.IDPair{UID: 0, GID: 0} + _ = idtools.MkdirAndChown(mergedDir, 0o700, idPair) + _ = idtools.MkdirAndChown(lowerDir, 0o700, idPair) + _ = idtools.MkdirAndChown(lowerDirDataOnly, 0o700, idPair) + _ = idtools.MkdirAndChown(upperDir, 0o700, idPair) + _ = idtools.MkdirAndChown(workDir, 0o700, idPair) opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir) flags := uintptr(0) diff --git a/vendor/go.podman.io/storage/drivers/overlay/overlay.go b/vendor/go.podman.io/storage/drivers/overlay/overlay.go index 00974c890..b12366852 100644 --- a/vendor/go.podman.io/storage/drivers/overlay/overlay.go +++ b/vendor/go.podman.io/storage/drivers/overlay/overlay.go @@ -28,6 +28,7 @@ import ( "go.podman.io/storage/drivers/overlayutils" "go.podman.io/storage/drivers/quota" "go.podman.io/storage/internal/dedup" + "go.podman.io/storage/internal/driver" "go.podman.io/storage/internal/staging_lockfile" "go.podman.io/storage/internal/tempdir" "go.podman.io/storage/pkg/archive" @@ -38,7 +39,6 @@ import ( "go.podman.io/storage/pkg/idmap" "go.podman.io/storage/pkg/idtools" "go.podman.io/storage/pkg/mount" - "go.podman.io/storage/pkg/parsers" "go.podman.io/storage/pkg/system" "go.podman.io/storage/pkg/unshare" "golang.org/x/sys/unix" @@ -48,8 +48,7 @@ import ( var untar = chrootarchive.UntarUncompressed const ( - defaultPerms = os.FileMode(0o555) - mountProgramFlagFile = ".has-mount-program" + defaultPerms = os.FileMode(0o555) ) // This backend uses the overlay union filesystem for containers @@ -79,18 +78,32 @@ const ( // syscall. A hard upper limit of 500 lower layers is enforced to ensure // that mounts do not fail due to length. 
-const ( - linkDir = "l" - stagingDir = "staging" - tempDirName = "tempdirs" - lowerFile = "lower" - maxDepth = 500 +const ( // Paths within the driver’s home directory + mountProgramFlagFile = ".has-mount-program" + linkDir = "l" + stagingDir = "staging" + tempDirName = "tempdirs" +) - stagingLockFile = "staging.lock" +const ( // Paths within a per-layer directory + lowerFile = "lower" + // lowerLayersFile references lower layers directly by layer ID + // instead of going through the l/ symlinks. The code appends + // "/diff" itself when consuming entries. It is preferred over + // lowerFile when present. The old lowerFile is still written + // for backward compatibility with older tools. + lowerLayersFile = "lower-layers" +) +const ( // Keys within DriverWithDifferOutput.Artifacts tocArtifact = "toc" fsVerityDigestsArtifact = "fs-verity-digests" +) + +const stagingLockFile = "staging.lock" +const ( + maxDepth = 500 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the @@ -113,6 +126,7 @@ type overlayOptions struct { ignoreChownErrors bool forceMask *os.FileMode useComposefs bool + syncMode graphdriver.SyncMode } // Driver contains information about the home directory and the list of active mounts that are created using this driver. @@ -243,7 +257,7 @@ func checkAndRecordOverlaySupport(home, runhome string) (bool, error) { return false, errors.New(overlayCacheText) } } else { - supportsDType, err = supportsOverlay(home, 0, 0) + supportsDType, err = supportsOverlay(home, idtools.IDPair{UID: 0, GID: 0}) if err != nil { os.Remove(filepath.Join(home, linkDir)) os.Remove(home) @@ -326,13 +340,22 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) runhome := filepath.Join(options.RunRoot, filepath.Base(home)) - // Create the driver home dir + // Create the driver home dir. 
+ // NOTE: the l/ subdirectory currently also serves as an anchor that + // prevents the home directory from being removed when all layers are + // deleted. If l/ is dropped entirely in the future, an alternative + // mechanism (e.g. a sentinel file) must be put in place to keep the + // home directory around. Without it, supportsOverlay() on the error + // path and checkAndRecordOverlaySupport() would successfully rmdir + // the home, breaking XFS project quotas set on the directory and + // causing ScanPriorDrivers() to no longer detect the overlay driver + // as in use. if err := os.MkdirAll(path.Join(home, linkDir), 0o755); err != nil { return nil, err } if options.ImageStore != "" { - if err := idtools.MkdirAllAs(path.Join(options.ImageStore, linkDir), 0o755, 0, 0); err != nil { + if err := idtools.MkdirAllAndChown(path.Join(options.ImageStore, linkDir), 0o755, idtools.IDPair{UID: 0, GID: 0}); err != nil { return nil, err } } @@ -420,6 +443,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } + // Clean up stale tempdirs early, before MakePrivate. 
+ if err := tempdir.RecoverStaleDirs(filepath.Join(home, tempDirName)); err != nil { + return nil, fmt.Errorf("overlay: recover stale temp dirs: %w", err) + } + if !opts.skipMountHome { if err := mount.MakePrivate(home); err != nil { return nil, fmt.Errorf("overlay: failed to make mount private: %w", err) @@ -465,17 +493,20 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } func parseOptions(options []string) (*overlayOptions, error) { - o := &overlayOptions{} + o := &overlayOptions{ + syncMode: graphdriver.SyncModeNone, + } for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) + driver, key, val, err := driver.ParseDriverOption(option) if err != nil { return nil, err } - trimkey := strings.ToLower(key) - trimkey = strings.TrimPrefix(trimkey, "overlay.") - trimkey = strings.TrimPrefix(trimkey, "overlay2.") - trimkey = strings.TrimPrefix(trimkey, ".") - switch trimkey { + if driver != "" && driver != "overlay" && driver != "overlay2" { + // do not parse options meant for another storage driver + continue + } + + switch key { case "override_kernel_check": logrus.Debugf("overlay: override_kernel_check option was specified, but is no longer necessary") case "mountopt": @@ -593,8 +624,24 @@ func parseOptions(options []string) (*overlayOptions, error) { } m := os.FileMode(mask) o.forceMask = &m + case "sync": + logrus.Debugf("overlay: sync=%s", val) + mode, err := graphdriver.ParseSyncMode(val) + if err != nil { + return nil, fmt.Errorf("invalid sync mode for overlay driver: %w", err) + } + // SyncModeNone and SyncModeFilesystem do not need any special handling because + // the overlay storage is always on the same file system as the metadata, thus + // the Syncfs() in layers.go covers also any file written by the overlay driver. + switch mode { + case graphdriver.SyncModeNone, graphdriver.SyncModeFilesystem: + // Nothing to do. 
+ default: + return nil, fmt.Errorf("invalid mode for overlay driver: %q", val) + } + o.syncMode = mode default: - return nil, fmt.Errorf("overlay: unknown option %s", key) + return nil, fmt.Errorf("unknown option %q (%q)", key, option) } } return o, nil @@ -648,10 +695,10 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { // Do nothing. default: needsMountProgram, err := scanForMountProgramIndicators(home) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return false, err } - if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !os.IsNotExist(err) { + if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !errors.Is(err, fs.ErrNotExist) { return false, err } if needsMountProgram { @@ -663,7 +710,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { for _, dir := range []string{home, runhome} { if err := fileutils.Exists(dir); err != nil { - _ = idtools.MkdirAllAs(dir, 0o700, 0, 0) + _ = idtools.MkdirAllAndChown(dir, 0o700, idtools.IDPair{UID: 0, GID: 0}) } } @@ -671,7 +718,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { return supportsDType, nil } -func supportsOverlay(home string, rootUID, rootGID int) (supportsDType bool, err error) { +func supportsOverlay(home string, rootIDPair idtools.IDPair) (supportsDType bool, err error) { selinuxLabelTest := selinux.PrivContainerMountLabel() logLevel := logrus.ErrorLevel @@ -714,12 +761,12 @@ func supportsOverlay(home string, rootUID, rootGID int) (supportsDType bool, err _ = os.RemoveAll(layerDir) _ = os.Remove(home) }() - _ = idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID) - _ = idtools.MkdirAs(lower1Dir, 0o700, rootUID, rootGID) - _ = idtools.MkdirAs(lower2Dir, 0o700, rootUID, rootGID) - _ = idtools.MkdirAs(lower2Subdir, 0o700, rootUID, rootGID) - _ = idtools.MkdirAs(upperDir, 0o700, 
rootUID, rootGID) - _ = idtools.MkdirAs(workDir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAndChown(mergedDir, 0o700, rootIDPair) + _ = idtools.MkdirAndChown(lower1Dir, 0o700, rootIDPair) + _ = idtools.MkdirAndChown(lower2Dir, 0o700, rootIDPair) + _ = idtools.MkdirAndChown(lower2Subdir, 0o700, rootIDPair) + _ = idtools.MkdirAndChown(upperDir, 0o700, rootIDPair) + _ = idtools.MkdirAndChown(workDir, 0o700, rootIDPair) f, err := os.Create(lower2SubdirFile) if err != nil { logrus.Debugf("Unable to create test file: %v", err) @@ -865,6 +912,11 @@ func (d *Driver) Cleanup() error { return mount.Unmount(d.home) } +// SyncMode returns the sync mode configured for the driver. +func (d *Driver) SyncMode() graphdriver.SyncMode { + return d.options.syncMode +} + // pruneStagingDirectories cleans up any staging directory that was leaked. // It returns whether any staging directory is still present. func (d *Driver) pruneStagingDirectories() bool { @@ -951,46 +1003,16 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } - if opts == nil { - opts = &graphdriver.CreateOpts{ - StorageOpt: map[string]string{}, - } - } - if d.options.forceMask != nil && d.options.mountProgram == "" { return fmt.Errorf("overlay: force_mask option for writeable layers is only supported with a mount_program") } - if _, ok := opts.StorageOpt["size"]; !ok { - if opts.StorageOpt == nil { - opts.StorageOpt = map[string]string{} - } - opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) - } - - if _, ok := opts.StorageOpt["inodes"]; !ok { - if opts.StorageOpt == nil { - opts.StorageOpt = map[string]string{} - } - opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10) - } - return d.create(id, parent, opts, false) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - if opts != nil && len(opts.StorageOpt) != 0 { - if _, ok := opts.StorageOpt["size"]; ok { - return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") - } - - if _, ok := opts.StorageOpt["inodes"]; ok { - return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers") - } - } - return d.create(id, parent, opts, true) } @@ -1038,6 +1060,11 @@ func (d *Driver) getLayerPermissions(parent string, uidMaps, gidMaps []idtools.I } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) { + quota, err := d.parseStorageOpt(opts, readOnly) // Do this even for read-only layers, to allow rejecting quota options + if err != nil { + return err + } + dir, homedir, _ := d.dir2(id, readOnly) disableQuota := readOnly @@ -1051,7 +1078,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl } // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil { + if err := idtools.MkdirAllAndChown(path.Join(homedir, linkDir), 0o755, idtools.IDPair{UID: 0, GID: 0}); err != nil { return err } @@ -1087,19 +1114,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl }() if d.quotaCtl != nil && !disableQuota { - quota := quota.Quota{} - if opts != nil && len(opts.StorageOpt) > 0 { - driver := &Driver{} - if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { - return err - } - if driver.options.quota.Size > 0 { - quota.Size = driver.options.quota.Size - } - if driver.options.quota.Inodes > 0 { - quota.Inodes = driver.options.quota.Inodes - } - } // Set container disk quota limit // If it is set to 0, we will track the disk usage, but not enforce a limit if err := d.quotaCtl.SetQuota(dir, quota); err != 
nil { @@ -1108,7 +1122,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl } diff := path.Join(dir, "diff") - if err := idtools.MkdirAs(diff, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil { + if err := idtools.MkdirAndChown(diff, forcedSt.Mode, forcedSt.IDs); err != nil { return err } @@ -1131,88 +1145,82 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl return err } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil { + if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0o700, forcedSt.IDs); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil { + if err := idtools.MkdirAndChown(path.Join(dir, "merged"), 0o700, forcedSt.IDs); err != nil { return err } // if no parent directory, create a dummy lower directory and skip writing a "lowers" file if parent == "" { - return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID) + return idtools.MkdirAndChown(path.Join(dir, "empty"), 0o700, forcedSt.IDs) } - lower, err := d.getLower(parent) + lower, err := d.getLowerForParent(parent) if err != nil { return err } - if lower != "" { - if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil { - return err - } + if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil { + return err + } + + // Write a lower-layers file referencing layers by ID instead of + // l/ symlink references. The reading side appends "/diff" itself. 
+ parentLowerLayerIDs, err := d.getLowerLayerIDs(parent) + if err != nil { + return err + } + layerLowerLayerIDs := strings.Join(append([]string{parent}, parentLowerLayerIDs...), ":") + if err := os.WriteFile(path.Join(dir, lowerLayersFile), []byte(layerLowerLayerIDs), 0o666); err != nil { + return err } return nil } // Parse overlay storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { +func (d *Driver) parseStorageOpt(opts *graphdriver.CreateOpts, readOnly bool) (quota.Quota, error) { + var storageOpt map[string]string = nil // Iterating over a nil map is safe + if opts != nil { + storageOpt = opts.StorageOpt + } + + res := quota.Quota{} + + if !readOnly { + res.Size = d.options.quota.Size + res.Inodes = d.options.quota.Inodes + } + // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": + if readOnly { + return quota.Quota{}, fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } size, err := units.RAMInBytes(val) if err != nil { - return err + return quota.Quota{}, err } - driver.options.quota.Size = uint64(size) + res.Size = uint64(size) case "inodes": + if readOnly { + return quota.Quota{}, fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers") + } inodes, err := strconv.ParseUint(val, 10, 64) if err != nil { - return err + return quota.Quota{}, err } - driver.options.quota.Inodes = inodes + res.Inodes = inodes default: - return fmt.Errorf("unknown option %s", key) + return quota.Quota{}, fmt.Errorf("unknown option %s", key) } } - return nil -} - -func (d *Driver) getLower(parent string) (string, error) { - parentDir := d.dir(parent) - - // Ensure parent exists - if err := fileutils.Lexists(parentDir); err != nil { - return "", err - } - - // Read Parent link fileA - parentLink, err := os.ReadFile(path.Join(parentDir, "link")) - if err != nil { - if !os.IsNotExist(err) { - return 
"", err - } - logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link")) - if err := d.recreateSymlinks(); err != nil { - return "", fmt.Errorf("recreating the links: %w", err) - } - parentLink, err = os.ReadFile(path.Join(parentDir, "link")) - if err != nil { - return "", err - } - } - lowers := []string{path.Join(linkDir, string(parentLink))} - - parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile)) - if err == nil { - parentLowers := strings.SplitSeq(string(parentLower), ":") - lowers = slices.AppendSeq(lowers, parentLowers) - } - return strings.Join(lowers, ":"), nil + return res, nil } func (d *Driver) dir(id string) string { @@ -1256,38 +1264,98 @@ func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { return newpath, homedir, false } -func (d *Driver) getLowerDirs(id string) ([]string, error) { - var lowersArray []string - lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile)) +// getLowerForParent returns the contents of lowerFile for a child layer of parent. +// +// This should only be used to construct a lowerFile for compatibility; +// new code should rely on lowerLayersFile instead. +func (d *Driver) getLowerForParent(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if err := fileutils.Lexists(parentDir); err != nil { + return "", err + } + + parentLink, err := os.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { - for s := range strings.SplitSeq(string(lowers), ":") { - lower := d.dir(s) - lp, err := os.Readlink(lower) - // if the link does not exist, we lost the symlinks during a sudden reboot. - // Let's go ahead and recreate those symlinks. 
+ parentLowers := strings.SplitSeq(string(parentLower), ":") + lowers = slices.AppendSeq(lowers, parentLowers) + } + return strings.Join(lowers, ":"), nil +} + +// getLowerLayerIDs returns a list of lower layer IDs for a layer id; +// typically the contents of lowerLayersFile, falling back to lowerFile. +// If the layer has neither of the files, returns an empty list without reporting an error. +func (d *Driver) getLowerLayerIDs(id string) ([]string, error) { + dir := d.dir(id) + lowerLayers, err := os.ReadFile(path.Join(dir, lowerLayersFile)) + switch { + case err == nil: + return strings.Split(string(lowerLayers), ":"), nil + + case errors.Is(err, fs.ErrNotExist): + lowers, err := os.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + return nil, err + } + var res []string + for relLowerLink := range strings.SplitSeq(string(lowers), ":") { + lowerLink := d.dir(relLowerLink) // This is an invalid use of dir() (the input is supposed to be a layer ID) but pre-existing + lp, err := os.Readlink(lowerLink) if err != nil { - if os.IsNotExist(err) { - logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. 
It might be best wipe the storage to avoid further errors due to storage corruption.", lower) - if err := d.recreateSymlinks(); err != nil { - return nil, fmt.Errorf("recreating the missing symlinks: %w", err) - } - // let's call Readlink on lower again now that we have recreated the missing symlinks - lp, err = os.Readlink(lower) - if err != nil { - return nil, err - } - } else { - return nil, err - } + return nil, err } - lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp)))) + lowerID := filepath.Base(filepath.Dir(lp)) + res = append(res, lowerID) } - } else if !os.IsNotExist(err) { + return res, nil + + default: return nil, err } +} + +// getLowerDirs returns a list of lower directories for a layer id; +// the directories may be symbolic links (do not call redirectDiffIfAdditionalLayer). +func (d *Driver) getLowerDirs(id string) ([]string, error) { + lowerLayerIDs, err := d.getLowerLayerIDs(id) + if err != nil { + return nil, err + } + lowersArray := make([]string, 0, len(lowerLayerIDs)) + for _, lowerID := range lowerLayerIDs { + lowerDir := d.dir(lowerID) + lowersArray = append(lowersArray, path.Join(lowerDir, "diff")) + } return lowersArray, nil } +// getLowerDiffPaths returns a list of lower diff paths for a layer id; +// the paths have redirectDiffIfAdditionalLayer applied. 
+func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + for i, l := range layers { + layers[i], err = redirectDiffIfAdditionalLayer(l, false) + if err != nil { + return nil, err + } + } + return layers, nil +} + func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string { if uidMaps != nil { var uids, gids bytes.Buffer @@ -1329,13 +1397,13 @@ func (d *Driver) removeCommon(id string, cleanup func(string) error) error { if err == nil { linkPath := path.Join(d.home, linkDir, string(lid)) if err := cleanup(linkPath); err != nil { - logrus.Debugf("Failed to remove link: %v", err) + logrus.Warnf("Failed to remove link: %v", err) } } d.releaseAdditionalLayerByID(id) - if err := cleanup(dir); err != nil && !os.IsNotExist(err) { + if err := cleanup(dir); err != nil && !errors.Is(err, fs.ErrNotExist) { return err } if d.quotaCtl != nil { @@ -1390,112 +1458,6 @@ func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { return t.Cleanup, nil } -// recreateSymlinks goes through the driver's home directory and checks if the diff directory -// under each layer has a symlink created for it under the linkDir. If the symlink does not -// exist, it creates them -func (d *Driver) recreateSymlinks() error { - // We have at most 3 corrective actions per layer, so 10 iterations is plenty. 
- const maxIterations = 10 - - // List all the directories under the home directory - dirs, err := os.ReadDir(d.home) - if err != nil { - return fmt.Errorf("reading driver home directory %q: %w", d.home, err) - } - // This makes the link directory if it doesn't exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { - return err - } - // Keep looping as long as we take some corrective action in each iteration - var errs error - madeProgress := true - iterations := 0 - for madeProgress { - errs = nil - madeProgress = false - // Check that for each layer, there's a link in "l" with the name in - // the layer's "link" file that points to the layer's "diff" directory. - for _, dir := range dirs { - // Skip over the linkDir, stagingDir, tempDirName and anything that is not a directory - if dir.Name() == linkDir || dir.Name() == stagingDir || dir.Name() == tempDirName || !dir.IsDir() { - continue - } - // Read the "link" file under each layer to get the name of the symlink - data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link")) - if err != nil { - errs = errors.Join(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err)) - continue - } - linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) - // Check if the symlink exists, and if it doesn't, create it again with the - // name we got from the "link" file - err = fileutils.Lexists(linkPath) - if err != nil && os.IsNotExist(err) { - if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { - errs = errors.Join(errs, err) - continue - } - madeProgress = true - } else if err != nil { - errs = errors.Join(errs, err) - continue - } - } - - // linkDirFullPath is the full path to the linkDir - linkDirFullPath := filepath.Join(d.home, "l") - // Now check if we somehow lost a "link" file, by making sure - // that each symlink we have corresponds to one. 
- links, err := os.ReadDir(linkDirFullPath) - if err != nil { - errs = errors.Join(errs, err) - continue - } - // Go through all of the symlinks in the "l" directory - for _, link := range links { - // Read the symlink's target, which should be "../$layer/diff" - target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name())) - if err != nil { - errs = errors.Join(errs, err) - continue - } - targetComponents := strings.Split(target, string(os.PathSeparator)) - if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { - errs = errors.Join(errs, fmt.Errorf("link target of %q looks weird: %q", link, target)) - // force the link to be recreated on the next pass - if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil { - if !os.IsNotExist(err) { - errs = errors.Join(errs, fmt.Errorf("removing link %q: %w", link, err)) - } // else don’t report any error, but also don’t set madeProgress. - continue - } - madeProgress = true - continue - } - // Reconstruct the name of the target's link file and check that - // it has the basename of our symlink in it. - targetID := targetComponents[1] - linkFile := filepath.Join(d.dir(targetID), "link") - data, err := os.ReadFile(linkFile) - if err != nil || string(data) != link.Name() { - // NOTE: If two or more links point to the same target, we will update linkFile - // with every value of link.Name(), and set madeProgress = true every time. - if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil { - errs = errors.Join(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) - continue - } - madeProgress = true - } - } - iterations++ - if iterations >= maxIterations { - errs = errors.Join(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) - break - } - } - return errs -} - // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { return d.get(id, false, options) @@ -1515,10 +1477,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if err != nil { return "", err } + rootIDs := idtools.IDPair{UID: rootUID, GID: rootGID} mergedDir := d.getMergedDir(id, dir, inAdditionalStore) // Attempt to create the merged dir if it doesn't exist, but don't chown an already existing directory (it might be in an additional store) - if err := idtools.MkdirAllAndChownNew(mergedDir, 0o700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAndChownNew(mergedDir, 0o700, rootIDs); err != nil && !os.IsExist(err) { return "", err } @@ -1589,12 +1552,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO readWrite = false } - lowers, err := os.ReadFile(path.Join(dir, lowerFile)) - if err != nil && !os.IsNotExist(err) { + lowerLayerIDs, err := d.getLowerLayerIDs(id) + if err != nil { return "", err } - splitLowers := strings.Split(string(lowers), ":") - if len(splitLowers) > maxDepth { + if len(lowerLayerIDs) > maxDepth { return "", errors.New("max depth exceeded") } @@ -1645,7 +1607,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) if err := fileutils.Exists(composefsBlob); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return "", nil } return "", err @@ -1684,50 +1646,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // For each lower, resolve its path, and append it and any additional diffN // directories to the lowers list. 
- for i, l := range splitLowers { - if l == "" { - continue + for i, lowerID := range lowerLayerIDs { + lower := filepath.Join(d.dir(lowerID), "diff") + st, err := os.Stat(lower) + if err != nil { + return "", fmt.Errorf("can't stat (or find?) lower layer %q: %w", lower, err) } - - lower := "" - newpath := path.Join(d.home, l) - if st, err := os.Stat(newpath); err != nil { - for _, p := range d.getAllImageStores() { - lower = path.Join(p, d.name, l) - if st2, err2 := os.Stat(lower); err2 == nil { - if !permsKnown { - perms = st2.Mode() - permsKnown = true - } - break - } - lower = "" - } - // if it is a "not found" error, that means the symlinks were lost in a sudden reboot - // so call the recreateSymlinks function to go through all the layer dirs and recreate - // the symlinks with the name from their respective "link" files - if lower == "" && os.IsNotExist(err) { - logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath) - if err := d.recreateSymlinks(); err != nil { - return "", fmt.Errorf("recreating the missing symlinks: %w", err) - } - lower = newpath - } else if lower == "" { - return "", fmt.Errorf("can't stat lower layer %q: %w", newpath, err) - } - } else { - if !permsKnown { - perms = st.Mode() - permsKnown = true - } - lower = newpath + if !permsKnown { + perms = st.Mode() + permsKnown = true } - linkContent, err := os.Readlink(lower) - if err != nil { - return "", err - } - lowerID := filepath.Base(filepath.Dir(linkContent)) composefsMount, err := maybeAddComposefsMount(lowerID, i+1, readWrite) if err != nil { return "", err @@ -1768,7 +1697,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO absLowers = append(absLowers, path.Join(dir, "empty")) } - if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(diffDir, perms, rootIDs); err != nil { if !inAdditionalStore { return "", err } 
@@ -1983,7 +1912,7 @@ func (d *Driver) Put(id string) error { if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if err := fileutils.Exists(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { + if err := fileutils.Exists(path.Join(dir, lowerFile)); err != nil && !errors.Is(err, fs.ErrNotExist) { return err } @@ -2017,18 +1946,14 @@ func (d *Driver) Put(id string) error { // If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all // pending changes are propagated to the file system if !unmounted { - fd, err := unix.Open(mountpoint, unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err == nil { - if err := unix.Syncfs(fd); err != nil { - logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) - } - unix.Close(fd) + if err := system.Syncfs(mountpoint); err != nil { + logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) } } } if !unmounted { - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !errors.Is(err, fs.ErrNotExist) { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) if !errors.Is(err, unix.EINVAL) { return fmt.Errorf("unmounting %q: %w", mountpoint, err) @@ -2045,17 +1970,17 @@ func (d *Driver) Put(id string) error { } } } else { - uid, gid := int(0), int(0) + idPair := idtools.IDPair{UID: 0, GID: 0} fi, err := os.Stat(mountpoint) if err != nil { return err } if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - uid, gid = int(stat.Uid), int(stat.Gid) + idPair = idtools.IDPair{UID: int(stat.Uid), GID: int(stat.Gid)} } tmpMountpoint := path.Join(dir, "merged.1") - if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { + if err := idtools.MkdirAndChown(tmpMountpoint, 0o700, idPair); err != nil && !errors.Is(err, os.ErrExist) { return err } // rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains 
@@ -2101,23 +2026,15 @@ func (d *Driver) ListLayers() ([]string, error) { // isParent returns if the passed in parent is the direct parent of the passed in layer func (d *Driver) isParent(id, parent string) bool { - lowers, err := d.getLowerDirs(id) + lowerLayerIDs, err := d.getLowerLayerIDs(id) if err != nil { return false } - if parent == "" && len(lowers) > 0 { - return false - } - - parentDir := d.dir(parent) - var ld string - if len(lowers) > 0 { - ld = filepath.Dir(lowers[0]) + actualParent := "" + if len(lowerLayerIDs) > 0 { + actualParent = lowerLayerIDs[0] } - if ld == "" && parent == "" { - return true - } - return ld == parentDir + return parent == actualParent } func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { @@ -2205,7 +2122,7 @@ func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error) if Err != nil { for _, f := range composefsMounts { f.Close() - if err := unix.Rmdir(f.Name()); err != nil && !os.IsNotExist(err) { + if err := unix.Rmdir(f.Name()); err != nil && !errors.Is(err, fs.ErrNotExist) { logrus.Warnf("Failed to remove %s: %v", f.Name(), err) } } @@ -2378,7 +2295,7 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr return err } } - if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(diffPath); err != nil && !errors.Is(err, fs.ErrNotExist) { return err } @@ -2419,7 +2336,7 @@ func (d *Driver) StartStagingDiffToApply(parent string, options graphdriver.Appl return t.Cleanup, nil, -1, err } - if err := idtools.MkdirAs(sa.Path, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil { + if err := idtools.MkdirAndChown(sa.Path, forcedSt.Mode, forcedSt.IDs); err != nil { return t.Cleanup, nil, -1, err } @@ -2501,20 +2418,6 @@ func (d *Driver) getDiffPath(id string) (string, error) { return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), false) } -func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { - layers, err := 
d.getLowerDirs(id) - if err != nil { - return nil, err - } - for i, l := range layers { - layers[i], err = redirectDiffIfAdditionalLayer(l, false) - if err != nil { - return nil, err - } - } - return layers, nil -} - // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. @@ -2596,12 +2499,13 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp dir := d.dir(id) diffDir := filepath.Join(dir, "diff") - rootUID, rootGID := 0, 0 + rootIDs := idtools.IDPair{UID: 0, GID: 0} if toHost != nil { - rootUID, rootGID, err = idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs()) + rootUID, rootGID, err := idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs()) if err != nil { return err } + rootIDs = idtools.IDPair{UID: rootUID, GID: rootGID} } // Mount the new layer and handle ownership changes and possible copy_ups in it. @@ -2652,13 +2556,13 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp // to the old upper layer in the index. workDir := filepath.Join(dir, "work") if err := os.RemoveAll(workDir); err == nil { - if err := idtools.MkdirAs(workDir, defaultPerms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(workDir, defaultPerms, rootIDs); err != nil { return err } } // Re-create the directory that we're going to use as the upper layer. 
- if err := idtools.MkdirAs(diffDir, perms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(diffDir, perms, rootIDs); err != nil { return err } return nil @@ -2750,7 +2654,7 @@ func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (st func (d *Driver) releaseAdditionalLayerByID(id string) { if al, err := d.getAdditionalLayerPathByID(id); err == nil { notifyReleaseAdditionalLayer(al) - } else if !os.IsNotExist(err) { + } else if !errors.Is(err, fs.ErrNotExist) { logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err) } } @@ -2827,7 +2731,7 @@ func notifyUseAdditionalLayer(al string) { } useFile := path.Join(al, "use") f, err := os.Create(useFile) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return } else if err == nil { f.Close() @@ -2849,7 +2753,7 @@ func notifyReleaseAdditionalLayer(al string) { } // tell the additional layer store that we don't use this layer anymore. err := unix.Rmdir(al) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return } logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err) diff --git a/vendor/go.podman.io/storage/drivers/vfs/driver.go b/vendor/go.podman.io/storage/drivers/vfs/driver.go index b90c2046c..32bf67859 100644 --- a/vendor/go.podman.io/storage/drivers/vfs/driver.go +++ b/vendor/go.podman.io/storage/drivers/vfs/driver.go @@ -15,12 +15,12 @@ import ( "github.com/vbatts/tar-split/tar/storage" graphdriver "go.podman.io/storage/drivers" "go.podman.io/storage/internal/dedup" + "go.podman.io/storage/internal/driver" "go.podman.io/storage/internal/tempdir" "go.podman.io/storage/pkg/archive" "go.podman.io/storage/pkg/directory" "go.podman.io/storage/pkg/fileutils" "go.podman.io/storage/pkg/idtools" - "go.podman.io/storage/pkg/parsers" "go.podman.io/storage/pkg/system" ) @@ -40,32 +40,53 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) name: 
"vfs", home: home, imageStore: options.ImageStore, + syncMode: graphdriver.SyncModeNone, } if err := os.MkdirAll(filepath.Join(home, "dir"), 0o700); err != nil { return nil, err } for _, option := range options.DriverOptions { - key, val, err := parsers.ParseKeyValueOpt(option) + driver, key, val, err := driver.ParseDriverOption(option) if err != nil { return nil, err } - key = strings.ToLower(key) + if driver != "" && driver != "vfs" { + // do not parse options meant for another storage driver + continue + } + switch key { - case "vfs.imagestore", ".imagestore": + case "imagestore": d.additionalHomes = slices.AppendSeq(d.additionalHomes, strings.SplitSeq(val, ",")) continue - case "vfs.mountopt": + case "mountopt": return nil, fmt.Errorf("vfs driver does not support mount options") - case ".ignore_chown_errors", "vfs.ignore_chown_errors": + case "ignore_chown_errors": logrus.Debugf("vfs: ignore_chown_errors=%s", val) var err error d.ignoreChownErrors, err = strconv.ParseBool(val) if err != nil { return nil, err } + case "vfs.sync", ".sync": + logrus.Debugf("vfs: sync=%s", val) + var err error + d.syncMode, err = graphdriver.ParseSyncMode(val) + if err != nil { + return nil, fmt.Errorf("invalid sync mode for vfs driver: %w", err) + } + // SyncModeNone and SyncModeFilesystem do not need any special handling because + // the vfs storage is always on the same file system as the metadata, thus the + // Syncfs() in layers.go covers also any file written by the vfs driver. + switch d.syncMode { + case graphdriver.SyncModeNone, graphdriver.SyncModeFilesystem: + // Nothing to do. 
+ default: + return nil, fmt.Errorf("invalid mode for vfs driver: %q", val) + } default: - return nil, fmt.Errorf("vfs driver does not support %s options", key) + return nil, fmt.Errorf("unknown option %q (%q)", key, option) } } @@ -84,6 +105,7 @@ type Driver struct { home string additionalHomes []string ignoreChownErrors bool + syncMode graphdriver.SyncMode naiveDiff graphdriver.DiffDriver updater graphdriver.LayerIDMapUpdater imageStore string @@ -108,6 +130,11 @@ func (d *Driver) Cleanup() error { return nil } +// SyncMode returns the sync mode configured for the driver. +func (d *Driver) SyncMode() graphdriver.SyncMode { + return d.syncMode +} + type fileGetNilCloser struct { storage.FileGetter } @@ -225,7 +252,7 @@ func (d *Driver) dir2(id string, useImageStore bool) string { homedir = filepath.Join(d.home, "dir", filepath.Base(id)) } if err := fileutils.Exists(homedir); err != nil { - additionalHomes := d.additionalHomes[:] + additionalHomes := d.additionalHomes if d.imageStore != "" { additionalHomes = append(additionalHomes, d.imageStore) } diff --git a/vendor/go.podman.io/storage/drivers/zfs/zfs.go b/vendor/go.podman.io/storage/drivers/zfs/zfs.go index b994278bb..48820247c 100644 --- a/vendor/go.podman.io/storage/drivers/zfs/zfs.go +++ b/vendor/go.podman.io/storage/drivers/zfs/zfs.go @@ -16,11 +16,11 @@ import ( "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" graphdriver "go.podman.io/storage/drivers" + "go.podman.io/storage/internal/driver" "go.podman.io/storage/internal/tempdir" "go.podman.io/storage/pkg/directory" "go.podman.io/storage/pkg/idtools" "go.podman.io/storage/pkg/mount" - "go.podman.io/storage/pkg/parsers" "golang.org/x/sys/unix" ) @@ -123,18 +123,22 @@ func parseOptions(opt []string) (zfsOptions, error) { var options zfsOptions options.fsName = "" for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) + driver, key, val, err := driver.ParseDriverOption(option) if err != nil { return 
options, err } - key = strings.ToLower(key) + if driver != "" && driver != "zfs" { + // do not parse options meant for another storage driver + continue + } + switch key { - case "zfs.fsname": + case "fsname": options.fsName = val - case "zfs.mountopt": + case "mountopt": options.mountOptions = val default: - return options, fmt.Errorf("unknown option %s", key) + return options, fmt.Errorf("unknown option %q (%q)", key, option) } } return options, nil @@ -183,6 +187,12 @@ func (d *Driver) Cleanup() error { return nil } +// SyncMode returns the sync mode configured for the driver. +// ZFS does not support sync mode configuration, always returns SyncModeNone. +func (d *Driver) SyncMode() graphdriver.SyncMode { + return graphdriver.SyncModeNone +} + // Status returns information about the ZFS filesystem. It returns a two dimensional array of information // such as pool name, dataset name, disk usage, parent quota and compression used. // Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', @@ -304,13 +314,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { return err } if parent == "" { - var rootUID, rootGID int + rootIDs := idtools.IDPair{UID: 0, GID: 0} var mountLabel string if opts != nil { - rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs()) + rootUID, rootGID, err := idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs()) if err != nil { return fmt.Errorf("failed to get root uid/gid: %w", err) } + rootIDs = idtools.IDPair{UID: rootUID, GID: rootGID} mountLabel = opts.MountLabel } mountoptions := map[string]string{"mountpoint": "legacy"} @@ -323,7 +334,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { d.Unlock() } - if err := idtools.MkdirAllAs(mountpoint, defaultPerms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(mountpoint, defaultPerms, rootIDs); err != nil { return err } defer func() { @@ -349,7 +360,7 @@ func (d 
*Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { // this is our first mount after creation of the filesystem, and the root dir may still have root // permissions instead of the remapped root uid:gid (if user namespaces are enabled): - if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + if err := os.Chown(mountpoint, rootIDs.UID, rootIDs.GID); err != nil { return fmt.Errorf("modifying zfs mountpoint (%s) ownership: %w", mountpoint, err) } diff --git a/vendor/go.podman.io/storage/images.go b/vendor/go.podman.io/storage/images.go index e535b541e..673f7634c 100644 --- a/vendor/go.podman.io/storage/images.go +++ b/vendor/go.podman.io/storage/images.go @@ -88,7 +88,7 @@ type Image struct { // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value // is set before using it. - Created time.Time `json:"created,omitempty"` + Created time.Time `json:"created"` // ReadOnly is true if this image resides in a read-only layer store. ReadOnly bool `json:"-"` diff --git a/vendor/go.podman.io/storage/internal/driver/driver.go b/vendor/go.podman.io/storage/internal/driver/driver.go new file mode 100644 index 000000000..dd2500ba3 --- /dev/null +++ b/vendor/go.podman.io/storage/internal/driver/driver.go @@ -0,0 +1,43 @@ +package driver + +import ( + "fmt" + "strings" + + "go.podman.io/storage/pkg/parsers" +) + +// isKnownDriverName checks if the given driver is known to this code base. +func isKnownDriverName(driver string) bool { + // Note we do not use the drivers map here because we want all known drivers + // not just the ones that were compiled in. + // Also we can use that to handle the overlay2 special case because the option + // parser accepts option with that name. 
+ switch driver { + case "overlay", "overlay2", "btrfs", "zfs", "vfs": + return true + } + return false +} + +// ParseDriverOption parses the given option string of the format "[driver].optname=val" +// and returns the driver name (can be empty in which case the option should be parsed +// by all drivers) +func ParseDriverOption(option string) (string, string, string, error) { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return "", "", "", err + } + + key = strings.ToLower(key) + driver, optName, ok := strings.Cut(key, ".") + if !ok { + optName = driver + driver = "" + } else if driver != "" { + if !isKnownDriverName(driver) { + return "", "", "", fmt.Errorf("unknown driver %q in option %q", driver, option) + } + } + return driver, optName, val, nil +} diff --git a/vendor/go.podman.io/storage/layers.go b/vendor/go.podman.io/storage/layers.go index 22bebc131..1f556818e 100644 --- a/vendor/go.podman.io/storage/layers.go +++ b/vendor/go.podman.io/storage/layers.go @@ -121,7 +121,7 @@ type Layer struct { // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value // is set before using it. - Created time.Time `json:"created,omitempty"` + Created time.Time `json:"created"` // CompressedDigest is the digest of the blob that was last passed to // ApplyDiff() or create(), as it was presented to us. @@ -823,17 +823,60 @@ func (r *layerStore) GarbageCollect() error { } // Remove layer and any related data of unreferenced id + logrus.Debugf("removing driver layer %q", id) if err := r.driver.Remove(id); err != nil { - logrus.Debugf("removing driver layer %q", id) return err } + // Best-effort removal of orphaned metadata; the driver layer is + // already gone, so warn but don't fail the overall GC. 
+ if err := os.Remove(r.tspath(id)); err != nil && !errors.Is(err, os.ErrNotExist) { + logrus.Warnf("Failed to remove tar-split file %q: %v", r.tspath(id), err) + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + logrus.Warnf("Failed to remove data directory %q: %v", r.datadir(id), err) + } + } - logrus.Debugf("removing %q", r.tspath(id)) - os.Remove(r.tspath(id)) - logrus.Debugf("removing %q", r.datadir(id)) - os.RemoveAll(r.datadir(id)) + // Clean up any orphaned tar-split or data files in the layer metadata + // directory that don't correspond to a known layer. + entries, err := os.ReadDir(r.layerdir) + if err != nil { + return err } - return nil + for _, entry := range entries { + name := entry.Name() + var id string + var isDataDir bool + if strings.HasSuffix(name, tarSplitSuffix) { + id = strings.TrimSuffix(name, tarSplitSuffix) + } else if stringid.ValidateID(name) == nil { + id = name + isDataDir = true + } else { + continue + } + if stringid.ValidateID(id) != nil { + continue + } + if r.byid[id] != nil { + continue + } + p := filepath.Join(r.layerdir, name) + logrus.Debugf("removing %q", p) + if isDataDir { + moreErr := os.RemoveAll(p) + if moreErr != nil && err == nil { + err = moreErr + } + } else { + moreErr := os.Remove(p) + if moreErr != nil && err == nil { + err = moreErr + } + } + } + + return err } func (r *layerStore) mountspath() string { @@ -1026,7 +1069,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { } modifiedLocations |= layer.location } - if err := r.saveLayers(modifiedLocations); err != nil { + if err := r.saveLayers(modifiedLocations, false); err != nil { return false, err } if incompleteDeletionErrors != nil { @@ -1078,27 +1121,36 @@ func (r *layerStore) loadMounts() error { } // save saves the contents of the store to disk. +// If needsSyncfs is true, all pending writes are flushed to disk before +// saving the layer metadata. 
Set it to false for metadata-only changes // The caller must hold r.lockfile locked for writing. // The caller must hold r.inProcessLock for WRITING. -func (r *layerStore) save(saveLocations layerLocations) error { +func (r *layerStore) save(saveLocations layerLocations, needsSyncfs bool) error { r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() - if err := r.saveLayers(saveLocations); err != nil { + if err := r.saveLayers(saveLocations, needsSyncfs); err != nil { return err } return r.saveMounts() } // saveFor saves the contents of the store relevant for modifiedLayer to disk. +// If needsSyncfs is true, all pending writes are flushed to disk before +// saving the layer metadata. Set it to false for metadata-only changes +// (e.g. setting a flag, changing names). // The caller must hold r.lockfile locked for writing. // The caller must hold r.inProcessLock for WRITING. -func (r *layerStore) saveFor(modifiedLayer *Layer) error { - return r.save(modifiedLayer.location) +func (r *layerStore) saveFor(modifiedLayer *Layer, needsSyncfs bool) error { + return r.save(modifiedLayer.location, needsSyncfs) } +// saveLayers writes the layer metadata to disk. +// If needsSyncfs is true, all pending writes are flushed to disk before +// saving the layer metadata. Set it to false for metadata-only changes +// (e.g. setting a flag, changing names). // The caller must hold r.lockfile locked for writing. // The caller must hold r.inProcessLock for WRITING. 
-func (r *layerStore) saveLayers(saveLocations layerLocations) error { +func (r *layerStore) saveLayers(saveLocations layerLocations, needsSyncfs bool) error { if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly) } @@ -1139,6 +1191,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error { if location == volatileLayerLocation { opts.NoSync = true } + // If the underlying storage driver is using sync and we are writing data (not just metadata), + // make sure we sync everything before saving the layer data, this ensures that all + // files/directories are properly created and written. + if needsSyncfs { + switch r.driver.SyncMode() { + case drivers.SyncModeNone: + // Nothing to do. + case drivers.SyncModeFilesystem: + if err := system.Syncfs(filepath.Dir(rpath)); err != nil { + return err + } + default: + return fmt.Errorf("unknown sync mode: %q", r.driver.SyncMode().String()) + } + } if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil { return err } @@ -1338,7 +1405,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error { return ErrLayerUnknown } delete(layer.Flags, flag) - return r.saveFor(layer) + return r.saveFor(layer, false) } // Requires startWriting. 
@@ -1354,7 +1421,7 @@ func (r *layerStore) SetFlag(id string, flag string, value any) error { layer.Flags = make(map[string]any) } layer.Flags[flag] = value - return r.saveFor(layer) + return r.saveFor(layer, false) } func (r *layerStore) Status() ([][2]string, error) { @@ -1409,7 +1476,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s if layer.TOCDigest != "" { r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID) } - if err := r.saveFor(layer); err != nil { + if err := r.saveFor(layer, true); err != nil { if e := r.deleteWhileHoldingLock(layer.ID); e != nil { logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e) } @@ -1562,7 +1629,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount } }() - if err = r.saveFor(layer); err != nil { + if err = r.saveFor(layer, false); err != nil { cleanupFailureContext = "saving incomplete layer metadata" return nil, -1, err } @@ -1623,7 +1690,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount cleanupFailureContext = "creating tar-split parent directory for a copy from template" return nil, -1, err } - if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { + if err = os.WriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { cleanupFailureContext = "creating a tar-split copy from template" return nil, -1, err } @@ -1670,7 +1737,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount } delete(layer.Flags, incompleteFlag) - if err = r.saveFor(layer); err != nil { + if err = r.saveFor(layer, true); err != nil { cleanupFailureContext = "saving finished layer metadata" return nil, -1, err } @@ -1904,7 +1971,7 @@ func (r *layerStore) updateNames(id string, names []string, op updateNameOperati r.byname[name] = layer } layer.Names = names - return r.saveFor(layer) + return r.saveFor(layer, false) } func (r 
*layerStore) datadir(id string) string { @@ -1967,7 +2034,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error if !slices.Contains(layer.BigDataNames, key) { layer.BigDataNames = append(layer.BigDataNames, key) - return r.saveFor(layer) + return r.saveFor(layer, false) } return nil } @@ -1996,7 +2063,7 @@ func (r *layerStore) SetMetadata(id, metadata string) error { } if layer, ok := r.lookup(id); ok { layer.Metadata = metadata - return r.saveFor(layer) + return r.saveFor(layer, false) } return ErrLayerUnknown } @@ -2036,7 +2103,7 @@ func (r *layerStore) internalDelete(id string) ([]tempdir.CleanupTempDirFunc, er layer.Flags = make(map[string]any) } layer.Flags[incompleteFlag] = true - if err := r.saveFor(layer); err != nil { + if err := r.saveFor(layer, false); err != nil { return nil, err } } @@ -2054,7 +2121,6 @@ func (r *layerStore) internalDelete(id string) ([]tempdir.CleanupTempDirFunc, er return cleanFunctions, err } - cleanFunctions = append(cleanFunctions, tempDirectory.Cleanup) if err := tempDirectory.StageDeletion(r.tspath(id)); err != nil && !errors.Is(err, os.ErrNotExist) { return cleanFunctions, err } @@ -2136,7 +2202,7 @@ func (r *layerStore) deferredDelete(id string) ([]tempdir.CleanupTempDirFunc, er if err != nil { return cleanFunctions, err } - return cleanFunctions, r.saveFor(layer) + return cleanFunctions, r.saveFor(layer, false) } // Requires startReading or startWriting. @@ -2162,8 +2228,8 @@ func (r *layerStore) Wipe() error { for id := range r.byid { ids = append(ids, id) } - sort.Slice(ids, func(i, j int) bool { - return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created) + slices.SortFunc(ids, func(a, b string) int { + return -r.byid[a].Created.Compare(r.byid[b].Created) }) for _, id := range ids { if err := r.deleteWhileHoldingLock(id); err != nil { @@ -2695,7 +2761,7 @@ func applyDiff(layerOptions *LayerOptions, diff io.Reader, tarSplitFile *os.File } // Requires startWriting. 
-func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (_ int64, retErr error) { +func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (int64, error) { if !r.lockfile.IsReadWrite() { return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly) } @@ -2709,11 +2775,10 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if err != nil { return -1, err } - // make sure to check for errors on close and return that one. + tarSplitClosed := false defer func() { - closeErr := tarSplitFile.Close() - if retErr == nil { - retErr = closeErr + if !tarSplitClosed { + tarSplitFile.Close() } }() @@ -2729,13 +2794,14 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, return -1, err } - if err := tarSplitFile.Sync(); err != nil { - return -1, fmt.Errorf("sync tar-split file: %w", err) + tarSplitClosed = true + if err := tarSplitFile.Close(); err != nil { + return -1, err } r.applyDiffResultToLayer(layer, result) - err = r.saveFor(layer) + err = r.saveFor(layer, true) return result.size, err } @@ -2767,7 +2833,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) { } // Requires startWriting. 
-func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) (retErr error) { +func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return ErrNotSupported @@ -2806,20 +2872,16 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver } maps.Copy(layer.Flags, options.Flags) } - if err = r.saveFor(layer); err != nil { - return err - } if diffOutput.TarSplit != nil { tarSplitFile, err := createTarSplitFile(r, layer.ID) if err != nil { return err } - // make sure to check for errors on close and return that one. + tarSplitClosed := false defer func() { - closeErr := tarSplitFile.Close() - if retErr == nil { - retErr = closeErr + if !tarSplitClosed { + tarSplitFile.Close() } }() tarSplitWriter := pools.BufioWriter32KPool.Get(tarSplitFile) @@ -2845,10 +2907,12 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver if err := tarSplitWriter.Flush(); err != nil { return fmt.Errorf("failed to flush tar-split writer buffer: %w", err) } - if err := tarSplitFile.Sync(); err != nil { - return fmt.Errorf("sync tar-split file: %w", err) + tarSplitClosed = true + if err := tarSplitFile.Close(); err != nil { + return err } } + for k, v := range diffOutput.BigData { if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { if err2 := r.deleteWhileHoldingLock(id); err2 != nil { @@ -2857,6 +2921,10 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver return err } } + + if err = r.saveFor(layer, true); err != nil { + return err + } return err } diff --git a/vendor/go.podman.io/storage/pkg/archive/archive.go b/vendor/go.podman.io/storage/pkg/archive/archive.go index 79343ba83..bce24e5af 100644 --- a/vendor/go.podman.io/storage/pkg/archive/archive.go 
+++ b/vendor/go.podman.io/storage/pkg/archive/archive.go @@ -1590,8 +1590,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap defer contentWriter.Close() var hashError error var hashWorker sync.WaitGroup - hashWorker.Add(1) - go func() { + hashWorker.Go(func() { t := tar.NewReader(contentReader) _, err := t.Next() if err != nil { @@ -1600,8 +1599,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { hashError = err } - hashWorker.Done() - }() + }) if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } diff --git a/vendor/go.podman.io/storage/pkg/archive/changes.go b/vendor/go.podman.io/storage/pkg/archive/changes.go index dc1aa5902..d433ed5ce 100644 --- a/vendor/go.podman.io/storage/pkg/archive/changes.go +++ b/vendor/go.podman.io/storage/pkg/archive/changes.go @@ -3,12 +3,13 @@ package archive import ( "archive/tar" "bytes" + "cmp" "fmt" "io" "maps" "os" "path/filepath" - "sort" + "slices" "strings" "syscall" "time" @@ -57,12 +58,9 @@ func (change *Change) String() string { return fmt.Sprintf("%s %s", change.Kind, change.Path) } -// changesByPath implements sort.Interface. 
-type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } +func compareChangesByPath(a, b Change) int { + return cmp.Compare(a.Path, b.Path) +} // Gnu tar and the go tar writer don't have sub-second mtime // precision, which is problematic when we apply changes via tar @@ -455,7 +453,7 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) - sort.Sort(changesByPath(changes)) + slices.SortFunc(changes, compareChangesByPath) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue diff --git a/vendor/go.podman.io/storage/pkg/archive/changes_linux.go b/vendor/go.podman.io/storage/pkg/archive/changes_linux.go index 343f3e686..2473d86ba 100644 --- a/vendor/go.podman.io/storage/pkg/archive/changes_linux.go +++ b/vendor/go.podman.io/storage/pkg/archive/changes_linux.go @@ -2,11 +2,12 @@ package archive import ( "bytes" + "cmp" "errors" "fmt" "os" "path/filepath" - "sort" + "slices" "strings" "syscall" "unsafe" @@ -234,12 +235,6 @@ type nameIno struct { ino uint64 } -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - // readdirnames is a hacked-apart version of the Go stdlib code, exposing inode // numbers further up the stack when reading directory contents. 
Unlike // os.Readdirnames, which returns a list of filenames, this function returns a @@ -281,9 +276,10 @@ func readdirnames(dirname string) (names []nameIno, err error) { bufp += nb } - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil + slices.SortFunc(names, func(a, b nameIno) int { + return cmp.Compare(a.name, b.name) + }) + return names, nil } // parseDirent is a minor modification of unix.ParseDirent (linux version) diff --git a/vendor/go.podman.io/storage/pkg/chrootarchive/archive.go b/vendor/go.podman.io/storage/pkg/chrootarchive/archive.go index e144ba789..9f732eea7 100644 --- a/vendor/go.podman.io/storage/pkg/chrootarchive/archive.go +++ b/vendor/go.podman.io/storage/pkg/chrootarchive/archive.go @@ -124,8 +124,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap defer contentWriter.Close() var hashError error var hashWorker sync.WaitGroup - hashWorker.Add(1) - go func() { + hashWorker.Go(func() { t := stdtar.NewReader(contentReader) _, err := t.Next() if err != nil { @@ -134,8 +133,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { hashError = err } - hashWorker.Done() - }() + }) if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } diff --git a/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go b/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go index 87bd065a8..275a9f9c3 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go +++ b/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go @@ -9,6 +9,7 @@ import ( "io" "os" "runtime" + "slices" "sort" "strings" "sync" @@ -410,9 +411,7 @@ func bloomFilterFromTags(tags [][]byte, digestLen int) *bloomFilter { } func writeCacheFileToWriter(writer io.Writer, bloomFilter *bloomFilter, tags [][]byte, tagLen, digestLen int, vdata, fnames 
bytes.Buffer, tagsBuffer *bytes.Buffer) error { - sort.Slice(tags, func(i, j int) bool { - return bytes.Compare(tags[i], tags[j]) == -1 - }) + slices.SortFunc(tags, bytes.Compare) for _, t := range tags { if _, err := tagsBuffer.Write(t); err != nil { return err diff --git a/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go b/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go index ef26a812b..85aced457 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go @@ -143,7 +143,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { if rc.pendingHole > 0 { toCopy := min(rc.pendingHole, int64(len(b))) rc.pendingHole -= toCopy - for i := int64(0); i < toCopy; i++ { + for i := range b[:toCopy] { b[i] = 0 } diff --git a/vendor/go.podman.io/storage/pkg/chunked/dump/dump.go b/vendor/go.podman.io/storage/pkg/chunked/dump/dump.go index facd7a169..1d004023b 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/dump/dump.go +++ b/vendor/go.podman.io/storage/pkg/chunked/dump/dump.go @@ -9,6 +9,7 @@ import ( "io" "path/filepath" "reflect" + "strings" "time" "github.com/opencontainers/go-digest" @@ -42,7 +43,7 @@ func escaped(val []byte, escape int) string { return c > 32 && c < 127 } - var result string + var result strings.Builder for _, c := range val { hexEscape := false var special string @@ -67,14 +68,14 @@ func escaped(val []byte, escape int) string { } if special != "" { - result += special + result.WriteString(special) } else if hexEscape { - result += fmt.Sprintf("\\x%.2x", c) + fmt.Fprintf(&result, "\\x%.2x", c) } else { - result += string(c) + result.WriteString(string(c)) } } - return result + return result.String() } func escapedOptional(val []byte, escape int) string { diff --git a/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go b/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go index 2e6180d4a..e42359d84 100644 --- 
a/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go +++ b/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go @@ -2,6 +2,7 @@ package chunked import ( archivetar "archive/tar" + "cmp" "context" "encoding/base64" "errors" @@ -14,7 +15,6 @@ import ( "path/filepath" "reflect" "slices" - "sort" "strings" "sync" "syscall" @@ -1069,8 +1069,8 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { } lastOffset = i } - sort.Slice(requestGaps, func(i, j int) bool { - return requestGaps[i].cost < requestGaps[j].cost + slices.SortFunc(requestGaps, func(a, b gap) int { + return cmp.Compare(a.cost, b.cost) }) toMergeMap := make([]bool, len(missingParts)) remainingToMerge := numberSourceChunks - target @@ -1613,18 +1613,15 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff }() for range copyGoRoutines { - wg.Add(1) jobs := copyFileJobs - - go func() { - defer wg.Done() + wg.Go(func() { for job := range jobs { found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode) job.err = err job.found = found copyResults[job.njob] = job } - }() + }) } filesToWaitFor := 0 diff --git a/vendor/go.podman.io/storage/pkg/config/config.go b/vendor/go.podman.io/storage/pkg/config/config.go index bf350c43c..1b2a5e70c 100644 --- a/vendor/go.podman.io/storage/pkg/config/config.go +++ b/vendor/go.podman.io/storage/pkg/config/config.go @@ -3,6 +3,8 @@ package config import ( "fmt" "os" + + "go.podman.io/storage/pkg/configfile" ) type BtrfsOptionsConfig struct { @@ -31,12 +33,16 @@ type OverlayOptionsConfig struct { // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories ForceMask string `toml:"force_mask,omitempty"` + // Sync controls filesystem sync during layer creation + Sync string `toml:"sync,omitempty"` } type VfsOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. 
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` + // Sync controls filesystem sync during layer creation + Sync string `toml:"sync,omitempty"` } type ZfsOptionsConfig struct { @@ -53,7 +59,7 @@ type OptionsConfig struct { // AdditionalImagesStores is the location of additional read/only // Image stores. Usually used to access Networked File System // for shared image content - AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` + AdditionalImageStores configfile.Slice `toml:"additionalimagestores,omitempty"` // ImageStore is the location of image store which is separated from the // container store. Usually this is not recommended unless users wants @@ -65,7 +71,7 @@ type OptionsConfig struct { // for shared image content // This API is experimental and can be changed without bumping the // major version number. - AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"` + AdditionalLayerStores configfile.Slice `toml:"additionallayerstores,omitempty"` // Size Size string `toml:"size,omitempty"` @@ -96,9 +102,6 @@ type OptionsConfig struct { // Btrfs container options to be handed to btrfs drivers Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` - // Thinpool container options to be handed to thinpool drivers (NOP) - Thinpool struct{} `toml:"thinpool,omitempty"` - // Overlay container options to be handed to overlay drivers Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` @@ -126,78 +129,60 @@ type OptionsConfig struct { } // GetGraphDriverOptions returns the driver specific options -func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { +func GetGraphDriverOptions(options OptionsConfig) []string { var doptions []string - switch driverName { - case "btrfs": - if options.Btrfs.MinSpace != "" { - return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace)) - } - if options.Btrfs.Size != "" { - doptions = append(doptions, 
fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size)) - } else if options.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) - } - - case "overlay", "overlay2": - // Specify whether composefs must be used to mount the data layers - if options.Overlay.IgnoreChownErrors != "" { - doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors)) - } else if options.IgnoreChownErrors != "" { - doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) - } - if options.Overlay.MountProgram != "" { - doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram)) - } else if options.MountProgram != "" { - doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram)) - } - if options.Overlay.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt)) - } else if options.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) - } - if options.Overlay.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size)) - } else if options.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) - } - if options.Overlay.Inodes != "" { - doptions = append(doptions, fmt.Sprintf("%s.inodes=%s", driverName, options.Overlay.Inodes)) - } - if options.Overlay.SkipMountHome != "" { - doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome)) - } else if options.SkipMountHome != "" { - doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome)) - } - if options.Overlay.ForceMask != "" { - doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.Overlay.ForceMask)) - } 
else if options.ForceMask != 0 { - doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask)) - } - if options.Overlay.UseComposefs != "" { - doptions = append(doptions, fmt.Sprintf("%s.use_composefs=%s", driverName, options.Overlay.UseComposefs)) - } - case "vfs": - if options.Vfs.IgnoreChownErrors != "" { - doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors)) - } else if options.IgnoreChownErrors != "" { - doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) - } - - case "zfs": - if options.Zfs.Name != "" { - doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name)) - } - if options.Zfs.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt)) - } else if options.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) - } - if options.Zfs.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size)) - } else if options.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) - } + if options.Btrfs.MinSpace != "" { + doptions = append(doptions, fmt.Sprintf("btrfs.min_space=%s", options.Btrfs.MinSpace)) + } + if options.Btrfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("btrfs.size=%s", options.Btrfs.Size)) + } + + // Specify whether composefs must be used to mount the data layers + if options.Overlay.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("overlay.ignore_chown_errors=%s", options.Overlay.IgnoreChownErrors)) + } + if options.Overlay.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("overlay.mount_program=%s", options.Overlay.MountProgram)) + } + if options.Overlay.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("overlay.mountopt=%s", 
options.Overlay.MountOpt)) + } + if options.Overlay.Size != "" { + doptions = append(doptions, fmt.Sprintf("overlay.size=%s", options.Overlay.Size)) + } + if options.Overlay.Inodes != "" { + doptions = append(doptions, fmt.Sprintf("overlay.inodes=%s", options.Overlay.Inodes)) + } + if options.Overlay.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("overlay.skip_mount_home=%s", options.Overlay.SkipMountHome)) } + if options.Overlay.ForceMask != "" { + doptions = append(doptions, fmt.Sprintf("overlay.force_mask=%s", options.Overlay.ForceMask)) + } + if options.Overlay.UseComposefs != "" { + doptions = append(doptions, fmt.Sprintf("overlay.use_composefs=%s", options.Overlay.UseComposefs)) + } + if options.Overlay.Sync != "" { + doptions = append(doptions, fmt.Sprintf("overlay.sync=%s", options.Overlay.Sync)) + } + + if options.Vfs.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("vfs.ignore_chown_errors=%s", options.Vfs.IgnoreChownErrors)) + } + if options.Vfs.Sync != "" { + doptions = append(doptions, fmt.Sprintf("vfs.sync=%s", options.Vfs.Sync)) + } + + if options.Zfs.Name != "" { + doptions = append(doptions, fmt.Sprintf("zfs.fsname=%s", options.Zfs.Name)) + } + if options.Zfs.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("zfs.mountopt=%s", options.Zfs.MountOpt)) + } + if options.Zfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("zfs.size=%s", options.Zfs.Size)) + } + return doptions } diff --git a/vendor/go.podman.io/storage/pkg/configfile/doc.go b/vendor/go.podman.io/storage/pkg/configfile/doc.go new file mode 100644 index 000000000..59fbf1536 --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/configfile/doc.go @@ -0,0 +1,7 @@ +// Package configfile provides the utilities for our config file parsing. + +// Note the API here is not considered stable and can and will change as we see fit.
+// The purpose is to use this only for our own config file parsing such as +// containers.conf, storage.conf and registries.conf. We will not consider use cases +// for external consumers. +package configfile diff --git a/vendor/go.podman.io/storage/pkg/configfile/parse.go b/vendor/go.podman.io/storage/pkg/configfile/parse.go new file mode 100644 index 000000000..eb397dcd9 --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/configfile/parse.go @@ -0,0 +1,492 @@ +package configfile + +import ( + "errors" + "fmt" + "io" + "io/fs" + "iter" + "maps" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + + "github.com/BurntSushi/toml" + "github.com/sirupsen/logrus" +) + +const _configPathName = "containers" + +var ( + // systemConfigPath is the location for the default config files shipped by the distro/vendor. + // + // This can be overridden at build time with the following go linker flag: + // -ldflags '-X go.podman.io/storage/pkg/configfile.systemConfigPath=$your_path' + systemConfigPath = builtinSystemConfigPath + + // adminOverrideConfigPath is the location for admin local override config files. + // + // This can be overridden at build time with the following go linker flag: + // -ldflags '-X go.podman.io/storage/pkg/configfile.adminOverrideConfigPath=$your_path' + adminOverrideConfigPath = getAdminOverrideConfigPath() + + // ErrConfigFileNotFound is returned when ErrorIfNotFound is true and no config + // file could be loaded. + ErrConfigFileNotFound = errors.New("config file not found") +) + +type File struct { + // The name of the config file WITHOUT the extension (i.e. no .conf). + // Must not be empty and must not contain the path separator. + Name string + + // Extension is the file extension of the config file, i.e. "conf" or "yaml". + // Must not be empty and must not contain the path separator. + Extension string + + // EnvironmentName is the name of environment variable that can be set to specify the override. 
+ // If EnvironmentName is set, the variable with _OVERRIDE suffix is also checked for an override + // unless DoNotLoadDropInFiles is set. + // Optional. + EnvironmentName string + + // RootForImplicitAbsolutePaths is the path to an alternate root + // If not "", prefixed to any absolute paths used by default in the package. + // NOTE: This does NOT affect paths starting by $HOME or environment variables paths. + RootForImplicitAbsolutePaths string + + // CustomConfigFilePath is the path to a specific file that will be parsed as main file instead + // of the default location files. Unlike the regular parsing logic if set this file must exist + // or ErrNotExist will be returned. Note when just using this option without also + // CustomConfigFileDropInDirectory it means the regular drop in directories are still searched + // assuming DoNotLoadDropInFiles is not set. + // This has higher priority over the EnvironmentName variable, so if set the env is ignored. + // RootForImplicitAbsolutePaths will not be used for this path. + // Optional. + CustomConfigFilePath string + + // CustomConfigFileDropInDirectory is the path to a specific drop in directory that will be searched + // instead of the default location. Note when just using this option without also + // CustomConfigFilePath it means the regular main file location is still being read assuming + // DoNotLoadMainFiles is not set. + // This has higher priority over the EnvironmentName + "_OVERRIDE" variable, so if set the env is ignored. + // RootForImplicitAbsolutePaths will not be used for this path. + // Optional. + CustomConfigFileDropInDirectory string + + // DoNotLoadMainFiles should be set if only the Drop In files should be loaded. + DoNotLoadMainFiles bool + + // DoNotLoadDropInFiles should be set if only the main files should be loaded. + // If DoNotLoadDropInFiles is set, the _OVERRIDE environment variable is ignored.
+ DoNotLoadDropInFiles bool + + // DoNotUseExtensionForConfigName makes it so that the extension is only consulted for the drop in + // file names but not the main config file name search path. + DoNotUseExtensionForConfigName bool + + // UserId is the id of the user running this. Used to know where to search in the + // different "rootful" and "rootless" drop in lookup paths. + UserId int + + // Modules is a list of names of full paths which are loaded after all the other files. + // Note the modules concept exists only for containers.conf. + // For compatibility reasons this field is written to with the fully resolved paths + // of each module as this is what podman expects today. + Modules []string + + // ErrorIfNotFound is true if an error should be returned if no file is found. + ErrorIfNotFound bool +} + +// Item is a single config file that is being read one at a time and returned by the iterator from [Read]. +type Item struct { + // Reader is the reader from the file content. The Reader is only valid during the current iteration. + Reader io.Reader + // Name is the full filepath to the filename being read. + Name string +} + +type SearchPaths struct { + // MainFiles are the main config file paths, ordered from highest priority to lower ones. + // For example: $HOME/..., then /etc/..., then /usr/... + // Can be empty if there are no main files for the given config. + MainFiles []string + // DropInDirectories is the list of drop in directories read by this config file, again + // ordered from highest priority to lower ones. + // Can be empty if there are no drop in directories for the given config. + DropInDirectories []string + // ModuleDirectories is the list of module directories checked by this config file, again + // ordered from highest priority to lower ones. + // Will be empty if no modules were requested for the given conf. + ModuleDirectories []string + // The file path from conf.EnvironmentName + "_OVERRIDE" env if it must be parsed for the given config.
+ // Can be empty. + ExtraOverrideFile string +} + +func (f *File) getConfName() string { + if f.DoNotUseExtensionForConfigName { + return f.Name + } + return f.Name + "." + f.Extension +} + +// GetSearchPaths returns the list of files which will be tried to be parsed. +// See the doc of [SearchPaths] for more information. +func GetSearchPaths(conf *File) (SearchPaths, error) { + paths, _, err := getSearchPaths(conf) + return paths, err +} + +func getSearchPaths(conf *File) (SearchPaths, bool, error) { + configFileName := conf.getConfName() + + // Note this can be empty which is a valid case and should be simply ignored then. + defaultConfig := systemConfigPath + if defaultConfig != "" { + defaultConfig = filepath.Join(defaultConfig, configFileName) + if conf.RootForImplicitAbsolutePaths != "" { + defaultConfig = filepath.Join(conf.RootForImplicitAbsolutePaths, defaultConfig) + } + } + + // Same here this can be empty. + overrideConfig := adminOverrideConfigPath + if overrideConfig != "" { + overrideConfig = filepath.Join(overrideConfig, configFileName) + if conf.RootForImplicitAbsolutePaths != "" { + overrideConfig = filepath.Join(conf.RootForImplicitAbsolutePaths, overrideConfig) + } + } + + // userConfig can be empty as well + userConfig, err := UserConfigPath() + if err != nil { + return SearchPaths{}, false, err + } + if userConfig != "" { + userConfig = filepath.Join(userConfig, configFileName) + } + + // main files + ignoreENOENT := true + shouldLoadDropIns := true + var mainFiles []string + if !conf.DoNotLoadMainFiles { + if conf.CustomConfigFilePath != "" { + mainFiles = append(mainFiles, conf.CustomConfigFilePath) + ignoreENOENT = false + // Only consider the env if no custom path was explicitly set. + // As this path often comes from cli options it is important it wins over the env value. 
+ } else if path := os.Getenv(conf.EnvironmentName); path != "" && conf.EnvironmentName != "" { + mainFiles = append(mainFiles, path) + ignoreENOENT = false + // Also when the env is set skip the loading of drop in files, modules and _OVERRIDE env are still read though. + shouldLoadDropIns = false + } else { + // default search paths + if userConfig != "" { + mainFiles = append(mainFiles, userConfig) + } + if overrideConfig != "" { + mainFiles = append(mainFiles, overrideConfig) + } + if defaultConfig != "" { + mainFiles = append(mainFiles, defaultConfig) + } + } + } + + // drop in dirs + var dropInDirs []string + var extraOverrideFilePath string + if !conf.DoNotLoadDropInFiles { + if shouldLoadDropIns { + if conf.CustomConfigFileDropInDirectory != "" { + dropInDirs = append(dropInDirs, conf.CustomConfigFileDropInDirectory) + } else { + // default search paths + dropInDirs = getDropInPaths(defaultConfig, overrideConfig, userConfig, "."+conf.Extension, conf.UserId) + } + } + + if conf.EnvironmentName != "" && conf.CustomConfigFileDropInDirectory == "" { + if path := os.Getenv(conf.EnvironmentName + "_OVERRIDE"); path != "" { + extraOverrideFilePath = path + } + } + } + + // modules + var modDirs []string + if len(conf.Modules) > 0 { + modDirs = moduleDirectories(defaultConfig, overrideConfig, userConfig) + } + + return SearchPaths{ + MainFiles: mainFiles, + DropInDirectories: dropInDirs, + ModuleDirectories: modDirs, + ExtraOverrideFile: extraOverrideFilePath, + }, + ignoreENOENT, + nil +} + +// Read parses all config files with the specified options and returns an iterator which returns all files as Item in the right order. +// If an error is returned by the iterator then this must be treated as fatal error and must fail the config file parsing. +// Expected ENOENT errors are already ignored in this function and must not be handled again by callers. +// The given File options must not be nil and populated with valid options. 
+func Read(conf *File) iter.Seq2[*Item, error] { + return func(yield func(*Item, error) bool) { + paths, ignoreMainENOENT, err := getSearchPaths(conf) + if err != nil { + yield(nil, err) + return + } + + usedPaths := make([]string, 0, 8) + foundAny := false + + yieldAndClose := func(f *os.File) bool { + foundAny = true + ok := yield(&Item{ + Reader: f, + Name: f.Name(), + }, nil) + // Once yield returns always close the file as the consumer should be done with it. + if err := f.Close(); err != nil { + if ok { + // don't yield again if the previous yield returned false + yield(nil, err) + } + return false + } + return ok + } + + for _, path := range paths.MainFiles { + if path == "" { + continue + } + usedPaths = append(usedPaths, path) + f, err := os.Open(path) + if err != nil { + // only ignore ErrNotExist when needed, all other errors get return to the caller via yield + if ignoreMainENOENT && errors.Is(err, fs.ErrNotExist) { + continue + } + yield(nil, err) + return + } + + if !yieldAndClose(f) { + return + } + // we only read the first found file + break + } + + if len(paths.DropInDirectories) > 0 { + suffix := "." 
+ conf.Extension + files, err := readDropInsFromPaths(paths.DropInDirectories, suffix) + if err != nil { + // return error via iterator + yield(nil, err) + return + } + for _, file := range files { + usedPaths = append(usedPaths, file) + f, err := os.Open(file) + // always ignore ErrNotExist, all other errors get return to the caller via yield + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + continue + } + yield(nil, err) + return + } + + if !yieldAndClose(f) { + return + } + } + } + + if len(conf.Modules) > 0 { + resolvedModules := make([]string, 0, len(conf.Modules)) + for _, module := range conf.Modules { + f, err := resolveModule(module, paths.ModuleDirectories, &usedPaths) + if err != nil { + yield(nil, fmt.Errorf("could not resolve module: %w", err)) + return + } + resolvedModules = append(resolvedModules, f.Name()) + if !yieldAndClose(f) { + return + } + } + conf.Modules = resolvedModules + } + + if paths.ExtraOverrideFile != "" { + // The _OVERRIDE env must be appended after loading all files, even modules. + usedPaths = append(usedPaths, paths.ExtraOverrideFile) + f, err := os.Open(paths.ExtraOverrideFile) + // Do not ignore ErrNotExist here, we want to hard error if users set a wrong path here. + if err != nil { + yield(nil, err) + return + } + if !yieldAndClose(f) { + return + } + } + + if conf.ErrorIfNotFound && !foundAny { + yield(nil, fmt.Errorf("%w: no %s file found; searched paths: %q", ErrConfigFileNotFound, conf.getConfName(), usedPaths)) + return + } + } +} + +const dropInSuffix = ".d" + +func getDropInPaths(defaultConfig, overrideConfig, userConfig, suffix string, uid int) []string { + paths := make([]string, 0, 7) + + if userConfig != "" { + // the $HOME config only has one .d path not the rootful/rootless ones. + paths = append(paths, userConfig+dropInSuffix) + } + if overrideConfig != "" { + paths = append(paths, getDropInPathsUnderMain(overrideConfig, suffix, uid)...) 
+ } + if defaultConfig != "" { + paths = append(paths, getDropInPathsUnderMain(defaultConfig, suffix, uid)...) + } + + return paths +} + +func readDropInsFromPaths(paths []string, suffix string) ([]string, error) { + dropInMap := make(map[string]string) + + for _, path := range slices.Backward(paths) { + entries, err := os.ReadDir(path) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + continue + } + return nil, err + } + for _, entry := range entries { + if !entry.IsDir() && strings.HasSuffix(entry.Name(), suffix) { + dropInMap[entry.Name()] = filepath.Join(path, entry.Name()) + } + } + } + + sortedNames := slices.Sorted(maps.Keys(dropInMap)) + files := make([]string, 0, len(sortedNames)) + for _, file := range sortedNames { + files = append(files, dropInMap[file]) + } + return files, nil +} + +func getDropInPathsUnderMain(mainPath, suffix string, uid int) []string { + paths := make([]string, 0, 3) + paths = append(paths, mainPath+dropInSuffix) + + rootless := uid > 0 + var specialName string + if rootless { + specialName = "rootless" + } else { + specialName = "rootful" + } + // insert the name after the main config name but before the extension if it has one. + mainPath, cut := strings.CutSuffix(mainPath, suffix) + specialPath := mainPath + "." + specialName + if cut { + specialPath += suffix + } + specialPath += dropInSuffix + paths = append(paths, specialPath) + if rootless { + paths = append(paths, filepath.Join(specialPath, strconv.Itoa(uid))) + } + return paths +} + +func moduleDirectories(defaultConfig, overrideConfig, userConfig string) []string { + const moduleSuffix = ".modules" + modules := make([]string, 0, 3) + if userConfig != "" { + modules = append(modules, userConfig+moduleSuffix) + } + if overrideConfig != "" { + modules = append(modules, overrideConfig+moduleSuffix) + } + if defaultConfig != "" { + modules = append(modules, defaultConfig+moduleSuffix) + } + return modules +} + +// Resolve the specified path to a module. 
+func resolveModule(path string, dirs []string, usedPaths *[]string) (*os.File, error) { + if filepath.IsAbs(path) { + if usedPaths != nil { + *usedPaths = append(*usedPaths, path) + } + return os.Open(path) + } + + // Collect all errors to avoid suppressing important errors (e.g., + // permission errors). + var multiErr error + for _, d := range dirs { + candidate := filepath.Join(d, path) + if usedPaths != nil { + *usedPaths = append(*usedPaths, candidate) + } + + f, err := os.Open(candidate) + if err == nil { + return f, nil + } + multiErr = errors.Join(multiErr, err) + } + return nil, multiErr +} + +// ParseTOML parses the given config according to the rules in by [Read]. +// Note the given configStruct must be a pointer to a struct that describes +// the toml config fields and is modified in place. +// If an error is returned the struct should not be used. +func ParseTOML(configStruct any, conf *File) error { + for item, err := range Read(conf) { + if err != nil { + return err + } + meta, err := toml.NewDecoder(item.Reader).Decode(configStruct) + if err != nil { + return fmt.Errorf("decode configuration %q: %w", item.Name, err) + } + keys := meta.Undecoded() + if len(keys) > 0 { + logrus.Debugf("Failed to decode the keys %q from %q", keys, item.Name) + } + + logrus.Debugf("Read config file %q", item.Name) + // This prints large potentially large structs so keep it to trace level only. + // It can however be useful to figure out which setting come from which file. + logrus.Tracef("Merged new config: %+v", configStruct) + } + return nil +} diff --git a/vendor/go.podman.io/storage/pkg/configfile/path.go b/vendor/go.podman.io/storage/pkg/configfile/path.go new file mode 100644 index 000000000..d9443dd08 --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/configfile/path.go @@ -0,0 +1,15 @@ +//go:build !windows && !freebsd + +package configfile + +const ( + // builtinSystemConfigPath is the location for the default config files shipped by the distro/vendor. 
+ builtinSystemConfigPath = "/usr/share/containers" + + // builtinAdminOverrideConfigPath is the location for admin local override config files. + builtinAdminOverrideConfigPath = "/etc/containers" +) + +func getAdminOverrideConfigPath() string { + return builtinAdminOverrideConfigPath +} diff --git a/vendor/go.podman.io/storage/pkg/configfile/path_freebsd.go b/vendor/go.podman.io/storage/pkg/configfile/path_freebsd.go new file mode 100644 index 000000000..c7fe48b0f --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/configfile/path_freebsd.go @@ -0,0 +1,13 @@ +package configfile + +const ( + // builtinSystemConfigPath is the location for the default config files shipped by the distro/vendor. + builtinSystemConfigPath = "/usr/local/share/" + _configPathName + + // builtinAdminOverrideConfigPath is the location for admin local override config files. + builtinAdminOverrideConfigPath = "/usr/local/etc/" + _configPathName +) + +func getAdminOverrideConfigPath() string { + return builtinAdminOverrideConfigPath +} diff --git a/vendor/go.podman.io/storage/pkg/configfile/path_unix.go b/vendor/go.podman.io/storage/pkg/configfile/path_unix.go new file mode 100644 index 000000000..d83b7491a --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/configfile/path_unix.go @@ -0,0 +1,25 @@ +//go:build !windows + +package configfile + +import ( + "os" + "path/filepath" + + "go.podman.io/storage/pkg/unshare" +) + +// UserConfigPath returns the path to the users local config that is +// not shared with other users. It uses $XDG_CONFIG_HOME/containers... +// if set or $HOME/.config/containers... if not. 
+func UserConfigPath() (string, error) { + if configHome, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok { + return filepath.Join(configHome, _configPathName), nil + } + home, err := unshare.HomeDir() + if err != nil { + return "", err + } + + return filepath.Join(home, ".config", _configPathName), nil +} diff --git a/vendor/go.podman.io/storage/pkg/configfile/path_windows.go b/vendor/go.podman.io/storage/pkg/configfile/path_windows.go new file mode 100644 index 000000000..79f34343b --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/configfile/path_windows.go @@ -0,0 +1,28 @@ +package configfile + +import ( + "os" + "path/filepath" +) + +const ( + // builtinSystemConfigPath is the location for the default config files shipped by the distro/vendor. + // On windows there is no /usr equivalent so leave it empty. + builtinSystemConfigPath = "" +) + +func getAdminOverrideConfigPath() string { + if env, ok := os.LookupEnv("ProgramData"); ok { + return filepath.Join(env, _configPathName) + } + return "" +} + +// UserConfigPath returns the path to the users local config that is +// not shared with other users. It uses $APPDATA/containers... +func UserConfigPath() (string, error) { + if env, ok := os.LookupEnv("APPDATA"); ok { + return filepath.Join(env, _configPathName), nil + } + return "", nil +} diff --git a/vendor/go.podman.io/storage/pkg/configfile/slice.go b/vendor/go.podman.io/storage/pkg/configfile/slice.go new file mode 100644 index 000000000..62ea8fe75 --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/configfile/slice.go @@ -0,0 +1,101 @@ +package configfile + +import ( + "bytes" + "fmt" + + "github.com/BurntSushi/toml" +) + +// Slice allows for extending a TOML string array with custom +// attributes that control how the array is marshaled into a Go string. +// +// Specifically, an Slice can be configured to avoid it being +// overridden by a subsequent unmarshal sequence. 
When the `append` attribute +// is specified, the array will be appended instead (e.g., `array=["9", +// {append=true}]`). +type Slice struct { // A "mixed-type array" in TOML. + // Note that the fields below _must_ be exported. Otherwise the TOML + // encoder would fail during type reflection. + Values []string + Attributes struct { // Using a struct allows for adding more attributes in the future. + Append *bool // Nil if not set by the user + } +} + +// NewSlice creates a new slice with the specified values. +func NewSlice(values []string) Slice { + return Slice{Values: values} +} + +// Get returns the Slice values or an empty string slice. +func (a *Slice) Get() []string { + if a.Values == nil { + return []string{} + } + return a.Values +} + +// Set overrides the values of the Slice. +func (a *Slice) Set(values []string) { + a.Values = values +} + +// UnmarshalTOML is the custom unmarshal method for Slice. +func (a *Slice) UnmarshalTOML(data any) error { + iFaceSlice, ok := data.([]any) + if !ok { + return fmt.Errorf("unable to cast to interface array: %v", data) + } + + var loadedStrings []string + for _, x := range iFaceSlice { // Iterate over each item in the slice. + switch val := x.(type) { + case string: // Strings are directly appended to the slice. + loadedStrings = append(loadedStrings, val) + case map[string]any: // The attribute struct is represented as a map. + for k, v := range val { // Iterate over all _supported_ keys. + switch k { + case "append": + boolVal, ok := v.(bool) + if !ok { + return fmt.Errorf("unable to cast append to bool: %v", k) + } + a.Attributes.Append = &boolVal + default: // Unsupported map key. + return fmt.Errorf("unsupported key %q in map: %v", k, val) + } + } + default: // Unsupported item. + return fmt.Errorf("unsupported item in attributed string slice: %v", x) + } + } + + if a.Attributes.Append != nil && *a.Attributes.Append { // If _explicitly_ configured, append the loaded slice. 
+ a.Values = append(a.Values, loadedStrings...) + } else { // Default: override the existing Slice. + a.Values = loadedStrings + } + return nil +} + +// MarshalTOML is the custom marshal method for Slice. +func (a *Slice) MarshalTOML() ([]byte, error) { + iFaceSlice := make([]any, 0, len(a.Values)) + + for _, x := range a.Values { + iFaceSlice = append(iFaceSlice, x) + } + + if a.Attributes.Append != nil { + attributes := map[string]any{"append": *a.Attributes.Append} + iFaceSlice = append(iFaceSlice, attributes) + } + + buf := new(bytes.Buffer) + enc := toml.NewEncoder(buf) + if err := enc.Encode(iFaceSlice); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools.go b/vendor/go.podman.io/storage/pkg/idtools/idtools.go index 6fcba9b33..81b7f66bf 100644 --- a/vendor/go.podman.io/storage/pkg/idtools/idtools.go +++ b/vendor/go.podman.io/storage/pkg/idtools/idtools.go @@ -2,13 +2,14 @@ package idtools import ( "bufio" + "cmp" "errors" "fmt" "io/fs" "os" "os/user" "runtime" - "sort" + "slices" "strconv" "strings" "sync" @@ -32,11 +33,9 @@ type subIDRange struct { Length int } -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } +func compareRanges(a, b subIDRange) int { + return cmp.Compare(a.Start, b.Start) +} const ( subuidFileName string = "/etc/subuid" @@ -47,16 +46,18 @@ const ( // MkdirAllAs creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. 
+// // Deprecated: Use MkdirAllAndChown func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) + return MkdirAllAndChown(path, mode, IDPair{UID: ownerUID, GID: ownerGID}) } // MkdirAs creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership +// // Deprecated: Use MkdirAndChown with a IDPair func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) + return MkdirAndChown(path, mode, IDPair{UID: ownerUID, GID: ownerGID}) } // MkdirAllAndChown creates a directory (include any along the path) and then modifies @@ -296,11 +297,11 @@ func (i *IDMappings) GIDs() []IDMap { return i.gids } -func createIDMap(subidRanges ranges) []IDMap { +func createIDMap(subidRanges []subIDRange) []IDMap { idMap := []IDMap{} // sort the ranges by lowest ID first - sort.Sort(subidRanges) + slices.SortFunc(subidRanges, compareRanges) containerID := 0 for _, idrange := range subidRanges { idMap = append(idMap, IDMap{ @@ -316,9 +317,9 @@ func createIDMap(subidRanges ranges) []IDMap { // parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) // and return all found ranges for a specified username. 
If the special value // "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { +func parseSubidFile(path, username string) ([]subIDRange, error) { var ( - rangeList ranges + rangeList []subIDRange uidstr string ) if u, err := user.Lookup(username); err == nil { @@ -582,18 +583,6 @@ func SafeLchown(name string, uid, gid int) error { return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid) } -type sortByHostID []IDMap - -func (e sortByHostID) Len() int { return len(e) } -func (e sortByHostID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e sortByHostID) Less(i, j int) bool { return e[i].HostID < e[j].HostID } - -type sortByContainerID []IDMap - -func (e sortByContainerID) Len() int { return len(e) } -func (e sortByContainerID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e sortByContainerID) Less(i, j int) bool { return e[i].ContainerID < e[j].ContainerID } - // IsContiguous checks if the specified mapping is contiguous and doesn't // have any hole. 
func IsContiguous(mappings []IDMap) bool { @@ -601,18 +590,20 @@ func IsContiguous(mappings []IDMap) bool { return true } - var mh sortByHostID = mappings[:] - sort.Sort(mh) - for i := 1; i < len(mh); i++ { - if mh[i].HostID != mh[i-1].HostID+mh[i-1].Size { + slices.SortFunc(mappings, func(a, b IDMap) int { + return cmp.Compare(a.HostID, b.HostID) + }) + for i := 1; i < len(mappings); i++ { + if mappings[i].HostID != mappings[i-1].HostID+mappings[i-1].Size { return false } } - var mc sortByContainerID = mappings[:] - sort.Sort(mc) - for i := 1; i < len(mc); i++ { - if mc[i].ContainerID != mc[i-1].ContainerID+mc[i-1].Size { + slices.SortFunc(mappings, func(a, b IDMap) int { + return cmp.Compare(a.ContainerID, b.ContainerID) + }) + for i := 1; i < len(mappings); i++ { + if mappings[i].ContainerID != mappings[i-1].ContainerID+mappings[i-1].Size { return false } } diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go b/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go index 8a3076a0f..c1ef5ac33 100644 --- a/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go +++ b/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go @@ -37,8 +37,8 @@ import "C" var onceInit sync.Once -func readSubid(username string, isUser bool) (ranges, error) { - var ret ranges +func readSubid(username string, isUser bool) ([]subIDRange, error) { + var ret []subIDRange uidstr := "" if username == "ALL" { @@ -88,10 +88,10 @@ func readSubid(username string, isUser bool) (ranges, error) { return ret, nil } -func readSubuid(username string) (ranges, error) { +func readSubuid(username string) ([]subIDRange, error) { return readSubid(username, true) } -func readSubgid(username string) (ranges, error) { +func readSubgid(username string) ([]subIDRange, error) { return readSubid(username, false) } diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go b/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go index e6f5c1ba6..8bdd9aa57 
100644 --- a/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go +++ b/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go @@ -2,10 +2,10 @@ package idtools -func readSubuid(username string) (ranges, error) { +func readSubuid(username string) ([]subIDRange, error) { return parseSubidFile(subuidFileName, username) } -func readSubgid(username string) (ranges, error) { +func readSubgid(username string) ([]subIDRange, error) { return parseSubidFile(subgidFileName, username) } diff --git a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go b/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go index ee80ce6a7..9b91f9862 100644 --- a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go @@ -3,7 +3,6 @@ package idtools import ( "fmt" "slices" - "sort" "strconv" "strings" "sync" @@ -124,7 +123,7 @@ func findNextUIDRange() (int, error) { if err != nil { return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err) } - sort.Sort(ranges) + slices.SortFunc(ranges, compareRanges) return findNextRangeStart(ranges) } @@ -133,11 +132,11 @@ func findNextGIDRange() (int, error) { if err != nil { return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err) } - sort.Sort(ranges) + slices.SortFunc(ranges, compareRanges) return findNextRangeStart(ranges) } -func findNextRangeStart(rangeList ranges) (int, error) { +func findNextRangeStart(rangeList []subIDRange) (int, error) { startID := defaultRangeStart for _, arange := range rangeList { if wouldOverlap(arange, startID) { diff --git a/vendor/go.podman.io/storage/pkg/mount/mount.go b/vendor/go.podman.io/storage/pkg/mount/mount.go index 23c5c44ac..8f20a0cab 100644 --- a/vendor/go.podman.io/storage/pkg/mount/mount.go +++ b/vendor/go.podman.io/storage/pkg/mount/mount.go @@ -1,9 +1,12 @@ package mount import ( - "sort" + "cmp" + "slices" "strconv" "strings" + + 
"github.com/sirupsen/logrus" ) // mountError holds an error from a mount or unmount operation @@ -84,21 +87,23 @@ func RecursiveUnmount(target string) error { } // Make the deepest mount be first - sort.Slice(mounts, func(i, j int) bool { - return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + slices.SortFunc(mounts, func(a, b *Info) int { + return -cmp.Compare(len(a.Mountpoint), len(b.Mountpoint)) }) - for i, m := range mounts { + var lastErr error + for _, m := range mounts { if !strings.HasPrefix(m.Mountpoint, target) { continue } - if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { - return err + if err := Unmount(m.Mountpoint); err != nil { // Ignore errors for submounts and continue trying to unmount others // The final unmount should fail if there are any submounts remaining + logrus.Warnf("Failed to unmount %s: %v", m.Mountpoint, err) + lastErr = err } } - return nil + return lastErr } // ForceUnmount lazily unmounts a filesystem on supported platforms, diff --git a/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go b/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go deleted file mode 100644 index 2d9e75ea1..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -package mount - -import ( - "fmt" - "os" - - "github.com/moby/sys/mountinfo" -) - -func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return mountinfo.GetMountsFromReader(f, nil) -} diff --git a/vendor/go.podman.io/storage/pkg/system/rm.go b/vendor/go.podman.io/storage/pkg/system/rm.go index c151c1449..8ab7ed263 100644 --- a/vendor/go.podman.io/storage/pkg/system/rm.go +++ b/vendor/go.podman.io/storage/pkg/system/rm.go @@ -36,9 +36,10 @@ func EnsureRemoveAll(dir string) error { return nil } - // Attempt to unmount anything beneath this dir first + // Best-effort: if unmounting fails, the RemoveAll 
loop below may + // still succeed (or will surface its own, more specific error). if err := mount.RecursiveUnmount(dir); err != nil { - logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err) + logrus.Warnf("RecursiveUnmount on %s failed: %v", dir, err) } for { diff --git a/vendor/go.podman.io/storage/pkg/system/syncfs_linux.go b/vendor/go.podman.io/storage/pkg/system/syncfs_linux.go new file mode 100644 index 000000000..93dd3f9e6 --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/system/syncfs_linux.go @@ -0,0 +1,23 @@ +//go:build linux + +package system + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +// Syncfs synchronizes the filesystem containing the given path. +func Syncfs(path string) error { + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return fmt.Errorf("open for syncfs: %w", err) + } + defer unix.Close(fd) + + if err := unix.Syncfs(fd); err != nil { + return fmt.Errorf("syncfs: %w", err) + } + return nil +} diff --git a/vendor/go.podman.io/storage/pkg/system/syncfs_unix.go b/vendor/go.podman.io/storage/pkg/system/syncfs_unix.go new file mode 100644 index 000000000..d4e5a778a --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/system/syncfs_unix.go @@ -0,0 +1,12 @@ +//go:build unix && !linux + +package system + +import "golang.org/x/sys/unix" + +// Syncfs synchronizes the filesystem containing the given path. +// On non-Linux Unix platforms, this falls back to sync(2) which +// syncs all filesystems. +func Syncfs(path string) error { + return unix.Sync() +} diff --git a/vendor/go.podman.io/storage/pkg/system/syncfs_windows.go b/vendor/go.podman.io/storage/pkg/system/syncfs_windows.go new file mode 100644 index 000000000..1b208ba2d --- /dev/null +++ b/vendor/go.podman.io/storage/pkg/system/syncfs_windows.go @@ -0,0 +1,10 @@ +//go:build windows + +package system + +import "errors" + +// Syncfs is not supported on Windows. 
+func Syncfs(path string) error { + return errors.New("syncfs is not supported on Windows") +} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare.go b/vendor/go.podman.io/storage/pkg/unshare/unshare.go index 00f397f35..58eba5487 100644 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare.go +++ b/vendor/go.podman.io/storage/pkg/unshare/unshare.go @@ -7,26 +7,18 @@ import ( "sync" ) -var ( - homeDirOnce sync.Once - homeDirErr error - homeDir string -) +var lookupHomeDir = sync.OnceValues(func() (string, error) { + usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) + if err != nil { + return "", fmt.Errorf("unable to resolve HOME directory: %w", err) + } + return usr.HomeDir, nil +}) // HomeDir returns the home directory for the current user. func HomeDir() (string, error) { - homeDirOnce.Do(func() { - home := os.Getenv("HOME") - if home == "" { - usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) - if err != nil { - homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err) - return - } - homeDir, homeDirErr = usr.HomeDir, nil - return - } - homeDir, homeDirErr = home, nil - }) - return homeDir, homeDirErr + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + return lookupHomeDir() } diff --git a/vendor/go.podman.io/storage/storage.conf b/vendor/go.podman.io/storage/storage.conf index 2fff0cecf..e89de49ad 100644 --- a/vendor/go.podman.io/storage/storage.conf +++ b/vendor/go.podman.io/storage/storage.conf @@ -4,20 +4,15 @@ # container/storage library do not inherit fields from other storage.conf # files. # -# Note: The storage.conf file overrides other storage.conf files based on this precedence: -# /usr/containers/storage.conf -# /etc/containers/storage.conf -# $HOME/.config/containers/storage.conf -# $XDG_CONFIG_HOME/containers/storage.conf (if XDG_CONFIG_HOME is set) # See man 5 containers-storage.conf for more information # The "storage" table contains all of the server options. 
[storage] -# Default storage driver, must be set for proper operation. -driver = "overlay" +# Default storage driver. Optional. +# driver = "overlay" # Temporary storage location -runroot = "/run/containers/storage" +# runroot = "/run/containers/storage" # Priority list for the storage drivers that will be tested one # after the other to pick the storage driver if it is not defined. @@ -29,7 +24,7 @@ runroot = "/run/containers/storage" # following commands: # semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH # restorecon -R -v /NEWSTORAGEPATH -graphroot = "/var/lib/containers/storage" +# graphroot = "/var/lib/containers/storage" # Optional alternate location of image store if a location separate from the # container store is required. If set, it must be different than graphroot. @@ -51,8 +46,7 @@ graphroot = "/var/lib/containers/storage" # AdditionalImageStores is used to pass paths to additional Read/Only image stores # Must be comma separated list. -additionalimagestores = [ -] +# additionalimagestores = [] # Options controlling how storage is populated when pulling images. [storage.options.pull_options] @@ -176,3 +170,12 @@ mountopt = "nodev" # "force_mask" permissions. # # force_mask = "" + +# Sync filesystem before marking layer as present. Filesystem must support syncfs. +# Values: "none", "filesystem" +# sync = "none" + +[storage.options.vfs] +# Sync filesystem before marking layer as present. Filesystem must support syncfs. +# Values: "none", "filesystem" +# sync = "none" diff --git a/vendor/go.podman.io/storage/storage.conf-freebsd b/vendor/go.podman.io/storage/storage.conf-freebsd index 4181aa655..f33e17a8c 100644 --- a/vendor/go.podman.io/storage/storage.conf-freebsd +++ b/vendor/go.podman.io/storage/storage.conf-freebsd @@ -4,23 +4,18 @@ # container/storage library do not inherit fields from other storage.conf # files. 
# -# Note: The storage.conf file overrides other storage.conf files based on this precedence: -# /usr/local/share/containers/storage.conf -# /usr/local/etc/containers/storage.conf -# $HOME/.config/containers/storage.conf -# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) # See man 5 containers-storage.conf for more information # The "container storage" table contains all of the server options. [storage] -# Default Storage Driver, Must be set for proper operation. -driver = "zfs" +# Default storage driver. Optional. +# driver = "zfs" # Temporary storage location -runroot = "/var/run/containers/storage" +# runroot = "/var/run/containers/storage" # Primary Read/Write location of container storage -graphroot = "/var/db/containers/storage" +# graphroot = "/var/db/containers/storage" # Optional value for image storage location # If set, it must be different than graphroot. @@ -36,8 +31,7 @@ graphroot = "/var/db/containers/storage" # AdditionalImageStores is used to pass paths to additional Read/Only image stores # Must be comma separated list. -additionalimagestores = [ -] +# additionalimagestores = [] # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID # ranges in the /etc/subuid and /etc/subgid file. 
These ranges will be partitioned diff --git a/vendor/go.podman.io/storage/store.go b/vendor/go.podman.io/storage/store.go index 3d8ea5075..36ffbafe4 100644 --- a/vendor/go.podman.io/storage/store.go +++ b/vendor/go.podman.io/storage/store.go @@ -25,13 +25,13 @@ import ( "github.com/sirupsen/logrus" drivers "go.podman.io/storage/drivers" "go.podman.io/storage/internal/dedup" + "go.podman.io/storage/internal/driver" "go.podman.io/storage/internal/tempdir" "go.podman.io/storage/pkg/archive" "go.podman.io/storage/pkg/directory" "go.podman.io/storage/pkg/idtools" "go.podman.io/storage/pkg/ioutils" "go.podman.io/storage/pkg/lockfile" - "go.podman.io/storage/pkg/parsers" "go.podman.io/storage/pkg/stringutils" "go.podman.io/storage/pkg/system" "go.podman.io/storage/types" @@ -807,7 +807,7 @@ type store struct { // return // } func GetStore(options types.StoreOptions) (Store, error) { - defaultOpts, err := types.Options() + defaultOpts, err := types.DefaultStoreOptions() if err != nil { return nil, err } @@ -861,14 +861,6 @@ func GetStore(options types.StoreOptions) (Store, error) { return nil, err } } - if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0o700); err != nil { - return nil, err - } - if options.ImageStore != "" { - if err := os.MkdirAll(filepath.Join(options.ImageStore, options.GraphDriverName), 0o700); err != nil { - return nil, err - } - } graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock")) if err != nil { @@ -977,6 +969,16 @@ func (s *store) load() error { }(); err != nil { return err } + + if err := os.MkdirAll(filepath.Join(s.graphRoot, s.graphDriverName), 0o700); err != nil { + return err + } + if s.imageStoreDir != "" { + if err := os.MkdirAll(filepath.Join(s.imageStoreDir, s.graphDriverName), 0o700); err != nil { + return err + } + } + driverPrefix := s.graphDriverName + "-" imgStoreRoot := s.imageStoreDir @@ -1271,6 +1273,26 @@ func readAllLayerStores[T any](s *store, fn func(store 
roLayerStore) (T, bool, e return zeroRes, false, nil } +// readPrimaryLayerStore is a helper for working with store.getLayerStore(): +// It locks the store for reading, checks for updates, and calls fn() +// It returns the return value of fn, or its own error initializing the store. +// +// Most callers should call readAllLayerStores instead. +func readPrimaryLayerStore[T any](s *store, fn func(store rwLayerStore) (T, error)) (T, error) { + var zeroRes T // A zero value of T + + store, err := s.getLayerStore() + if err != nil { + return zeroRes, err + } + + if err := store.startReading(); err != nil { + return zeroRes, err + } + defer store.stopReading() + return fn(store) +} + // writeToLayerStore is a helper for working with store.getLayerStore(): // It locks the store for writing, checks for updates, and calls fn() // It returns the return value of fn, or its own error initializing the store. @@ -2646,7 +2668,7 @@ func (s *store) DeleteLayer(id string) (retErr error) { }() return s.writeToAllStores(func(rlstore rwLayerStore) error { if rlstore.Exists(id) { - if l, err := rlstore.Get(id); err != nil { + if l, err := rlstore.Get(id); err == nil { id = l.ID } layers, err := rlstore.Layers() @@ -3083,16 +3105,9 @@ func (s *store) Mounted(id string) (int, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } - rlstore, err := s.getLayerStore() - if err != nil { - return 0, err - } - if err := rlstore.startReading(); err != nil { - return 0, err - } - defer rlstore.stopReading() - - return rlstore.Mounted(id) + return readPrimaryLayerStore(s, func(store rwLayerStore) (int, error) { + return store.Mounted(id) + }) } func (s *store) UnmountImage(id string, force bool) (bool, error) { @@ -3387,41 +3402,48 @@ func (s *store) LayerSize(id string) (int64, error) { } func (s *store) LayerParentOwners(id string) ([]int, []int, error) { - rlstore, err := s.getLayerStore() - if err != nil { - return nil, nil, err - } - if err := rlstore.startReading(); 
err != nil { + var parentUIDs, parentGIDs []int + if _, err := readPrimaryLayerStore(s, func(store rwLayerStore) (struct{}, error) { + if store.Exists(id) { + u, g, err := store.ParentOwners(id) + if err != nil { + return struct{}{}, err + } + parentUIDs = u + parentGIDs = g + return struct{}{}, nil + } + return struct{}{}, ErrLayerUnknown + }); err != nil { return nil, nil, err } - defer rlstore.stopReading() - if rlstore.Exists(id) { - return rlstore.ParentOwners(id) - } - return nil, nil, ErrLayerUnknown + return parentUIDs, parentGIDs, nil } func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { - rlstore, err := s.getLayerStore() - if err != nil { - return nil, nil, err - } - if err := rlstore.startReading(); err != nil { - return nil, nil, err - } - defer rlstore.stopReading() - if err := s.containerStore.startReading(); err != nil { - return nil, nil, err - } - defer s.containerStore.stopReading() - container, err := s.containerStore.Get(id) - if err != nil { + var parentUIDs, parentGIDs []int + if _, err := readPrimaryLayerStore(s, func(store rwLayerStore) (struct{}, error) { + _, _, err := readContainerStore(s, func() (struct{}, bool, error) { + container, err := s.containerStore.Get(id) + if err != nil { + return struct{}{}, true, err + } + if store.Exists(container.LayerID) { + u, g, err := store.ParentOwners(container.LayerID) + if err != nil { + return struct{}{}, true, err + } + parentUIDs = u + parentGIDs = g + return struct{}{}, true, nil + } + return struct{}{}, true, ErrLayerUnknown + }) + return struct{}{}, err + }); err != nil { return nil, nil, err } - if rlstore.Exists(container.LayerID) { - return rlstore.ParentOwners(container.LayerID) - } - return nil, nil, ErrLayerUnknown + return parentUIDs, parentGIDs, nil } func (s *store) Layers() ([]Layer, error) { @@ -3499,6 +3521,12 @@ func (s *store) LookupAdditionalLayer(tocDigest digest.Digest, imageref string) } return nil, err } + succeeded := false + defer func() { + if 
!succeeded { + al.Release() + } + }() info, err := al.Info() if err != nil { return nil, err @@ -3508,6 +3536,7 @@ func (s *store) LookupAdditionalLayer(tocDigest digest.Digest, imageref string) if err := json.NewDecoder(info).Decode(&layer); err != nil { return nil, err } + succeeded = true return &additionalLayer{&layer, al, s}, nil } @@ -3923,27 +3952,9 @@ const AutoUserNsMaxSize = 65536 // creating a user namespace. const RootAutoUserNsUser = "containers" -// SetDefaultConfigFilePath sets the default configuration to the specified path, and loads the file. -// Deprecated: Use types.SetDefaultConfigFilePath, which can return an error. -func SetDefaultConfigFilePath(path string) { - _ = types.SetDefaultConfigFilePath(path) -} - -// DefaultConfigFile returns the path to the storage config file used -func DefaultConfigFile() (string, error) { - return types.DefaultConfigFile() -} - -// ReloadConfigurationFile parses the specified configuration file and overrides -// the configuration in storeOptions. -// Deprecated: Use types.ReloadConfigurationFile, which can return an error. 
-func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions) { - _ = types.ReloadConfigurationFile(configFile, storeOptions) -} - // GetDefaultMountOptions returns the default mountoptions defined in container/storage func GetDefaultMountOptions() ([]string, error) { - defaultStoreOptions, err := types.Options() + defaultStoreOptions, err := types.DefaultStoreOptions() if err != nil { return nil, err } @@ -3951,18 +3962,13 @@ func GetDefaultMountOptions() ([]string, error) { } // GetMountOptions returns the mountoptions for the specified driver and graphDriverOptions -func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) { - mountOpts := []string{ - ".mountopt", - fmt.Sprintf("%s.mountopt", driver), - } +func GetMountOptions(usedDriver string, graphDriverOptions []string) ([]string, error) { for _, option := range graphDriverOptions { - key, val, err := parsers.ParseKeyValueOpt(option) + optDriver, key, val, err := driver.ParseDriverOption(option) if err != nil { return nil, err } - key = strings.ToLower(key) - if slices.Contains(mountOpts, key) { + if (optDriver == "" || optDriver == usedDriver) && key == "mountopt" { return strings.Split(val, ","), nil } } diff --git a/vendor/go.podman.io/storage/types/options.go b/vendor/go.podman.io/storage/types/options.go index 8af7c9c40..bb6046136 100644 --- a/vendor/go.podman.io/storage/types/options.go +++ b/vendor/go.podman.io/storage/types/options.go @@ -1,19 +1,16 @@ package types import ( - "errors" "fmt" "os" "path/filepath" - "slices" + "strconv" "strings" "sync" - "time" - "github.com/BurntSushi/toml" "github.com/sirupsen/logrus" cfg "go.podman.io/storage/pkg/config" - "go.podman.io/storage/pkg/fileutils" + "go.podman.io/storage/pkg/configfile" "go.podman.io/storage/pkg/homedir" "go.podman.io/storage/pkg/idtools" "go.podman.io/storage/pkg/unshare" @@ -23,7 +20,7 @@ import ( type TomlConfig struct { Storage struct { Driver string `toml:"driver,omitempty"` - 
DriverPriority []string `toml:"driver_priority,omitempty"` + DriverPriority configfile.Slice `toml:"driver_priority,omitempty"` RunRoot string `toml:"runroot,omitempty"` ImageStore string `toml:"imagestore,omitempty"` GraphRoot string `toml:"graphroot,omitempty"` @@ -39,89 +36,6 @@ const ( storageConfEnv = "CONTAINERS_STORAGE_CONF" ) -var ( - defaultStoreOptionsOnce sync.Once - loadDefaultStoreOptionsErr error - once sync.Once - storeOptions StoreOptions - storeError error - defaultConfigFileSet bool - // defaultConfigFile path to the system wide storage.conf file - defaultConfigFile = SystemConfigFile - // DefaultStoreOptions is a reasonable default set of options. - defaultStoreOptions StoreOptions -) - -func loadDefaultStoreOptions() { - defaultStoreOptions.GraphDriverName = "" - - setDefaults := func() { - // reload could set values to empty for run and graph root if config does not contains anything - if defaultStoreOptions.RunRoot == "" { - defaultStoreOptions.RunRoot = defaultRunRoot - } - if defaultStoreOptions.GraphRoot == "" { - defaultStoreOptions.GraphRoot = defaultGraphRoot - } - } - setDefaults() - - if path, ok := os.LookupEnv(storageConfEnv); ok { - defaultOverrideConfigFile = path - if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil { - loadDefaultStoreOptionsErr = err - return - } - setDefaults() - return - } - - if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok { - homeConfigFile := filepath.Join(path, "containers", "storage.conf") - if err := fileutils.Exists(homeConfigFile); err == nil { - // user storage.conf in XDG_CONFIG_HOME if it exists - defaultOverrideConfigFile = homeConfigFile - } else { - if !os.IsNotExist(err) { - loadDefaultStoreOptionsErr = err - return - } - } - } - - err := fileutils.Exists(defaultOverrideConfigFile) - if err == nil { - // The DefaultConfigFile() function returns the path - // of the used storage.conf file, by returning defaultConfigFile - // If override exists containers/storage 
uses it by default. - defaultConfigFile = defaultOverrideConfigFile - if err := ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions); err != nil { - loadDefaultStoreOptionsErr = err - return - } - setDefaults() - return - } - - if !os.IsNotExist(err) { - logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) - } - if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) { - loadDefaultStoreOptionsErr = err - return - } - setDefaults() -} - -// loadStoreOptions returns the default storage ops for containers -func loadStoreOptions() (StoreOptions, error) { - storageConf, err := DefaultConfigFile() - if err != nil { - return defaultStoreOptions, err - } - return loadStoreOptionsFromConfFile(storageConf) -} - // usePerUserStorage returns whether the user private storage must be used. // We cannot simply use the unshare.IsRootless() condition, because // that checks only if the current process needs a user namespace to @@ -132,96 +46,16 @@ func usePerUserStorage() bool { return unshare.IsRootless() && unshare.GetRootlessUID() != 0 } -// loadStoreOptionsFromConfFile is an internal implementation detail of DefaultStoreOptions to allow testing. -// Everyone but the tests this is intended for should only call loadStoreOptions, never this function. 
-func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) { - var ( - defaultRootlessRunRoot string - defaultRootlessGraphRoot string - err error - ) - - defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) - if loadDefaultStoreOptionsErr != nil { - return StoreOptions{}, loadDefaultStoreOptionsErr - } - storageOpts := defaultStoreOptions - if usePerUserStorage() { - storageOpts, err = getRootlessStorageOpts(storageOpts) - if err != nil { - return storageOpts, err - } - } - err = fileutils.Exists(storageConf) - if err != nil && !os.IsNotExist(err) { - return storageOpts, err - } - if err == nil && !defaultConfigFileSet { - defaultRootlessRunRoot = storageOpts.RunRoot - defaultRootlessGraphRoot = storageOpts.GraphRoot - storageOpts = StoreOptions{} - reloadConfigurationFileIfNeeded(storageConf, &storageOpts) - // If the file did not specify a graphroot or runroot, - // set sane defaults so we don't try and use root-owned - // directories - if storageOpts.RunRoot == "" { - storageOpts.RunRoot = defaultRootlessRunRoot - } - if storageOpts.GraphRoot == "" { - if storageOpts.RootlessStoragePath != "" { - storageOpts.GraphRoot = storageOpts.RootlessStoragePath - } else { - storageOpts.GraphRoot = defaultRootlessGraphRoot - } - } - } - if storageOpts.RunRoot == "" { - return storageOpts, fmt.Errorf("runroot must be set") - } - rootlessUID := unshare.GetRootlessUID() - runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID) - if err != nil { - return storageOpts, err - } - storageOpts.RunRoot = runRoot - - if storageOpts.GraphRoot == "" { - return storageOpts, fmt.Errorf("graphroot must be set") - } - graphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID) - if err != nil { - return storageOpts, err - } - storageOpts.GraphRoot = graphRoot - - if storageOpts.RootlessStoragePath != "" { - storagePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID) - if err != nil { - return storageOpts, err - } - 
storageOpts.RootlessStoragePath = storagePath - } - - if storageOpts.ImageStore != "" && storageOpts.ImageStore == storageOpts.GraphRoot { - return storageOpts, fmt.Errorf("imagestore %s must either be not set or be a different than graphroot", storageOpts.ImageStore) - } - - return storageOpts, nil -} - -// UpdateOptions should be called iff container engine received a SIGHUP, -// otherwise use DefaultStoreOptions -func UpdateStoreOptions() (StoreOptions, error) { - storeOptions, storeError = loadStoreOptions() - return storeOptions, storeError -} +// defaultStoreOptions is kept private so external callers can not reassign the value +var defaultStoreOptions = sync.OnceValues(func() (StoreOptions, error) { + return LoadStoreOptions(LoadOptions{}) +}) -// DefaultStoreOptions returns the default storage ops for containers +// DefaultStoreOptions is returning the default [StoreOptions] parsed with the storage.conf files. +// This function caches the result so multiple callers always will get the same result. +// In order to parse storage.conf files at a later point use [LoadStoreOptions]. func DefaultStoreOptions() (StoreOptions, error) { - once.Do(func() { - storeOptions, storeError = loadStoreOptions() - }) - return storeOptions, storeError + return defaultStoreOptions() } // StoreOptions is used for passing initialization options to GetStore(), for @@ -237,9 +71,6 @@ type StoreOptions struct { // Image Store is the alternate location of image store if a location // separate from the container store is required. ImageStore string `json:"imagestore,omitempty"` - // RootlessStoragePath is the storage path for rootless users - // default $HOME/.local/share/containers/storage - RootlessStoragePath string `toml:"rootless_storage_path"` // If the driver is not specified, the best suited driver will be picked // either from GraphDriverPriority, if specified, or from the platform // dependent priority list (in that order). 
@@ -272,213 +103,100 @@ type StoreOptions struct { TransientStore bool `json:"transient_store,omitempty"` } -// isRootlessDriver returns true if the given storage driver is valid for containers running as non root -func isRootlessDriver(driver string) bool { - validDrivers := map[string]bool{ - "btrfs": true, - "overlay": true, - "overlay2": true, - "vfs": true, - } - return validDrivers[driver] -} - -// getRootlessStorageOpts returns the storage opts for containers running as non root -func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) { - var opts StoreOptions - - rootlessUID := unshare.GetRootlessUID() - +// setDefaultRootlessStoreOptions sets the storage opts for containers running as non root +func setDefaultRootlessStoreOptions(opts *StoreOptions) error { dataDir, err := homedir.GetDataHome() if err != nil { - return opts, err + return err } rootlessRuntime, err := homedir.GetRuntimeDir() if err != nil { - return opts, err + return err } + opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") opts.RunRoot = filepath.Join(rootlessRuntime, "containers") - if err := os.MkdirAll(opts.RunRoot, 0o700); err != nil { - return opts, fmt.Errorf("unable to make rootless runtime: %w", err) - } - - opts.PullOptions = systemOpts.PullOptions - if systemOpts.RootlessStoragePath != "" { - opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) - if err != nil { - return opts, err - } - } else { - opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") - } - if driver := systemOpts.GraphDriverName; isRootlessDriver(driver) { - opts.GraphDriverName = driver - } - if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { - opts.GraphDriverName = driver - } - if opts.GraphDriverName == overlay2 { - logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") - opts.GraphDriverName = overlayDriver - } - - // If the configuration file was explicitly set, then copy all the options - // 
present. - if defaultConfigFileSet { - opts.GraphDriverOptions = systemOpts.GraphDriverOptions - opts.ImageStore = systemOpts.ImageStore - } else if opts.GraphDriverName == overlayDriver { - for _, o := range systemOpts.GraphDriverOptions { - if strings.Contains(o, "ignore_chown_errors") { - opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) - break - } - } - } - if opts.GraphDriverName == "" { - if len(systemOpts.GraphDriverPriority) == 0 { - dirEntries, err := os.ReadDir(opts.GraphRoot) - if err == nil { - for _, entry := range dirEntries { - if name, ok := strings.CutSuffix(entry.Name(), "-images"); ok { - opts.GraphDriverName = name - break - } - } - } - - if opts.GraphDriverName == "" { - if canUseRootlessOverlay() { - opts.GraphDriverName = overlayDriver - } else { - opts.GraphDriverName = "vfs" - } - } - } else { - opts.GraphDriverPriority = systemOpts.GraphDriverPriority - } - } - - if os.Getenv("STORAGE_OPTS") != "" { - opts.GraphDriverOptions = slices.AppendSeq(opts.GraphDriverOptions, strings.SplitSeq(os.Getenv("STORAGE_OPTS"), ",")) - } - - return opts, nil + return nil } -var prevReloadConfig = struct { - storeOptions *StoreOptions - mod time.Time - mutex sync.Mutex - configFile string -}{} - -// SetDefaultConfigFilePath sets the default configuration to the specified path -func SetDefaultConfigFilePath(path string) error { - defaultConfigFile = path - defaultConfigFileSet = true - return ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) +type LoadOptions struct { + // RootForImplicitAbsolutePaths is the path to an alternate root + // If not "", prefixed to any absolute paths used by default in the package. + // NOTE: This does NOT affect paths starting by $HOME or environment variables paths. 
+ RootForImplicitAbsolutePaths string } -func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) error { - prevReloadConfig.mutex.Lock() - defer prevReloadConfig.mutex.Unlock() +// LoadStoreOptions is returning the default [StoreOptions] parsed with the storage.conf files. +// [LoadOptions] can be used to supply an alternative root for the storage.conf files. +// On each call all files will be parsed again, to only get the correct system defaults +// and have them cached consider using [DefaultStoreOptions] instead. +func LoadStoreOptions(opts LoadOptions) (StoreOptions, error) { + config := new(TomlConfig) - fi, err := os.Stat(configFile) + rootlessUID := unshare.GetRootlessUID() + err := configfile.ParseTOML(config, &configfile.File{ + Name: "storage", + Extension: "conf", + EnvironmentName: storageConfEnv, + UserId: rootlessUID, + RootForImplicitAbsolutePaths: opts.RootForImplicitAbsolutePaths, + }) if err != nil { - return err - } - - mtime := fi.ModTime() - if prevReloadConfig.storeOptions != nil && mtime.Equal(prevReloadConfig.mod) && prevReloadConfig.configFile == configFile { - *storeOptions = *prevReloadConfig.storeOptions - return nil + return StoreOptions{}, err } - if err := ReloadConfigurationFile(configFile, storeOptions); err != nil { - return err + storeOptions := StoreOptions{ + GraphRoot: defaultGraphRoot, + RunRoot: defaultRunRoot, } - - cOptions := *storeOptions - prevReloadConfig.storeOptions = &cOptions - prevReloadConfig.mod = mtime - prevReloadConfig.configFile = configFile - return nil -} - -// ReloadConfigurationFile parses the specified configuration file and overrides -// the configuration in storeOptions. 
-func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) error { - config := new(TomlConfig) - - meta, err := toml.DecodeFile(configFile, &config) - if err == nil { - keys := meta.Undecoded() - if len(keys) > 0 { - logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile) - } - } else { - if !os.IsNotExist(err) { - logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) - return err + if usePerUserStorage() { + err := setDefaultRootlessStoreOptions(&storeOptions) + if err != nil { + return StoreOptions{}, err } } - // Clear storeOptions of previous settings - *storeOptions = StoreOptions{} if config.Storage.Driver != "" { storeOptions.GraphDriverName = config.Storage.Driver } - if os.Getenv("STORAGE_DRIVER") != "" { - config.Storage.Driver = os.Getenv("STORAGE_DRIVER") - storeOptions.GraphDriverName = config.Storage.Driver + if val := os.Getenv("STORAGE_DRIVER"); val != "" { + storeOptions.GraphDriverName = val } if storeOptions.GraphDriverName == overlay2 { logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") storeOptions.GraphDriverName = overlayDriver } - storeOptions.GraphDriverPriority = config.Storage.DriverPriority - if storeOptions.GraphDriverName == "" && len(storeOptions.GraphDriverPriority) == 0 { - logrus.Warnf("The storage 'driver' option should be set in %s. 
A driver was picked automatically.", configFile) - } - if config.Storage.RunRoot != "" { - storeOptions.RunRoot = config.Storage.RunRoot - } - if config.Storage.GraphRoot != "" { - storeOptions.GraphRoot = config.Storage.GraphRoot - } + storeOptions.GraphDriverPriority = config.Storage.DriverPriority.Values + if config.Storage.ImageStore != "" { storeOptions.ImageStore = config.Storage.ImageStore } - if config.Storage.RootlessStoragePath != "" { - storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath - } - for _, s := range config.Storage.Options.AdditionalImageStores { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) + + for _, s := range config.Storage.Options.AdditionalImageStores.Values { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "imagestore="+s) } - for _, s := range config.Storage.Options.AdditionalLayerStores { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.additionallayerstore=%s", config.Storage.Driver, s)) + for _, s := range config.Storage.Options.AdditionalLayerStores.Values { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "additionallayerstore="+s) } if config.Storage.Options.Size != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "size="+config.Storage.Options.Size) } if config.Storage.Options.MountProgram != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "mount_program="+config.Storage.Options.MountProgram) } if config.Storage.Options.SkipMountHome != "" { - 
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "skip_mount_home="+config.Storage.Options.SkipMountHome) } if config.Storage.Options.IgnoreChownErrors != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "ignore_chown_errors="+config.Storage.Options.IgnoreChownErrors) } if config.Storage.Options.ForceMask != 0 { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.force_mask=%o", config.Storage.Driver, config.Storage.Options.ForceMask)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "force_mask="+strconv.FormatUint(uint64(config.Storage.Options.ForceMask), 8)) } if config.Storage.Options.MountOpt != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, "mountopt="+config.Storage.Options.MountOpt) } storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser if config.Storage.Options.AutoUsernsMinSize > 0 { @@ -494,7 +212,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile storeOptions.TransientStore = config.Storage.TransientStore - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) 
+ storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(config.Storage.Options)...) if opts, ok := os.LookupEnv("STORAGE_OPTS"); ok { storeOptions.GraphDriverOptions = strings.Split(opts, ",") @@ -502,46 +220,46 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" { storeOptions.GraphDriverOptions = nil } - return nil -} - -func Options() (StoreOptions, error) { - defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) - return defaultStoreOptions, loadDefaultStoreOptionsErr -} -// Save overwrites the tomlConfig in storage.conf with the given conf -func Save(conf TomlConfig) error { - configFile, err := DefaultConfigFile() - if err != nil { - return err + if config.Storage.RunRoot != "" { + runRoot, err := expandEnvPath(config.Storage.RunRoot, rootlessUID) + if err != nil { + return storeOptions, err + } + storeOptions.RunRoot = runRoot } - - if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil { - return err + if storeOptions.RunRoot == "" { + return storeOptions, fmt.Errorf("runroot must be set") } - f, err := os.Create(configFile) - if err != nil { - return err + // Parse this before graphroot which means any graphroot setting overwrites the + // rootless_storage_path, so if the main config has both set only graphroot is used. 
+ if config.Storage.RootlessStoragePath != "" && usePerUserStorage() { + storagePath, err := expandEnvPath(config.Storage.RootlessStoragePath, rootlessUID) + if err != nil { + return storeOptions, err + } + storeOptions.GraphRoot = storagePath } - return toml.NewEncoder(f).Encode(conf) -} - -// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it -func StorageConfig() (*TomlConfig, error) { - config := new(TomlConfig) + if config.Storage.GraphRoot != "" { + if config.Storage.RootlessStoragePath != "" { + logrus.Warn("Both rootless_storage_path and graphroot are set, using graphroot setting only, rootless_storage_path is deprecated and should be removed from the config") + } + graphRoot, err := expandEnvPath(config.Storage.GraphRoot, rootlessUID) + if err != nil { + return storeOptions, err + } + storeOptions.GraphRoot = graphRoot + } - configFile, err := DefaultConfigFile() - if err != nil { - return nil, err + if storeOptions.GraphRoot == "" { + return storeOptions, fmt.Errorf("graphroot must be set") } - _, err = toml.DecodeFile(configFile, &config) - if err != nil { - return nil, err + if storeOptions.ImageStore != "" && storeOptions.ImageStore == storeOptions.GraphRoot { + return storeOptions, fmt.Errorf("imagestore %s must either be not set or be different than graphroot", storeOptions.ImageStore) } - return config, nil + return storeOptions, nil } diff --git a/vendor/go.podman.io/storage/types/options_bsd.go b/vendor/go.podman.io/storage/types/options_bsd.go index 040fdc797..a93ebc341 100644 --- a/vendor/go.podman.io/storage/types/options_bsd.go +++ b/vendor/go.podman.io/storage/types/options_bsd.go @@ -7,15 +7,4 @@ const ( // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/var/run/containers/storage" defaultGraphRoot string = "/var/db/containers/storage" - SystemConfigFile = "/usr/local/share/containers/storage.conf" ) - -// defaultConfigFile path to the system wide storage.conf file -var ( - 
defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" -) - -// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay() bool { - return false -} diff --git a/vendor/go.podman.io/storage/types/options_darwin.go b/vendor/go.podman.io/storage/types/options_darwin.go index 27ba6a061..1f70b9c7a 100644 --- a/vendor/go.podman.io/storage/types/options_darwin.go +++ b/vendor/go.podman.io/storage/types/options_darwin.go @@ -5,12 +5,4 @@ const ( // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/run/containers/storage" defaultGraphRoot string = "/var/lib/containers/storage" - SystemConfigFile = "/usr/share/containers/storage.conf" ) - -var defaultOverrideConfigFile = "/etc/containers/storage.conf" - -// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay() bool { - return false -} diff --git a/vendor/go.podman.io/storage/types/options_linux.go b/vendor/go.podman.io/storage/types/options_linux.go index 09cbae54b..1f70b9c7a 100644 --- a/vendor/go.podman.io/storage/types/options_linux.go +++ b/vendor/go.podman.io/storage/types/options_linux.go @@ -1,52 +1,8 @@ package types -import ( - "os/exec" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - const ( // these are default path for run and graph root for rootful users // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/run/containers/storage" defaultGraphRoot string = "/var/lib/containers/storage" - SystemConfigFile = "/usr/share/containers/storage.conf" ) - -// defaultConfigFile path to the system wide storage.conf file -var ( - defaultOverrideConfigFile = "/etc/containers/storage.conf" -) - -// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay() bool { - // we check first for fuse-overlayfs since it is cheaper. 
- if path, _ := exec.LookPath("fuse-overlayfs"); path != "" { - return true - } - - // We cannot use overlay.SupportsNativeOverlay since canUseRootlessOverlay is called by Podman - // before we enter the user namespace and the driver we pick here is written in the podman database. - // Checking the kernel version is usually not a good idea since the feature could be back-ported, e.g. RHEL - // but this is just an heuristic and on RHEL we always install the storage.conf file. - // native overlay for rootless was added upstream in 5.13 (at least the first version that we support), so check - // that the kernel is >= 5.13. - var uts unix.Utsname - if err := unix.Uname(&uts); err == nil { - parts := strings.Split(string(uts.Release[:]), ".") - major, _ := strconv.Atoi(parts[0]) - if major >= 6 { - return true - } - if major == 5 && len(parts) > 1 { - minor, _ := strconv.Atoi(parts[1]) - if minor >= 13 { - return true - } - } - } - return false -} diff --git a/vendor/go.podman.io/storage/types/options_windows.go b/vendor/go.podman.io/storage/types/options_windows.go index 99a67ff21..1f70b9c7a 100644 --- a/vendor/go.podman.io/storage/types/options_windows.go +++ b/vendor/go.podman.io/storage/types/options_windows.go @@ -5,15 +5,4 @@ const ( // for rootless path is constructed via getRootlessStorageOpts defaultRunRoot string = "/run/containers/storage" defaultGraphRoot string = "/var/lib/containers/storage" - SystemConfigFile = "/usr/share/containers/storage.conf" ) - -// defaultConfigFile path to the system wide storage.conf file -var ( - defaultOverrideConfigFile = "/etc/containers/storage.conf" -) - -// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers -func canUseRootlessOverlay() bool { - return false -} diff --git a/vendor/go.podman.io/storage/types/storage_test.conf b/vendor/go.podman.io/storage/types/storage_test.conf index 761b3a795..be074b942 100644 --- a/vendor/go.podman.io/storage/types/storage_test.conf +++ 
b/vendor/go.podman.io/storage/types/storage_test.conf @@ -8,14 +8,14 @@ driver = "" # Temporary storage location -runroot = "$HOME/$UID/containers/storage" +runroot = "/run/$UID/containers/storage" # Primary Read/Write location of container storage graphroot = "$HOME/$UID/containers/storage" # Storage path for rootless users # -rootless_storage_path = "$HOME/$UID/containers/storage" +rootless_storage_path = "$HOME/$UID/rootless/storage" [storage.options] # Storage options to be passed to underlying storage drivers diff --git a/vendor/go.podman.io/storage/types/utils.go b/vendor/go.podman.io/storage/types/utils.go index fd25eaa07..d19959ee4 100644 --- a/vendor/go.podman.io/storage/types/utils.go +++ b/vendor/go.podman.io/storage/types/utils.go @@ -1,15 +1,10 @@ package types import ( - "errors" "os" "path/filepath" "strconv" "strings" - - "github.com/sirupsen/logrus" - "go.podman.io/storage/pkg/fileutils" - "go.podman.io/storage/pkg/homedir" ) func expandEnvPath(path string, rootlessUID int) (string, error) { @@ -22,56 +17,3 @@ func expandEnvPath(path string, rootlessUID int) (string, error) { } return newpath, nil } - -func DefaultConfigFile() (string, error) { - if defaultConfigFileSet { - return defaultConfigFile, nil - } - - if path, ok := os.LookupEnv(storageConfEnv); ok { - return path, nil - } - if !usePerUserStorage() { - if err := fileutils.Exists(defaultOverrideConfigFile); err == nil { - return defaultOverrideConfigFile, nil - } - return defaultConfigFile, nil - } - - if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" { - return filepath.Join(configHome, "containers/storage.conf"), nil - } - home := homedir.Get() - if home == "" { - return "", errors.New("cannot determine user's homedir") - } - return filepath.Join(home, ".config/containers/storage.conf"), nil -} - -func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) { - prevReloadConfig.mutex.Lock() - defer prevReloadConfig.mutex.Unlock() - - fi, err := 
os.Stat(configFile) - if err != nil { - if !os.IsNotExist(err) { - logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) - } - return - } - - mtime := fi.ModTime() - if prevReloadConfig.storeOptions != nil && mtime.Equal(prevReloadConfig.mod) && prevReloadConfig.configFile == configFile { - *storeOptions = *prevReloadConfig.storeOptions - return - } - - if err := ReloadConfigurationFile(configFile, storeOptions); err != nil { - logrus.Warningf("Failed to reload %q %v\n", configFile, err) - return - } - - prevReloadConfig.storeOptions = storeOptions - prevReloadConfig.mod = mtime - prevReloadConfig.configFile = configFile -} diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 3ca814f54..1157b06d8 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -163,42 +163,7 @@ func (p *Proc) Addr() uintptr { // (according to the semantics of the specific function being called) before consulting // the error. The error will be guaranteed to contain windows.Errno. 
func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { - switch len(a) { - case 0: - return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) - case 1: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) - case 2: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) - case 3: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) - case 4: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) - case 5: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) - case 6: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) - case 7: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) - case 8: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) - case 9: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) - case 10: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) - case 11: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) - case 12: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) - case 13: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) - case 14: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) - case 15: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) - default: - panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") - } + return 
syscall.SyscallN(p.Addr(), a...) } // A LazyDLL implements access to a single DLL. diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index a8b0364c7..6c955cea1 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1438,13 +1438,17 @@ func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } // GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security -// descriptor result on the Go heap. +// descriptor result on the Go heap. The security descriptor might be nil, even when err is nil, if the object exists +// but has no security descriptor. func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { var winHeapSD *SECURITY_DESCRIPTOR err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) if err != nil { return } + if winHeapSD == nil { + return nil, nil + } defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) return winHeapSD.copySelfRelativeSecurityDescriptor(), nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 079650e69..697ed2b2a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -128,8 +128,8 @@ github.com/inconshreveable/mousetrap # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.18.4 -## explicit; go 1.23 +# github.com/klauspost/compress v1.18.6 +## explicit; go 1.24 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -334,8 +334,8 @@ go.podman.io/image/v5/transports go.podman.io/image/v5/transports/alltransports go.podman.io/image/v5/types go.podman.io/image/v5/version -# go.podman.io/storage v1.62.1-0.20260310180906-9819c3739308 -## explicit; go 1.24.0 
+# go.podman.io/storage v1.62.1-0.20260430194920-3ceb1b29d72d +## explicit; go 1.25.0 go.podman.io/storage go.podman.io/storage/drivers go.podman.io/storage/drivers/btrfs @@ -347,6 +347,7 @@ go.podman.io/storage/drivers/register go.podman.io/storage/drivers/vfs go.podman.io/storage/drivers/zfs go.podman.io/storage/internal/dedup +go.podman.io/storage/internal/driver go.podman.io/storage/internal/rawfilelock go.podman.io/storage/internal/staging_lockfile go.podman.io/storage/internal/tempdir @@ -359,6 +360,7 @@ go.podman.io/storage/pkg/chunked/internal/minimal go.podman.io/storage/pkg/chunked/internal/path go.podman.io/storage/pkg/chunked/toc go.podman.io/storage/pkg/config +go.podman.io/storage/pkg/configfile go.podman.io/storage/pkg/directory go.podman.io/storage/pkg/fileutils go.podman.io/storage/pkg/fsutils @@ -396,7 +398,7 @@ golang.org/x/net/context ## explicit; go 1.25.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.42.0 +# golang.org/x/sys v0.43.0 ## explicit; go 1.25.0 golang.org/x/sys/unix golang.org/x/sys/windows