浏览代码

vendor: github.com/klauspost/compress v1.15.12

full diff: https://github.com/klauspost/compress/compare/v1.15.9...v1.15.12

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2 年之前
父节点
当前提交
38adf7a694
共有 26 个文件被更改,包括 253 次插入,95 次删除
  1. 1 1
      vendor.mod
  2. 2 2
      vendor.sum
  3. 28 3
      vendor/github.com/klauspost/compress/README.md
  4. 22 14
      vendor/github.com/klauspost/compress/huff0/decompress.go
  5. 4 0
      vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
  6. 0 1
      vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
  7. 11 7
      vendor/github.com/klauspost/compress/huff0/decompress_generic.go
  8. 5 1
      vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
  9. 2 0
      vendor/github.com/klauspost/compress/zstd/README.md
  10. 2 3
      vendor/github.com/klauspost/compress/zstd/blockdec.go
  11. 1 2
      vendor/github.com/klauspost/compress/zstd/bytebuf.go
  12. 35 9
      vendor/github.com/klauspost/compress/zstd/decoder.go
  13. 35 9
      vendor/github.com/klauspost/compress/zstd/decoder_options.go
  14. 1 0
      vendor/github.com/klauspost/compress/zstd/enc_best.go
  15. 16 7
      vendor/github.com/klauspost/compress/zstd/enc_better.go
  16. 5 2
      vendor/github.com/klauspost/compress/zstd/enc_dfast.go
  17. 5 3
      vendor/github.com/klauspost/compress/zstd/enc_fast.go
  18. 29 7
      vendor/github.com/klauspost/compress/zstd/framedec.go
  19. 2 1
      vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
  20. 0 1
      vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
  21. 9 12
      vendor/github.com/klauspost/compress/zstd/history.go
  22. 20 2
      vendor/github.com/klauspost/compress/zstd/seqdec.go
  23. 14 3
      vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
  24. 0 1
      vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
  25. 2 2
      vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
  26. 2 2
      vendor/modules.txt

+ 1 - 1
vendor.mod

@@ -48,7 +48,7 @@ require (
 	github.com/hashicorp/serf v0.8.5
 	github.com/hashicorp/serf v0.8.5
 	github.com/imdario/mergo v0.3.12
 	github.com/imdario/mergo v0.3.12
 	github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
 	github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
-	github.com/klauspost/compress v1.15.9
+	github.com/klauspost/compress v1.15.12
 	github.com/miekg/dns v1.1.43
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 	github.com/moby/buildkit v0.10.6
 	github.com/moby/buildkit v0.10.6

+ 2 - 2
vendor.sum

@@ -733,8 +733,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
-github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

+ 28 - 3
vendor/github.com/klauspost/compress/README.md

@@ -17,6 +17,30 @@ This package provides various compression algorithms.
 
 
 # changelog
 # changelog
 
 
+* Sept 26, 2022 (v1.15.11)
+
+	* flate: Improve level 1-3 compression  https://github.com/klauspost/compress/pull/678
+	* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+	* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+	* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+	* Add Go 1.19 - deprecate Go 1.16  https://github.com/klauspost/compress/pull/651
+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+	* zstd: Improve "better" compression  https://github.com/klauspost/compress/pull/657
+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+	* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+	* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+	* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+	* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
 * July 13, 2022 (v1.15.8)
 * July 13, 2022 (v1.15.8)
 
 
 	* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
 	* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
@@ -91,15 +115,15 @@ This package provides various compression algorithms.
 	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
 	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
 	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
 	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
 
 
-<details>
-	<summary>See  Details</summary>
 Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
 Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
 
 
 Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
 Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
 
 
 While the release has been extensively tested, it is recommended to test when upgrading.
 While the release has been extensively tested, it is recommended to test when upgrading.
-</details>
 
 
+<details>
+	<summary>See changes to v1.14.x</summary>
+	
 * Feb 22, 2022 (v1.14.4)
 * Feb 22, 2022 (v1.14.4)
 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
@@ -125,6 +149,7 @@ While the release has been extensively tested, it is recommended to testing when
 	* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
 	* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
 	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
 	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+</details>
 
 
 <details>
 <details>
 	<summary>See changes to v1.13.x</summary>
 	<summary>See changes to v1.13.x</summary>

+ 22 - 14
vendor/github.com/klauspost/compress/huff0/decompress.go

@@ -763,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 		}
 	}
 	}
 	if off > 0 {
 	if off > 0 {
@@ -997,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 			}
+
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			// copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 		}
 	}
 	}
 	if off > 0 {
 	if off > 0 {

+ 4 - 0
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go

@@ -14,12 +14,14 @@ import (
 
 
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog > 8.
 // of Decompress4X when tablelog > 8.
+//
 //go:noescape
 //go:noescape
 func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 
 
 // decompress4x_8b_loop_x86 is an x86 assembler implementation
 // decompress4x_8b_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog <= 8 which decodes 4 entries
 // of Decompress4X when tablelog <= 8 which decodes 4 entries
 // per loop.
 // per loop.
+//
 //go:noescape
 //go:noescape
 func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
 func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
 
 
@@ -145,11 +147,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 
 
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress1X when tablelog > 8.
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 //go:noescape
 func decompress1x_main_loop_amd64(ctx *decompress1xContext)
 func decompress1x_main_loop_amd64(ctx *decompress1xContext)
 
 
 // decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
 // decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
 // of Decompress1X when tablelog > 8.
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 //go:noescape
 func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
 func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
 
 

+ 0 - 1
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s

@@ -1,7 +1,6 @@
 // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
 // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
 
 
 //go:build amd64 && !appengine && !noasm && gc
 //go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
 
 
 // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 TEXT ·decompress4x_main_loop_amd64(SB), $0-8
 TEXT ·decompress4x_main_loop_amd64(SB), $0-8

+ 11 - 7
vendor/github.com/klauspost/compress/huff0/decompress_generic.go

@@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			//copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 		}
 	}
 	}
 	if off > 0 {
 	if off > 0 {

+ 5 - 1
vendor/github.com/klauspost/compress/internal/snapref/encode_other.go

@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 //
 //
 // It assumes that:
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	dst is long enough to hold the encoded bytes
 //	1 <= len(lit) && len(lit) <= 65536
 //	1 <= len(lit) && len(lit) <= 65536
 func emitLiteral(dst, lit []byte) int {
 func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
 // emitCopy writes a copy chunk and returns the number of bytes written.
 // emitCopy writes a copy chunk and returns the number of bytes written.
 //
 //
 // It assumes that:
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	dst is long enough to hold the encoded bytes
 //	1 <= offset && offset <= 65535
 //	1 <= offset && offset <= 65535
 //	4 <= length && length <= 65535
 //	4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
 // src[i:i+k-j] and src[j:k] have the same contents.
 // src[i:i+k-j] and src[j:k] have the same contents.
 //
 //
 // It assumes that:
 // It assumes that:
+//
 //	0 <= i && i < j && j <= len(src)
 //	0 <= i && i < j && j <= len(src)
 func extendMatch(src []byte, i, j int) int {
 func extendMatch(src []byte, i, j int) int {
 	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
 	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
 // been written.
 // been written.
 //
 //
 // It also assumes that:
 // It also assumes that:
+//
 //	len(dst) >= MaxEncodedLen(len(src)) &&
 //	len(dst) >= MaxEncodedLen(len(src)) &&
-// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlock(dst, src []byte) (d int) {
 func encodeBlock(dst, src []byte) (d int) {
 	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
 	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
 	// The table element type is uint16, as s < sLimit and sLimit < len(src)
 	// The table element type is uint16, as s < sLimit and sLimit < len(src)

+ 2 - 0
vendor/github.com/klauspost/compress/zstd/README.md

@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
 
 
 Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
 Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
 
 
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
 ## Installation
 ## Installation
 
 
 Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
 Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.

+ 2 - 3
vendor/github.com/klauspost/compress/zstd/blockdec.go

@@ -10,7 +10,6 @@ import (
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
-	"io/ioutil"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
 	"sync"
 	"sync"
@@ -233,7 +232,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
 			if b.lowMem {
 			if b.lowMem {
 				b.dst = make([]byte, b.RLESize)
 				b.dst = make([]byte, b.RLESize)
 			} else {
 			} else {
-				b.dst = make([]byte, maxBlockSize)
+				b.dst = make([]byte, maxCompressedBlockSize)
 			}
 			}
 		}
 		}
 		b.dst = b.dst[:b.RLESize]
 		b.dst = b.dst[:b.RLESize]
@@ -651,7 +650,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
 		buf.Write(in)
 		buf.Write(in)
-		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+		os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
 	}
 	}
 
 
 	return nil
 	return nil

+ 1 - 2
vendor/github.com/klauspost/compress/zstd/bytebuf.go

@@ -7,7 +7,6 @@ package zstd
 import (
 import (
 	"fmt"
 	"fmt"
 	"io"
 	"io"
-	"io/ioutil"
 )
 )
 
 
 type byteBuffer interface {
 type byteBuffer interface {
@@ -124,7 +123,7 @@ func (r *readerWrapper) readByte() (byte, error) {
 }
 }
 
 
 func (r *readerWrapper) skipN(n int64) error {
 func (r *readerWrapper) skipN(n int64) error {
-	n2, err := io.CopyN(ioutil.Discard, r.r, n)
+	n2, err := io.CopyN(io.Discard, r.r, n)
 	if n2 != n {
 	if n2 != n {
 		err = io.ErrUnexpectedEOF
 		err = io.ErrUnexpectedEOF
 	}
 	}

+ 35 - 9
vendor/github.com/klauspost/compress/zstd/decoder.go

@@ -35,6 +35,7 @@ type Decoder struct {
 		br           readerWrapper
 		br           readerWrapper
 		enabled      bool
 		enabled      bool
 		inFrame      bool
 		inFrame      bool
+		dstBuf       []byte
 	}
 	}
 
 
 	frame *frameDec
 	frame *frameDec
@@ -187,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error {
 	}
 	}
 
 
 	// If bytes buffer and < 5MB, do sync decoding anyway.
 	// If bytes buffer and < 5MB, do sync decoding anyway.
-	if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
 		bb2 := bb
 		bb2 := bb
 		if debugDecoder {
 		if debugDecoder {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
 		}
 		b := bb2.Bytes()
 		b := bb2.Bytes()
 		var dst []byte
 		var dst []byte
-		if cap(d.current.b) > 0 {
-			dst = d.current.b
+		if cap(d.syncStream.dstBuf) > 0 {
+			dst = d.syncStream.dstBuf[:0]
 		}
 		}
 
 
-		dst, err := d.DecodeAll(b, dst[:0])
+		dst, err := d.DecodeAll(b, dst)
 		if err == nil {
 		if err == nil {
 			err = io.EOF
 			err = io.EOF
 		}
 		}
+		// Save output buffer
+		d.syncStream.dstBuf = dst
 		d.current.b = dst
 		d.current.b = dst
 		d.current.err = err
 		d.current.err = err
 		d.current.flushed = true
 		d.current.flushed = true
@@ -216,6 +219,7 @@ func (d *Decoder) Reset(r io.Reader) error {
 	d.current.err = nil
 	d.current.err = nil
 	d.current.flushed = false
 	d.current.flushed = false
 	d.current.d = nil
 	d.current.d = nil
+	d.syncStream.dstBuf = nil
 
 
 	// Ensure no-one else is still running...
 	// Ensure no-one else is still running...
 	d.streamWg.Wait()
 	d.streamWg.Wait()
@@ -312,6 +316,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 	// Grab a block decoder and frame decoder.
 	// Grab a block decoder and frame decoder.
 	block := <-d.decoders
 	block := <-d.decoders
 	frame := block.localFrame
 	frame := block.localFrame
+	initialSize := len(dst)
 	defer func() {
 	defer func() {
 		if debugDecoder {
 		if debugDecoder {
 			printf("re-adding decoder: %p", block)
 			printf("re-adding decoder: %p", block)
@@ -354,7 +359,16 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			return dst, ErrWindowSizeExceeded
 			return dst, ErrWindowSizeExceeded
 		}
 		}
 		if frame.FrameContentSize != fcsUnknown {
 		if frame.FrameContentSize != fcsUnknown {
-			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+				}
+				return dst, ErrDecoderSizeExceeded
+			}
+			if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+				}
 				return dst, ErrDecoderSizeExceeded
 				return dst, ErrDecoderSizeExceeded
 			}
 			}
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +378,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			}
 			}
 		}
 		}
 
 
-		if cap(dst) == 0 {
+		if cap(dst) == 0 && !d.o.limitToCap {
 			// Allocate len(input) * 2 by default if nothing is provided
 			// Allocate len(input) * 2 by default if nothing is provided
 			// and we didn't get frame content size.
 			// and we didn't get frame content size.
 			size := len(input) * 2
 			size := len(input) * 2
@@ -382,6 +396,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		if err != nil {
 		if err != nil {
 			return dst, err
 			return dst, err
 		}
 		}
+		if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+			return dst, ErrDecoderSizeExceeded
+		}
 		if len(frame.bBuf) == 0 {
 		if len(frame.bBuf) == 0 {
 			if debugDecoder {
 			if debugDecoder {
 				println("frame dbuf empty")
 				println("frame dbuf empty")
@@ -667,6 +684,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 				if debugDecoder {
 				if debugDecoder {
 					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
 					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
 				}
 				}
+				hist.reset()
 				hist.decoders = block.async.newHist.decoders
 				hist.decoders = block.async.newHist.decoders
 				hist.recentOffsets = block.async.newHist.recentOffsets
 				hist.recentOffsets = block.async.newHist.recentOffsets
 				hist.windowSize = block.async.newHist.windowSize
 				hist.windowSize = block.async.newHist.windowSize
@@ -698,6 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 			seqExecute <- block
 			seqExecute <- block
 		}
 		}
 		close(seqExecute)
 		close(seqExecute)
+		hist.reset()
 	}()
 	}()
 
 
 	var wg sync.WaitGroup
 	var wg sync.WaitGroup
@@ -721,6 +740,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 				if debugDecoder {
 				if debugDecoder {
 					println("Async 2: new history")
 					println("Async 2: new history")
 				}
 				}
+				hist.reset()
 				hist.windowSize = block.async.newHist.windowSize
 				hist.windowSize = block.async.newHist.windowSize
 				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
 				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
 				if block.async.newHist.dict != nil {
 				if block.async.newHist.dict != nil {
@@ -750,7 +770,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 					if block.lowMem {
 					if block.lowMem {
 						block.dst = make([]byte, block.RLESize)
 						block.dst = make([]byte, block.RLESize)
 					} else {
 					} else {
-						block.dst = make([]byte, maxBlockSize)
+						block.dst = make([]byte, maxCompressedBlockSize)
 					}
 					}
 				}
 				}
 				block.dst = block.dst[:block.RLESize]
 				block.dst = block.dst[:block.RLESize]
@@ -802,13 +822,14 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 		if debugDecoder {
 		if debugDecoder {
 			println("decoder goroutines finished")
 			println("decoder goroutines finished")
 		}
 		}
+		hist.reset()
 	}()
 	}()
 
 
+	var hist history
 decodeStream:
 decodeStream:
 	for {
 	for {
-		var hist history
 		var hasErr bool
 		var hasErr bool
-
+		hist.reset()
 		decodeBlock := func(block *blockDec) {
 		decodeBlock := func(block *blockDec) {
 			if hasErr {
 			if hasErr {
 				if block != nil {
 				if block != nil {
@@ -852,6 +873,10 @@ decodeStream:
 			}
 			}
 		}
 		}
 		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
 		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+			if debugDecoder {
+				println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+			}
+
 			err = ErrDecoderSizeExceeded
 			err = ErrDecoderSizeExceeded
 		}
 		}
 		if err != nil {
 		if err != nil {
@@ -920,5 +945,6 @@ decodeStream:
 	}
 	}
 	close(seqDecode)
 	close(seqDecode)
 	wg.Wait()
 	wg.Wait()
+	hist.reset()
 	d.frame.history.b = frameHistCache
 	d.frame.history.b = frameHistCache
 }
 }

+ 35 - 9
vendor/github.com/klauspost/compress/zstd/decoder_options.go

@@ -14,20 +14,23 @@ type DOption func(*decoderOptions) error
 
 
 // options retains accumulated state of multiple options.
 // options retains accumulated state of multiple options.
 type decoderOptions struct {
 type decoderOptions struct {
-	lowMem         bool
-	concurrent     int
-	maxDecodedSize uint64
-	maxWindowSize  uint64
-	dicts          []dict
-	ignoreChecksum bool
+	lowMem          bool
+	concurrent      int
+	maxDecodedSize  uint64
+	maxWindowSize   uint64
+	dicts           []dict
+	ignoreChecksum  bool
+	limitToCap      bool
+	decodeBufsBelow int
 }
 }
 
 
 func (o *decoderOptions) setDefault() {
 func (o *decoderOptions) setDefault() {
 	*o = decoderOptions{
 	*o = decoderOptions{
 		// use less ram: true for now, but may change.
 		// use less ram: true for now, but may change.
-		lowMem:        true,
-		concurrent:    runtime.GOMAXPROCS(0),
-		maxWindowSize: MaxWindowSize,
+		lowMem:          true,
+		concurrent:      runtime.GOMAXPROCS(0),
+		maxWindowSize:   MaxWindowSize,
+		decodeBufsBelow: 128 << 10,
 	}
 	}
 	if o.concurrent > 4 {
 	if o.concurrent > 4 {
 		o.concurrent = 4
 		o.concurrent = 4
@@ -114,6 +117,29 @@ func WithDecoderMaxWindow(size uint64) DOption {
 	}
 	}
 }
 }
 
 
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.limitToCap = b
+		return nil
+	}
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses less allocations but will have the full decompressed object in memory.
+// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+	return func(o *decoderOptions) error {
+		o.decodeBufsBelow = size
+		return nil
+	}
+}
+
 // IgnoreChecksum allows to forcibly ignore checksum checking.
 // IgnoreChecksum allows to forcibly ignore checksum checking.
 func IgnoreChecksum(b bool) DOption {
 func IgnoreChecksum(b bool) DOption {
 	return func(o *decoderOptions) error {
 	return func(o *decoderOptions) error {

+ 1 - 0
vendor/github.com/klauspost/compress/zstd/enc_best.go

@@ -32,6 +32,7 @@ type match struct {
 	length int32
 	length int32
 	rep    int32
 	rep    int32
 	est    int32
 	est    int32
+	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }
 }
 
 
 const highScore = 25000
 const highScore = 25000

+ 16 - 7
vendor/github.com/klauspost/compress/zstd/enc_better.go

@@ -416,15 +416,23 @@ encodeLoop:
 
 
 		// Try to find a better match by searching for a long match at the end of the current best match
 		// Try to find a better match by searching for a long match at the end of the current best match
 		if s+matched < sLimit {
 		if s+matched < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// Sweet spot is around 3 bytes, but depends on input.
+			// The skipped bytes are tested in Extend backwards,
+			// and still picked up as part of the match if they do.
+			const skipBeginning = 3
+
 			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
 			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
-			cv := load3232(src, s)
+			s2 := s + skipBeginning
+			cv := load3232(src, s2)
 			candidateL := e.longTable[nextHashL]
 			candidateL := e.longTable[nextHashL]
-			coffsetL := candidateL.offset - e.cur - matched
-			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+			coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+			if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 				// Found a long match, at least 4 bytes.
 				// Found a long match, at least 4 bytes.
-				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 				if matchedNext > matched {
 				if matchedNext > matched {
 					t = coffsetL
 					t = coffsetL
+					s = s2
 					matched = matchedNext
 					matched = matchedNext
 					if debugMatches {
 					if debugMatches {
 						println("long match at end-of-match")
 						println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:
 
 
 			// Check prev long...
 			// Check prev long...
 			if true {
 			if true {
-				coffsetL = candidateL.prev - e.cur - matched
-				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+				if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 					// Found a long match, at least 4 bytes.
 					// Found a long match, at least 4 bytes.
-					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 					if matchedNext > matched {
 					if matchedNext > matched {
 						t = coffsetL
 						t = coffsetL
+						s = s2
 						matched = matchedNext
 						matched = matchedNext
 						if debugMatches {
 						if debugMatches {
 							println("prev long match at end-of-match")
 							println("prev long match at end-of-match")

+ 5 - 2
vendor/github.com/klauspost/compress/zstd/enc_dfast.go

@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 	}
 	}
 
 
 	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
 	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
-		copy(e.longTable[:], e.dictLongTable)
+		//copy(e.longTable[:], e.dictLongTable)
+		e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
 		for i := range e.longTableShardDirty {
 		for i := range e.longTableShardDirty {
 			e.longTableShardDirty[i] = false
 			e.longTableShardDirty[i] = false
 		}
 		}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 			continue
 			continue
 		}
 		}
 
 
-		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
 		e.longTableShardDirty[i] = false
 		e.longTableShardDirty[i] = false
 	}
 	}
 }
 }

+ 5 - 3
vendor/github.com/klauspost/compress/zstd/enc_fast.go

@@ -304,7 +304,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 		minNonLiteralBlockSize = 1 + 1 + inputMargin
 		minNonLiteralBlockSize = 1 + 1 + inputMargin
 	)
 	)
 	if debugEncoder {
 	if debugEncoder {
-		if len(src) > maxBlockSize {
+		if len(src) > maxCompressedBlockSize {
 			panic("src too big")
 			panic("src too big")
 		}
 		}
 	}
 	}
@@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 	const shardCnt = tableShardCnt
 	const shardCnt = tableShardCnt
 	const shardSize = tableShardSize
 	const shardSize = tableShardSize
 	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
 	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-		copy(e.table[:], e.dictTable)
+		//copy(e.table[:], e.dictTable)
+		e.table = *(*[tableSize]tableEntry)(e.dictTable)
 		for i := range e.tableShardDirty {
 		for i := range e.tableShardDirty {
 			e.tableShardDirty[i] = false
 			e.tableShardDirty[i] = false
 		}
 		}
@@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 			continue
 			continue
 		}
 		}
 
 
-		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
 		e.tableShardDirty[i] = false
 		e.tableShardDirty[i] = false
 	}
 	}
 	e.allDirty = false
 	e.allDirty = false

+ 29 - 7
vendor/github.com/klauspost/compress/zstd/framedec.go

@@ -261,11 +261,16 @@ func (d *frameDec) reset(br byteBuffer) error {
 	}
 	}
 	d.history.windowSize = int(d.WindowSize)
 	d.history.windowSize = int(d.WindowSize)
 	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
 	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
-		// Alloc 2x window size if not low-mem, or very small window size.
+		// Alloc 2x window size if not low-mem, or window size below 2MB.
 		d.history.allocFrameBuffer = d.history.windowSize * 2
 		d.history.allocFrameBuffer = d.history.windowSize * 2
 	} else {
 	} else {
-		// Alloc with one additional block
-		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		if d.o.lowMem {
+			// Alloc with 1MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+		} else {
+			// Alloc with 2MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		}
 	}
 	}
 
 
 	if debugDecoder {
 	if debugDecoder {
@@ -343,7 +348,7 @@ func (d *frameDec) consumeCRC() error {
 	return nil
 	return nil
 }
 }
 
 
-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
 	saved := d.history.b
 
 
@@ -353,12 +358,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	// Store input length, so we only check new data.
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
 	crcStart := len(dst)
 	d.history.decoders.maxSyncLen = 0
 	d.history.decoders.maxSyncLen = 0
+	if d.o.limitToCap {
+		d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+	}
 	if d.FrameContentSize != fcsUnknown {
 	if d.FrameContentSize != fcsUnknown {
-		d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+			d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		}
 		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
 		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+			if debugDecoder {
+				println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+			}
 			return dst, ErrDecoderSizeExceeded
 			return dst, ErrDecoderSizeExceeded
 		}
 		}
-		if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+		if debugDecoder {
+			println("maxSyncLen:", d.history.decoders.maxSyncLen)
+		}
+		if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
 			// Alloc for output
 			// Alloc for output
 			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
 			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
 			copy(dst2, dst)
 			copy(dst2, dst)
@@ -378,7 +394,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 		if err != nil {
 		if err != nil {
 			break
 			break
 		}
 		}
-		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+		if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+			println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.o.limitToCap && len(d.history.b) > cap(dst) {
+			println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
 			err = ErrDecoderSizeExceeded
 			err = ErrDecoderSizeExceeded
 			break
 			break
 		}
 		}

+ 2 - 1
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go

@@ -21,7 +21,8 @@ type buildDtableAsmContext struct {
 
 
 // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 // Function returns non-zero exit code on error.
 // Function returns non-zero exit code on error.
-// go:noescape
+//
+//go:noescape
 func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 
 
 // please keep in sync with _generate/gen_fse.go
 // please keep in sync with _generate/gen_fse.go

+ 0 - 1
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s

@@ -1,7 +1,6 @@
 // Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
 // Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
 
 
 //go:build !appengine && !noasm && gc && !noasm
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 
 // func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 // func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 TEXT ·buildDtable_asm(SB), $0-24
 TEXT ·buildDtable_asm(SB), $0-24

+ 9 - 12
vendor/github.com/klauspost/compress/zstd/history.go

@@ -37,24 +37,21 @@ func (h *history) reset() {
 	h.ignoreBuffer = 0
 	h.ignoreBuffer = 0
 	h.error = false
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
 	h.recentOffsets = [3]int{1, 4, 8}
-	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
+	h.decoders.freeDecoders()
 	h.decoders = sequenceDecs{br: h.decoders.br}
 	h.decoders = sequenceDecs{br: h.decoders.br}
+	h.freeHuffDecoder()
+	h.huffTree = nil
+	h.dict = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
 	if h.huffTree != nil {
 	if h.huffTree != nil {
 		if h.dict == nil || h.dict.litEnc != h.huffTree {
 		if h.dict == nil || h.dict.litEnc != h.huffTree {
 			huffDecoderPool.Put(h.huffTree)
 			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
 		}
 		}
 	}
 	}
-	h.huffTree = nil
-	h.dict = nil
-	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
 }
 }
 
 
 func (h *history) setDict(dict *dict) {
 func (h *history) setDict(dict *dict) {

+ 20 - 2
vendor/github.com/klauspost/compress/zstd/seqdec.go

@@ -99,6 +99,21 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
 	return nil
 	return nil
 }
 }
 
 
+func (s *sequenceDecs) freeDecoders() {
+	if f := s.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.litLengths.fse = nil
+	}
+	if f := s.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.offsets.fse = nil
+	}
+	if f := s.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.matchLengths.fse = nil
+	}
+}
+
 // execute will execute the decoded sequence with the provided history.
 // execute will execute the decoded sequence with the provided history.
 // The sequence must be evaluated before being sent.
 // The sequence must be evaluated before being sent.
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@@ -299,7 +314,10 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 		}
 		size := ll + ml + len(out)
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+			if size-startSize == 424242 {
+				panic("here")
+			}
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		}
 		if size > cap(out) {
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -411,7 +429,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 
 
 	// Check if space for literals
 	// Check if space for literals
 	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
 	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	}
 
 
 	// Add final literals
 	// Add final literals

+ 14 - 3
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go

@@ -32,18 +32,22 @@ type decodeSyncAsmContext struct {
 // sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 // sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 //
 //
 // Please refer to seqdec_generic.go for the reference implementation.
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 
 // sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
 // sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 
 // sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
 // sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 
 // sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
 // sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 
@@ -135,7 +139,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 		if debugDecoder {
 		if debugDecoder {
 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 		}
 		}
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 
 
 	default:
 	default:
 		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
 		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
@@ -143,7 +147,8 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 
 
 	s.seqSize += ctx.litRemain
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
 	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
 	}
 	}
 	err := br.close()
 	err := br.close()
 	if err != nil {
 	if err != nil {
@@ -201,20 +206,24 @@ const errorNotEnoughSpace = 5
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 //
 // Please refer to seqdec_generic.go for the reference implementation.
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 //
 // Please refer to seqdec_generic.go for the reference implementation.
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 
@@ -281,7 +290,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 
 
 	s.seqSize += ctx.litRemain
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	}
 	err := br.close()
 	err := br.close()
 	if err != nil {
 	if err != nil {
@@ -308,10 +317,12 @@ type executeAsmContext struct {
 // Returns false if a match offset is too big.
 // Returns false if a match offset is too big.
 //
 //
 // Please refer to seqdec_generic.go for the reference implementation.
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
 func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
 
 
 // Same as above, but with safe memcopies
 // Same as above, but with safe memcopies
+//
 //go:noescape
 //go:noescape
 func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
 func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
 
 

+ 0 - 1
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s

@@ -1,7 +1,6 @@
 // Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
 // Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
 
 
 //go:build !appengine && !noasm && gc && !noasm
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 // Requires: CMOV

+ 2 - 2
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go

@@ -111,7 +111,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 		}
 		}
 		s.seqSize += ll + ml
 		s.seqSize += ll + ml
 		if s.seqSize > maxBlockSize {
 		if s.seqSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		}
 		litRemain -= ll
 		litRemain -= ll
 		if litRemain < 0 {
 		if litRemain < 0 {
@@ -149,7 +149,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	}
 	s.seqSize += litRemain
 	s.seqSize += litRemain
 	if s.seqSize > maxBlockSize {
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	}
 	err := br.close()
 	err := br.close()
 	if err != nil {
 	if err != nil {

+ 2 - 2
vendor/modules.txt

@@ -467,8 +467,8 @@ github.com/ishidawataru/sctp
 # github.com/jmespath/go-jmespath v0.4.0
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
 ## explicit; go 1.14
 github.com/jmespath/go-jmespath
 github.com/jmespath/go-jmespath
-# github.com/klauspost/compress v1.15.9
-## explicit; go 1.16
+# github.com/klauspost/compress v1.15.12
+## explicit; go 1.17
 github.com/klauspost/compress
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/huff0