Browse Source

vendor: github.com/klauspost/compress v1.17.2

fixes data corruption with zstd output in "best" compression mode

- 1.17.2 diff: https://github.com/klauspost/compress/compare/v1.17.1...v1.17.2
- full diff: https://github.com/klauspost/compress/compare/v1.16.5...v1.17.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
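For illustration only (not part of the commit): the corruption fixed here could only be triggered through the encoder's "best" level, so the affected code path is exercised by a round trip like the following sketch. All identifiers are from the vendored package's public API; the sample data is made up.

package main

import (
	"bytes"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// SpeedBestCompression selects the "best" level named in the commit
	// message; other levels were not affected by the corruption.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := bytes.Repeat([]byte("moby vendor bump test data "), 1024)
	compressed := enc.EncodeAll(src, nil)

	// Decode and compare to confirm the round trip is lossless.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	got, err := dec.DecodeAll(compressed, nil)
	if err != nil || !bytes.Equal(got, src) {
		panic("zstd best round-trip mismatch")
	}
}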
Sebastiaan van Stijn 1 year ago
parent
commit
f2c67ea82f
35 changed files with 703 additions and 230 deletions
  1. vendor.mod (+1 -1)
  2. vendor.sum (+2 -2)
  3. vendor/github.com/klauspost/compress/.goreleaser.yml (+3 -17)
  4. vendor/github.com/klauspost/compress/README.md (+29 -0)
  5. vendor/github.com/klauspost/compress/SECURITY.md (+25 -0)
  6. vendor/github.com/klauspost/compress/fse/bitwriter.go (+1 -2)
  7. vendor/github.com/klauspost/compress/fse/compress.go (+2 -1)
  8. vendor/github.com/klauspost/compress/huff0/bitwriter.go (+1 -10)
  9. vendor/github.com/klauspost/compress/huff0/compress.go (+6 -14)
  10. vendor/github.com/klauspost/compress/huff0/decompress.go (+1 -1)
  11. vendor/github.com/klauspost/compress/internal/snapref/encode_other.go (+0 -12)
  12. vendor/github.com/klauspost/compress/zstd/README.md (+1 -1)
  13. vendor/github.com/klauspost/compress/zstd/bitreader.go (+15 -19)
  14. vendor/github.com/klauspost/compress/zstd/bitwriter.go (+1 -2)
  15. vendor/github.com/klauspost/compress/zstd/blockdec.go (+1 -1)
  16. vendor/github.com/klauspost/compress/zstd/blockenc.go (+22 -7)
  17. vendor/github.com/klauspost/compress/zstd/decoder_options.go (+1 -1)
  18. vendor/github.com/klauspost/compress/zstd/dict.go (+376 -3)
  19. vendor/github.com/klauspost/compress/zstd/enc_base.go (+1 -0)
  20. vendor/github.com/klauspost/compress/zstd/enc_best.go (+6 -5)
  21. vendor/github.com/klauspost/compress/zstd/enc_dfast.go (+1 -1)
  22. vendor/github.com/klauspost/compress/zstd/enc_fast.go (+5 -12)
  23. vendor/github.com/klauspost/compress/zstd/encoder.go (+4 -9)
  24. vendor/github.com/klauspost/compress/zstd/encoder_options.go (+1 -1)
  25. vendor/github.com/klauspost/compress/zstd/framedec.go (+4 -4)
  26. vendor/github.com/klauspost/compress/zstd/frameenc.go (+2 -2)
  27. vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go (+16 -0)
  28. vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s (+68 -0)
  29. vendor/github.com/klauspost/compress/zstd/matchlen_generic.go (+33 -0)
  30. vendor/github.com/klauspost/compress/zstd/seqdec.go (+6 -11)
  31. vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (+64 -64)
  32. vendor/github.com/klauspost/compress/zstd/seqdec_generic.go (+1 -1)
  33. vendor/github.com/klauspost/compress/zstd/snappy.go (+2 -3)
  34. vendor/github.com/klauspost/compress/zstd/zstd.go (+0 -22)
  35. vendor/modules.txt (+1 -1)

+ 1 - 1
vendor.mod

@@ -58,7 +58,7 @@ require (
 	github.com/hashicorp/serf v0.8.5
 	github.com/imdario/mergo v0.3.13
 	github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2
-	github.com/klauspost/compress v1.16.5
+	github.com/klauspost/compress v1.17.2
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs/v3 v3.0.1
 	github.com/mitchellh/copystructure v1.2.0

+ 2 - 2
vendor.sum

@@ -825,8 +825,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
-github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
+github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

+ 3 - 17
vendor/github.com/klauspost/compress/.goreleaser.yml

@@ -3,7 +3,7 @@
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@v0.9.3
+    - go install mvdan.cc/garble@v0.10.1
 
 builds:
   -
@@ -92,16 +92,7 @@ builds:
 archives:
   -
     id: s2-binaries
-    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
-    replacements:
-      aix: AIX
-      darwin: OSX
-      linux: Linux
-      windows: Windows
-      386: i386
-      amd64: x86_64
-      freebsd: FreeBSD
-      netbsd: NetBSD
+    name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     format_overrides:
       - goos: windows
         format: zip
@@ -125,7 +116,7 @@ changelog:
 
 nfpms:
   -
-    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     vendor: Klaus Post
     homepage: https://github.com/klauspost/compress
     maintainer: Klaus Post <klauspost@gmail.com>
@@ -134,8 +125,3 @@ nfpms:
     formats:
       - deb
       - rpm
-    replacements:
-      darwin: Darwin
-      linux: Linux
-      freebsd: FreeBSD
-      amd64: x86_64

+ 29 - 0
vendor/github.com/klauspost/compress/README.md

@@ -16,6 +16,28 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
+	* Add experimental dictionary builder  https://github.com/klauspost/compress/pull/853
+	* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
+	* flate: Add limited window compression https://github.com/klauspost/compress/pull/843
+	* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
+	* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
+	* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+   
+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
+	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
+	* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
+
+* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
+	* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
+	* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
+	* gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
+	* s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
+
+* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
+	* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
+	* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
+
 * Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
 	* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
 	* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
@@ -40,6 +62,9 @@ This package provides various compression algorithms.
 	* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
 	* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
 
+<details>
+	<summary>See changes to v1.15.x</summary>
+	
 * Jan 21st, 2023 (v1.15.15)
 	* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
 	* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
@@ -166,6 +191,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati
 
 While the release has been extensively tested, it is recommended to testing when upgrading.
 
+</details>
+
 <details>
 	<summary>See changes to v1.14.x</summary>
 	
@@ -626,6 +653,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
 * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
 * [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
 * [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
 
 # license
 

+ 25 - 0
vendor/github.com/klauspost/compress/SECURITY.md

@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base.

+ 1 - 2
vendor/github.com/klauspost/compress/fse/bitwriter.go

@@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() {
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.

+ 2 - 1
vendor/github.com/klauspost/compress/fse/compress.go

@@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error {
 	c2.flush(s.actualTableLog)
 	c1.flush(s.actualTableLog)
 
-	return s.bw.close()
+	s.bw.close()
+	return nil
 }
 
 // writeCount will write the normalized histogram count to header.

+ 1 - 10
vendor/github.com/klauspost/compress/huff0/bitwriter.go

@@ -13,14 +13,6 @@ type bitWriter struct {
 	out          []byte
 }
 
-// bitMask16 is bitmasks. Has extra to avoid bounds check.
-var bitMask16 = [32]uint16{
-	0, 1, 3, 7, 0xF, 0x1F,
-	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
-	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF} /* up to 16 bits */
-
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -102,10 +94,9 @@ func (b *bitWriter) flushAlign() {
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }

+ 6 - 14
vendor/github.com/klauspost/compress/huff0/compress.go

@@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err
 }
 
 func (s *Scratch) compress1X(src []byte) ([]byte, error) {
-	return s.compress1xDo(s.Out, src)
+	return s.compress1xDo(s.Out, src), nil
 }
 
-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
 	var bw = bitWriter{out: dst}
 
 	// N is length divisible by 4.
@@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
 			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
 		}
 	}
-	err := bw.close()
-	return bw.out, err
+	bw.close()
+	return bw.out
 }
 
 var sixZeros [6]byte
@@ -283,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
 		}
 		src = src[len(toDo):]
 
-		var err error
 		idx := len(s.Out)
-		s.Out, err = s.compress1xDo(s.Out, toDo)
-		if err != nil {
-			return nil, err
-		}
+		s.Out = s.compress1xDo(s.Out, toDo)
 		if len(s.Out)-idx > math.MaxUint16 {
 			// We cannot store the size in the jump table
 			return nil, ErrIncompressible
@@ -315,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 
 	segmentSize := (len(src) + 3) / 4
 	var wg sync.WaitGroup
-	var errs [4]error
 	wg.Add(4)
 	for i := 0; i < 4; i++ {
 		toDo := src
@@ -326,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 
 		// Separate goroutine for each block.
 		go func(i int) {
-			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
 			wg.Done()
 		}(i)
 	}
 	wg.Wait()
 	for i := 0; i < 4; i++ {
-		if errs[i] != nil {
-			return nil, errs[i]
-		}
 		o := s.tmpOut[i]
 		if len(o) > math.MaxUint16 {
 			// We cannot store the size in the jump table

+ 1 - 1
vendor/github.com/klauspost/compress/huff0/decompress.go

@@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 
 	switch d.actualTableLog {
 	case 8:
-		const shift = 8 - 8
+		const shift = 0
 		for br.off >= 4 {
 			br.fillFast()
 			v := dt[uint8(br.value>>(56+shift))]

+ 0 - 12
vendor/github.com/klauspost/compress/internal/snapref/encode_other.go

@@ -87,18 +87,6 @@ func emitCopy(dst []byte, offset, length int) int {
 	return i + 2
 }
 
-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-//
-//	0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
-	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-	}
-	return j
-}
-
 func hash(u, shift uint32) uint32 {
 	return (u * 0x1e35a7bd) >> shift
 }

+ 1 - 1
vendor/github.com/klauspost/compress/zstd/README.md

@@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.

+ 15 - 19
vendor/github.com/klauspost/compress/zstd/bitreader.go

@@ -17,7 +17,6 @@ import (
 // for aligning the input.
 type bitReader struct {
 	in       []byte
-	off      uint   // next byte to read is at in[off - 1]
 	value    uint64 // Maybe use [16]byte, but shifting is awkward.
 	bitsRead uint8
 }
@@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error {
 		return errors.New("corrupt stream: too short")
 	}
 	b.in = in
-	b.off = uint(len(in))
 	// The highest bit of the last byte indicates where to start
 	v := in[len(in)-1]
 	if v == 0 {
@@ -69,21 +67,19 @@ func (b *bitReader) fillFast() {
 	if b.bitsRead < 32 {
 		return
 	}
-	// 2 bounds checks.
-	v := b.in[b.off-4:]
-	v = v[:4]
+	v := b.in[len(b.in)-4:]
+	b.in = b.in[:len(b.in)-4]
 	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 	b.value = (b.value << 32) | uint64(low)
 	b.bitsRead -= 32
-	b.off -= 4
 }
 
 // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
 func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	v := b.in[len(b.in)-8:]
+	b.in = b.in[:len(b.in)-8]
+	b.value = binary.LittleEndian.Uint64(v)
 	b.bitsRead = 0
-	b.off -= 8
 }
 
 // fill() will make sure at least 32 bits are available.
@@ -91,25 +87,25 @@ func (b *bitReader) fill() {
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off >= 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
+	if len(b.in) >= 4 {
+		v := b.in[len(b.in)-4:]
+		b.in = b.in[:len(b.in)-4]
 		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 		b.value = (b.value << 32) | uint64(low)
 		b.bitsRead -= 32
-		b.off -= 4
 		return
 	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
+
+	b.bitsRead -= uint8(8 * len(b.in))
+	for len(b.in) > 0 {
+		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
+		b.in = b.in[:len(b.in)-1]
 	}
 }
 
 // finished returns true if all bits have been read from the bit stream.
 func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
+	return len(b.in) == 0 && b.bitsRead >= 64
 }
 
 // overread returns true if more bits have been requested than is on the stream.
@@ -119,7 +115,7 @@ func (b *bitReader) overread() bool {
 
 // remain returns the number of bits remaining.
 func (b *bitReader) remain() uint {
-	return b.off*8 + 64 - uint(b.bitsRead)
+	return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
}
 
 // close the bitstream and returns an error if out-of-buffer reads occurred.

+ 1 - 2
vendor/github.com/klauspost/compress/zstd/bitwriter.go

@@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() {
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.

+ 1 - 1
vendor/github.com/klauspost/compress/zstd/blockdec.go

@@ -592,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 				}
 				seq.fse.setRLE(symb)
 				if debugDecoder {
-					printf("RLE set to %+v, code: %v", symb, v)
+					printf("RLE set to 0x%x, code: %v", symb, v)
 				}
 			case compModeFSE:
 				println("Reading table for", tableIndex(i))

+ 22 - 7
vendor/github.com/klauspost/compress/zstd/blockenc.go

@@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
 	if len(lits) >= 1024 {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
-	} else if len(lits) > 32 {
+	} else if len(lits) > 16 {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
 	} else {
 		err = huff0.ErrIncompressible
 	}
-
+	if err == nil && len(out)+5 > len(lits) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSizes(len(out), len(lits), single)
+		if len(out)+lh.size() >= len(lits) {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		if debugEncoder {
@@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 	if len(b.literals) >= 1024 && !raw {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-	} else if len(b.literals) > 32 && !raw {
+	} else if len(b.literals) > 16 && !raw {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 		err = huff0.ErrIncompressible
 	}
 
+	if err == nil && len(out)+5 > len(b.literals) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSize(len(b.literals))
+		szRaw := lh.size()
+		lh.setSizes(len(out), len(b.literals), single)
+		szComp := lh.size()
+		if len(out)+szComp >= len(b.literals)+szRaw {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		lh.setType(literalsBlockRaw)
@@ -773,10 +791,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 	ml.flush(mlEnc.actualTableLog)
 	of.flush(ofEnc.actualTableLog)
 	ll.flush(llEnc.actualTableLog)
-	err = wr.close()
-	if err != nil {
-		return err
-	}
+	wr.close()
 	b.output = wr.out
 
 	// Maybe even add a bigger margin.

+ 1 - 1
vendor/github.com/klauspost/compress/zstd/decoder_options.go

@@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
 	}
 }
 
-// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
 // The slice content can be arbitrary data.
 func WithDecoderDictRaw(id uint32, content []byte) DOption {
 	return func(o *decoderOptions) error {

+ 376 - 3
vendor/github.com/klauspost/compress/zstd/dict.go

@@ -1,10 +1,13 @@
 package zstd
 
 import (
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+	"math"
+	"sort"
 
 	"github.com/klauspost/compress/huff0"
 )
@@ -14,9 +17,8 @@ type dict struct {
 
 	litEnc              *huff0.Scratch
 	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder
-	offsets [3]int
-	content []byte
+	offsets             [3]int
+	content             []byte
 }
 
 const dictMagic = "\x37\xa4\x30\xec"
@@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface {
 	d, err := loadDict(b)
 	return d, err
 }
+
+type BuildDictOptions struct {
+	// Dictionary ID.
+	ID uint32
+
+	// Content to use to create dictionary tables.
+	Contents [][]byte
+
+	// History to use for all blocks.
+	History []byte
+
+	// Offsets to use.
+	Offsets [3]int
+
+	// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
+	// See https://github.com/facebook/zstd/issues/3724
+	CompatV155 bool
+
+	// Use the specified encoder level.
+	// The dictionary will be built using the specified encoder level,
+	// which will reflect speed and make the dictionary tailored for that level.
+	// If not set SpeedBestCompression will be used.
+	Level EncoderLevel
+
+	// DebugOut will write stats and other details here if set.
+	DebugOut io.Writer
+}
+
+func BuildDict(o BuildDictOptions) ([]byte, error) {
+	initPredefined()
+	hist := o.History
+	contents := o.Contents
+	debug := o.DebugOut != nil
+	println := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintln(o.DebugOut, args...)
+		}
+	}
+	printf := func(s string, args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintf(o.DebugOut, s, args...)
+		}
+	}
+	print := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprint(o.DebugOut, args...)
+		}
+	}
+
+	if int64(len(hist)) > dictMaxLength {
+		return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
+	}
+	if len(hist) < 8 {
+		return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
+	}
+	if len(contents) == 0 {
+		return nil, errors.New("no content provided")
+	}
+	d := dict{
+		id:      o.ID,
+		litEnc:  nil,
+		llDec:   sequenceDec{},
+		ofDec:   sequenceDec{},
+		mlDec:   sequenceDec{},
+		offsets: o.Offsets,
+		content: hist,
+	}
+	block := blockEnc{lowMem: false}
+	block.init()
+	enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
+	if o.Level != 0 {
+		eOpts := encoderOptions{
+			level:      o.Level,
+			blockSize:  maxMatchLen,
+			windowSize: maxMatchLen,
+			dict:       &d,
+			lowMem:     false,
+		}
+		enc = eOpts.encoder()
+	} else {
+		o.Level = SpeedBestCompression
+	}
+	var (
+		remain [256]int
+		ll     [256]int
+		ml     [256]int
+		of     [256]int
+	)
+	addValues := func(dst *[256]int, src []byte) {
+		for _, v := range src {
+			dst[v]++
+		}
+	}
+	addHist := func(dst *[256]int, src *[256]uint32) {
+		for i, v := range src {
+			dst[i] += int(v)
+		}
+	}
+	seqs := 0
+	nUsed := 0
+	litTotal := 0
+	newOffsets := make(map[uint32]int, 1000)
+	for _, b := range contents {
+		block.reset(nil)
+		if len(b) < 8 {
+			continue
+		}
+		nUsed++
+		enc.Reset(&d, true)
+		enc.Encode(&block, b)
+		addValues(&remain, block.literals)
+		litTotal += len(block.literals)
+		seqs += len(block.sequences)
+		block.genCodes()
+		addHist(&ll, block.coders.llEnc.Histogram())
+		addHist(&ml, block.coders.mlEnc.Histogram())
+		addHist(&of, block.coders.ofEnc.Histogram())
+		for i, seq := range block.sequences {
+			if i > 3 {
+				break
+			}
+			offset := seq.offset
+			if offset == 0 {
+				continue
+			}
+			if offset > 3 {
+				newOffsets[offset-3]++
+			} else {
+				newOffsets[uint32(o.Offsets[offset-1])]++
+			}
+		}
+	}
+	// Find most used offsets.
+	var sortedOffsets []uint32
+	for k := range newOffsets {
+		sortedOffsets = append(sortedOffsets, k)
+	}
+	sort.Slice(sortedOffsets, func(i, j int) bool {
+		a, b := sortedOffsets[i], sortedOffsets[j]
+		if a == b {
+			// Prefer the longer offset
+			return sortedOffsets[i] > sortedOffsets[j]
+		}
+		return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
+	})
+	if len(sortedOffsets) > 3 {
+		if debug {
+			print("Offsets:")
+			for i, v := range sortedOffsets {
+				if i > 20 {
+					break
+				}
+				printf("[%d: %d],", v, newOffsets[v])
+			}
+			println("")
+		}
+
+		sortedOffsets = sortedOffsets[:3]
+	}
+	for i, v := range sortedOffsets {
+		o.Offsets[i] = int(v)
+	}
+	if debug {
+		println("New repeat offsets", o.Offsets)
+	}
+
+	if nUsed == 0 || seqs == 0 {
+		return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
+	}
+	if debug {
+		println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
+	}
+	if seqs/nUsed < 512 {
+		// Use 512 as minimum.
+		nUsed = seqs / 512
+	}
+	copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
+		hist := dst.Histogram()
+		var maxSym uint8
+		var maxCount int
+		var fakeLength int
+		for i, v := range src {
+			if v > 0 {
+				v = v / nUsed
+				if v == 0 {
+					v = 1
+				}
+			}
+			if v > maxCount {
+				maxCount = v
+			}
+			if v != 0 {
+				maxSym = uint8(i)
+			}
+			fakeLength += v
+			hist[i] = uint32(v)
+		}
+		dst.HistogramFinished(maxSym, maxCount)
+		dst.reUsed = false
+		dst.useRLE = false
+		err := dst.normalizeCount(fakeLength)
+		if err != nil {
+			return nil, err
+		}
+		if debug {
+			println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
+		}
+		return dst.writeCount(nil)
+	}
+	if debug {
+		print("Literal lengths: ")
+	}
+	llTable, err := copyHist(block.coders.llEnc, &ll)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Match lengths: ")
+	}
+	mlTable, err := copyHist(block.coders.mlEnc, &ml)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Offsets: ")
+	}
+	ofTable, err := copyHist(block.coders.ofEnc, &of)
+	if err != nil {
+		return nil, err
+	}
+
+	// Literal table
+	avgSize := litTotal
+	if avgSize > huff0.BlockSizeMax/2 {
+		avgSize = huff0.BlockSizeMax / 2
+	}
+	huffBuff := make([]byte, 0, avgSize)
+	// Target size
+	div := litTotal / avgSize
+	if div < 1 {
+		div = 1
+	}
+	if debug {
+		println("Huffman weights:")
+	}
+	for i, n := range remain[:] {
+		if n > 0 {
+			n = n / div
+			// Allow all entries to be represented.
+			if n == 0 {
+				n = 1
+			}
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			if debug {
+				printf("[%d: %d], ", i, n)
+			}
+		}
+	}
+	if o.CompatV155 && remain[255]/div == 0 {
+		huffBuff = append(huffBuff, 255)
+	}
+	scratch := &huff0.Scratch{TableLog: 11}
+	for tries := 0; tries < 255; tries++ {
+		scratch = &huff0.Scratch{TableLog: 11}
+		_, _, err = huff0.Compress1X(huffBuff, scratch)
+		if err == nil {
+			break
+		}
+		if debug {
+			printf("Try %d: Huffman error: %v\n", tries+1, err)
+		}
+		huffBuff = huffBuff[:0]
+		if tries == 250 {
+			if debug {
+				println("Huffman: Bailing out with predefined table")
+			}
+
+			// Bail out.... Just generate something
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
+			for i := 0; i < 128; i++ {
+				huffBuff = append(huffBuff, byte(i))
+			}
+			continue
+		}
+		if errors.Is(err, huff0.ErrIncompressible) {
+			// Try truncating least common.
+			for i, n := range remain[:] {
+				if n > 0 {
+					n = n / (div * (i + 1))
+					if n > 0 {
+						huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+					}
+				}
+			}
+			if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
+				huffBuff = append(huffBuff, 255)
+			}
+			if len(huffBuff) == 0 {
+				huffBuff = append(huffBuff, 0, 255)
+			}
+		}
+		if errors.Is(err, huff0.ErrUseRLE) {
+			for i, n := range remain[:] {
+				n = n / (div * (i + 1))
+				// Allow all entries to be represented.
+				if n == 0 {
+					n = 1
+				}
+				huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			}
+		}
+	}
+
+	var out bytes.Buffer
+	out.Write([]byte(dictMagic))
+	out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
+	out.Write(scratch.OutTable)
+	if debug {
+		println("huff table:", len(scratch.OutTable), "bytes")
+		println("of table:", len(ofTable), "bytes")
+		println("ml table:", len(mlTable), "bytes")
+		println("ll table:", len(llTable), "bytes")
+	}
+	out.Write(ofTable)
+	out.Write(mlTable)
+	out.Write(llTable)
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
+	out.Write(hist)
+	if debug {
+		_, err := loadDict(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		i, err := InspectDictionary(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		println("ID:", i.ID())
+		println("Content size:", i.ContentSize())
+		println("Encoder:", i.LitEncoder() != nil)
+		println("Offsets:", i.Offsets())
+		var totalSize int
+		for _, b := range contents {
+			totalSize += len(b)
+		}
+
+		encWith := func(opts ...EOption) int {
+			enc, err := NewWriter(nil, opts...)
+			if err != nil {
+				panic(err)
+			}
+			defer enc.Close()
+			var dst []byte
+			var totalSize int
+			for _, b := range contents {
+				dst = enc.EncodeAll(b, dst[:0])
+				totalSize += len(dst)
+			}
+			return totalSize
+		}
+		plain := encWith(WithEncoderLevel(o.Level))
+		withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
+		println("Input size:", totalSize)
+		println("Plain Compressed:", plain)
+		println("Dict Compressed:", withDict)
+		println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
+	}
+	return out.Bytes(), nil
+}
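Aside from the bitreader rework, most of the lines added in this file are the new experimental dictionary builder. A minimal usage sketch, not part of the commit: the samples are placeholders, History is built by joining them (one simple choice), and per the options above Level defaults to SpeedBestCompression when unset.

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Placeholder training samples; each must be at least 8 bytes.
	samples := [][]byte{
		[]byte(`{"level":"info","msg":"request served"}`),
		[]byte(`{"level":"warn","msg":"request throttled"}`),
	}
	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,
		Contents: samples,
		// History becomes the raw dictionary content that matches
		// may reference during encoding.
		History: bytes.Join(samples, nil),
	})
	if err != nil {
		panic(err)
	}
	// Feed the result to an encoder, as the debug path above does.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	fmt.Println("dictionary bytes:", len(dict))
}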

+ 1 - 0
vendor/github.com/klauspost/compress/zstd/enc_base.go

@@ -144,6 +144,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
 	} else {
 		e.crc.Reset()
 	}
+	e.blk.dictLitEnc = nil
 	if d != nil {
 		low := e.lowMem
 		if singleBlock {

+ 6 - 5
vendor/github.com/klauspost/compress/zstd/enc_best.go

@@ -197,12 +197,13 @@ encodeLoop:
 
 		// Set m to a match at offset if it looks like that will improve compression.
 		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
-			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+			delta := s - offset
+			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
 				return
 			}
 			if debugAsserts {
-				if offset <= 0 {
-					panic(offset)
+				if offset >= s {
+					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
 				}
 				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
 					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
@@ -343,8 +344,8 @@ encodeLoop:
 		if best.rep > 0 {
 			var seq seq
 			seq.matchLen = uint32(best.length - zstdMinMatch)
-			if debugAsserts && s <= nextEmit {
-				panic("s <= nextEmit")
+			if debugAsserts && s < nextEmit {
+				panic("s < nextEmit")
 			}
 			addLiterals(&seq, best.s)
 

+ 1 - 1
vendor/github.com/klauspost/compress/zstd/enc_dfast.go

@@ -1084,7 +1084,7 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 			}
 		}
 		e.lastDictID = d.id
-		e.allDirty = true
+		allDirty = true
 	}
 	// Reset table to initial state
 	e.cur = e.maxMatchOff

+ 5 - 12
vendor/github.com/klauspost/compress/zstd/enc_fast.go

@@ -133,8 +133,7 @@ encodeLoop:
 			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 				// Consider history as well.
 				var seq seq
-				var length int32
-				length = 4 + e.matchlen(s+6, repIndex+4, src)
+				length := 4 + e.matchlen(s+6, repIndex+4, src)
 				seq.matchLen = uint32(length - zstdMinMatch)
 
 				// We might be able to match backwards.
@@ -645,8 +644,7 @@ encodeLoop:
 			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 				// Consider history as well.
 				var seq seq
-				var length int32
-				length = 4 + e.matchlen(s+6, repIndex+4, src)
+				length := 4 + e.matchlen(s+6, repIndex+4, src)
 
 				seq.matchLen = uint32(length - zstdMinMatch)
 
@@ -831,13 +829,12 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 		}
 		if true {
 			end := e.maxMatchOff + int32(len(d.content)) - 8
-			for i := e.maxMatchOff; i < end; i += 3 {
+			for i := e.maxMatchOff; i < end; i += 2 {
 				const hashLog = tableBits
 
 				cv := load6432(d.content, i-e.maxMatchOff)
-				nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
-				nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
-				nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+				nextHash := hashLen(cv, hashLog, tableFastHashLen)     // 0 -> 6
+				nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
 				e.dictTable[nextHash] = tableEntry{
 					val:    uint32(cv),
 					offset: i,
@@ -846,10 +843,6 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 					val:    uint32(cv >> 8),
 					offset: i + 1,
 				}
-				e.dictTable[nextHash2] = tableEntry{
-					val:    uint32(cv >> 16),
-					offset: i + 2,
-				}
 			}
 		}
 		e.lastDictID = d.id

+ 4 - 9
vendor/github.com/klauspost/compress/zstd/encoder.go

@@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error {
 			DictID:        e.o.dict.ID(),
 		}
 
-		dst, err := fh.appendTo(tmp[:0])
-		if err != nil {
-			return err
-		}
+		dst := fh.appendTo(tmp[:0])
 		s.headerWritten = true
 		s.wWg.Wait()
 		var n2 int
@@ -483,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 				Checksum: false,
 				DictID:   0,
 			}
-			dst, _ = fh.appendTo(dst)
+			dst = fh.appendTo(dst)
 
 			// Write raw block as last one only.
 			var blk blockHeader
@@ -518,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
 		dst = make([]byte, 0, len(src))
 	}
-	dst, err := fh.appendTo(dst)
-	if err != nil {
-		panic(err)
-	}
+	dst = fh.appendTo(dst)
 
 	// If we can do everything in one block, prefer that.
 	if len(src) <= e.o.blockSize {
@@ -581,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	// Add padding with content from crypto/rand.Reader
 	if e.o.pad > 0 {
 		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		var err error
 		dst, err = skippableFrame(dst, add, rand.Reader)
 		if err != nil {
 			panic(err)

+ 1 - 1
vendor/github.com/klauspost/compress/zstd/encoder_options.go

@@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption {
 		}
 		// No need to waste our time.
 		if n == 1 {
-			o.pad = 0
+			n = 0
 		}
 		if n > 1<<30 {
 			return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")

+ 4 - 4
vendor/github.com/klauspost/compress/zstd/framedec.go

@@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error {
 		switch err {
 		case io.EOF, io.ErrUnexpectedEOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			signature[0] = b[0]
+		default:
+			return err
 		}
 		// Read the rest, don't allow io.ErrUnexpectedEOF
 		b, err = br.readSmall(3)
 		switch err {
 		case io.EOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			copy(signature[1:], b)
+		default:
+			return err
 		}
 
 		if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {

+ 2 - 2
vendor/github.com/klauspost/compress/zstd/frameenc.go

@@ -22,7 +22,7 @@ type frameHeader struct {
 
 const maxHeaderSize = 14
 
-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
 	dst = append(dst, frameMagic...)
 	var fhd uint8
 	if f.Checksum {
@@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
 	default:
 		panic("invalid fcs")
 	}
-	return dst, nil
+	return dst
 }
 
 const skippableFrameHeader = 4 + 4

+ 16 - 0
vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go

@@ -0,0 +1,16 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+//	len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int

+ 68 - 0
vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s

@@ -0,0 +1,68 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+// Requires: BMI
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+	MOVQ a_base+0(FP), AX
+	MOVQ b_base+24(FP), CX
+	MOVQ a_len+8(FP), DX
+
+	// matchLen
+	XORL SI, SI
+	CMPL DX, $0x08
+	JB   matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+	MOVQ  (AX)(SI*1), BX
+	XORQ  (CX)(SI*1), BX
+	TESTQ BX, BX
+	JZ    matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+	TZCNTQ BX, BX
+#else
+	BSFQ BX, BX
+#endif
+	SARQ $0x03, BX
+	LEAL (SI)(BX*1), SI
+	JMP  gen_match_len_end
+
+matchlen_loop_standalone:
+	LEAL -8(DX), DX
+	LEAL 8(SI), SI
+	CMPL DX, $0x08
+	JAE  matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+	CMPL DX, $0x04
+	JB   matchlen_match2_standalone
+	MOVL (AX)(SI*1), BX
+	CMPL (CX)(SI*1), BX
+	JNE  matchlen_match2_standalone
+	LEAL -4(DX), DX
+	LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+	CMPL DX, $0x02
+	JB   matchlen_match1_standalone
+	MOVW (AX)(SI*1), BX
+	CMPW (CX)(SI*1), BX
+	JNE  matchlen_match1_standalone
+	LEAL -2(DX), DX
+	LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+	CMPL DX, $0x01
+	JB   gen_match_len_end
+	MOVB (AX)(SI*1), BL
+	CMPB (CX)(SI*1), BL
+	JNE  gen_match_len_end
+	INCL SI
+
+gen_match_len_end:
+	MOVQ SI, ret+48(FP)
+	RET

+ 33 - 0
vendor/github.com/klauspost/compress/zstd/matchlen_generic.go

@@ -0,0 +1,33 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
+		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+	}
+
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+
+}

+ 6 - 11
vendor/github.com/klauspost/compress/zstd/seqdec.go

@@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 			return io.ErrUnexpectedEOF
 		}
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 
@@ -452,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol)
 
 	// extra bits are stored in reverse order.
 	br.fill()
-	if s.maxBits <= 32 {
-		mo += br.getBits(moB)
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-	} else {
-		mo += br.getBits(moB)
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
 		br.fill()
-		// matchlength+literal length, max 32 bits
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-
 	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
 	mo = s.adjustOffset(mo, ll, moB)
 	return
 }

+ 64 - 64
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s

@@ -5,11 +5,11 @@
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -301,9 +301,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -336,11 +336,11 @@ error_overread:
 // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -603,9 +603,9 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -638,11 +638,11 @@ error_overread:
 // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -892,9 +892,9 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -927,11 +927,11 @@ error_overread:
 // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -1152,9 +1152,9 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -1797,11 +1797,11 @@ empty_seqs:
 // func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -2295,9 +2295,9 @@ handle_loop:
 
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2362,11 +2362,11 @@ error_not_enough_space:
 // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -2818,9 +2818,9 @@ handle_loop:
 
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2885,11 +2885,11 @@ error_not_enough_space:
 // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -3485,9 +3485,9 @@ handle_loop:
 
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -3552,11 +3552,11 @@ error_not_enough_space:
 // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 32(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    8(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -4110,9 +4110,9 @@ handle_loop:
 
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
-	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ AX, 24(CX)
+	MOVB DL, 32(CX)
+	MOVQ BX, 8(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX

+ 1 - 1
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go

@@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	for i := range seqs {
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 

+ 2 - 3
vendor/github.com/klauspost/compress/zstd/snappy.go

@@ -95,10 +95,9 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
 	var written int64
 	var readHeader bool
 	{
-		var header []byte
-		var n int
-		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+		header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
 
+		var n int
 		n, r.err = w.Write(header)
 		if r.err != nil {
 			return written, r.err

+ 0 - 22
vendor/github.com/klauspost/compress/zstd/zstd.go

@@ -9,7 +9,6 @@ import (
 	"errors"
 	"log"
 	"math"
-	"math/bits"
 )
 
 // enable debug printing
@@ -106,27 +105,6 @@ func printf(format string, a ...interface{}) {
 	}
 }
 
-// matchLen returns the maximum common prefix length of a and b.
-// a must be the shortest of the two.
-func matchLen(a, b []byte) (n int) {
-	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
-		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
-		if diff != 0 {
-			return n + bits.TrailingZeros64(diff)>>3
-		}
-		n += 8
-	}
-
-	for i := range a {
-		if a[i] != b[i] {
-			break
-		}
-		n++
-	}
-	return n
-
-}
-
 func load3232(b []byte, i int32) uint32 {
 	return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
 }

+ 1 - 1
vendor/modules.txt

@@ -640,7 +640,7 @@ github.com/ishidawataru/sctp
 # github.com/jmoiron/sqlx v1.3.3
 ## explicit; go 1.10
 github.com/jmoiron/sqlx/types
-# github.com/klauspost/compress v1.16.5
+# github.com/klauspost/compress v1.17.2
 ## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/fse