
Merge pull request #44527 from thaJeztah/update_buildkit_deps1

vendor: update various dependencies in preparation of BuildKit update
Sebastiaan van Stijn 2 years ago
parent
commit
3910417695
53 files changed with 2176 additions and 622 deletions
  1. vendor.mod (+7 -7)
  2. vendor.sum (+16 -13)
  3. vendor/github.com/go-logr/logr/README.md (+4 -0)
  4. vendor/github.com/go-logr/logr/funcr/funcr.go (+32 -4)
  5. vendor/github.com/go-logr/logr/logr.go (+9 -0)
  6. vendor/github.com/klauspost/compress/README.md (+28 -3)
  7. vendor/github.com/klauspost/compress/huff0/decompress.go (+22 -14)
  8. vendor/github.com/klauspost/compress/huff0/decompress_amd64.go (+4 -0)
  9. vendor/github.com/klauspost/compress/huff0/decompress_amd64.s (+0 -1)
  10. vendor/github.com/klauspost/compress/huff0/decompress_generic.go (+11 -7)
  11. vendor/github.com/klauspost/compress/internal/snapref/encode_other.go (+5 -1)
  12. vendor/github.com/klauspost/compress/zstd/README.md (+2 -0)
  13. vendor/github.com/klauspost/compress/zstd/blockdec.go (+2 -3)
  14. vendor/github.com/klauspost/compress/zstd/bytebuf.go (+1 -2)
  15. vendor/github.com/klauspost/compress/zstd/decoder.go (+35 -9)
  16. vendor/github.com/klauspost/compress/zstd/decoder_options.go (+35 -9)
  17. vendor/github.com/klauspost/compress/zstd/enc_best.go (+1 -0)
  18. vendor/github.com/klauspost/compress/zstd/enc_better.go (+16 -7)
  19. vendor/github.com/klauspost/compress/zstd/enc_dfast.go (+5 -2)
  20. vendor/github.com/klauspost/compress/zstd/enc_fast.go (+5 -3)
  21. vendor/github.com/klauspost/compress/zstd/framedec.go (+29 -7)
  22. vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go (+2 -1)
  23. vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s (+0 -1)
  24. vendor/github.com/klauspost/compress/zstd/history.go (+9 -12)
  25. vendor/github.com/klauspost/compress/zstd/seqdec.go (+20 -2)
  26. vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go (+14 -3)
  27. vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (+0 -1)
  28. vendor/github.com/klauspost/compress/zstd/seqdec_generic.go (+2 -2)
  29. vendor/github.com/prometheus/client_golang/prometheus/counter.go (+7 -4)
  30. vendor/github.com/prometheus/client_golang/prometheus/doc.go (+59 -48)
  31. vendor/github.com/prometheus/client_golang/prometheus/gauge.go (+4 -2)
  32. vendor/github.com/prometheus/client_golang/prometheus/histogram.go (+896 -82)
  33. vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go (+60 -0)
  34. vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go (+8 -5)
  35. vendor/github.com/prometheus/client_golang/prometheus/labels.go (+2 -1)
  36. vendor/github.com/prometheus/client_golang/prometheus/metric.go (+1 -1)
  37. vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (+2 -3)
  38. vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go (+14 -10)
  39. vendor/github.com/prometheus/client_golang/prometheus/registry.go (+31 -3)
  40. vendor/github.com/prometheus/client_golang/prometheus/summary.go (+6 -3)
  41. vendor/github.com/prometheus/client_golang/prometheus/timer.go (+6 -5)
  42. vendor/github.com/prometheus/client_model/go/metrics.pb.go (+262 -71)
  43. vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go (+25 -28)
  44. vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go (+34 -32)
  45. vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go (+34 -32)
  46. vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go (+34 -32)
  47. vendor/golang.org/x/crypto/ssh/messages.go (+1 -1)
  48. vendor/golang.org/x/net/http2/headermap.go (+18 -0)
  49. vendor/golang.org/x/net/http2/hpack/static_table.go (+188 -0)
  50. vendor/golang.org/x/net/http2/hpack/tables.go (+1 -77)
  51. vendor/golang.org/x/net/http2/server.go (+133 -51)
  52. vendor/golang.org/x/net/http2/transport.go (+26 -9)
  53. vendor/modules.txt (+8 -8)

+ 7 - 7
vendor.mod

@@ -48,7 +48,7 @@ require (
 	github.com/hashicorp/serf v0.8.5
 	github.com/imdario/mergo v0.3.12
 	github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
-	github.com/klauspost/compress v1.15.9
+	github.com/klauspost/compress v1.15.12
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 	github.com/moby/buildkit v0.10.6
@@ -66,12 +66,12 @@ require (
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
-	github.com/opencontainers/runc v1.1.2
+	github.com/opencontainers/runc v1.1.3
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
 	github.com/opencontainers/selinux v1.10.2
 	github.com/pelletier/go-toml v1.9.4
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.13.0
+	github.com/prometheus/client_golang v1.14.0
 	github.com/rootless-containers/rootlesskit v1.1.0
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.6.1
@@ -82,7 +82,7 @@ require (
 	github.com/vishvananda/netlink v1.2.1-beta.2
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
 	go.etcd.io/bbolt v1.3.6
-	golang.org/x/net v0.1.0
+	golang.org/x/net v0.2.0
 	golang.org/x/sync v0.1.0
 	golang.org/x/sys v0.2.0
 	golang.org/x/time v0.1.0
@@ -110,7 +110,7 @@ require (
 	github.com/felixge/httpsnoop v1.0.2 // indirect
 	github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect
 	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/go-logr/logr v1.2.2 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/googleapis v1.4.1 // indirect
@@ -134,7 +134,7 @@ require (
 	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
 	github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
 	github.com/philhofer/fwd v1.0.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rexray/gocsi v1.2.2 // indirect
@@ -159,7 +159,7 @@ require (
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.21.0 // indirect
-	golang.org/x/crypto v0.1.0 // indirect
+	golang.org/x/crypto v0.2.0 // indirect
 	golang.org/x/oauth2 v0.1.0 // indirect
 	golang.org/x/text v0.4.0 // indirect
 	google.golang.org/api v0.93.0 // indirect

+ 16 - 13
vendor.sum

@@ -470,8 +470,9 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
 github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
@@ -733,8 +734,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
-github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -885,8 +886,8 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
 github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
 github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
-github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw=
-github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -935,14 +936,15 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
-github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -992,6 +994,7 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
 github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -1216,8 +1219,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
+golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1319,8 +1322,8 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1488,7 +1491,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
+golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

+ 4 - 0
vendor/github.com/go-logr/logr/README.md

@@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated.
 There are implementations for the following logging libraries:
 
 - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
 - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
 - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
 - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
 - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
 - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
 - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
 - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
 - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
 
 ## FAQ
 

+ 32 - 4
vendor/github.com/go-logr/logr/funcr/funcr.go

@@ -351,15 +351,15 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
 	if v, ok := value.(logr.Marshaler); ok {
 		// Replace the value with what the type wants to get logged.
 		// That then gets handled below via reflection.
-		value = v.MarshalLog()
+		value = invokeMarshaler(v)
 	}
 
 	// Handle types that want to format themselves.
 	switch v := value.(type) {
 	case fmt.Stringer:
-		value = v.String()
+		value = invokeStringer(v)
 	case error:
-		value = v.Error()
+		value = invokeError(v)
 	}
 
 	// Handling the most common types without reflect is a small perf win.
@@ -408,8 +408,9 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
 			if i > 0 {
 				buf.WriteByte(',')
 			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
 			// arbitrary keys might need escaping
-			buf.WriteString(prettyString(v[i].(string)))
+			buf.WriteString(prettyString(k))
 			buf.WriteByte(':')
 			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
 		}
@@ -596,6 +597,33 @@ func isEmpty(v reflect.Value) bool {
 	return false
 }
 
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
 // Caller represents the original call site for a log line, after considering
 // logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper.  The File and
 // Line fields will always be provided, while the Func field is optional.

+ 9 - 0
vendor/github.com/go-logr/logr/logr.go

@@ -115,6 +115,15 @@ limitations under the License.
 // may be any Go value, but how the value is formatted is determined by the
 // LogSink implementation.
 //
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// Calling methods with the null logger (Logger{}) as instance will crash
+// because it has no LogSink. Therefore this null logger should never be passed
+// around. For cases where passing a logger is optional, a pointer to Logger
+// should be used.
+//
 // Key Naming Conventions
 //
 // Keys are not strictly required to conform to any specification or regex, but
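
The doc comment added above warns that a zero-value Logger{} has no LogSink and will crash when used, and recommends a *logr.Logger pointer when logging is optional. A minimal sketch of that pattern, assuming funcr as the sink (the function and variable names here are illustrative, not part of the change):

```go
package main

import (
	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

// process treats its logger as optional: a nil *logr.Logger means "no
// logging", whereas calling methods on a zero-value logr.Logger{} passed
// by value would panic because it has no LogSink.
func process(items []string, log *logr.Logger) {
	for _, item := range items {
		if log != nil {
			log.Info("processing item", "item", item)
		}
		// ... actual work ...
	}
}

func main() {
	logger := funcr.New(func(prefix, args string) {
		println(prefix, args)
	}, funcr.Options{})

	process([]string{"a", "b"}, &logger) // with logging
	process([]string{"c"}, nil)          // logging disabled
}
```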

+ 28 - 3
vendor/github.com/klauspost/compress/README.md

@@ -17,6 +17,30 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Sept 26, 2022 (v1.15.11)
+
+	* flate: Improve level 1-3 compression  https://github.com/klauspost/compress/pull/678
+	* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+	* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+	* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+	* Add Go 1.19 - deprecate Go 1.16  https://github.com/klauspost/compress/pull/651
+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+	* zstd: Improve "better" compresssion  https://github.com/klauspost/compress/pull/657
+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+	* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+	* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+	* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+	* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
 * July 13, 2022 (v1.15.8)
 
 	* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
@@ -91,15 +115,15 @@ This package provides various compression algorithms.
 	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
 	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
 
-<details>
-	<summary>See  Details</summary>
 Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
 
 Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
 
 While the release has been extensively tested, it is recommended to testing when upgrading.
-</details>
 
+<details>
+	<summary>See changes to v1.14.x</summary>
+	
 * Feb 22, 2022 (v1.14.4)
 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
@@ -125,6 +149,7 @@ While the release has been extensively tested, it is recommended to testing when
 	* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
 	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+</details>
 
 <details>
 	<summary>See changes to v1.13.x</summary>

+ 22 - 14
vendor/github.com/klauspost/compress/huff0/decompress.go

@@ -763,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
@@ -997,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			// copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {

+ 4 - 0
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go

@@ -14,12 +14,14 @@ import (
 
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog > 8.
+//
 //go:noescape
 func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 
 // decompress4x_8b_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog <= 8 which decodes 4 entries
 // per loop.
+//
 //go:noescape
 func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
 
@@ -145,11 +147,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_amd64(ctx *decompress1xContext)
 
 // decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
 

+ 0 - 1
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s

@@ -1,7 +1,6 @@
 // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
 
 //go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
 
 // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 TEXT ·decompress4x_main_loop_amd64(SB), $0-8

+ 11 - 7
vendor/github.com/klauspost/compress/huff0/decompress_generic.go

@@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			//copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {

+ 5 - 1
vendor/github.com/klauspost/compress/internal/snapref/encode_other.go

@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= len(lit) && len(lit) <= 65536
 func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
 // emitCopy writes a copy chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= offset && offset <= 65535
 //	4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
 // src[i:i+k-j] and src[j:k] have the same contents.
 //
 // It assumes that:
+//
 //	0 <= i && i < j && j <= len(src)
 func extendMatch(src []byte, i, j int) int {
 	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
 // been written.
 //
 // It also assumes that:
+//
 //	len(dst) >= MaxEncodedLen(len(src)) &&
-// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlock(dst, src []byte) (d int) {
 	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
 	// The table element type is uint16, as s < sLimit and sLimit < len(src)

+ 2 - 0
vendor/github.com/klauspost/compress/zstd/README.md

@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
 
 Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
 
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
 ## Installation
 
 Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.

+ 2 - 3
vendor/github.com/klauspost/compress/zstd/blockdec.go

@@ -10,7 +10,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
@@ -233,7 +232,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
 			if b.lowMem {
 				b.dst = make([]byte, b.RLESize)
 			} else {
-				b.dst = make([]byte, maxBlockSize)
+				b.dst = make([]byte, maxCompressedBlockSize)
 			}
 		}
 		b.dst = b.dst[:b.RLESize]
@@ -651,7 +650,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
 		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
 		buf.Write(in)
-		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+		os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
 	}
 
 	return nil

+ 1 - 2
vendor/github.com/klauspost/compress/zstd/bytebuf.go

@@ -7,7 +7,6 @@ package zstd
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 )
 
 type byteBuffer interface {
@@ -124,7 +123,7 @@ func (r *readerWrapper) readByte() (byte, error) {
 }
 
 func (r *readerWrapper) skipN(n int64) error {
-	n2, err := io.CopyN(ioutil.Discard, r.r, n)
+	n2, err := io.CopyN(io.Discard, r.r, n)
 	if n2 != n {
 		err = io.ErrUnexpectedEOF
 	}

+ 35 - 9
vendor/github.com/klauspost/compress/zstd/decoder.go

@@ -35,6 +35,7 @@ type Decoder struct {
 		br           readerWrapper
 		enabled      bool
 		inFrame      bool
+		dstBuf       []byte
 	}
 
 	frame *frameDec
@@ -187,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error {
 	}
 
 	// If bytes buffer and < 5MB, do sync decoding anyway.
-	if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
 		bb2 := bb
 		if debugDecoder {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
 		b := bb2.Bytes()
 		var dst []byte
-		if cap(d.current.b) > 0 {
-			dst = d.current.b
+		if cap(d.syncStream.dstBuf) > 0 {
+			dst = d.syncStream.dstBuf[:0]
 		}
 
-		dst, err := d.DecodeAll(b, dst[:0])
+		dst, err := d.DecodeAll(b, dst)
 		if err == nil {
 			err = io.EOF
 		}
+		// Save output buffer
+		d.syncStream.dstBuf = dst
 		d.current.b = dst
 		d.current.err = err
 		d.current.flushed = true
@@ -216,6 +219,7 @@ func (d *Decoder) Reset(r io.Reader) error {
 	d.current.err = nil
 	d.current.flushed = false
 	d.current.d = nil
+	d.syncStream.dstBuf = nil
 
 	// Ensure no-one else is still running...
 	d.streamWg.Wait()
@@ -312,6 +316,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 	// Grab a block decoder and frame decoder.
 	block := <-d.decoders
 	frame := block.localFrame
+	initialSize := len(dst)
 	defer func() {
 		if debugDecoder {
 			printf("re-adding decoder: %p", block)
@@ -354,7 +359,16 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			return dst, ErrWindowSizeExceeded
 		}
 		if frame.FrameContentSize != fcsUnknown {
-			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+				}
+				return dst, ErrDecoderSizeExceeded
+			}
+			if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+				}
 				return dst, ErrDecoderSizeExceeded
 			}
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +378,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 			}
 		}
 
-		if cap(dst) == 0 {
+		if cap(dst) == 0 && !d.o.limitToCap {
 			// Allocate len(input) * 2 by default if nothing is provided
 			// and we didn't get frame content size.
 			size := len(input) * 2
@@ -382,6 +396,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		if err != nil {
 			return dst, err
 		}
+		if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+			return dst, ErrDecoderSizeExceeded
+		}
 		if len(frame.bBuf) == 0 {
 			if debugDecoder {
 				println("frame dbuf empty")
@@ -667,6 +684,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 				if debugDecoder {
 					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
 				}
+				hist.reset()
 				hist.decoders = block.async.newHist.decoders
 				hist.recentOffsets = block.async.newHist.recentOffsets
 				hist.windowSize = block.async.newHist.windowSize
@@ -698,6 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 			seqExecute <- block
 		}
 		close(seqExecute)
+		hist.reset()
 	}()
 
 	var wg sync.WaitGroup
@@ -721,6 +740,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 				if debugDecoder {
 					println("Async 2: new history")
 				}
+				hist.reset()
 				hist.windowSize = block.async.newHist.windowSize
 				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
 				if block.async.newHist.dict != nil {
@@ -750,7 +770,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 					if block.lowMem {
 						block.dst = make([]byte, block.RLESize)
 					} else {
-						block.dst = make([]byte, maxBlockSize)
+						block.dst = make([]byte, maxCompressedBlockSize)
 					}
 				}
 				block.dst = block.dst[:block.RLESize]
@@ -802,13 +822,14 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
 		if debugDecoder {
 			println("decoder goroutines finished")
 		}
+		hist.reset()
 	}()
 
+	var hist history
 decodeStream:
 	for {
-		var hist history
 		var hasErr bool
-
+		hist.reset()
 		decodeBlock := func(block *blockDec) {
 			if hasErr {
 				if block != nil {
@@ -852,6 +873,10 @@ decodeStream:
 			}
 		}
 		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+			if debugDecoder {
+				println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+			}
+
 			err = ErrDecoderSizeExceeded
 		}
 		if err != nil {
@@ -920,5 +945,6 @@ decodeStream:
 	}
 	close(seqDecode)
 	wg.Wait()
+	hist.reset()
 	d.frame.history.b = frameHistCache
 }

+ 35 - 9
vendor/github.com/klauspost/compress/zstd/decoder_options.go

@@ -14,20 +14,23 @@ type DOption func(*decoderOptions) error
 
 // options retains accumulated state of multiple options.
 type decoderOptions struct {
-	lowMem         bool
-	concurrent     int
-	maxDecodedSize uint64
-	maxWindowSize  uint64
-	dicts          []dict
-	ignoreChecksum bool
+	lowMem          bool
+	concurrent      int
+	maxDecodedSize  uint64
+	maxWindowSize   uint64
+	dicts           []dict
+	ignoreChecksum  bool
+	limitToCap      bool
+	decodeBufsBelow int
 }
 
 func (o *decoderOptions) setDefault() {
 	*o = decoderOptions{
 		// use less ram: true for now, but may change.
-		lowMem:        true,
-		concurrent:    runtime.GOMAXPROCS(0),
-		maxWindowSize: MaxWindowSize,
+		lowMem:          true,
+		concurrent:      runtime.GOMAXPROCS(0),
+		maxWindowSize:   MaxWindowSize,
+		decodeBufsBelow: 128 << 10,
 	}
 	if o.concurrent > 4 {
 		o.concurrent = 4
@@ -114,6 +117,29 @@ func WithDecoderMaxWindow(size uint64) DOption {
 	}
 }
 
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.limitToCap = b
+		return nil
+	}
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses less allocations but will have the full decompressed object in memory.
+// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+	return func(o *decoderOptions) error {
+		o.decodeBufsBelow = size
+		return nil
+	}
+}
+
 // IgnoreChecksum allows to forcibly ignore checksum checking.
 func IgnoreChecksum(b bool) DOption {
 	return func(o *decoderOptions) error {

+ 1 - 0
vendor/github.com/klauspost/compress/zstd/enc_best.go

@@ -32,6 +32,7 @@ type match struct {
 	length int32
 	rep    int32
 	est    int32
+	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }
 
 const highScore = 25000

+ 16 - 7
vendor/github.com/klauspost/compress/zstd/enc_better.go

@@ -416,15 +416,23 @@ encodeLoop:
 
 		// Try to find a better match by searching for a long match at the end of the current best match
 		if s+matched < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// Sweet spot is around 3 bytes, but depends on input.
+			// The skipped bytes are tested in Extend backwards,
+			// and still picked up as part of the match if they do.
+			const skipBeginning = 3
+
 			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
-			cv := load3232(src, s)
+			s2 := s + skipBeginning
+			cv := load3232(src, s2)
 			candidateL := e.longTable[nextHashL]
-			coffsetL := candidateL.offset - e.cur - matched
-			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+			coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+			if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 				// Found a long match, at least 4 bytes.
-				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 				if matchedNext > matched {
 					t = coffsetL
+					s = s2
 					matched = matchedNext
 					if debugMatches {
 						println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:
 
 			// Check prev long...
 			if true {
-				coffsetL = candidateL.prev - e.cur - matched
-				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+				if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 					// Found a long match, at least 4 bytes.
-					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 					if matchedNext > matched {
 						t = coffsetL
+						s = s2
 						matched = matchedNext
 						if debugMatches {
 							println("prev long match at end-of-match")

+ 5 - 2
vendor/github.com/klauspost/compress/zstd/enc_dfast.go

@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 	}
 
 	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
-		copy(e.longTable[:], e.dictLongTable)
+		//copy(e.longTable[:], e.dictLongTable)
+		e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
 		for i := range e.longTableShardDirty {
 			e.longTableShardDirty[i] = false
 		}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 			continue
 		}
 
-		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
 		e.longTableShardDirty[i] = false
 	}
 }

+ 5 - 3
vendor/github.com/klauspost/compress/zstd/enc_fast.go

@@ -304,7 +304,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 		minNonLiteralBlockSize = 1 + 1 + inputMargin
 	)
 	if debugEncoder {
-		if len(src) > maxBlockSize {
+		if len(src) > maxCompressedBlockSize {
 			panic("src too big")
 		}
 	}
@@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 	const shardCnt = tableShardCnt
 	const shardSize = tableShardSize
 	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-		copy(e.table[:], e.dictTable)
+		//copy(e.table[:], e.dictTable)
+		e.table = *(*[tableSize]tableEntry)(e.dictTable)
 		for i := range e.tableShardDirty {
 			e.tableShardDirty[i] = false
 		}
@@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 			continue
 		}
 
-		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
 		e.tableShardDirty[i] = false
 	}
 	e.allDirty = false

+ 29 - 7
vendor/github.com/klauspost/compress/zstd/framedec.go

@@ -261,11 +261,16 @@ func (d *frameDec) reset(br byteBuffer) error {
 	}
 	d.history.windowSize = int(d.WindowSize)
 	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
-		// Alloc 2x window size if not low-mem, or very small window size.
+		// Alloc 2x window size if not low-mem, or window size below 2MB.
 		d.history.allocFrameBuffer = d.history.windowSize * 2
 	} else {
-		// Alloc with one additional block
-		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		if d.o.lowMem {
+			// Alloc with 1MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+		} else {
+			// Alloc with 2MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		}
 	}
 
 	if debugDecoder {
@@ -343,7 +348,7 @@ func (d *frameDec) consumeCRC() error {
 	return nil
 }
 
-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
 
@@ -353,12 +358,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
 	d.history.decoders.maxSyncLen = 0
+	if d.o.limitToCap {
+		d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+	}
 	if d.FrameContentSize != fcsUnknown {
-		d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+			d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		}
 		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+			if debugDecoder {
+				println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+			}
 			return dst, ErrDecoderSizeExceeded
 		}
-		if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+		if debugDecoder {
+			println("maxSyncLen:", d.history.decoders.maxSyncLen)
+		}
+		if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
 			// Alloc for output
 			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
 			copy(dst2, dst)
@@ -378,7 +394,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 		if err != nil {
 			break
 		}
-		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+		if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+			println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.o.limitToCap && len(d.history.b) > cap(dst) {
+			println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
 			err = ErrDecoderSizeExceeded
 			break
 		}

+ 2 - 1
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go

@@ -21,7 +21,8 @@ type buildDtableAsmContext struct {
 
 // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 // Function returns non-zero exit code on error.
-// go:noescape
+//
+//go:noescape
 func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 
 // please keep in sync with _generate/gen_fse.go

+ 0 - 1
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s

@@ -1,7 +1,6 @@
 // Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
 
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 // func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 TEXT ·buildDtable_asm(SB), $0-24

+ 9 - 12
vendor/github.com/klauspost/compress/zstd/history.go

@@ -37,24 +37,21 @@ func (h *history) reset() {
 	h.ignoreBuffer = 0
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
-	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
+	h.decoders.freeDecoders()
 	h.decoders = sequenceDecs{br: h.decoders.br}
+	h.freeHuffDecoder()
+	h.huffTree = nil
+	h.dict = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
 	if h.huffTree != nil {
 		if h.dict == nil || h.dict.litEnc != h.huffTree {
 			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
 		}
 	}
-	h.huffTree = nil
-	h.dict = nil
-	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
 }
 
 func (h *history) setDict(dict *dict) {

+ 20 - 2
vendor/github.com/klauspost/compress/zstd/seqdec.go

@@ -99,6 +99,21 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
 	return nil
 }
 
+func (s *sequenceDecs) freeDecoders() {
+	if f := s.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.litLengths.fse = nil
+	}
+	if f := s.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.offsets.fse = nil
+	}
+	if f := s.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.matchLengths.fse = nil
+	}
+}
+
 // execute will execute the decoded sequence with the provided history.
 // The sequence must be evaluated before being sent.
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@@ -299,7 +314,10 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+			if size-startSize == 424242 {
+				panic("here")
+			}
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -411,7 +429,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 
 	// Check if space for literals
 	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 
 	// Add final literals

+ 14 - 3
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go

@@ -32,18 +32,22 @@ type decodeSyncAsmContext struct {
 // sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
@@ -135,7 +139,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 		if debugDecoder {
 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 		}
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 
 	default:
 		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
@@ -143,7 +147,8 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
 	}
 	err := br.close()
 	if err != nil {
@@ -201,20 +206,24 @@ const errorNotEnoughSpace = 5
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
@@ -281,7 +290,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
@@ -308,10 +317,12 @@ type executeAsmContext struct {
 // Returns false if a match offset is too big.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
 
 // Same as above, but with safe memcopies
+//
 //go:noescape
 func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
 

+ 0 - 1
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s

@@ -1,7 +1,6 @@
 // Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
 
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV

+ 2 - 2
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go

@@ -111,7 +111,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 		}
 		s.seqSize += ll + ml
 		if s.seqSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		litRemain -= ll
 		if litRemain < 0 {
@@ -149,7 +149,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	s.seqSize += litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {

+ 7 - 4
vendor/github.com/prometheus/client_golang/prometheus/counter.go

@@ -140,12 +140,13 @@ func (c *counter) get() float64 {
 }
 
 func (c *counter) Write(out *dto.Metric) error {
-	val := c.get()
-
+	// Read the Exemplar first and the value second. This is to avoid a race condition
+	// where users see an exemplar for a not-yet-existing observation.
 	var exemplar *dto.Exemplar
 	if e := c.exemplar.Load(); e != nil {
 		exemplar = e.(*dto.Exemplar)
 	}
+	val := c.get()
 
 	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
 }
@@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
 func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
 	c, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
 func (v *CounterVec) With(labels Labels) Counter {
 	c, err := v.GetMetricWith(labels)
 	if err != nil {

+ 59 - 48
vendor/github.com/prometheus/client_golang/prometheus/doc.go

@@ -21,55 +21,66 @@
 // All exported functions and methods are safe to be used concurrently unless
 // specified otherwise.
 //
-// A Basic Example
+// # A Basic Example
 //
 // As a starting point, a very basic usage example:
 //
-//    package main
-//
-//    import (
-//    	"log"
-//    	"net/http"
-//
-//    	"github.com/prometheus/client_golang/prometheus"
-//    	"github.com/prometheus/client_golang/prometheus/promhttp"
-//    )
-//
-//    var (
-//    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-//    		Name: "cpu_temperature_celsius",
-//    		Help: "Current temperature of the CPU.",
-//    	})
-//    	hdFailures = prometheus.NewCounterVec(
-//    		prometheus.CounterOpts{
-//    			Name: "hd_errors_total",
-//    			Help: "Number of hard-disk errors.",
-//    		},
-//    		[]string{"device"},
-//    	)
-//    )
-//
-//    func init() {
-//    	// Metrics have to be registered to be exposed:
-//    	prometheus.MustRegister(cpuTemp)
-//    	prometheus.MustRegister(hdFailures)
-//    }
-//
-//    func main() {
-//    	cpuTemp.Set(65.3)
-//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
-//
-//    	// The Handler function provides a default handler to expose metrics
-//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
-//    	http.Handle("/metrics", promhttp.Handler())
-//    	log.Fatal(http.ListenAndServe(":8080", nil))
-//    }
-//
+//	package main
+//
+//	import (
+//		"log"
+//		"net/http"
+//
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
+//
+//	type metrics struct {
+//		cpuTemp    prometheus.Gauge
+//		hdFailures *prometheus.CounterVec
+//	}
+//
+//	func NewMetrics(reg prometheus.Registerer) *metrics {
+//		m := &metrics{
+//			cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+//				Name: "cpu_temperature_celsius",
+//				Help: "Current temperature of the CPU.",
+//			}),
+//			hdFailures: prometheus.NewCounterVec(
+//				prometheus.CounterOpts{
+//					Name: "hd_errors_total",
+//					Help: "Number of hard-disk errors.",
+//				},
+//				[]string{"device"},
+//			),
+//		}
+//		reg.MustRegister(m.cpuTemp)
+//		reg.MustRegister(m.hdFailures)
+//		return m
+//	}
+//
+//	func main() {
+//		// Create a non-global registry.
+//		reg := prometheus.NewRegistry()
+//
+//		// Create new metrics and register them using the custom registry.
+//		m := NewMetrics(reg)
+//		// Set values for the newly created metrics.
+//		m.cpuTemp.Set(65.3)
+//		m.hdFailures.With(prometheus.Labels{"device": "/dev/sda"}).Inc()
+//
+//		// Expose metrics and custom registry via an HTTP server
+//		// using the HandlerFor function. "/metrics" is the usual endpoint for that.
+//		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+//		log.Fatal(http.ListenAndServe(":8080", nil))
+//	}
 //
 // This is a complete program that exports two metrics, a Gauge and a Counter,
 // the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
 //
-// Metrics
+// # Metrics
 //
 // The number of exported identifiers in this package might appear a bit
 // overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
 // To create instances of Metrics and their vector versions, you need a suitable
 // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
 //
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
 //
 // While you could create your own implementations of Metric, most likely you
 // will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
 // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
 // shortcuts.
 //
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
 //
 // While MustRegister is the by far most common way of registering a Collector,
 // sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
 // NewProcessCollector). With a custom registry, you are in control and decide
 // yourself about the Collectors to register.
 //
-// HTTP Exposition
+// # HTTP Exposition
 //
 // The Registry implements the Gatherer interface. The caller of the Gather
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
 // above. The tools to expose metrics via HTTP are in the promhttp sub-package.
 //
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
 //
 // Function for pushing to the Pushgateway can be found in the push sub-package.
 //
-// Graphite Bridge
+// # Graphite Bridge
 //
 // Functions and examples to push metrics from a Gatherer to Graphite can be
 // found in the graphite sub-package.
 //
-// Other Means of Exposition
+// # Other Means of Exposition
 //
 // More ways of exposing metrics can easily be added by following the approaches
 // of the existing implementations.

+ 4 - 2
vendor/github.com/prometheus/client_golang/prometheus/gauge.go

@@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
 func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
 	g, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
 func (v *GaugeVec) With(labels Labels) Gauge {
 	g, err := v.GetMetricWith(labels)
 	if err != nil {

+ 896 - 82
vendor/github.com/prometheus/client_golang/prometheus/histogram.go

@@ -28,19 +28,216 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )
 
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+	// Schema "0":
+	{0.5},
+	// Schema 1:
+	{0.5, 0.7071067811865475},
+	// Schema 2:
+	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+	// Schema 3:
+	{
+		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+	},
+	// Schema 4:
+	{
+		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+	},
+	// Schema 5:
+	{
+		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+	},
+	// Schema 6:
+	{
+		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+	},
+	// Schema 7:
+	{
+		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+	},
+	// Schema 8:
+	{
+		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+	},
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to come to terms if we are fine with
+// that, or if we might prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// 	// Populate nativeHistogramBounds.
+// 	numBuckets := 1
+// 	for i := range nativeHistogramBounds {
+// 		bounds := []float64{0.5}
+// 		factor := math.Exp2(math.Exp2(float64(-i)))
+// 		for j := 0; j < numBuckets-1; j++ {
+// 			var bound float64
+// 			if (j+1)%2 == 0 {
+// 				// Use previously calculated value for increased precision.
+// 				bound = nativeHistogramBounds[i-1][j/2+1]
+// 			} else {
+// 				bound = bounds[j] * factor
+// 			}
+// 			bounds = append(bounds, bound)
+// 		}
+// 		numBuckets *= 2
+// 		nativeHistogramBounds[i] = bounds
+// 	}
+// }
+
 // A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
 //
 // On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
+// the histogram_quantile PromQL function.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
 //
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
 //
 // To create Histogram instances, use NewHistogram.
 type Histogram interface {
@@ -50,7 +247,8 @@ type Histogram interface {
 	// Observe adds a single observation to the histogram. Observations are
 	// usually positive or zero. Negative observations are accepted but
 	// prevent current versions of Prometheus from properly detecting
-	// counter resets in the sum of observations. See
+	// counter resets in the sum of observations. (The experimental Native
+	// Histograms handle negative observations properly.) See
 	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
 	// for details.
 	Observe(float64)
@@ -64,18 +262,28 @@ const bucketLabel = "le"
 // tailored to broadly measure the response time (in seconds) of a network
 // service. Most likely, however, you will be required to define buckets
 // customized to your use case.
-var (
-	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
 
-	errBucketLabelNotAllowed = fmt.Errorf(
-		"%q is not allowed as label name in histograms", bucketLabel,
-	)
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in histograms", bucketLabel,
 )
 
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is zero or negative.
 func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 {
 	return buckets
 }
 
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
 // or if 'factor' is less than or equal 1.
@@ -180,8 +388,85 @@ type HistogramOpts struct {
 	// element in the slice is the upper inclusive bound of a bucket. The
 	// values must be sorted in strictly increasing order. There is no need
 	// to add a highest bucket with +Inf bound, it will be added
-	// implicitly. The default value is DefBuckets.
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
 	Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (exception see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^n), where n is an integer number between (and including) -8 and
+	// 4. n is chosen so that the resulting factor is the largest that is
+	// still smaller or equal to NativeHistogramBucketFactor. Note that the
+	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+	// it is larger than the provided NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Subsequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value of less or equal
+	// NativeHistogramZeroThreshold are accumulated into a “zero”
+	// bucket. For best results, this should be close to a bucket
+	// boundary. This is usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To configure
+	// a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The remaining fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.)  Once the set
+	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+	// enacted: First, if the last reset (or the creation) of the histogram
+	// is at least NativeHistogramMinResetDuration ago, then the whole
+	// histogram is reset to its initial state (including regular
+	// buckets). If less time has passed, or if
+	// NativeHistogramMinResetDuration is zero, no reset is
+	// performed. Instead, the zero threshold is increased sufficiently to
+	// reduce the number of buckets to or below
+	// NativeHistogramMaxBucketNumber, but not to more than
+	// NativeHistogramMaxZeroThreshold. Thus, if
+	// NativeHistogramMaxZeroThreshold is already at or below the current
+	// zero threshold, nothing happens at this step. After that, if the
+	// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
+	// resolution of the histogram is reduced by doubling the width of the
+	// sparse buckets (up to a growth factor between one bucket to the next
+	// of 2^(2^4) = 65536, see above).
+	NativeHistogramMaxBucketNumber  uint32
+	NativeHistogramMinResetDuration time.Duration
+	NativeHistogramMaxZeroThreshold float64
 }
 
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		}
 	}
 
-	if len(opts.Buckets) == 0 {
-		opts.Buckets = DefBuckets
-	}
-
 	h := &histogram{
-		desc:        desc,
-		upperBounds: opts.Buckets,
-		labelPairs:  MakeLabelPairs(desc, labelValues),
-		counts:      [2]*histogramCounts{{}, {}},
-		now:         time.Now,
+		desc:                            desc,
+		upperBounds:                     opts.Buckets,
+		labelPairs:                      MakeLabelPairs(desc, labelValues),
+		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
+		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+		lastResetTime:                   time.Now(),
+		now:                             time.Now,
+	}
+	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+		h.upperBounds = DefBuckets
+	}
+	if opts.NativeHistogramBucketFactor <= 1 {
+		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+	} else {
+		switch {
+		case opts.NativeHistogramZeroThreshold > 0:
+			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+		case opts.NativeHistogramZeroThreshold == 0:
+			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
 	}
 	for i, upperBound := range h.upperBounds {
 		if i < len(h.upperBounds)-1 {
@@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	}
 	// Finally we know the final length of h.upperBounds and can make buckets
 	// for both counts as well as exemplars:
-	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
-	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[0] = &histogramCounts{
+		buckets:                          make([]uint64, len(h.upperBounds)),
+		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+		nativeHistogramSchema:            h.nativeHistogramSchema,
+	}
+	h.counts[1] = &histogramCounts{
+		buckets:                          make([]uint64, len(h.upperBounds)),
+		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+		nativeHistogramSchema:            h.nativeHistogramSchema,
+	}
 	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 
 	h.init(h) // Init self-collection.
@@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 }
 
 type histogramCounts struct {
+	// Order in this struct matters for the alignment required by atomic
+	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
 	// sumBits contains the bits of the float64 representing the sum of all
-	// observations. sumBits and count have to go first in the struct to
-	// guarantee alignment for atomic operations.
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	// observations.
 	sumBits uint64
 	count   uint64
+
+	// nativeHistogramZeroBucket counts all (positive and negative)
+	// observations in the zero bucket (with an absolute value less or equal
+	// the current threshold, see next field).
+	nativeHistogramZeroBucket uint64
+	// nativeHistogramZeroThresholdBits is the bit pattern of the current
+	// threshold for the zero bucket. It's initially equal to
+	// nativeHistogramZeroThreshold but may change according to the bucket
+	// count limitation strategy.
+	nativeHistogramZeroThresholdBits uint64
+	// nativeHistogramSchema may change over time according to the bucket
+	// count limitation strategy and therefore has to be saved here.
+	nativeHistogramSchema int32
+	// Number of (positive and negative) sparse buckets.
+	nativeHistogramBucketsNumber uint32
+
+	// Regular buckets.
 	buckets []uint64
+
+	// The sparse buckets for native histograms are implemented with a
+	// sync.Map for now. A dedicated data structure will likely be more
+	// efficient. There are separate maps for negative and positive
+	// observations. The map's value is an *int64, counting observations in
+	// that bucket. (Note that we don't use uint64 as an int64 won't
+	// overflow in practice, and working with signed numbers from the
+	// beginning simplifies the handling of deltas.) The map's key is the
+	// index of the bucket according to the used
+	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
+	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be done,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+	if bucket < len(hc.buckets) {
+		atomic.AddUint64(&hc.buckets[bucket], 1)
+	}
+	atomicAddFloat(&hc.sumBits, v)
+	if doSparse && !math.IsNaN(v) {
+		var (
+			key                  int
+			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
+			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+			bucketCreated, isInf bool
+		)
+		if math.IsInf(v, 0) {
+			// Pretend v is MaxFloat64 but later increment key by one.
+			if math.IsInf(v, +1) {
+				v = math.MaxFloat64
+			} else {
+				v = -math.MaxFloat64
+			}
+			isInf = true
+		}
+		frac, exp := math.Frexp(math.Abs(v))
+		if schema > 0 {
+			bounds := nativeHistogramBounds[schema]
+			key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+		} else {
+			key = exp
+			if frac == 0.5 {
+				key--
+			}
+			div := 1 << -schema
+			key = (key + div - 1) / div
+		}
+		if isInf {
+			key++
+		}
+		switch {
+		case v > zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+		case v < -zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+		default:
+			atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+		}
+		if bucketCreated {
+			atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hc.count, 1)
 }
 
 type histogram struct {
@@ -276,7 +667,7 @@ type histogram struct {
 	// perspective of the histogram) swap the hot–cold under the writeMtx
 	// lock. A cooldown is awaited (while locked) by comparing the number of
 	// observations with the initiation count. Once they match, then the
-	// last observation on the now cool one has completed. All cool fields must
+	// last observation on the now cool one has completed. All cold fields must
 	// be merged into the new hot before releasing writeMtx.
 	//
 	// Fields with atomic access first! See alignment constraint:
@@ -284,8 +675,10 @@ type histogram struct {
 	countAndHotIdx uint64
 
 	selfCollector
-	desc     *Desc
-	writeMtx sync.Mutex // Only used in the Write method.
+	desc *Desc
+
+	// Only used in the Write method and for sparse bucket management.
+	mtx sync.Mutex
 
 	// Two counts, one is "hot" for lock-free observations, the other is
 	// "cold" for writing out a dto.Metric. It has to be an array of
@@ -293,9 +686,15 @@ type histogram struct {
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	counts [2]*histogramCounts
 
-	upperBounds []float64
-	labelPairs  []*dto.LabelPair
-	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	upperBounds                     []float64
+	labelPairs                      []*dto.LabelPair
+	exemplars                       []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	nativeHistogramSchema           int32          // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+	nativeHistogramZeroThreshold    float64        // The initial zero threshold.
+	nativeHistogramMaxZeroThreshold float64
+	nativeHistogramMaxBuckets       uint32
+	nativeHistogramMinResetDuration time.Duration
+	lastResetTime                   time.Time // Protected by mtx.
 
 	now func() time.Time // To mock out time.Now() for testing.
 }
@@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error {
 	// the hot path, i.e. Observe is called much more often than Write. The
 	// complication of making Write lock-free isn't worth it, if possible at
 	// all.
-	h.writeMtx.Lock()
-	defer h.writeMtx.Unlock()
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
 
 	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
 	// without touching the count bits. See the struct comments for a full
@@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error {
 	hotCounts := h.counts[n>>63]
 	coldCounts := h.counts[(^n)>>63]
 
-	// Await cooldown.
-	for count != atomic.LoadUint64(&coldCounts.count) {
-		runtime.Gosched() // Let observations get work done.
-	}
+	waitForCooldown(count, coldCounts)
 
 	his := &dto.Histogram{
 		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
 		SampleCount: proto.Uint64(count),
 		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
 	}
+	out.Histogram = his
+	out.Label = h.labelPairs
+
 	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error {
 		}
 		his.Bucket = append(his.Bucket, b)
 	}
-
-	out.Histogram = his
-	out.Label = h.labelPairs
-
-	// Finally add all the cold counts to the new hot counts and reset the cold counts.
-	atomic.AddUint64(&hotCounts.count, count)
-	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
-		}
-	}
-	for i := range h.upperBounds {
-		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
-		atomic.StoreUint64(&coldCounts.buckets[i], 0)
+	if h.nativeHistogramSchema > math.MinInt32 {
+		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+		defer func() {
+			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+		}()
+
+		his.ZeroCount = proto.Uint64(zeroBucket)
+		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
 	}
+	addAndResetCounts(hotCounts, coldCounts)
 	return nil
 }
 
@@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int {
 
 // observe is the implementation for Observe without the findBucket part.
 func (h *histogram) observe(v float64, bucket int) {
+	// Do not add to sparse buckets for NaN observations.
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
 	// We increment h.countAndHotIdx so that the counter in the lower
 	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
 	n := atomic.AddUint64(&h.countAndHotIdx, 1)
 	hotCounts := h.counts[n>>63]
+	hotCounts.observe(v, bucket, doSparse)
+	if doSparse {
+		h.limitBuckets(hotCounts, v, bucket)
+	}
+}
 
-	if bucket < len(h.upperBounds) {
-		atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
+		return // No limit configured.
 	}
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded yet.
+	}
+
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// The hot counts might have been swapped just before we acquired the
+	// lock. Re-fetch the hot counts first...
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hotCounts := h.counts[hotIdx]
+	coldCounts := h.counts[coldIdx]
+	// ...and then check again if we really have to reduce the bucket count.
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded after all.
+	}
+	// Try the various strategies in order.
+	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+		return
+	}
+	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+		return
+	}
+	h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
+// has passed since the last reset. It returns true if the histogram has been reset. The caller
+// must have locked h.mtx.
+func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
+	// We are using the possibly mocked h.now() rather than
+	// time.Since(h.lastResetTime) to enable testing.
+	if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+		return false
+	}
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Repeat the latest observation to not lose it completely.
+	cold.observe(value, bucket, true)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	return true
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+		return false
+	}
+	// Find the key of the bucket closest to zero.
+	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+	if smallestNegativeKey < smallestKey {
+		smallestKey = smallestNegativeKey
+	}
+	if smallestKey == math.MaxInt32 {
+		return false
+	}
+	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+		return false // New threshold would exceed the max threshold.
+	}
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// Remove applicable buckets.
+	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	// Make cold counts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the new zero threshold in the cold counts, too...
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// ...and then merge the newly deleted buckets into the wider zero
+	// bucket.
+	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			if key == smallestKey {
+				// Merge into hot zero bucket...
+				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+				// ...and delete from cold counts.
+				coldBuckets.Delete(key)
+				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+			} else {
+				// Add to corresponding hot bucket...
+				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+				}
+				// ...and reset cold bucket.
+				atomic.StoreInt64(bucket, 0)
+			}
+			return true
 		}
 	}
-	// Increment count last as we take it as a signal that the observation
-	// is complete.
-	atomic.AddUint64(&hotCounts.count, 1)
+
+	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+	return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+	if coldSchema == -4 {
+		return // Already at lowest resolution.
+	}
+	coldSchema--
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// Play it simple and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+	// Make coldCounts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the schema in the cold counts, too...
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// ...and then merge the cold buckets into the wider hot buckets.
+	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			// Adjust key to match the bucket to merge into.
+			if key > 0 {
+				key++
+			}
+			key /= 2
+			// Add to corresponding hot bucket.
+			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+	// Play it simple again and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+	atomic.StoreUint64(&counts.sumBits, 0)
+	atomic.StoreUint64(&counts.count, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+	atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+	for i := range h.upperBounds {
+		atomic.StoreUint64(&counts.buckets[i], 0)
+	}
+	deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
 }
 
 // updateExemplar replaces the exemplar for the provided bucket. With empty
@@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
 func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
 	h, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
 
 // With works as GetMetricWith but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
 func (v *HistogramVec) With(labels Labels) Observer {
 	h, err := v.GetMetricWith(labels)
 	if err != nil {
@@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
 // to send it to Prometheus in the Collect method.
 //
 // buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
 //
 // NewConstHistogram returns an error if the length of labelValues is not
 // consistent with the variable labels in Desc or if Desc is invalid.
@@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) {
 func (s buckSort) Less(i, j int) bool {
 	return s[i].GetUpperBound() < s[j].GetUpperBound()
 }
+
+// pickSchema returns the largest number n between -4 and 8 such that
+// 2^(2^-n) is less or equal the provided bucketFactor.
+//
+// Special cases:
+//   - bucketFactor <= 1: panics.
+//   - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+	if bucketFactor <= 1 {
+		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+	}
+	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+	switch {
+	case floor <= -8:
+		return 8
+	case floor >= 4:
+		return -4
+	default:
+		return -int32(floor)
+	}
+}
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+	var ii []int
+	buckets.Range(func(k, v interface{}) bool {
+		ii = append(ii, k.(int))
+		return true
+	})
+	sort.Ints(ii)
+
+	if len(ii) == 0 {
+		return nil, nil
+	}
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		v, _ := buckets.Load(i)
+		count := atomic.LoadInt64(v.(*int64))
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+	if existingBucket, ok := buckets.Load(key); ok {
+		// Fast path without allocation.
+		atomic.AddInt64(existingBucket.(*int64), increment)
+		return false
+	}
+	// Bucket doesn't exist yet. Slow path allocating new counter.
+	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+		// The bucket was created concurrently in another goroutine.
+		// Have to increment after all.
+		atomic.AddInt64(actualBucket.(*int64), increment)
+		return false
+	}
+	return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+	return func(k, v interface{}) bool {
+		bucket := v.(*int64)
+		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+			atomic.AddUint32(bucketNumber, 1)
+		}
+		atomic.StoreInt64(bucket, 0)
+		return true
+	}
+}
+
+func deleteSyncMap(m *sync.Map) {
+	m.Range(func(k, v interface{}) bool {
+		m.Delete(k)
+		return true
+	})
+}
+
+func findSmallestKey(m *sync.Map) int {
+	result := math.MaxInt32
+	m.Range(func(k, v interface{}) bool {
+		key := k.(int)
+		if key < result {
+			result = key
+		}
+		return true
+	})
+	return result
+}
+
+func getLe(key int, schema int32) float64 {
+	// Here a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with a key
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := key << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := key & ((1 << schema) - 1)
+	frac := nativeHistogramBounds[schema][fracIdx]
+	exp := (key >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+	for count != atomic.LoadUint64(&counts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+	for {
+		loadedBits := atomic.LoadUint64(bits)
+		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+			break
+		}
+	}
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to.  See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+	atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+	atomic.StoreUint64(&cold.count, 0)
+	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+	atomicAddFloat(&hot.sumBits, coldSum)
+	atomic.StoreUint64(&cold.sumBits, 0)
+	for i := range hot.buckets {
+		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+		atomic.StoreUint64(&cold.buckets[i], 0)
+	}
+	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
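
The sum handling above depends on the lock-free pattern in atomicAddFloat: the float64 is stored as its bit pattern in a uint64 and updated in a compare-and-swap loop. A minimal, standalone sketch of the same technique, assuming nothing beyond the standard library:

	package main

	import (
		"fmt"
		"math"
		"sync"
		"sync/atomic"
	)

	// addFloat64 adds v to the float64 whose bit pattern *bits holds, retrying
	// the compare-and-swap until no concurrent writer interferes.
	func addFloat64(bits *uint64, v float64) {
		for {
			oldBits := atomic.LoadUint64(bits)
			newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
			if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
				return
			}
		}
	}

	func main() {
		var sumBits uint64 // bit pattern of 0.0
		var wg sync.WaitGroup
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				addFloat64(&sumBits, 0.5)
			}()
		}
		wg.Wait()
		fmt.Println(math.Float64frombits(sumBits)) // 50
	}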

+ 60 - 0
vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go

@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copied and pasted to avoid a dependency. Hence
+// this file carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+	"math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+	if a == b {
+		return true
+	}
+	absA := math.Abs(a)
+	absB := math.Abs(b)
+	diff := math.Abs(a - b)
+	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+		return diff < epsilon*minNormalFloat64
+	}
+	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+			return false
+		}
+	}
+	return true
+}
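
Since this helper lives in an internal package it cannot be imported directly; as a hedged illustration of how the relative-error comparison behaves, the same formula can be inlined in a small standalone program:

	package main

	import (
		"fmt"
		"math"
	)

	// minNormal is the smallest positive normal float64, as above.
	var minNormal = math.Float64frombits(0x0010000000000000)

	// almostEqual mirrors AlmostEqualFloat64: exact match first, a special case
	// for zeros and subnormal operands, otherwise a relative-error test.
	func almostEqual(a, b, epsilon float64) bool {
		if a == b {
			return true
		}
		absA, absB, diff := math.Abs(a), math.Abs(b), math.Abs(a-b)
		if a == 0 || b == 0 || absA+absB < minNormal {
			return diff < epsilon*minNormal
		}
		return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
	}

	func main() {
		fmt.Println(almostEqual(0.1+0.2, 0.3, 1e-12)) // true: only rounding noise
		fmt.Println(almostEqual(1.0, 1.01, 1e-12))    // false: genuinely different
	}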

+ 8 - 5
vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go

@@ -201,12 +201,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool {
 // If IsJunk is not defined:
 //
 // Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-//     alo <= i <= i+k <= ahi
-//     blo <= j <= j+k <= bhi
+//
+//	alo <= i <= i+k <= ahi
+//	blo <= j <= j+k <= bhi
+//
 // and for all (i',j',k') meeting those conditions,
-//     k >= k'
-//     i <= i'
-//     and if i == i', j <= j'
+//
+//	k >= k'
+//	i <= i'
+//	and if i == i', j <= j'
 //
 // In other words, of all maximal matching blocks, return one that
 // starts earliest in a, and of all those maximal matching blocks that

+ 2 - 1
vendor/github.com/prometheus/client_golang/prometheus/labels.go

@@ -25,7 +25,8 @@ import (
 // Labels represents a collection of label name -> value mappings. This type is
 // commonly used with the With(Labels) and GetMetricWith(Labels) methods of
 // metric vector Collectors, e.g.:
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
 //
 // The other use-case is the specification of constant label pairs in Opts or to
 // create a Desc.

+ 1 - 1
vendor/github.com/prometheus/client_golang/prometheus/metric.go

@@ -187,7 +187,7 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
 			} else {
 				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
 				b := &dto.Bucket{
-					CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
+					CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
 					UpperBound:      proto.Float64(math.Inf(1)),
 					Exemplar:        e,
 				}

+ 2 - 3
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go

@@ -73,12 +73,11 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
 	return func(r *http.Request) (*http.Response, error) {
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			exemplarAdd(
+			addWithExemplar(
 				counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
 				1,
 				rtOpts.getExemplarFn(r.Context()),
 			)
-			counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
 		}
 		return resp, err
 	}
@@ -117,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
 		start := time.Now()
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
 				time.Since(start).Seconds(),
 				rtOpts.getExemplarFn(r.Context()),

+ 14 - 10
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go

@@ -28,7 +28,9 @@ import (
 // magicString is used for the hacky label test in checkLabels. Remove once fixed.
 const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
 
-func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
+// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
+// which falls back to [prometheus.Observer.Observe] if no labels are provided.
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
 	if labels == nil {
 		obs.Observe(val)
 		return
@@ -36,7 +38,9 @@ func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]str
 	obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
 }
 
-func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
+// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
+// which falls back to [prometheus.Counter.Add] if no labels are provided.
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
 	if labels == nil {
 		obs.Add(val)
 		return
@@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 				time.Since(now).Seconds(),
 				hOpts.getExemplarFn(r.Context()),
@@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 		now := time.Now()
 		next.ServeHTTP(w, r)
 
-		exemplarObserve(
+		observeWithExemplar(
 			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
 			time.Since(now).Seconds(),
 			hOpts.getExemplarFn(r.Context()),
@@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			exemplarAdd(
+			addWithExemplar(
 				counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 				1,
 				hOpts.getExemplarFn(r.Context()),
@@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
 
 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
-		exemplarAdd(
+		addWithExemplar(
 			counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
 			1,
 			hOpts.getExemplarFn(r.Context()),
@@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
 	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		d := newDelegator(w, func(status int) {
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
 				time.Since(now).Seconds(),
 				hOpts.getExemplarFn(r.Context()),
@@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 			size := computeApproximateRequestSize(r)
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 				float64(size),
 				hOpts.getExemplarFn(r.Context()),
@@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
 		size := computeApproximateRequestSize(r)
-		exemplarObserve(
+		observeWithExemplar(
 			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
 			float64(size),
 			hOpts.getExemplarFn(r.Context()),
@@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		d := newDelegator(w, nil)
 		next.ServeHTTP(d, r)
-		exemplarObserve(
+		observeWithExemplar(
 			obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 			float64(d.Written()),
 			hOpts.getExemplarFn(r.Context()),
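
These renamed wrappers back the InstrumentHandler* middlewares in this file. A hedged sketch of composing two of those middlewares around an HTTP handler (metric names and the handler are illustrative only; exemplar labels are attached through the wrappers above when the handler options provide a getExemplarFn):

	package main

	import (
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	func main() {
		requests := prometheus.NewCounterVec(
			prometheus.CounterOpts{Name: "app_requests_total", Help: "Handled requests."},
			[]string{"code", "method"},
		)
		latency := prometheus.NewHistogramVec(
			prometheus.HistogramOpts{Name: "app_request_duration_seconds", Help: "Request latency."},
			[]string{"code", "method"},
		)
		prometheus.MustRegister(requests, latency)

		hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello"))
		})

		// Counter and duration instrumentation composed around the same handler.
		http.Handle("/hello", promhttp.InstrumentHandlerCounter(requests,
			promhttp.InstrumentHandlerDuration(latency, hello)))
		http.Handle("/metrics", promhttp.Handler())
		http.ListenAndServe(":8080", nil)
	}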

+ 31 - 3
vendor/github.com/prometheus/client_golang/prometheus/registry.go

@@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error {
 }
 
 // Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements both Registerer and
-// Gatherer. The zero value is not usable. Create instances with NewRegistry or
-// NewPedanticRegistry.
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
 type Registry struct {
 	mtx                   sync.RWMutex
 	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
@@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }
 
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	// Only report the checked Collectors; unchecked collectors don't report any
+	// Desc.
+	for _, c := range r.collectorsByID {
+		c.Describe(ch)
+	}
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	for _, c := range r.collectorsByID {
+		c.Collect(ch)
+	}
+	for _, c := range r.uncheckedCollectors {
+		c.Collect(ch)
+	}
+}
+
 // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
 // Prometheus text format, and writes it to a temporary file. Upon success, the
 // temporary file is renamed to the provided filename.
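
A minimal sketch of the grouping use case that the new Collector implementation enables: a sub-registry collects a group of related metrics and is itself registered with a parent registry (names are illustrative only):

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		// A group of related metrics lives in its own registry...
		group := prometheus.NewRegistry()
		group.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
			Name: "subsystem_ops_total", Help: "Operations in the subsystem.",
		}))

		// ...and, because Registry now implements Collector, the whole group can
		// be registered with a parent registry in a single step.
		parent := prometheus.NewRegistry()
		parent.MustRegister(group)

		if _, err := parent.Gather(); err != nil {
			panic(err)
		}
	}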

+ 6 - 3
vendor/github.com/prometheus/client_golang/prometheus/summary.go

@@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
 func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
 	s, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
 func (v *SummaryVec) With(labels Labels) Observer {
 	s, err := v.GetMetricWith(labels)
 	if err != nil {
@@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error {
 //
 // quantiles maps ranks to quantile values. For example, a median latency of
 // 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
 //
 // NewConstSummary returns an error if the length of labelValues is not
 // consistent with the variable labels in Desc or if Desc is invalid.
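
As a hedged illustration of that quantile map (count, sum, and quantile values invented for the example), a constant summary could be built like this:

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		desc := prometheus.NewDesc(
			"http_request_duration_seconds",
			"Request latency as observed by an external system.",
			nil, nil,
		)
		// count, sum, and the rank -> quantile value map described above.
		s := prometheus.MustNewConstSummary(desc, 4711, 403.34,
			map[float64]float64{0.5: 0.23, 0.99: 0.56})
		fmt.Println(s.Desc())
	}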

+ 6 - 5
vendor/github.com/prometheus/client_golang/prometheus/timer.go

@@ -25,11 +25,12 @@ type Timer struct {
 // NewTimer creates a new Timer. The provided Observer is used to observe a
 // duration in seconds. Timer is usually used to time a function call in the
 // following way:
-//    func TimeMe() {
-//        timer := NewTimer(myHistogram)
-//        defer timer.ObserveDuration()
-//        // Do actual work.
-//    }
+//
+//	func TimeMe() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDuration()
+//	    // Do actual work.
+//	}
 func NewTimer(o Observer) *Timer {
 	return &Timer{
 		begin:    time.Now(),

+ 262 - 71
vendor/github.com/prometheus/client_model/go/metrics.pb.go

@@ -1,5 +1,5 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: metrics.proto
+// source: io/prometheus/client/metrics.proto
 
 package io_prometheus_client
 
@@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 type MetricType int32
 
 const (
-	MetricType_COUNTER   MetricType = 0
-	MetricType_GAUGE     MetricType = 1
-	MetricType_SUMMARY   MetricType = 2
-	MetricType_UNTYPED   MetricType = 3
+	// COUNTER must use the Metric field "counter".
+	MetricType_COUNTER MetricType = 0
+	// GAUGE must use the Metric field "gauge".
+	MetricType_GAUGE MetricType = 1
+	// SUMMARY must use the Metric field "summary".
+	MetricType_SUMMARY MetricType = 2
+	// UNTYPED must use the Metric field "untyped".
+	MetricType_UNTYPED MetricType = 3
+	// HISTOGRAM must use the Metric field "histogram".
 	MetricType_HISTOGRAM MetricType = 4
+	// GAUGE_HISTOGRAM must use the Metric field "histogram".
+	MetricType_GAUGE_HISTOGRAM MetricType = 5
 )
 
 var MetricType_name = map[int32]string{
@@ -37,14 +44,16 @@ var MetricType_name = map[int32]string{
 	2: "SUMMARY",
 	3: "UNTYPED",
 	4: "HISTOGRAM",
+	5: "GAUGE_HISTOGRAM",
 }
 
 var MetricType_value = map[string]int32{
-	"COUNTER":   0,
-	"GAUGE":     1,
-	"SUMMARY":   2,
-	"UNTYPED":   3,
-	"HISTOGRAM": 4,
+	"COUNTER":         0,
+	"GAUGE":           1,
+	"SUMMARY":         2,
+	"UNTYPED":         3,
+	"HISTOGRAM":       4,
+	"GAUGE_HISTOGRAM": 5,
 }
 
 func (x MetricType) Enum() *MetricType {
@@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
 }
 
 func (MetricType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{0}
+	return fileDescriptor_d1e5ddb18987a258, []int{0}
 }
 
 type LabelPair struct {
@@ -82,7 +91,7 @@ func (m *LabelPair) Reset()         { *m = LabelPair{} }
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
 func (*LabelPair) ProtoMessage()    {}
 func (*LabelPair) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{0}
+	return fileDescriptor_d1e5ddb18987a258, []int{0}
 }
 
 func (m *LabelPair) XXX_Unmarshal(b []byte) error {
@@ -128,7 +137,7 @@ func (m *Gauge) Reset()         { *m = Gauge{} }
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
 func (*Gauge) ProtoMessage()    {}
 func (*Gauge) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{1}
+	return fileDescriptor_d1e5ddb18987a258, []int{1}
 }
 
 func (m *Gauge) XXX_Unmarshal(b []byte) error {
@@ -168,7 +177,7 @@ func (m *Counter) Reset()         { *m = Counter{} }
 func (m *Counter) String() string { return proto.CompactTextString(m) }
 func (*Counter) ProtoMessage()    {}
 func (*Counter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{2}
+	return fileDescriptor_d1e5ddb18987a258, []int{2}
 }
 
 func (m *Counter) XXX_Unmarshal(b []byte) error {
@@ -215,7 +224,7 @@ func (m *Quantile) Reset()         { *m = Quantile{} }
 func (m *Quantile) String() string { return proto.CompactTextString(m) }
 func (*Quantile) ProtoMessage()    {}
 func (*Quantile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{3}
+	return fileDescriptor_d1e5ddb18987a258, []int{3}
 }
 
 func (m *Quantile) XXX_Unmarshal(b []byte) error {
@@ -263,7 +272,7 @@ func (m *Summary) Reset()         { *m = Summary{} }
 func (m *Summary) String() string { return proto.CompactTextString(m) }
 func (*Summary) ProtoMessage()    {}
 func (*Summary) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{4}
+	return fileDescriptor_d1e5ddb18987a258, []int{4}
 }
 
 func (m *Summary) XXX_Unmarshal(b []byte) error {
@@ -316,7 +325,7 @@ func (m *Untyped) Reset()         { *m = Untyped{} }
 func (m *Untyped) String() string { return proto.CompactTextString(m) }
 func (*Untyped) ProtoMessage()    {}
 func (*Untyped) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{5}
+	return fileDescriptor_d1e5ddb18987a258, []int{5}
 }
 
 func (m *Untyped) XXX_Unmarshal(b []byte) error {
@@ -345,9 +354,34 @@ func (m *Untyped) GetValue() float64 {
 }
 
 type Histogram struct {
-	SampleCount          *uint64   `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
-	SampleSum            *float64  `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
-	Bucket               []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+	SampleCount      *uint64  `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
+	SampleSum        *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	// Buckets for the conventional histogram.
+	Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+	// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+	// then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+	// In the future, more bucket schemas may be added using numbers < -4 or > 8.
+	Schema         *int32   `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+	ZeroThreshold  *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
+	ZeroCount      *uint64  `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
+	ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
+	// Negative buckets for the native histogram.
+	NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+	// Use either "negative_delta" or "negative_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	NegativeDelta []int64   `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
+	NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
+	// Positive buckets for the native histogram.
+	PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+	// Use either "positive_delta" or "positive_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	PositiveDelta        []int64   `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
+	PositiveCount        []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
 	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
 	XXX_unrecognized     []byte    `json:"-"`
 	XXX_sizecache        int32     `json:"-"`
@@ -357,7 +391,7 @@ func (m *Histogram) Reset()         { *m = Histogram{} }
 func (m *Histogram) String() string { return proto.CompactTextString(m) }
 func (*Histogram) ProtoMessage()    {}
 func (*Histogram) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{6}
+	return fileDescriptor_d1e5ddb18987a258, []int{6}
 }
 
 func (m *Histogram) XXX_Unmarshal(b []byte) error {
@@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 {
 	return 0
 }
 
+func (m *Histogram) GetSampleCountFloat() float64 {
+	if m != nil && m.SampleCountFloat != nil {
+		return *m.SampleCountFloat
+	}
+	return 0
+}
+
 func (m *Histogram) GetSampleSum() float64 {
 	if m != nil && m.SampleSum != nil {
 		return *m.SampleSum
@@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket {
 	return nil
 }
 
+func (m *Histogram) GetSchema() int32 {
+	if m != nil && m.Schema != nil {
+		return *m.Schema
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroThreshold() float64 {
+	if m != nil && m.ZeroThreshold != nil {
+		return *m.ZeroThreshold
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCount() uint64 {
+	if m != nil && m.ZeroCount != nil {
+		return *m.ZeroCount
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCountFloat() float64 {
+	if m != nil && m.ZeroCountFloat != nil {
+		return *m.ZeroCountFloat
+	}
+	return 0
+}
+
+func (m *Histogram) GetNegativeSpan() []*BucketSpan {
+	if m != nil {
+		return m.NegativeSpan
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeDelta() []int64 {
+	if m != nil {
+		return m.NegativeDelta
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeCount() []float64 {
+	if m != nil {
+		return m.NegativeCount
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveSpan() []*BucketSpan {
+	if m != nil {
+		return m.PositiveSpan
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveDelta() []int64 {
+	if m != nil {
+		return m.PositiveDelta
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveCount() []float64 {
+	if m != nil {
+		return m.PositiveCount
+	}
+	return nil
+}
+
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
 type Bucket struct {
 	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+	CumulativeCountFloat *float64  `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
 	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
 	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
 	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
@@ -412,7 +526,7 @@ func (m *Bucket) Reset()         { *m = Bucket{} }
 func (m *Bucket) String() string { return proto.CompactTextString(m) }
 func (*Bucket) ProtoMessage()    {}
 func (*Bucket) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{7}
+	return fileDescriptor_d1e5ddb18987a258, []int{7}
 }
 
 func (m *Bucket) XXX_Unmarshal(b []byte) error {
@@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 {
 	return 0
 }
 
+func (m *Bucket) GetCumulativeCountFloat() float64 {
+	if m != nil && m.CumulativeCountFloat != nil {
+		return *m.CumulativeCountFloat
+	}
+	return 0
+}
+
 func (m *Bucket) GetUpperBound() float64 {
 	if m != nil && m.UpperBound != nil {
 		return *m.UpperBound
@@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar {
 	return nil
 }
 
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+	Offset               *int32   `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
+	Length               *uint32  `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BucketSpan) Reset()         { *m = BucketSpan{} }
+func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
+func (*BucketSpan) ProtoMessage()    {}
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1e5ddb18987a258, []int{8}
+}
+
+func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
+}
+func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
+}
+func (m *BucketSpan) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BucketSpan.Merge(m, src)
+}
+func (m *BucketSpan) XXX_Size() int {
+	return xxx_messageInfo_BucketSpan.Size(m)
+}
+func (m *BucketSpan) XXX_DiscardUnknown() {
+	xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+
+func (m *BucketSpan) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return 0
+}
+
+func (m *BucketSpan) GetLength() uint32 {
+	if m != nil && m.Length != nil {
+		return *m.Length
+	}
+	return 0
+}
+
 type Exemplar struct {
 	Label                []*LabelPair         `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
 	Value                *float64             `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
@@ -467,7 +641,7 @@ func (m *Exemplar) Reset()         { *m = Exemplar{} }
 func (m *Exemplar) String() string { return proto.CompactTextString(m) }
 func (*Exemplar) ProtoMessage()    {}
 func (*Exemplar) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{8}
+	return fileDescriptor_d1e5ddb18987a258, []int{9}
 }
 
 func (m *Exemplar) XXX_Unmarshal(b []byte) error {
@@ -526,7 +700,7 @@ func (m *Metric) Reset()         { *m = Metric{} }
 func (m *Metric) String() string { return proto.CompactTextString(m) }
 func (*Metric) ProtoMessage()    {}
 func (*Metric) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{9}
+	return fileDescriptor_d1e5ddb18987a258, []int{10}
 }
 
 func (m *Metric) XXX_Unmarshal(b []byte) error {
@@ -610,7 +784,7 @@ func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
 func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
 func (*MetricFamily) ProtoMessage()    {}
 func (*MetricFamily) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{10}
+	return fileDescriptor_d1e5ddb18987a258, []int{11}
 }
 
 func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
@@ -669,55 +843,72 @@ func init() {
 	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
 	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
 	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+	proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
 	proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
 	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
 	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
 }
 
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
-
-var fileDescriptor_6039342a2ba47b72 = []byte{
-	// 665 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
-	0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
-	0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
-	0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
-	0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
-	0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
-	0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
-	0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
-	0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
-	0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
-	0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
-	0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
-	0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
-	0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
-	0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
-	0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
-	0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
-	0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
-	0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
-	0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
-	0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
-	0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
-	0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
-	0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
-	0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
-	0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
-	0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
-	0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
-	0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
-	0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
-	0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
-	0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
-	0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
-	0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
-	0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
-	0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
-	0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
-	0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
-	0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
-	0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
-	0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
-	0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
+func init() {
+	proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
+}
+
+var fileDescriptor_d1e5ddb18987a258 = []byte{
+	// 896 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
+	0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
+	0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
+	0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
+	0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
+	0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
+	0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
+	0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
+	0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
+	0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
+	0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
+	0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
+	0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
+	0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
+	0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
+	0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
+	0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
+	0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
+	0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
+	0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
+	0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
+	0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
+	0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
+	0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
+	0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
+	0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
+	0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
+	0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
+	0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
+	0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
+	0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
+	0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
+	0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
+	0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
+	0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
+	0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
+	0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
+	0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
+	0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
+	0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
+	0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
+	0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
+	0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
+	0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
+	0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
+	0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
+	0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
+	0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
+	0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
+	0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
+	0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
+	0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
+	0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
+	0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
+	0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
+	0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
 }
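
The schema comment on Histogram above states that each bucket boundary is the previous one times 2^(2^-schema). A rough sketch of that formula; the client library itself uses precomputed boundary tables for schema >= 0 rather than computing this directly:

	package main

	import (
		"fmt"
		"math"
	)

	// upperBound returns the upper boundary of a bucket index under the given
	// schema: boundary(i) = 2^(i * 2^-schema).
	func upperBound(index int, schema int32) float64 {
		return math.Exp2(float64(index) * math.Exp2(-float64(schema)))
	}

	func main() {
		// schema 0: plain powers of two.
		fmt.Println(upperBound(1, 0), upperBound(2, 0), upperBound(3, 0)) // 2 4 8
		// schema 2: each power of two is split into 2^2 = 4 logarithmic buckets.
		for i := 1; i <= 4; i++ {
			fmt.Printf("%.4f ", upperBound(i, 2)) // 1.1892 1.4142 1.6818 2.0000
		}
		fmt.Println()
	}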

+ 25 - 28
vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go

@@ -14,6 +14,7 @@ package rc2
 import (
 	"crypto/cipher"
 	"encoding/binary"
+	"math/bits"
 )
 
 // The rc2 block size in bytes
@@ -80,10 +81,6 @@ func expandKey(key []byte, t1 int) [64]uint16 {
 	return k
 }
 
-func rotl16(x uint16, b uint) uint16 {
-	return (x >> (16 - b)) | (x << b)
-}
-
 func (c *rc2Cipher) Encrypt(dst, src []byte) {
 
 	r0 := binary.LittleEndian.Uint16(src[0:])
@@ -96,22 +93,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) {
 	for j <= 16 {
 		// mix r0
 		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
-		r0 = rotl16(r0, 1)
+		r0 = bits.RotateLeft16(r0, 1)
 		j++
 
 		// mix r1
 		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
-		r1 = rotl16(r1, 2)
+		r1 = bits.RotateLeft16(r1, 2)
 		j++
 
 		// mix r2
 		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
-		r2 = rotl16(r2, 3)
+		r2 = bits.RotateLeft16(r2, 3)
 		j++
 
 		// mix r3
 		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
-		r3 = rotl16(r3, 5)
+		r3 = bits.RotateLeft16(r3, 5)
 		j++
 
 	}
@@ -124,22 +121,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) {
 	for j <= 40 {
 		// mix r0
 		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
-		r0 = rotl16(r0, 1)
+		r0 = bits.RotateLeft16(r0, 1)
 		j++
 
 		// mix r1
 		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
-		r1 = rotl16(r1, 2)
+		r1 = bits.RotateLeft16(r1, 2)
 		j++
 
 		// mix r2
 		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
-		r2 = rotl16(r2, 3)
+		r2 = bits.RotateLeft16(r2, 3)
 		j++
 
 		// mix r3
 		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
-		r3 = rotl16(r3, 5)
+		r3 = bits.RotateLeft16(r3, 5)
 		j++
 
 	}
@@ -152,22 +149,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) {
 	for j <= 60 {
 		// mix r0
 		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
-		r0 = rotl16(r0, 1)
+		r0 = bits.RotateLeft16(r0, 1)
 		j++
 
 		// mix r1
 		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
-		r1 = rotl16(r1, 2)
+		r1 = bits.RotateLeft16(r1, 2)
 		j++
 
 		// mix r2
 		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
-		r2 = rotl16(r2, 3)
+		r2 = bits.RotateLeft16(r2, 3)
 		j++
 
 		// mix r3
 		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
-		r3 = rotl16(r3, 5)
+		r3 = bits.RotateLeft16(r3, 5)
 		j++
 	}
 
@@ -188,22 +185,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) {
 
 	for j >= 44 {
 		// unmix r3
-		r3 = rotl16(r3, 16-5)
+		r3 = bits.RotateLeft16(r3, 16-5)
 		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
 		j--
 
 		// unmix r2
-		r2 = rotl16(r2, 16-3)
+		r2 = bits.RotateLeft16(r2, 16-3)
 		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
 		j--
 
 		// unmix r1
-		r1 = rotl16(r1, 16-2)
+		r1 = bits.RotateLeft16(r1, 16-2)
 		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
 		j--
 
 		// unmix r0
-		r0 = rotl16(r0, 16-1)
+		r0 = bits.RotateLeft16(r0, 16-1)
 		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
 		j--
 	}
@@ -215,22 +212,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) {
 
 	for j >= 20 {
 		// unmix r3
-		r3 = rotl16(r3, 16-5)
+		r3 = bits.RotateLeft16(r3, 16-5)
 		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
 		j--
 
 		// unmix r2
-		r2 = rotl16(r2, 16-3)
+		r2 = bits.RotateLeft16(r2, 16-3)
 		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
 		j--
 
 		// unmix r1
-		r1 = rotl16(r1, 16-2)
+		r1 = bits.RotateLeft16(r1, 16-2)
 		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
 		j--
 
 		// unmix r0
-		r0 = rotl16(r0, 16-1)
+		r0 = bits.RotateLeft16(r0, 16-1)
 		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
 		j--
 
@@ -243,22 +240,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) {
 
 	for j >= 0 {
 		// unmix r3
-		r3 = rotl16(r3, 16-5)
+		r3 = bits.RotateLeft16(r3, 16-5)
 		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
 		j--
 
 		// unmix r2
-		r2 = rotl16(r2, 16-3)
+		r2 = bits.RotateLeft16(r2, 16-3)
 		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
 		j--
 
 		// unmix r1
-		r1 = rotl16(r1, 16-2)
+		r1 = bits.RotateLeft16(r1, 16-2)
 		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
 		j--
 
 		// unmix r0
-		r0 = rotl16(r0, 16-1)
+		r0 = bits.RotateLeft16(r0, 16-1)
 		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
 		j--
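
The removed rotl16 helper and bits.RotateLeft16 are interchangeable for the rotation amounts RC2 uses; a quick standalone check of the equivalence:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		x := uint16(0xBEEF)
		for _, b := range []uint{1, 2, 3, 5} { // rotation amounts used by RC2
			handRolled := (x >> (16 - b)) | (x << b)
			fmt.Println(handRolled == bits.RotateLeft16(x, int(b))) // true
		}
	}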
 

+ 34 - 32
vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go

@@ -5,6 +5,8 @@
 // Package salsa provides low-level access to functions in the Salsa family.
 package salsa // import "golang.org/x/crypto/salsa20/salsa"
 
+import "math/bits"
+
 // Sigma is the Salsa20 constant for 256-bit keys.
 var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
 
@@ -31,76 +33,76 @@ func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
 
 	for i := 0; i < 20; i += 2 {
 		u := x0 + x12
-		x4 ^= u<<7 | u>>(32-7)
+		x4 ^= bits.RotateLeft32(u, 7)
 		u = x4 + x0
-		x8 ^= u<<9 | u>>(32-9)
+		x8 ^= bits.RotateLeft32(u, 9)
 		u = x8 + x4
-		x12 ^= u<<13 | u>>(32-13)
+		x12 ^= bits.RotateLeft32(u, 13)
 		u = x12 + x8
-		x0 ^= u<<18 | u>>(32-18)
+		x0 ^= bits.RotateLeft32(u, 18)
 
 		u = x5 + x1
-		x9 ^= u<<7 | u>>(32-7)
+		x9 ^= bits.RotateLeft32(u, 7)
 		u = x9 + x5
-		x13 ^= u<<9 | u>>(32-9)
+		x13 ^= bits.RotateLeft32(u, 9)
 		u = x13 + x9
-		x1 ^= u<<13 | u>>(32-13)
+		x1 ^= bits.RotateLeft32(u, 13)
 		u = x1 + x13
-		x5 ^= u<<18 | u>>(32-18)
+		x5 ^= bits.RotateLeft32(u, 18)
 
 		u = x10 + x6
-		x14 ^= u<<7 | u>>(32-7)
+		x14 ^= bits.RotateLeft32(u, 7)
 		u = x14 + x10
-		x2 ^= u<<9 | u>>(32-9)
+		x2 ^= bits.RotateLeft32(u, 9)
 		u = x2 + x14
-		x6 ^= u<<13 | u>>(32-13)
+		x6 ^= bits.RotateLeft32(u, 13)
 		u = x6 + x2
-		x10 ^= u<<18 | u>>(32-18)
+		x10 ^= bits.RotateLeft32(u, 18)
 
 		u = x15 + x11
-		x3 ^= u<<7 | u>>(32-7)
+		x3 ^= bits.RotateLeft32(u, 7)
 		u = x3 + x15
-		x7 ^= u<<9 | u>>(32-9)
+		x7 ^= bits.RotateLeft32(u, 9)
 		u = x7 + x3
-		x11 ^= u<<13 | u>>(32-13)
+		x11 ^= bits.RotateLeft32(u, 13)
 		u = x11 + x7
-		x15 ^= u<<18 | u>>(32-18)
+		x15 ^= bits.RotateLeft32(u, 18)
 
 		u = x0 + x3
-		x1 ^= u<<7 | u>>(32-7)
+		x1 ^= bits.RotateLeft32(u, 7)
 		u = x1 + x0
-		x2 ^= u<<9 | u>>(32-9)
+		x2 ^= bits.RotateLeft32(u, 9)
 		u = x2 + x1
-		x3 ^= u<<13 | u>>(32-13)
+		x3 ^= bits.RotateLeft32(u, 13)
 		u = x3 + x2
-		x0 ^= u<<18 | u>>(32-18)
+		x0 ^= bits.RotateLeft32(u, 18)
 
 		u = x5 + x4
-		x6 ^= u<<7 | u>>(32-7)
+		x6 ^= bits.RotateLeft32(u, 7)
 		u = x6 + x5
-		x7 ^= u<<9 | u>>(32-9)
+		x7 ^= bits.RotateLeft32(u, 9)
 		u = x7 + x6
-		x4 ^= u<<13 | u>>(32-13)
+		x4 ^= bits.RotateLeft32(u, 13)
 		u = x4 + x7
-		x5 ^= u<<18 | u>>(32-18)
+		x5 ^= bits.RotateLeft32(u, 18)
 
 		u = x10 + x9
-		x11 ^= u<<7 | u>>(32-7)
+		x11 ^= bits.RotateLeft32(u, 7)
 		u = x11 + x10
-		x8 ^= u<<9 | u>>(32-9)
+		x8 ^= bits.RotateLeft32(u, 9)
 		u = x8 + x11
-		x9 ^= u<<13 | u>>(32-13)
+		x9 ^= bits.RotateLeft32(u, 13)
 		u = x9 + x8
-		x10 ^= u<<18 | u>>(32-18)
+		x10 ^= bits.RotateLeft32(u, 18)
 
 		u = x15 + x14
-		x12 ^= u<<7 | u>>(32-7)
+		x12 ^= bits.RotateLeft32(u, 7)
 		u = x12 + x15
-		x13 ^= u<<9 | u>>(32-9)
+		x13 ^= bits.RotateLeft32(u, 9)
 		u = x13 + x12
-		x14 ^= u<<13 | u>>(32-13)
+		x14 ^= bits.RotateLeft32(u, 13)
 		u = x14 + x13
-		x15 ^= u<<18 | u>>(32-18)
+		x15 ^= bits.RotateLeft32(u, 18)
 	}
 	out[0] = byte(x0)
 	out[1] = byte(x0 >> 8)

+ 34 - 32
vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go

@@ -4,6 +4,8 @@
 
 package salsa
 
+import "math/bits"
+
 // Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
 // the result into the 64-byte array out. The input and output may be the same array.
 func Core208(out *[64]byte, in *[64]byte) {
@@ -29,76 +31,76 @@ func Core208(out *[64]byte, in *[64]byte) {
 
 	for i := 0; i < 8; i += 2 {
 		u := x0 + x12
-		x4 ^= u<<7 | u>>(32-7)
+		x4 ^= bits.RotateLeft32(u, 7)
 		u = x4 + x0
-		x8 ^= u<<9 | u>>(32-9)
+		x8 ^= bits.RotateLeft32(u, 9)
 		u = x8 + x4
-		x12 ^= u<<13 | u>>(32-13)
+		x12 ^= bits.RotateLeft32(u, 13)
 		u = x12 + x8
-		x0 ^= u<<18 | u>>(32-18)
+		x0 ^= bits.RotateLeft32(u, 18)
 
 		u = x5 + x1
-		x9 ^= u<<7 | u>>(32-7)
+		x9 ^= bits.RotateLeft32(u, 7)
 		u = x9 + x5
-		x13 ^= u<<9 | u>>(32-9)
+		x13 ^= bits.RotateLeft32(u, 9)
 		u = x13 + x9
-		x1 ^= u<<13 | u>>(32-13)
+		x1 ^= bits.RotateLeft32(u, 13)
 		u = x1 + x13
-		x5 ^= u<<18 | u>>(32-18)
+		x5 ^= bits.RotateLeft32(u, 18)
 
 		u = x10 + x6
-		x14 ^= u<<7 | u>>(32-7)
+		x14 ^= bits.RotateLeft32(u, 7)
 		u = x14 + x10
-		x2 ^= u<<9 | u>>(32-9)
+		x2 ^= bits.RotateLeft32(u, 9)
 		u = x2 + x14
-		x6 ^= u<<13 | u>>(32-13)
+		x6 ^= bits.RotateLeft32(u, 13)
 		u = x6 + x2
-		x10 ^= u<<18 | u>>(32-18)
+		x10 ^= bits.RotateLeft32(u, 18)
 
 		u = x15 + x11
-		x3 ^= u<<7 | u>>(32-7)
+		x3 ^= bits.RotateLeft32(u, 7)
 		u = x3 + x15
-		x7 ^= u<<9 | u>>(32-9)
+		x7 ^= bits.RotateLeft32(u, 9)
 		u = x7 + x3
-		x11 ^= u<<13 | u>>(32-13)
+		x11 ^= bits.RotateLeft32(u, 13)
 		u = x11 + x7
-		x15 ^= u<<18 | u>>(32-18)
+		x15 ^= bits.RotateLeft32(u, 18)
 
 		u = x0 + x3
-		x1 ^= u<<7 | u>>(32-7)
+		x1 ^= bits.RotateLeft32(u, 7)
 		u = x1 + x0
-		x2 ^= u<<9 | u>>(32-9)
+		x2 ^= bits.RotateLeft32(u, 9)
 		u = x2 + x1
-		x3 ^= u<<13 | u>>(32-13)
+		x3 ^= bits.RotateLeft32(u, 13)
 		u = x3 + x2
-		x0 ^= u<<18 | u>>(32-18)
+		x0 ^= bits.RotateLeft32(u, 18)
 
 		u = x5 + x4
-		x6 ^= u<<7 | u>>(32-7)
+		x6 ^= bits.RotateLeft32(u, 7)
 		u = x6 + x5
-		x7 ^= u<<9 | u>>(32-9)
+		x7 ^= bits.RotateLeft32(u, 9)
 		u = x7 + x6
-		x4 ^= u<<13 | u>>(32-13)
+		x4 ^= bits.RotateLeft32(u, 13)
 		u = x4 + x7
-		x5 ^= u<<18 | u>>(32-18)
+		x5 ^= bits.RotateLeft32(u, 18)
 
 		u = x10 + x9
-		x11 ^= u<<7 | u>>(32-7)
+		x11 ^= bits.RotateLeft32(u, 7)
 		u = x11 + x10
-		x8 ^= u<<9 | u>>(32-9)
+		x8 ^= bits.RotateLeft32(u, 9)
 		u = x8 + x11
-		x9 ^= u<<13 | u>>(32-13)
+		x9 ^= bits.RotateLeft32(u, 13)
 		u = x9 + x8
-		x10 ^= u<<18 | u>>(32-18)
+		x10 ^= bits.RotateLeft32(u, 18)
 
 		u = x15 + x14
-		x12 ^= u<<7 | u>>(32-7)
+		x12 ^= bits.RotateLeft32(u, 7)
 		u = x12 + x15
-		x13 ^= u<<9 | u>>(32-9)
+		x13 ^= bits.RotateLeft32(u, 9)
 		u = x13 + x12
-		x14 ^= u<<13 | u>>(32-13)
+		x14 ^= bits.RotateLeft32(u, 13)
 		u = x14 + x13
-		x15 ^= u<<18 | u>>(32-18)
+		x15 ^= bits.RotateLeft32(u, 18)
 	}
 	x0 += j0
 	x1 += j1

+ 34 - 32
vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go

@@ -4,6 +4,8 @@
 
 package salsa
 
+import "math/bits"
+
 const rounds = 20
 
 // core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
@@ -31,76 +33,76 @@ func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
 
 	for i := 0; i < rounds; i += 2 {
 		u := x0 + x12
-		x4 ^= u<<7 | u>>(32-7)
+		x4 ^= bits.RotateLeft32(u, 7)
 		u = x4 + x0
-		x8 ^= u<<9 | u>>(32-9)
+		x8 ^= bits.RotateLeft32(u, 9)
 		u = x8 + x4
-		x12 ^= u<<13 | u>>(32-13)
+		x12 ^= bits.RotateLeft32(u, 13)
 		u = x12 + x8
-		x0 ^= u<<18 | u>>(32-18)
+		x0 ^= bits.RotateLeft32(u, 18)
 
 		u = x5 + x1
-		x9 ^= u<<7 | u>>(32-7)
+		x9 ^= bits.RotateLeft32(u, 7)
 		u = x9 + x5
-		x13 ^= u<<9 | u>>(32-9)
+		x13 ^= bits.RotateLeft32(u, 9)
 		u = x13 + x9
-		x1 ^= u<<13 | u>>(32-13)
+		x1 ^= bits.RotateLeft32(u, 13)
 		u = x1 + x13
-		x5 ^= u<<18 | u>>(32-18)
+		x5 ^= bits.RotateLeft32(u, 18)
 
 		u = x10 + x6
-		x14 ^= u<<7 | u>>(32-7)
+		x14 ^= bits.RotateLeft32(u, 7)
 		u = x14 + x10
-		x2 ^= u<<9 | u>>(32-9)
+		x2 ^= bits.RotateLeft32(u, 9)
 		u = x2 + x14
-		x6 ^= u<<13 | u>>(32-13)
+		x6 ^= bits.RotateLeft32(u, 13)
 		u = x6 + x2
-		x10 ^= u<<18 | u>>(32-18)
+		x10 ^= bits.RotateLeft32(u, 18)
 
 		u = x15 + x11
-		x3 ^= u<<7 | u>>(32-7)
+		x3 ^= bits.RotateLeft32(u, 7)
 		u = x3 + x15
-		x7 ^= u<<9 | u>>(32-9)
+		x7 ^= bits.RotateLeft32(u, 9)
 		u = x7 + x3
-		x11 ^= u<<13 | u>>(32-13)
+		x11 ^= bits.RotateLeft32(u, 13)
 		u = x11 + x7
-		x15 ^= u<<18 | u>>(32-18)
+		x15 ^= bits.RotateLeft32(u, 18)
 
 		u = x0 + x3
-		x1 ^= u<<7 | u>>(32-7)
+		x1 ^= bits.RotateLeft32(u, 7)
 		u = x1 + x0
-		x2 ^= u<<9 | u>>(32-9)
+		x2 ^= bits.RotateLeft32(u, 9)
 		u = x2 + x1
-		x3 ^= u<<13 | u>>(32-13)
+		x3 ^= bits.RotateLeft32(u, 13)
 		u = x3 + x2
-		x0 ^= u<<18 | u>>(32-18)
+		x0 ^= bits.RotateLeft32(u, 18)
 
 		u = x5 + x4
-		x6 ^= u<<7 | u>>(32-7)
+		x6 ^= bits.RotateLeft32(u, 7)
 		u = x6 + x5
-		x7 ^= u<<9 | u>>(32-9)
+		x7 ^= bits.RotateLeft32(u, 9)
 		u = x7 + x6
-		x4 ^= u<<13 | u>>(32-13)
+		x4 ^= bits.RotateLeft32(u, 13)
 		u = x4 + x7
-		x5 ^= u<<18 | u>>(32-18)
+		x5 ^= bits.RotateLeft32(u, 18)
 
 		u = x10 + x9
-		x11 ^= u<<7 | u>>(32-7)
+		x11 ^= bits.RotateLeft32(u, 7)
 		u = x11 + x10
-		x8 ^= u<<9 | u>>(32-9)
+		x8 ^= bits.RotateLeft32(u, 9)
 		u = x8 + x11
-		x9 ^= u<<13 | u>>(32-13)
+		x9 ^= bits.RotateLeft32(u, 13)
 		u = x9 + x8
-		x10 ^= u<<18 | u>>(32-18)
+		x10 ^= bits.RotateLeft32(u, 18)
 
 		u = x15 + x14
-		x12 ^= u<<7 | u>>(32-7)
+		x12 ^= bits.RotateLeft32(u, 7)
 		u = x12 + x15
-		x13 ^= u<<9 | u>>(32-9)
+		x13 ^= bits.RotateLeft32(u, 9)
 		u = x13 + x12
-		x14 ^= u<<13 | u>>(32-13)
+		x14 ^= bits.RotateLeft32(u, 13)
 		u = x14 + x13
-		x15 ^= u<<18 | u>>(32-18)
+		x15 ^= bits.RotateLeft32(u, 18)
 	}
 	x0 += j0
 	x1 += j1

+ 1 - 1
vendor/golang.org/x/crypto/ssh/messages.go

@@ -68,7 +68,7 @@ type kexInitMsg struct {
 
 // See RFC 4253, section 8.
 
-// Diffie-Helman
+// Diffie-Hellman
 const msgKexDHInit = 30
 
 type kexDHInitMsg struct {

+ 18 - 0
vendor/golang.org/x/net/http2/headermap.go

@@ -27,7 +27,14 @@ func buildCommonHeaderMaps() {
 		"accept-language",
 		"accept-ranges",
 		"age",
+		"access-control-allow-credentials",
+		"access-control-allow-headers",
+		"access-control-allow-methods",
 		"access-control-allow-origin",
+		"access-control-expose-headers",
+		"access-control-max-age",
+		"access-control-request-headers",
+		"access-control-request-method",
 		"allow",
 		"authorization",
 		"cache-control",
@@ -53,6 +60,7 @@ func buildCommonHeaderMaps() {
 		"link",
 		"location",
 		"max-forwards",
+		"origin",
 		"proxy-authenticate",
 		"proxy-authorization",
 		"range",
@@ -68,6 +76,8 @@ func buildCommonHeaderMaps() {
 		"vary",
 		"via",
 		"www-authenticate",
+		"x-forwarded-for",
+		"x-forwarded-proto",
 	}
 	commonLowerHeader = make(map[string]string, len(common))
 	commonCanonHeader = make(map[string]string, len(common))
@@ -85,3 +95,11 @@ func lowerHeader(v string) (lower string, ascii bool) {
 	}
 	return asciiToLower(v)
 }
+
+func canonicalHeader(v string) string {
+	buildCommonHeaderMapsOnce()
+	if s, ok := commonCanonHeader[v]; ok {
+		return s
+	}
+	return http.CanonicalHeaderKey(v)
+}
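
For header names outside the common table, the new canonicalHeader helper falls back to the standard library; a small sketch of that fallback path, using only net/http:

	package main

	import (
		"fmt"
		"net/http"
	)

	func main() {
		// Lower-cased HTTP/2 header names map back to Go's canonical textproto
		// form; names missing from the common table go through CanonicalHeaderKey.
		fmt.Println(http.CanonicalHeaderKey("x-forwarded-proto")) // X-Forwarded-Proto
		fmt.Println(http.CanonicalHeaderKey("x-request-id"))      // X-Request-Id
	}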

+ 188 - 0
vendor/golang.org/x/net/http2/hpack/static_table.go

@@ -0,0 +1,188 @@
+// go generate gen.go
+// Code generated by the command above; DO NOT EDIT.
+
+package hpack
+
+var staticTable = &headerFieldTable{
+	evictCount: 0,
+	byName: map[string]uint64{
+		":authority":                  1,
+		":method":                     3,
+		":path":                       5,
+		":scheme":                     7,
+		":status":                     14,
+		"accept-charset":              15,
+		"accept-encoding":             16,
+		"accept-language":             17,
+		"accept-ranges":               18,
+		"accept":                      19,
+		"access-control-allow-origin": 20,
+		"age":                         21,
+		"allow":                       22,
+		"authorization":               23,
+		"cache-control":               24,
+		"content-disposition":         25,
+		"content-encoding":            26,
+		"content-language":            27,
+		"content-length":              28,
+		"content-location":            29,
+		"content-range":               30,
+		"content-type":                31,
+		"cookie":                      32,
+		"date":                        33,
+		"etag":                        34,
+		"expect":                      35,
+		"expires":                     36,
+		"from":                        37,
+		"host":                        38,
+		"if-match":                    39,
+		"if-modified-since":           40,
+		"if-none-match":               41,
+		"if-range":                    42,
+		"if-unmodified-since":         43,
+		"last-modified":               44,
+		"link":                        45,
+		"location":                    46,
+		"max-forwards":                47,
+		"proxy-authenticate":          48,
+		"proxy-authorization":         49,
+		"range":                       50,
+		"referer":                     51,
+		"refresh":                     52,
+		"retry-after":                 53,
+		"server":                      54,
+		"set-cookie":                  55,
+		"strict-transport-security":   56,
+		"transfer-encoding":           57,
+		"user-agent":                  58,
+		"vary":                        59,
+		"via":                         60,
+		"www-authenticate":            61,
+	},
+	byNameValue: map[pairNameValue]uint64{
+		{name: ":authority", value: ""}:                   1,
+		{name: ":method", value: "GET"}:                   2,
+		{name: ":method", value: "POST"}:                  3,
+		{name: ":path", value: "/"}:                       4,
+		{name: ":path", value: "/index.html"}:             5,
+		{name: ":scheme", value: "http"}:                  6,
+		{name: ":scheme", value: "https"}:                 7,
+		{name: ":status", value: "200"}:                   8,
+		{name: ":status", value: "204"}:                   9,
+		{name: ":status", value: "206"}:                   10,
+		{name: ":status", value: "304"}:                   11,
+		{name: ":status", value: "400"}:                   12,
+		{name: ":status", value: "404"}:                   13,
+		{name: ":status", value: "500"}:                   14,
+		{name: "accept-charset", value: ""}:               15,
+		{name: "accept-encoding", value: "gzip, deflate"}: 16,
+		{name: "accept-language", value: ""}:              17,
+		{name: "accept-ranges", value: ""}:                18,
+		{name: "accept", value: ""}:                       19,
+		{name: "access-control-allow-origin", value: ""}:  20,
+		{name: "age", value: ""}:                          21,
+		{name: "allow", value: ""}:                        22,
+		{name: "authorization", value: ""}:                23,
+		{name: "cache-control", value: ""}:                24,
+		{name: "content-disposition", value: ""}:          25,
+		{name: "content-encoding", value: ""}:             26,
+		{name: "content-language", value: ""}:             27,
+		{name: "content-length", value: ""}:               28,
+		{name: "content-location", value: ""}:             29,
+		{name: "content-range", value: ""}:                30,
+		{name: "content-type", value: ""}:                 31,
+		{name: "cookie", value: ""}:                       32,
+		{name: "date", value: ""}:                         33,
+		{name: "etag", value: ""}:                         34,
+		{name: "expect", value: ""}:                       35,
+		{name: "expires", value: ""}:                      36,
+		{name: "from", value: ""}:                         37,
+		{name: "host", value: ""}:                         38,
+		{name: "if-match", value: ""}:                     39,
+		{name: "if-modified-since", value: ""}:            40,
+		{name: "if-none-match", value: ""}:                41,
+		{name: "if-range", value: ""}:                     42,
+		{name: "if-unmodified-since", value: ""}:          43,
+		{name: "last-modified", value: ""}:                44,
+		{name: "link", value: ""}:                         45,
+		{name: "location", value: ""}:                     46,
+		{name: "max-forwards", value: ""}:                 47,
+		{name: "proxy-authenticate", value: ""}:           48,
+		{name: "proxy-authorization", value: ""}:          49,
+		{name: "range", value: ""}:                        50,
+		{name: "referer", value: ""}:                      51,
+		{name: "refresh", value: ""}:                      52,
+		{name: "retry-after", value: ""}:                  53,
+		{name: "server", value: ""}:                       54,
+		{name: "set-cookie", value: ""}:                   55,
+		{name: "strict-transport-security", value: ""}:    56,
+		{name: "transfer-encoding", value: ""}:            57,
+		{name: "user-agent", value: ""}:                   58,
+		{name: "vary", value: ""}:                         59,
+		{name: "via", value: ""}:                          60,
+		{name: "www-authenticate", value: ""}:             61,
+	},
+	ents: []HeaderField{
+		{Name: ":authority", Value: "", Sensitive: false},
+		{Name: ":method", Value: "GET", Sensitive: false},
+		{Name: ":method", Value: "POST", Sensitive: false},
+		{Name: ":path", Value: "/", Sensitive: false},
+		{Name: ":path", Value: "/index.html", Sensitive: false},
+		{Name: ":scheme", Value: "http", Sensitive: false},
+		{Name: ":scheme", Value: "https", Sensitive: false},
+		{Name: ":status", Value: "200", Sensitive: false},
+		{Name: ":status", Value: "204", Sensitive: false},
+		{Name: ":status", Value: "206", Sensitive: false},
+		{Name: ":status", Value: "304", Sensitive: false},
+		{Name: ":status", Value: "400", Sensitive: false},
+		{Name: ":status", Value: "404", Sensitive: false},
+		{Name: ":status", Value: "500", Sensitive: false},
+		{Name: "accept-charset", Value: "", Sensitive: false},
+		{Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false},
+		{Name: "accept-language", Value: "", Sensitive: false},
+		{Name: "accept-ranges", Value: "", Sensitive: false},
+		{Name: "accept", Value: "", Sensitive: false},
+		{Name: "access-control-allow-origin", Value: "", Sensitive: false},
+		{Name: "age", Value: "", Sensitive: false},
+		{Name: "allow", Value: "", Sensitive: false},
+		{Name: "authorization", Value: "", Sensitive: false},
+		{Name: "cache-control", Value: "", Sensitive: false},
+		{Name: "content-disposition", Value: "", Sensitive: false},
+		{Name: "content-encoding", Value: "", Sensitive: false},
+		{Name: "content-language", Value: "", Sensitive: false},
+		{Name: "content-length", Value: "", Sensitive: false},
+		{Name: "content-location", Value: "", Sensitive: false},
+		{Name: "content-range", Value: "", Sensitive: false},
+		{Name: "content-type", Value: "", Sensitive: false},
+		{Name: "cookie", Value: "", Sensitive: false},
+		{Name: "date", Value: "", Sensitive: false},
+		{Name: "etag", Value: "", Sensitive: false},
+		{Name: "expect", Value: "", Sensitive: false},
+		{Name: "expires", Value: "", Sensitive: false},
+		{Name: "from", Value: "", Sensitive: false},
+		{Name: "host", Value: "", Sensitive: false},
+		{Name: "if-match", Value: "", Sensitive: false},
+		{Name: "if-modified-since", Value: "", Sensitive: false},
+		{Name: "if-none-match", Value: "", Sensitive: false},
+		{Name: "if-range", Value: "", Sensitive: false},
+		{Name: "if-unmodified-since", Value: "", Sensitive: false},
+		{Name: "last-modified", Value: "", Sensitive: false},
+		{Name: "link", Value: "", Sensitive: false},
+		{Name: "location", Value: "", Sensitive: false},
+		{Name: "max-forwards", Value: "", Sensitive: false},
+		{Name: "proxy-authenticate", Value: "", Sensitive: false},
+		{Name: "proxy-authorization", Value: "", Sensitive: false},
+		{Name: "range", Value: "", Sensitive: false},
+		{Name: "referer", Value: "", Sensitive: false},
+		{Name: "refresh", Value: "", Sensitive: false},
+		{Name: "retry-after", Value: "", Sensitive: false},
+		{Name: "server", Value: "", Sensitive: false},
+		{Name: "set-cookie", Value: "", Sensitive: false},
+		{Name: "strict-transport-security", Value: "", Sensitive: false},
+		{Name: "transfer-encoding", Value: "", Sensitive: false},
+		{Name: "user-agent", Value: "", Sensitive: false},
+		{Name: "vary", Value: "", Sensitive: false},
+		{Name: "via", Value: "", Sensitive: false},
+		{Name: "www-authenticate", Value: "", Sensitive: false},
+	},
+}
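
The HPACK static table is now a generated literal, replacing the runtime construction removed from tables.go below. Its contents are fixed by RFC 7541 Appendix A, so a full name/value match such as ":method: GET" (entry 2) encodes as a single indexed byte. A small illustration using the exported hpack encoder:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// ":method: GET" is entry 2 of the static table, so it encodes as a
	// single indexed-field byte (0x80 | 2 = 0x82).
	_ = enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	fmt.Printf("% x\n", buf.Bytes()) // 82
}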

+ 1 - 77
vendor/golang.org/x/net/http2/hpack/tables.go

@@ -96,8 +96,7 @@ func (t *headerFieldTable) evictOldest(n int) {
 // meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
 // table, the return value i actually refers to the entry t.ents[t.len()-i].
 //
-// All tables are assumed to be a dynamic tables except for the global
-// staticTable pointer.
+// All tables are assumed to be dynamic tables except for the global staticTable.
 //
 // See Section 2.3.3.
 func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
@@ -125,81 +124,6 @@ func (t *headerFieldTable) idToIndex(id uint64) uint64 {
 	return k + 1
 }
 
-// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = newStaticTable()
-var staticTableEntries = [...]HeaderField{
-	{Name: ":authority"},
-	{Name: ":method", Value: "GET"},
-	{Name: ":method", Value: "POST"},
-	{Name: ":path", Value: "/"},
-	{Name: ":path", Value: "/index.html"},
-	{Name: ":scheme", Value: "http"},
-	{Name: ":scheme", Value: "https"},
-	{Name: ":status", Value: "200"},
-	{Name: ":status", Value: "204"},
-	{Name: ":status", Value: "206"},
-	{Name: ":status", Value: "304"},
-	{Name: ":status", Value: "400"},
-	{Name: ":status", Value: "404"},
-	{Name: ":status", Value: "500"},
-	{Name: "accept-charset"},
-	{Name: "accept-encoding", Value: "gzip, deflate"},
-	{Name: "accept-language"},
-	{Name: "accept-ranges"},
-	{Name: "accept"},
-	{Name: "access-control-allow-origin"},
-	{Name: "age"},
-	{Name: "allow"},
-	{Name: "authorization"},
-	{Name: "cache-control"},
-	{Name: "content-disposition"},
-	{Name: "content-encoding"},
-	{Name: "content-language"},
-	{Name: "content-length"},
-	{Name: "content-location"},
-	{Name: "content-range"},
-	{Name: "content-type"},
-	{Name: "cookie"},
-	{Name: "date"},
-	{Name: "etag"},
-	{Name: "expect"},
-	{Name: "expires"},
-	{Name: "from"},
-	{Name: "host"},
-	{Name: "if-match"},
-	{Name: "if-modified-since"},
-	{Name: "if-none-match"},
-	{Name: "if-range"},
-	{Name: "if-unmodified-since"},
-	{Name: "last-modified"},
-	{Name: "link"},
-	{Name: "location"},
-	{Name: "max-forwards"},
-	{Name: "proxy-authenticate"},
-	{Name: "proxy-authorization"},
-	{Name: "range"},
-	{Name: "referer"},
-	{Name: "refresh"},
-	{Name: "retry-after"},
-	{Name: "server"},
-	{Name: "set-cookie"},
-	{Name: "strict-transport-security"},
-	{Name: "transfer-encoding"},
-	{Name: "user-agent"},
-	{Name: "vary"},
-	{Name: "via"},
-	{Name: "www-authenticate"},
-}
-
-func newStaticTable() *headerFieldTable {
-	t := &headerFieldTable{}
-	t.init()
-	for _, e := range staticTableEntries[:] {
-		t.addEntry(e)
-	}
-	return t
-}
-
 var huffmanCodes = [256]uint32{
 	0x1ff8,
 	0x7fffd8,

+ 133 - 51
vendor/golang.org/x/net/http2/server.go

@@ -622,7 +622,9 @@ type stream struct {
 	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
 	gotTrailerHeader bool        // HEADER frame for trailers was seen
 	wroteHeaders     bool        // whether we wrote headers (not status 100)
+	readDeadline     *time.Timer // nil if unused
 	writeDeadline    *time.Timer // nil if unused
+	closeErr         error       // set before cw is closed
 
 	trailer    http.Header // accumulated trailers
 	reqTrailer http.Header // handler's Request.Trailer
@@ -869,7 +871,9 @@ func (sc *serverConn) serve() {
 
 	// Each connection starts with initialWindowSize inflow tokens.
 	// If a higher value is configured, we add more tokens.
-	sc.sendWindowUpdate(nil)
+	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+		sc.sendWindowUpdate(nil, int(diff))
+	}
 
 	if err := sc.readPreface(); err != nil {
 		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
@@ -946,6 +950,8 @@ func (sc *serverConn) serve() {
 				}
 			case *startPushRequest:
 				sc.startPush(v)
+			case func(*serverConn):
+				v(sc)
 			default:
 				panic(fmt.Sprintf("unexpected type %T", v))
 			}
@@ -1459,6 +1465,21 @@ func (sc *serverConn) processFrame(f Frame) error {
 		sc.sawFirstSettings = true
 	}
 
+	// Discard frames for streams initiated after the identified last
+	// stream sent in a GOAWAY, or all frames after sending an error.
+	// We still need to return connection-level flow control for DATA frames.
+	// RFC 9113 Section 6.8.
+	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
+
+		if f, ok := f.(*DataFrame); ok {
+			if sc.inflow.available() < int32(f.Length) {
+				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
+			}
+			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+		}
+		return nil
+	}
+
 	switch f := f.(type) {
 	case *SettingsFrame:
 		return sc.processSettings(f)
@@ -1501,9 +1522,6 @@ func (sc *serverConn) processPing(f *PingFrame) error {
 		// PROTOCOL_ERROR."
 		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
 	}
-	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
-		return nil
-	}
 	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
 	return nil
 }
@@ -1565,6 +1583,9 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
 	}
 	st.state = stateClosed
+	if st.readDeadline != nil {
+		st.readDeadline.Stop()
+	}
 	if st.writeDeadline != nil {
 		st.writeDeadline.Stop()
 	}
@@ -1586,10 +1607,18 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 	if p := st.body; p != nil {
 		// Return any buffered unread bytes worth of conn-level flow control.
 		// See golang.org/issue/16481
-		sc.sendWindowUpdate(nil)
+		sc.sendWindowUpdate(nil, p.Len())
 
 		p.CloseWithError(err)
 	}
+	if e, ok := err.(StreamError); ok {
+		if e.Cause != nil {
+			err = e.Cause
+		} else {
+			err = errStreamClosed
+		}
+	}
+	st.closeErr = err
 	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
 	sc.writeSched.CloseStream(st.id)
 }
@@ -1686,16 +1715,6 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
 func (sc *serverConn) processData(f *DataFrame) error {
 	sc.serveG.check()
 	id := f.Header().StreamID
-	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) {
-		// Discard all DATA frames if the GOAWAY is due to an
-		// error, or:
-		//
-		// Section 6.8: After sending a GOAWAY frame, the sender
-		// can discard frames for streams initiated by the
-		// receiver with identifiers higher than the identified
-		// last stream.
-		return nil
-	}
 
 	data := f.Data()
 	state, st := sc.state(id)
@@ -1734,7 +1753,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// sendWindowUpdate, which also schedules sending the
 		// frames.
 		sc.inflow.take(int32(f.Length))
-		sc.sendWindowUpdate(nil) // conn-level
+		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 
 		if st != nil && st.resetQueued {
 			// Already have a stream error in flight. Don't send another.
@@ -1752,7 +1771,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
 		sc.inflow.take(int32(f.Length))
-		sc.sendWindowUpdate(nil) // conn-level
+		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 
 		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
 		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
@@ -1770,7 +1789,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		if len(data) > 0 {
 			wrote, err := st.body.Write(data)
 			if err != nil {
-				sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote))
+				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
 				return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
 			}
 			if wrote != len(data) {
@@ -1838,19 +1857,27 @@ func (st *stream) copyTrailersToHandlerRequest() {
 	}
 }
 
+// onReadTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's ReadTimeout has fired.
+func (st *stream) onReadTimeout() {
+	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
+	// returning the bare error.
+	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+}
+
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's WriteTimeout has fired.
 func (st *stream) onWriteTimeout() {
-	st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
+	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
+		StreamID: st.id,
+		Code:     ErrCodeInternal,
+		Cause:    os.ErrDeadlineExceeded,
+	}})
 }
 
 func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	sc.serveG.check()
 	id := f.StreamID
-	if sc.inGoAway {
-		// Ignore.
-		return nil
-	}
 	// http://tools.ietf.org/html/rfc7540#section-5.1.1
 	// Streams initiated by a client MUST use odd-numbered stream
 	// identifiers. [...] An endpoint that receives an unexpected
@@ -1953,6 +1980,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
+		if st.body != nil {
+			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+		}
 	}
 
 	go sc.runHandler(rw, req, handler)
@@ -2021,9 +2051,6 @@ func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
 }
 
 func (sc *serverConn) processPriority(f *PriorityFrame) error {
-	if sc.inGoAway {
-		return nil
-	}
 	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
 		return err
 	}
@@ -2322,39 +2349,24 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
 
 func (sc *serverConn) noteBodyRead(st *stream, n int) {
 	sc.serveG.check()
-	sc.sendWindowUpdate(nil) // conn-level
+	sc.sendWindowUpdate(nil, n) // conn-level
 	if st.state != stateHalfClosedRemote && st.state != stateClosed {
 		// Don't send this WINDOW_UPDATE if the stream is closed
 		// remotely.
-		sc.sendWindowUpdate(st)
+		sc.sendWindowUpdate(st, n)
 	}
 }
 
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream) {
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
 	sc.serveG.check()
-
-	var n int32
-	if st == nil {
-		if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 {
-			return
-		} else {
-			n = windowSize - avail
-		}
-	} else {
-		if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 {
-			return
-		} else {
-			n = windowSize - avail
-		}
-	}
 	// "The legal range for the increment to the flow control
 	// window is 1 to 2^31-1 (2,147,483,647) octets."
 	// A Go Read call on 64-bit machines could in theory read
 	// a larger Read than this. Very unlikely, but we handle it here
 	// rather than elsewhere for now.
 	const maxUint31 = 1<<31 - 1
-	for n >= maxUint31 {
+	for n > maxUint31 {
 		sc.sendWindowUpdate32(st, maxUint31)
 		n -= maxUint31
 	}
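
With this change the server returns flow-control credit for the exact number of bytes consumed (sendWindowUpdate now takes n explicitly) instead of recomputing the deficit from the configured window size. A toy sketch of that accounting model follows; the window type is hypothetical and not the vendored inflow implementation.

package main

import "fmt"

// window is a hypothetical receive-window tracker: take reserves credit when
// a DATA frame arrives, add returns it once the bytes have been consumed and
// a WINDOW_UPDATE for exactly n octets has been queued.
type window struct{ avail int32 }

func (w *window) take(n int32) bool {
	if n > w.avail {
		return false // peer exceeded the advertised window
	}
	w.avail -= n
	return true
}

func (w *window) add(n int32) { w.avail += n }

func main() {
	w := window{avail: 65535}
	if w.take(1024) { // DATA frame of 1024 bytes arrives
		w.add(1024) // body consumed: return exactly 1024 bytes of credit
	}
	fmt.Println(w.avail) // 65535
}
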
@@ -2474,7 +2486,15 @@ type responseWriterState struct {
 
 type chunkWriter struct{ rws *responseWriterState }
 
-func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+func (cw chunkWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.rws.writeChunk(p)
+	if err == errStreamClosed {
+		// If writing failed because the stream has been closed,
+		// return the reason it was closed.
+		err = cw.rws.stream.closeErr
+	}
+	return n, err
+}
 
 func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
 
@@ -2668,23 +2688,85 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
 	}
 }
 
+func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
+	st := w.rws.stream
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
+		// If we're setting a deadline in the past, reset the stream immediately
+		// so reads after SetReadDeadline returns will fail.
+		st.onReadTimeout()
+		return nil
+	}
+	w.rws.conn.sendServeMsg(func(sc *serverConn) {
+		if st.readDeadline != nil {
+			if !st.readDeadline.Stop() {
+				// Deadline already exceeded, or stream has been closed.
+				return
+			}
+		}
+		if deadline.IsZero() {
+			st.readDeadline = nil
+		} else if st.readDeadline == nil {
+			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
+		} else {
+			st.readDeadline.Reset(deadline.Sub(time.Now()))
+		}
+	})
+	return nil
+}
+
+func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
+	st := w.rws.stream
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
+		// If we're setting a deadline in the past, reset the stream immediately
+		// so writes after SetWriteDeadline returns will fail.
+		st.onWriteTimeout()
+		return nil
+	}
+	w.rws.conn.sendServeMsg(func(sc *serverConn) {
+		if st.writeDeadline != nil {
+			if !st.writeDeadline.Stop() {
+				// Deadline already exceeded, or stream has been closed.
+				return
+			}
+		}
+		if deadline.IsZero() {
+			st.writeDeadline = nil
+		} else if st.writeDeadline == nil {
+			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
+		} else {
+			st.writeDeadline.Reset(deadline.Sub(time.Now()))
+		}
+	})
+	return nil
+}
+
 func (w *responseWriter) Flush() {
+	w.FlushError()
+}
+
+func (w *responseWriter) FlushError() error {
 	rws := w.rws
 	if rws == nil {
 		panic("Header called after Handler finished")
 	}
+	var err error
 	if rws.bw.Buffered() > 0 {
-		if err := rws.bw.Flush(); err != nil {
-			// Ignore the error. The frame writer already knows.
-			return
-		}
+		err = rws.bw.Flush()
 	} else {
 		// The bufio.Writer won't call chunkWriter.Write
 		// (writeChunk with zero bytes, so we have to do it
 		// ourselves to force the HTTP response header and/or
 		// final DATA frame (with END_STREAM) to be sent.
-		rws.writeChunk(nil)
+		_, err = chunkWriter{rws}.Write(nil)
+		if err == nil {
+			select {
+			case <-rws.stream.cw:
+				err = rws.stream.closeErr
+			default:
+			}
+		}
 	}
+	return err
 }
 
 func (w *responseWriter) CloseNotify() <-chan bool {
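
The new SetReadDeadline and SetWriteDeadline methods let a handler set per-stream deadlines at runtime (newer Go releases expose the same methods through http.ResponseController). A hedged usage sketch; the handler and route are made up, and the interface assertions simply become no-ops when the writer is not backed by this package's HTTP/2 server.

package main

import (
	"net/http"
	"time"
)

// handler limits how long it waits for the request body and how long writes
// may take on this one stream, independent of the server-wide timeouts.
func handler(w http.ResponseWriter, r *http.Request) {
	if d, ok := w.(interface{ SetReadDeadline(time.Time) error }); ok {
		_ = d.SetReadDeadline(time.Now().Add(5 * time.Second))
	}
	if d, ok := w.(interface{ SetWriteDeadline(time.Time) error }); ok {
		_ = d.SetWriteDeadline(time.Now().Add(10 * time.Second))
	}
	// ... read r.Body / write the response as usual ...
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/", handler)
	// HTTP/2 requires TLS in practice; plain ListenAndServe is shown only to
	// keep the sketch self-contained.
	_ = http.ListenAndServe(":8080", nil)
}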

+ 26 - 9
vendor/golang.org/x/net/http2/transport.go

@@ -16,6 +16,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"io/fs"
 	"log"
 	"math"
 	mathrand "math/rand"
@@ -501,6 +502,15 @@ func authorityAddr(scheme string, authority string) (addr string) {
 	return net.JoinHostPort(host, port)
 }
 
+var retryBackoffHook func(time.Duration) *time.Timer
+
+func backoffNewTimer(d time.Duration) *time.Timer {
+	if retryBackoffHook != nil {
+		return retryBackoffHook(d)
+	}
+	return time.NewTimer(d)
+}
+
 // RoundTripOpt is like RoundTrip, but takes options.
 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
 	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
@@ -526,11 +536,14 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
 				}
 				backoff := float64(uint(1) << (uint(retry) - 1))
 				backoff += backoff * (0.1 * mathrand.Float64())
+				d := time.Second * time.Duration(backoff)
+				timer := backoffNewTimer(d)
 				select {
-				case <-time.After(time.Second * time.Duration(backoff)):
+				case <-timer.C:
 					t.vlogf("RoundTrip retrying after failure: %v", err)
 					continue
 				case <-req.Context().Done():
+					timer.Stop()
 					err = req.Context().Err()
 				}
 			}
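
The retry path now allocates an explicit timer (behind an overridable test hook) so it can be stopped when the request context is cancelled, instead of leaving a time.After channel pending. The same pattern in isolation, as a minimal self-contained sketch:

package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

// waitRetry sleeps for an exponential backoff with jitter (retry must be >= 1),
// but returns early and releases the timer if ctx is cancelled first.
func waitRetry(ctx context.Context, retry uint) error {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * rand.Float64())
	timer := time.NewTimer(time.Duration(backoff * float64(time.Second)))
	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		timer.Stop() // don't leak the timer on cancellation
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(waitRetry(ctx, 3)) // context deadline exceeded (the ~4s backoff never fires)
}
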
@@ -1075,7 +1088,7 @@ var errRequestCanceled = errors.New("net/http: request canceled")
 func commaSeparatedTrailers(req *http.Request) (string, error) {
 	keys := make([]string, 0, len(req.Trailer))
 	for k := range req.Trailer {
-		k = http.CanonicalHeaderKey(k)
+		k = canonicalHeader(k)
 		switch k {
 		case "Transfer-Encoding", "Trailer", "Content-Length":
 			return "", fmt.Errorf("invalid Trailer key %q", k)
@@ -1612,7 +1625,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
 
 	var sawEOF bool
 	for !sawEOF {
-		n, err := body.Read(buf[:len(buf)])
+		n, err := body.Read(buf)
 		if hasContentLen {
 			remainLen -= int64(n)
 			if remainLen == 0 && err == nil {
@@ -1915,7 +1928,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 
 	// Header list size is ok. Write the headers.
 	enumerateHeaders(func(name, value string) {
-		name, ascii := asciiToLower(name)
+		name, ascii := lowerHeader(name)
 		if !ascii {
 			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
 			// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -1968,7 +1981,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
 	}
 
 	for k, vv := range trailer {
-		lowKey, ascii := asciiToLower(k)
+		lowKey, ascii := lowerHeader(k)
 		if !ascii {
 			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
 			// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -2301,7 +2314,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 		Status:     status + " " + http.StatusText(statusCode),
 	}
 	for _, hf := range regularFields {
-		key := http.CanonicalHeaderKey(hf.Name)
+		key := canonicalHeader(hf.Name)
 		if key == "Trailer" {
 			t := res.Trailer
 			if t == nil {
@@ -2309,7 +2322,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 				res.Trailer = t
 			}
 			foreachHeaderElement(hf.Value, func(v string) {
-				t[http.CanonicalHeaderKey(v)] = nil
+				t[canonicalHeader(v)] = nil
 			})
 		} else {
 			vv := header[key]
@@ -2414,7 +2427,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
 
 	trailer := make(http.Header)
 	for _, hf := range f.RegularFields() {
-		key := http.CanonicalHeaderKey(hf.Name)
+		key := canonicalHeader(hf.Name)
 		trailer[key] = append(trailer[key], hf.Value)
 	}
 	cs.trailer = trailer
@@ -2985,7 +2998,11 @@ func (gz *gzipReader) Read(p []byte) (n int, err error) {
 }
 
 func (gz *gzipReader) Close() error {
-	return gz.body.Close()
+	if err := gz.body.Close(); err != nil {
+		return err
+	}
+	gz.zerr = fs.ErrClosed
+	return nil
 }
 
 type errorReader struct{ err error }
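
gzipReader.Close now records fs.ErrClosed so later Reads fail instead of touching the closed body again. A tiny sketch of that sticky-error pattern; the type below is hypothetical, not the vendored one.

package main

import (
	"fmt"
	"io"
	"io/fs"
	"strings"
)

// closedAwareReader mimics the pattern: after Close, every Read reports
// fs.ErrClosed rather than reading from the underlying body.
type closedAwareReader struct {
	r    io.Reader
	rerr error // sticky read error, set by Close
}

func (c *closedAwareReader) Read(p []byte) (int, error) {
	if c.rerr != nil {
		return 0, c.rerr
	}
	return c.r.Read(p)
}

func (c *closedAwareReader) Close() error {
	c.rerr = fs.ErrClosed
	return nil
}

func main() {
	r := &closedAwareReader{r: strings.NewReader("body")}
	_ = r.Close()
	_, err := r.Read(make([]byte, 4))
	fmt.Println(err) // file already closed
}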

+ 8 - 8
vendor/modules.txt

@@ -333,7 +333,7 @@ github.com/fernet/fernet-go
 github.com/fluent/fluent-logger-golang/fluent
 # github.com/fsnotify/fsnotify v1.5.1
 ## explicit; go 1.13
-# github.com/go-logr/logr v1.2.2
+# github.com/go-logr/logr v1.2.3
 ## explicit; go 1.16
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
@@ -467,8 +467,8 @@ github.com/ishidawataru/sctp
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
 github.com/jmespath/go-jmespath
-# github.com/klauspost/compress v1.15.9
-## explicit; go 1.16
+# github.com/klauspost/compress v1.15.12
+## explicit; go 1.17
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -712,7 +712,7 @@ github.com/opencontainers/go-digest/digestset
 github.com/opencontainers/image-spec/identity
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.2
+# github.com/opencontainers/runc v1.1.3
 ## explicit; go 1.16
 github.com/opencontainers/runc/libcontainer/cgroups
 github.com/opencontainers/runc/libcontainer/configs
@@ -739,12 +739,12 @@ github.com/philhofer/fwd
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/prometheus/client_golang v1.13.0
+# github.com/prometheus/client_golang v1.14.0
 ## explicit; go 1.17
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.2.0
+# github.com/prometheus/client_model v0.3.0
 ## explicit; go 1.9
 github.com/prometheus/client_model/go
 # github.com/prometheus/common v0.37.0
@@ -910,7 +910,7 @@ go.uber.org/zap/internal/bufferpool
 go.uber.org/zap/internal/color
 go.uber.org/zap/internal/exit
 go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.1.0
+# golang.org/x/crypto v0.2.0
 ## explicit; go 1.17
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/chacha20
@@ -930,7 +930,7 @@ golang.org/x/crypto/pkcs12/internal/rc2
 golang.org/x/crypto/salsa20/salsa
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/net v0.1.0
+# golang.org/x/net v0.2.0
 ## explicit; go 1.17
 golang.org/x/net/bpf
 golang.org/x/net/context