
vendor: github.com/prometheus/client_golang v1.14.0

full diff: https://github.com/prometheus/client_golang/compare/v1.13.0...v1.14.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit dc52e08bb5719885b89205cf9f7de690b9786ac5)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2 years ago
parent
commit
0163808dbe

+ 2 - 2
vendor.mod

@@ -69,7 +69,7 @@ require (
 	github.com/opencontainers/selinux v1.10.2
 	github.com/pelletier/go-toml v1.9.4
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.13.0
+	github.com/prometheus/client_golang v1.14.0
 	github.com/rootless-containers/rootlesskit v1.1.0
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.6.1
@@ -135,7 +135,7 @@ require (
 	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
 	github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
 	github.com/philhofer/fwd v1.0.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rexray/gocsi v1.2.2 // indirect

+ 4 - 3
vendor.sum

@@ -936,14 +936,15 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
-github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=

+ 7 - 4
vendor/github.com/prometheus/client_golang/prometheus/counter.go

@@ -140,12 +140,13 @@ func (c *counter) get() float64 {
 }
 
 func (c *counter) Write(out *dto.Metric) error {
-	val := c.get()
-
+	// Read the Exemplar first and the value second. This is to avoid a race condition
+	// where users see an exemplar for a not-yet-existing observation.
 	var exemplar *dto.Exemplar
 	if e := c.exemplar.Load(); e != nil {
 		exemplar = e.(*dto.Exemplar)
 	}
+	val := c.get()
 
 	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
 }
@@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
 func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
 	c, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
 func (v *CounterVec) With(labels Labels) Counter {
 	c, err := v.GetMetricWith(labels)
 	if err != nil {
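
For context on the reordering above, here is a hedged usage sketch (an editorial addition, not part of this diff) showing how an exemplar is attached to a counter and read back through a registry. With Write now loading the exemplar before the value, a scrape can no longer see an exemplar for an observation that has not been counted yet.

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		reg := prometheus.NewRegistry()
		requests := prometheus.NewCounter(prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests.",
		})
		reg.MustRegister(requests)

		// The vendored counter implements ExemplarAdder, so the increment and
		// the exemplar are recorded together.
		if ea, ok := requests.(prometheus.ExemplarAdder); ok {
			ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
		}

		// Gather calls Write, which reads the exemplar first and the value second.
		mfs, err := reg.Gather()
		if err != nil {
			panic(err)
		}
		for _, mf := range mfs {
			fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetExemplar())
		}
	}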

+ 59 - 48
vendor/github.com/prometheus/client_golang/prometheus/doc.go

@@ -21,55 +21,66 @@
 // All exported functions and methods are safe to be used concurrently unless
 // specified otherwise.
 //
-// A Basic Example
+// # A Basic Example
 //
 // As a starting point, a very basic usage example:
 //
-//    package main
-//
-//    import (
-//    	"log"
-//    	"net/http"
-//
-//    	"github.com/prometheus/client_golang/prometheus"
-//    	"github.com/prometheus/client_golang/prometheus/promhttp"
-//    )
-//
-//    var (
-//    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-//    		Name: "cpu_temperature_celsius",
-//    		Help: "Current temperature of the CPU.",
-//    	})
-//    	hdFailures = prometheus.NewCounterVec(
-//    		prometheus.CounterOpts{
-//    			Name: "hd_errors_total",
-//    			Help: "Number of hard-disk errors.",
-//    		},
-//    		[]string{"device"},
-//    	)
-//    )
-//
-//    func init() {
-//    	// Metrics have to be registered to be exposed:
-//    	prometheus.MustRegister(cpuTemp)
-//    	prometheus.MustRegister(hdFailures)
-//    }
-//
-//    func main() {
-//    	cpuTemp.Set(65.3)
-//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
-//
-//    	// The Handler function provides a default handler to expose metrics
-//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
-//    	http.Handle("/metrics", promhttp.Handler())
-//    	log.Fatal(http.ListenAndServe(":8080", nil))
-//    }
-//
+//	package main
+//
+//	import (
+//		"log"
+//		"net/http"
+//
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
+//
+//	type metrics struct {
+//		cpuTemp  prometheus.Gauge
+//	  hdFailures *prometheus.CounterVec
+//	}
+//
+//	func NewMetrics(reg prometheus.Registerer) *metrics {
+//	  m := &metrics{
+//	    cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+//	      Name: "cpu_temperature_celsius",
+//	      Help: "Current temperature of the CPU.",
+//	    }),
+//	    hdFailures: prometheus.NewCounterVec(
+//	      prometheus.CounterOpts{
+//	        Name: "hd_errors_total",
+//	        Help: "Number of hard-disk errors.",
+//	      },
+//	      []string{"device"},
+//	    ),
+//	  }
+//	  reg.MustRegister(m.cpuTemp)
+//	  reg.MustRegister(m.hdFailures)
+//	  return m
+//	}
+//
+//	func main() {
+//	  // Create a non-global registry.
+//	  reg := prometheus.NewRegistry()
+//
+//	  // Create new metrics and register them using the custom registry.
+//	  m := NewMetrics(reg)
+//	  // Set values for the new created metrics.
+//		m.cpuTemp.Set(65.3)
+//		m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//		// Expose metrics and custom registry via an HTTP server
+//		// using the HandleFor function. "/metrics" is the usual endpoint for that.
+//		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+//		log.Fatal(http.ListenAndServe(":8080", nil))
+//	}
 //
 // This is a complete program that exports two metrics, a Gauge and a Counter,
 // the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
 //
-// Metrics
+// # Metrics
 //
 // The number of exported identifiers in this package might appear a bit
 // overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
 // To create instances of Metrics and their vector versions, you need a suitable
 // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
 //
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
 //
 // While you could create your own implementations of Metric, most likely you
 // will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
 // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
 // shortcuts.
 //
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
 //
 // While MustRegister is the by far most common way of registering a Collector,
 // sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
 // NewProcessCollector). With a custom registry, you are in control and decide
 // yourself about the Collectors to register.
 //
-// HTTP Exposition
+// # HTTP Exposition
 //
 // The Registry implements the Gatherer interface. The caller of the Gather
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
 // above. The tools to expose metrics via HTTP are in the promhttp sub-package.
 //
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
 //
 // Function for pushing to the Pushgateway can be found in the push sub-package.
 //
-// Graphite Bridge
+// # Graphite Bridge
 //
 // Functions and examples to push metrics from a Gatherer to Graphite can be
 // found in the graphite sub-package.
 //
-// Other Means of Exposition
+// # Other Means of Exposition
 //
 // More ways of exposing metrics can easily be added by following the approaches
 // of the existing implementations.
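
Related editorial note on the new custom-registry example above: a non-global registry does not come with the Go runtime and process collectors that the default registry provides. A hedged sketch (not part of this diff) using the collectors sub-package shipped with client_golang:

	package metrics

	import (
		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/collectors"
	)

	func newRegistry() *prometheus.Registry {
		reg := prometheus.NewRegistry()
		// Opt back in to the collectors that the global default registry
		// would otherwise register automatically.
		reg.MustRegister(
			collectors.NewGoCollector(),
			collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		)
		return reg
	}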

+ 4 - 2
vendor/github.com/prometheus/client_golang/prometheus/gauge.go

@@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Add(42)
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
 func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
 	g, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
 func (v *GaugeVec) With(labels Labels) Gauge {
 	g, err := v.GetMetricWith(labels)
 	if err != nil {

+ 896 - 82
vendor/github.com/prometheus/client_golang/prometheus/histogram.go

@@ -28,19 +28,216 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )
 
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+	// Schema "0":
+	{0.5},
+	// Schema 1:
+	{0.5, 0.7071067811865475},
+	// Schema 2:
+	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+	// Schema 3:
+	{
+		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+	},
+	// Schema 4:
+	{
+		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+	},
+	// Schema 5:
+	{
+		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+	},
+	// Schema 6:
+	{
+		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+	},
+	// Schema 7:
+	{
+		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+	},
+	// Schema 8:
+	{
+		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+	},
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to come to terms if we are fine with
+// that, or if we might prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// 	// Populate nativeHistogramBounds.
+// 	numBuckets := 1
+// 	for i := range nativeHistogramBounds {
+// 		bounds := []float64{0.5}
+// 		factor := math.Exp2(math.Exp2(float64(-i)))
+// 		for j := 0; j < numBuckets-1; j++ {
+// 			var bound float64
+// 			if (j+1)%2 == 0 {
+// 				// Use previously calculated value for increased precision.
+// 				bound = nativeHistogramBounds[i-1][j/2+1]
+// 			} else {
+// 				bound = bounds[j] * factor
+// 			}
+// 			bounds = append(bounds, bound)
+// 		}
+// 		numBuckets *= 2
+// 		nativeHistogramBounds[i] = bounds
+// 	}
+// }
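
Editorial sanity check on the table above (not part of the vendored file): at schema 2 the boundaries within one power of two should be 2^(i/4 - 1) for i = 0..3, i.e. 0.5, 0.59460…, 0.70711…, 0.84090…, which matches the "Schema 2" row.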
+
 // A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
 //
 // On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
+// the histogram_quantile PromQL function.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
 //
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
 //
 // To create Histogram instances, use NewHistogram.
 type Histogram interface {
@@ -50,7 +247,8 @@ type Histogram interface {
 	// Observe adds a single observation to the histogram. Observations are
 	// usually positive or zero. Negative observations are accepted but
 	// prevent current versions of Prometheus from properly detecting
-	// counter resets in the sum of observations. See
+	// counter resets in the sum of observations. (The experimental Native
+	// Histograms handle negative observations properly.) See
 	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
 	// for details.
 	Observe(float64)
@@ -64,18 +262,28 @@ const bucketLabel = "le"
 // tailored to broadly measure the response time (in seconds) of a network
 // service. Most likely, however, you will be required to define buckets
 // customized to your use case.
-var (
-	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
 
-	errBucketLabelNotAllowed = fmt.Errorf(
-		"%q is not allowed as label name in histograms", bucketLabel,
-	)
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in histograms", bucketLabel,
 )
 
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is zero or negative.
 func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 {
 	return buckets
 }
 
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
 // or if 'factor' is less than or equal 1.
@@ -180,8 +388,85 @@ type HistogramOpts struct {
 	// element in the slice is the upper inclusive bound of a bucket. The
 	// values must be sorted in strictly increasing order. There is no need
 	// to add a highest bucket with +Inf bound, it will be added
-	// implicitly. The default value is DefBuckets.
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
 	Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (exception see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^n), where n is an integer number between (and including) -8 and
+	// 4. n is chosen so that the resulting factor is the largest that is
+	// still smaller or equal to NativeHistogramBucketFactor. Note that the
+	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+	// it is larger than the provided NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Subsequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value of less or equal
+	// NativeHistogramZeroThreshold are accumulated into a “zero”
+	// bucket. For best results, this should be close to a bucket
+	// boundary. This is usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To configure
+	// a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The remaining fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.)  Once the set
+	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+	// enacted: First, if the last reset (or the creation) of the histogram
+	// is at least NativeHistogramMinResetDuration ago, then the whole
+	// histogram is reset to its initial state (including regular
+	// buckets). If less time has passed, or if
+	// NativeHistogramMinResetDuration is zero, no reset is
+	// performed. Instead, the zero threshold is increased sufficiently to
+	// reduce the number of buckets to or below
+	// NativeHistogramMaxBucketNumber, but not to more than
+	// NativeHistogramMaxZeroThreshold. Thus, if
+	// NativeHistogramMaxZeroThreshold is already at or below the current
+	// zero threshold, nothing happens at this step. After that, if the
+	// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
+	// resolution of the histogram is reduced by doubling the width of the
+	// sparse buckets (up to a growth factor between one bucket to the next
+	// of 2^(2^4) = 65536, see above).
+	NativeHistogramMaxBucketNumber  uint32
+	NativeHistogramMinResetDuration time.Duration
+	NativeHistogramMaxZeroThreshold float64
 }
 
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
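
To make the new fields concrete, a hedged configuration sketch (an editorial addition, not part of this diff; the metric name and all values are illustrative only):

	package metrics

	import (
		"time"

		"github.com/prometheus/client_golang/prometheus"
	)

	var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "HTTP request latency.",
		Buckets: prometheus.DefBuckets, // regular buckets must be listed explicitly alongside sparse ones
		// Experimental native-histogram settings, as documented above.
		NativeHistogramBucketFactor:     1.1,       // at most ~10% growth per bucket
		NativeHistogramZeroThreshold:    0,         // 0 selects DefNativeHistogramZeroThreshold
		NativeHistogramMaxBucketNumber:  160,       // cap on populated sparse buckets
		NativeHistogramMinResetDuration: time.Hour, // allow a full reset at most once per hour
		NativeHistogramMaxZeroThreshold: 0.001,     // widen the zero bucket up to here before halving resolution
	})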
@@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		}
 	}
 
-	if len(opts.Buckets) == 0 {
-		opts.Buckets = DefBuckets
-	}
-
 	h := &histogram{
-		desc:        desc,
-		upperBounds: opts.Buckets,
-		labelPairs:  MakeLabelPairs(desc, labelValues),
-		counts:      [2]*histogramCounts{{}, {}},
-		now:         time.Now,
+		desc:                            desc,
+		upperBounds:                     opts.Buckets,
+		labelPairs:                      MakeLabelPairs(desc, labelValues),
+		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
+		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+		lastResetTime:                   time.Now(),
+		now:                             time.Now,
+	}
+	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+		h.upperBounds = DefBuckets
+	}
+	if opts.NativeHistogramBucketFactor <= 1 {
+		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+	} else {
+		switch {
+		case opts.NativeHistogramZeroThreshold > 0:
+			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+		case opts.NativeHistogramZeroThreshold == 0:
+			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
 	}
 	for i, upperBound := range h.upperBounds {
 		if i < len(h.upperBounds)-1 {
@@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	}
 	// Finally we know the final length of h.upperBounds and can make buckets
 	// for both counts as well as exemplars:
-	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
-	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[0] = &histogramCounts{
+		buckets:                          make([]uint64, len(h.upperBounds)),
+		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+		nativeHistogramSchema:            h.nativeHistogramSchema,
+	}
+	h.counts[1] = &histogramCounts{
+		buckets:                          make([]uint64, len(h.upperBounds)),
+		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+		nativeHistogramSchema:            h.nativeHistogramSchema,
+	}
 	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 
 	h.init(h) // Init self-collection.
@@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 }
 
 type histogramCounts struct {
+	// Order in this struct matters for the alignment required by atomic
+	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
 	// sumBits contains the bits of the float64 representing the sum of all
-	// observations. sumBits and count have to go first in the struct to
-	// guarantee alignment for atomic operations.
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	// observations.
 	sumBits uint64
 	count   uint64
+
+	// nativeHistogramZeroBucket counts all (positive and negative)
+	// observations in the zero bucket (with an absolute value less or equal
+	// the current threshold, see next field).
+	nativeHistogramZeroBucket uint64
+	// nativeHistogramZeroThresholdBits is the bit pattern of the current
+	// threshold for the zero bucket. It's initially equal to
+	// nativeHistogramZeroThreshold but may change according to the bucket
+	// count limitation strategy.
+	nativeHistogramZeroThresholdBits uint64
+	// nativeHistogramSchema may change over time according to the bucket
+	// count limitation strategy and therefore has to be saved here.
+	nativeHistogramSchema int32
+	// Number of (positive and negative) sparse buckets.
+	nativeHistogramBucketsNumber uint32
+
+	// Regular buckets.
 	buckets []uint64
+
+	// The sparse buckets for native histograms are implemented with a
+	// sync.Map for now. A dedicated data structure will likely be more
+	// efficient. There are separate maps for negative and positive
+	// observations. The map's value is an *int64, counting observations in
+	// that bucket. (Note that we don't use uint64 as an int64 won't
+	// overflow in practice, and working with signed numbers from the
+	// beginning simplifies the handling of deltas.) The map's key is the
+	// index of the bucket according to the used
+	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
+	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be done,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+	if bucket < len(hc.buckets) {
+		atomic.AddUint64(&hc.buckets[bucket], 1)
+	}
+	atomicAddFloat(&hc.sumBits, v)
+	if doSparse && !math.IsNaN(v) {
+		var (
+			key                  int
+			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
+			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+			bucketCreated, isInf bool
+		)
+		if math.IsInf(v, 0) {
+			// Pretend v is MaxFloat64 but later increment key by one.
+			if math.IsInf(v, +1) {
+				v = math.MaxFloat64
+			} else {
+				v = -math.MaxFloat64
+			}
+			isInf = true
+		}
+		frac, exp := math.Frexp(math.Abs(v))
+		if schema > 0 {
+			bounds := nativeHistogramBounds[schema]
+			key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+		} else {
+			key = exp
+			if frac == 0.5 {
+				key--
+			}
+			div := 1 << -schema
+			key = (key + div - 1) / div
+		}
+		if isInf {
+			key++
+		}
+		switch {
+		case v > zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+		case v < -zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+		default:
+			atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+		}
+		if bucketCreated {
+			atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hc.count, 1)
 }
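
Editorial worked example of the sparse-key computation above (not part of the vendored file): observing v = 3.0 at schema 3 gives math.Frexp(3.0) = (0.75, 2); sort.SearchFloat64s over the eight schema-3 bounds returns 5, because the first bound ≥ 0.75 is 0.7711…, so key = 5 + (2-1)*8 = 13, i.e. the bucket covering (2^(12/8), 2^(13/8)] ≈ (2.83, 3.08], which indeed contains 3.0.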
 
 type histogram struct {
@@ -276,7 +667,7 @@ type histogram struct {
 	// perspective of the histogram) swap the hot–cold under the writeMtx
 	// lock. A cooldown is awaited (while locked) by comparing the number of
 	// observations with the initiation count. Once they match, then the
-	// last observation on the now cool one has completed. All cool fields must
+	// last observation on the now cool one has completed. All cold fields must
 	// be merged into the new hot before releasing writeMtx.
 	//
 	// Fields with atomic access first! See alignment constraint:
@@ -284,8 +675,10 @@ type histogram struct {
 	countAndHotIdx uint64
 
 	selfCollector
-	desc     *Desc
-	writeMtx sync.Mutex // Only used in the Write method.
+	desc *Desc
+
+	// Only used in the Write method and for sparse bucket management.
+	mtx sync.Mutex
 
 	// Two counts, one is "hot" for lock-free observations, the other is
 	// "cold" for writing out a dto.Metric. It has to be an array of
@@ -293,9 +686,15 @@ type histogram struct {
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	counts [2]*histogramCounts
 
-	upperBounds []float64
-	labelPairs  []*dto.LabelPair
-	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	upperBounds                     []float64
+	labelPairs                      []*dto.LabelPair
+	exemplars                       []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	nativeHistogramSchema           int32          // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+	nativeHistogramZeroThreshold    float64        // The initial zero threshold.
+	nativeHistogramMaxZeroThreshold float64
+	nativeHistogramMaxBuckets       uint32
+	nativeHistogramMinResetDuration time.Duration
+	lastResetTime                   time.Time // Protected by mtx.
 
 	now func() time.Time // To mock out time.Now() for testing.
 }
@@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error {
 	// the hot path, i.e. Observe is called much more often than Write. The
 	// complication of making Write lock-free isn't worth it, if possible at
 	// all.
-	h.writeMtx.Lock()
-	defer h.writeMtx.Unlock()
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
 
 	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
 	// without touching the count bits. See the struct comments for a full
@@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error {
 	hotCounts := h.counts[n>>63]
 	coldCounts := h.counts[(^n)>>63]
 
-	// Await cooldown.
-	for count != atomic.LoadUint64(&coldCounts.count) {
-		runtime.Gosched() // Let observations get work done.
-	}
+	waitForCooldown(count, coldCounts)
 
 	his := &dto.Histogram{
 		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
 		SampleCount: proto.Uint64(count),
 		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
 	}
+	out.Histogram = his
+	out.Label = h.labelPairs
+
 	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error {
 		}
 		his.Bucket = append(his.Bucket, b)
 	}
-
-	out.Histogram = his
-	out.Label = h.labelPairs
-
-	// Finally add all the cold counts to the new hot counts and reset the cold counts.
-	atomic.AddUint64(&hotCounts.count, count)
-	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
-		}
-	}
-	for i := range h.upperBounds {
-		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
-		atomic.StoreUint64(&coldCounts.buckets[i], 0)
+	if h.nativeHistogramSchema > math.MinInt32 {
+		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+		defer func() {
+			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+		}()
+
+		his.ZeroCount = proto.Uint64(zeroBucket)
+		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
 	}
+	addAndResetCounts(hotCounts, coldCounts)
 	return nil
 }
 
@@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int {
 
 // observe is the implementation for Observe without the findBucket part.
 func (h *histogram) observe(v float64, bucket int) {
+	// Do not add to sparse buckets for NaN observations.
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
 	// We increment h.countAndHotIdx so that the counter in the lower
 	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
 	n := atomic.AddUint64(&h.countAndHotIdx, 1)
 	hotCounts := h.counts[n>>63]
+	hotCounts.observe(v, bucket, doSparse)
+	if doSparse {
+		h.limitBuckets(hotCounts, v, bucket)
+	}
+}
 
-	if bucket < len(h.upperBounds) {
-		atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
+		return // No limit configured.
 	}
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded yet.
+	}
+
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// The hot counts might have been swapped just before we acquired the
+	// lock. Re-fetch the hot counts first...
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hotCounts := h.counts[hotIdx]
+	coldCounts := h.counts[coldIdx]
+	// ...and then check again if we really have to reduce the bucket count.
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded after all.
+	}
+	// Try the various strategies in order.
+	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+		return
+	}
+	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+		return
+	}
+	h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
+// has passed. It returns true if the histogram has been reset. The caller
+// must have locked h.mtx.
+func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
+	// We are using the possibly mocked h.now() rather than
+	// time.Since(h.lastResetTime) to enable testing.
+	if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+		return false
+	}
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Repeat the latest observation to not lose it completely.
+	cold.observe(value, bucket, true)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	return true
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+		return false
+	}
+	// Find the key of the bucket closest to zero.
+	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+	if smallestNegativeKey < smallestKey {
+		smallestKey = smallestNegativeKey
+	}
+	if smallestKey == math.MaxInt32 {
+		return false
+	}
+	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+		return false // New threshold would exceed the max threshold.
+	}
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// Remove applicable buckets.
+	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	// Make cold counts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the new zero threshold in the cold counts, too...
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// ...and then merge the newly deleted buckets into the wider zero
+	// bucket.
+	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			if key == smallestKey {
+				// Merge into hot zero bucket...
+				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+				// ...and delete from cold counts.
+				coldBuckets.Delete(key)
+				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+			} else {
+				// Add to corresponding hot bucket...
+				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+				}
+				// ...and reset cold bucket.
+				atomic.StoreInt64(bucket, 0)
+			}
+			return true
 		}
 	}
-	// Increment count last as we take it as a signal that the observation
-	// is complete.
-	atomic.AddUint64(&hotCounts.count, 1)
+
+	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+	return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+	if coldSchema == -4 {
+		return // Already at lowest resolution.
+	}
+	coldSchema--
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// Play it simple and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+	// Make coldCounts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the schema in the cold counts, too...
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// ...and then merge the cold buckets into the wider hot buckets.
+	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			// Adjust key to match the bucket to merge into.
+			if key > 0 {
+				key++
+			}
+			key /= 2
+			// Add to corresponding hot bucket.
+			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+	// Play it simple again and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
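
Editorial note on the key adjustment above (not part of the vendored file): halving the resolution merges each pair of old buckets into one, so old keys 1 and 2 map to new key 1, keys 3 and 4 to key 2, while keys 0 and -1 map to 0 and keys -2 and -3 to -1; the key++ is only needed for positive keys because Go's integer division already truncates toward zero for the others.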
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+	atomic.StoreUint64(&counts.sumBits, 0)
+	atomic.StoreUint64(&counts.count, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+	atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+	for i := range h.upperBounds {
+		atomic.StoreUint64(&counts.buckets[i], 0)
+	}
+	deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
 }
 
 // updateExemplar replaces the exemplar for the provided bucket. With empty
@@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
 func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
 	h, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
 
 // With works as GetMetricWith but panics where GetMetricWithLabels would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
 func (v *HistogramVec) With(labels Labels) Observer {
 	h, err := v.GetMetricWith(labels)
 	if err != nil {
@@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
 // to send it to Prometheus in the Collect method.
 //
 // buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
 //
 // NewConstHistogram returns an error if the length of labelValues is not
 // consistent with the variable labels in Desc or if Desc is invalid.
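
To illustrate the clarified +Inf semantics, a hedged sketch (an editorial addition, not part of this diff; names and numbers are made up):

	package metrics

	import "github.com/prometheus/client_golang/prometheus"

	func snapshotHistogram() prometheus.Metric {
		desc := prometheus.NewDesc(
			"http_request_duration_seconds",
			"A snapshot of request latencies.",
			nil, nil,
		)
		// count is 42, so the implicit +Inf bucket also reports 42.
		return prometheus.MustNewConstHistogram(desc,
			42,   // observation count
			13.7, // sum of observations
			map[float64]uint64{0.05: 10, 0.1: 25, 0.5: 40}, // cumulative counts; no +Inf key needed
		)
	}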
@@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) {
 func (s buckSort) Less(i, j int) bool {
 	return s[i].GetUpperBound() < s[j].GetUpperBound()
 }
+
+// pickSchema returns the largest number n between -4 and 8 such that
+// 2^(2^-n) is less or equal the provided bucketFactor.
+//
+// Special cases:
+//   - bucketFactor <= 1: panics.
+//   - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+	if bucketFactor <= 1 {
+		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+	}
+	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+	switch {
+	case floor <= -8:
+		return 8
+	case floor >= 4:
+		return -4
+	default:
+		return -int32(floor)
+	}
+}
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+	var ii []int
+	buckets.Range(func(k, v interface{}) bool {
+		ii = append(ii, k.(int))
+		return true
+	})
+	sort.Ints(ii)
+
+	if len(ii) == 0 {
+		return nil, nil
+	}
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		v, _ := buckets.Load(i)
+		count := atomic.LoadInt64(v.(*int64))
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+	if existingBucket, ok := buckets.Load(key); ok {
+		// Fast path without allocation.
+		atomic.AddInt64(existingBucket.(*int64), increment)
+		return false
+	}
+	// Bucket doesn't exist yet. Slow path allocating new counter.
+	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+		// The bucket was created concurrently in another goroutine.
+		// Have to increment after all.
+		atomic.AddInt64(actualBucket.(*int64), increment)
+		return false
+	}
+	return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+	return func(k, v interface{}) bool {
+		bucket := v.(*int64)
+		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+			atomic.AddUint32(bucketNumber, 1)
+		}
+		atomic.StoreInt64(bucket, 0)
+		return true
+	}
+}
+
+func deleteSyncMap(m *sync.Map) {
+	m.Range(func(k, v interface{}) bool {
+		m.Delete(k)
+		return true
+	})
+}
+
+func findSmallestKey(m *sync.Map) int {
+	result := math.MaxInt32
+	m.Range(func(k, v interface{}) bool {
+		key := k.(int)
+		if key < result {
+			result = key
+		}
+		return true
+	})
+	return result
+}
+
+func getLe(key int, schema int32) float64 {
+	// Here a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with a key
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := key << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := key & ((1 << schema) - 1)
+	frac := nativeHistogramBounds[schema][fracIdx]
+	exp := (key >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+	for count != atomic.LoadUint64(&counts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+	for {
+		loadedBits := atomic.LoadUint64(bits)
+		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+			break
+		}
+	}
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to.  See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+	atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+	atomic.StoreUint64(&cold.count, 0)
+	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+	atomicAddFloat(&hot.sumBits, coldSum)
+	atomic.StoreUint64(&cold.sumBits, 0)
+	for i := range hot.buckets {
+		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+		atomic.StoreUint64(&cold.buckets[i], 0)
+	}
+	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
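The helpers above back the sparse buckets used by the new native histogram support. As a standalone illustration of the schema/boundary relation described in the pickSchema and getLe comments (an illustrative sketch, not code from the library): at schema n every bucket boundary is the previous one times 2^(2^-n), and pickSchema chooses the largest n in [-4, 8] whose growth factor still fits under the requested bucketFactor.

package main

import (
	"fmt"
	"math"
)

// pickSchema mirrors the helper added above: the largest n in [-4, 8]
// such that 2^(2^-n) is less than or equal to bucketFactor.
func pickSchema(bucketFactor float64) int32 {
	if bucketFactor <= 1 {
		panic("bucketFactor must be > 1")
	}
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8
	case floor >= 4:
		return -4
	default:
		return -int32(floor)
	}
}

// upperBound is the idealized boundary formula from the getLe comment:
// the upper bound of bucket key at the given schema is 2^(key * 2^-schema).
func upperBound(key int, schema int32) float64 {
	return math.Pow(2, float64(key)*math.Pow(2, -float64(schema)))
}

func main() {
	schema := pickSchema(1.1) // 3, because 2^(2^-3) ≈ 1.0905 <= 1.1
	fmt.Println("schema:", schema)
	for key := 1; key <= 4; key++ {
		fmt.Printf("bucket %d upper bound: %.4f\n", key, upperBound(key, schema))
	}
}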

+ 60 - 0
vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go

@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copy/paste to avoid a dependency. Hence this file
+// carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+	"math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+	if a == b {
+		return true
+	}
+	absA := math.Abs(a)
+	absB := math.Abs(b)
+	diff := math.Abs(a - b)
+	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+		return diff < epsilon*minNormalFloat64
+	}
+	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+			return false
+		}
+	}
+	return true
+}
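The two functions above are only reachable from inside the vendored internal package, but their behavior is easy to pin down with a test-style sketch (assumed to live next to almost_equal.go; not part of this change):

package internal

import "testing"

// TestAlmostEqualSketch shows that the comparison is relative to the
// magnitudes involved: rounding noise passes, real differences do not.
func TestAlmostEqualSketch(t *testing.T) {
	if !AlmostEqualFloat64(0.1+0.2, 0.3, 1e-9) {
		t.Error("expected 0.1+0.2 to be almost equal to 0.3")
	}
	if AlmostEqualFloat64(1.0, 1.1, 1e-9) {
		t.Error("expected 1.0 and 1.1 to differ")
	}
	if !AlmostEqualFloat64s([]float64{1, 2, 3}, []float64{1, 2, 3}, 1e-9) {
		t.Error("expected identical slices to be almost equal")
	}
}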

+ 8 - 5
vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go

@@ -201,12 +201,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool {
 // If IsJunk is not defined:
 //
 // Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-//     alo <= i <= i+k <= ahi
-//     blo <= j <= j+k <= bhi
+//
+//	alo <= i <= i+k <= ahi
+//	blo <= j <= j+k <= bhi
+//
 // and for all (i',j',k') meeting those conditions,
-//     k >= k'
-//     i <= i'
-//     and if i == i', j <= j'
+//
+//	k >= k'
+//	i <= i'
+//	and if i == i', j <= j'
 //
 // In other words, of all maximal matching blocks, return one that
 // starts earliest in a, and of all those maximal matching blocks that

+ 2 - 1
vendor/github.com/prometheus/client_golang/prometheus/labels.go

@@ -25,7 +25,8 @@ import (
 // Labels represents a collection of label name -> value mappings. This type is
 // commonly used with the With(Labels) and GetMetricWith(Labels) methods of
 // metric vector Collectors, e.g.:
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
 //
 // The other use-case is the specification of constant label pairs in Opts or to
 // create a Desc.
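For reference, the shortcut mentioned in the comment looks like this in full, with a hypothetical counter vector whose label names match the Labels keys (illustrative, not part of this change):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Hypothetical vector; the label names declared here must match the
	// keys passed to With below.
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(requests)

	requests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
}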

+ 1 - 1
vendor/github.com/prometheus/client_golang/prometheus/metric.go

@@ -187,7 +187,7 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
 			} else {
 				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
 				b := &dto.Bucket{
-					CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
+					CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
 					UpperBound:      proto.Float64(math.Inf(1)),
 					Exemplar:        e,
 				}
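The one-line change above fixes the cumulative count attached to the implicit +Inf bucket when an exemplar exists for it: in a cumulative histogram that bucket always equals the total sample count, not the count of the last explicit bucket. A small sketch with the client_model types (the pointer helpers are assumed to come from google.golang.org/protobuf/proto):

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A cumulative histogram with 10 observations and two explicit buckets.
	h := &dto.Histogram{
		SampleCount: proto.Uint64(10),
		SampleSum:   proto.Float64(27.5),
		Bucket: []*dto.Bucket{
			{UpperBound: proto.Float64(1), CumulativeCount: proto.Uint64(3)},
			{UpperBound: proto.Float64(5), CumulativeCount: proto.Uint64(7)},
		},
	}
	// The implicit +Inf bucket must report 10 (GetSampleCount), not 7 (the
	// cumulative count of the last explicit bucket).
	fmt.Println("+Inf bucket count:", h.GetSampleCount())
}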

+ 2 - 3
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go

@@ -73,12 +73,11 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
 	return func(r *http.Request) (*http.Response, error) {
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			exemplarAdd(
+			addWithExemplar(
 				counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
 				1,
 				rtOpts.getExemplarFn(r.Context()),
 			)
-			counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
 		}
 		return resp, err
 	}
@@ -117,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
 		start := time.Now()
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
 				time.Since(start).Seconds(),
 				rtOpts.getExemplarFn(r.Context()),
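Besides the rename, the removed counter.With(...).Inc() line fixes a double increment: the counter was bumped once through the exemplar-aware add and then a second time explicitly. For context, a minimal instrumented HTTP client using this helper (metric names are made up; the promhttp calls are the standard API):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_api_requests_total",
		Help: "API requests by status code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(counter)

	// Each successful request now increments the counter exactly once.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport),
	}
	resp, err := client.Get("https://example.com")
	if err == nil {
		resp.Body.Close()
	}
}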

+ 14 - 10
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go

@@ -28,7 +28,9 @@ import (
 // magicString is used for the hacky label test in checkLabels. Remove once fixed.
 const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
 
-func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
+// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
+// which falls back to [prometheus.Observer.Observe] if no labels are provided.
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
 	if labels == nil {
 		obs.Observe(val)
 		return
@@ -36,7 +38,9 @@ func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]str
 	obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
 }
 
-func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
+// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
+// which falls back to [prometheus.Counter.Add] if no labels are provided.
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
 	if labels == nil {
 		obs.Add(val)
 		return
@@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 				time.Since(now).Seconds(),
 				hOpts.getExemplarFn(r.Context()),
@@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 		now := time.Now()
 		next.ServeHTTP(w, r)
 
-		exemplarObserve(
+		observeWithExemplar(
 			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
 			time.Since(now).Seconds(),
 			hOpts.getExemplarFn(r.Context()),
@@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			exemplarAdd(
+			addWithExemplar(
 				counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 				1,
 				hOpts.getExemplarFn(r.Context()),
@@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
 
 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
-		exemplarAdd(
+		addWithExemplar(
 			counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
 			1,
 			hOpts.getExemplarFn(r.Context()),
@@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
 	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		d := newDelegator(w, func(status int) {
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
 				time.Since(now).Seconds(),
 				hOpts.getExemplarFn(r.Context()),
@@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 			size := computeApproximateRequestSize(r)
-			exemplarObserve(
+			observeWithExemplar(
 				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 				float64(size),
 				hOpts.getExemplarFn(r.Context()),
@@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
 		size := computeApproximateRequestSize(r)
-		exemplarObserve(
+		observeWithExemplar(
 			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
 			float64(size),
 			hOpts.getExemplarFn(r.Context()),
@@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		d := newDelegator(w, nil)
 		next.ServeHTTP(d, r)
-		exemplarObserve(
+		observeWithExemplar(
 			obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
 			float64(d.Written()),
 			hOpts.getExemplarFn(r.Context()),
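On the server side the renamed helpers sit behind the InstrumentHandler* middlewares; without exemplar labels they simply fall back to plain Observe/Add. A minimal chained setup (metric names are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request duration by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests by status code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(duration, counter)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Both wrappers call observeWithExemplar/addWithExemplar internally.
	http.Handle("/", promhttp.InstrumentHandlerDuration(duration,
		promhttp.InstrumentHandlerCounter(counter, hello)))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}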

+ 31 - 3
vendor/github.com/prometheus/client_golang/prometheus/registry.go

@@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error {
 }
 
 // Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements both Registerer and
-// Gatherer. The zero value is not usable. Create instances with NewRegistry or
-// NewPedanticRegistry.
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
 type Registry struct {
 	mtx                   sync.RWMutex
 	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
@@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }
 
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	// Only report the checked Collectors; unchecked collectors don't report any
+	// Desc.
+	for _, c := range r.collectorsByID {
+		c.Describe(ch)
+	}
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	for _, c := range r.collectorsByID {
+		c.Collect(ch)
+	}
+	for _, c := range r.uncheckedCollectors {
+		c.Collect(ch)
+	}
+}
+
 // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
 // Prometheus text format, and writes it to a temporary file. Upon success, the
 // temporary file is renamed to the provided filename.
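Because Registry now implements Collector, a sub-registry can itself be registered with a parent registry, which is the grouping use case the new comment refers to. A hedged sketch (metric and variable names are made up):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Group one component's metrics in their own registry...
	workerReg := prometheus.NewRegistry()
	jobs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "worker_jobs_total",
		Help: "Jobs processed by the worker.",
	})
	workerReg.MustRegister(jobs)

	// ...then register that registry, as a Collector, with the registry
	// that is actually exposed.
	topReg := prometheus.NewRegistry()
	topReg.MustRegister(workerReg)

	jobs.Inc()
	http.Handle("/metrics", promhttp.HandlerFor(topReg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}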

+ 6 - 3
vendor/github.com/prometheus/client_golang/prometheus/summary.go

@@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. Not returning an
 // error allows shortcuts like
-//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
 func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
 	s, err := v.GetMetricWithLabelValues(lvs...)
 	if err != nil {
@@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
 
 // With works as GetMetricWith, but panics where GetMetricWith would have
 // returned an error. Not returning an error allows shortcuts like
-//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
 func (v *SummaryVec) With(labels Labels) Observer {
 	s, err := v.GetMetricWith(labels)
 	if err != nil {
@@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error {
 //
 // quantiles maps ranks to quantile values. For example, a median latency of
 // 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
 //
 // NewConstSummary returns an error if the length of labelValues is not
 // consistent with the variable labels in Desc or if Desc is invalid.
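The quantiles map from the comment above is passed to NewConstSummary together with the total count and sum; a minimal sketch with a made-up descriptor:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"http_request_duration_seconds",
		"Latency summary scraped from some external system.",
		nil, nil,
	)
	// count, sum, and the rank -> quantile value map.
	s, err := prometheus.NewConstSummary(desc, 4711, 403.34,
		map[float64]float64{0.5: 0.23, 0.99: 0.56})
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Desc())
}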

+ 6 - 5
vendor/github.com/prometheus/client_golang/prometheus/timer.go

@@ -25,11 +25,12 @@ type Timer struct {
 // NewTimer creates a new Timer. The provided Observer is used to observe a
 // duration in seconds. Timer is usually used to time a function call in the
 // following way:
-//    func TimeMe() {
-//        timer := NewTimer(myHistogram)
-//        defer timer.ObserveDuration()
-//        // Do actual work.
-//    }
+//
+//	func TimeMe() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDuration()
+//	    // Do actual work.
+//	}
 func NewTimer(o Observer) *Timer {
 	return &Timer{
 		begin:    time.Now(),

+ 262 - 71
vendor/github.com/prometheus/client_model/go/metrics.pb.go

@@ -1,5 +1,5 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: metrics.proto
+// source: io/prometheus/client/metrics.proto
 
 package io_prometheus_client
 
@@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 type MetricType int32
 
 const (
-	MetricType_COUNTER   MetricType = 0
-	MetricType_GAUGE     MetricType = 1
-	MetricType_SUMMARY   MetricType = 2
-	MetricType_UNTYPED   MetricType = 3
+	// COUNTER must use the Metric field "counter".
+	MetricType_COUNTER MetricType = 0
+	// GAUGE must use the Metric field "gauge".
+	MetricType_GAUGE MetricType = 1
+	// SUMMARY must use the Metric field "summary".
+	MetricType_SUMMARY MetricType = 2
+	// UNTYPED must use the Metric field "untyped".
+	MetricType_UNTYPED MetricType = 3
+	// HISTOGRAM must use the Metric field "histogram".
 	MetricType_HISTOGRAM MetricType = 4
+	// GAUGE_HISTOGRAM must use the Metric field "histogram".
+	MetricType_GAUGE_HISTOGRAM MetricType = 5
 )
 
 var MetricType_name = map[int32]string{
@@ -37,14 +44,16 @@ var MetricType_name = map[int32]string{
 	2: "SUMMARY",
 	3: "UNTYPED",
 	4: "HISTOGRAM",
+	5: "GAUGE_HISTOGRAM",
 }
 
 var MetricType_value = map[string]int32{
-	"COUNTER":   0,
-	"GAUGE":     1,
-	"SUMMARY":   2,
-	"UNTYPED":   3,
-	"HISTOGRAM": 4,
+	"COUNTER":         0,
+	"GAUGE":           1,
+	"SUMMARY":         2,
+	"UNTYPED":         3,
+	"HISTOGRAM":       4,
+	"GAUGE_HISTOGRAM": 5,
 }
 
 func (x MetricType) Enum() *MetricType {
@@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
 }
 
 func (MetricType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{0}
+	return fileDescriptor_d1e5ddb18987a258, []int{0}
 }
 
 type LabelPair struct {
@@ -82,7 +91,7 @@ func (m *LabelPair) Reset()         { *m = LabelPair{} }
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
 func (*LabelPair) ProtoMessage()    {}
 func (*LabelPair) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{0}
+	return fileDescriptor_d1e5ddb18987a258, []int{0}
 }
 
 func (m *LabelPair) XXX_Unmarshal(b []byte) error {
@@ -128,7 +137,7 @@ func (m *Gauge) Reset()         { *m = Gauge{} }
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
 func (*Gauge) ProtoMessage()    {}
 func (*Gauge) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{1}
+	return fileDescriptor_d1e5ddb18987a258, []int{1}
 }
 
 func (m *Gauge) XXX_Unmarshal(b []byte) error {
@@ -168,7 +177,7 @@ func (m *Counter) Reset()         { *m = Counter{} }
 func (m *Counter) String() string { return proto.CompactTextString(m) }
 func (*Counter) ProtoMessage()    {}
 func (*Counter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{2}
+	return fileDescriptor_d1e5ddb18987a258, []int{2}
 }
 
 func (m *Counter) XXX_Unmarshal(b []byte) error {
@@ -215,7 +224,7 @@ func (m *Quantile) Reset()         { *m = Quantile{} }
 func (m *Quantile) String() string { return proto.CompactTextString(m) }
 func (*Quantile) ProtoMessage()    {}
 func (*Quantile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{3}
+	return fileDescriptor_d1e5ddb18987a258, []int{3}
 }
 
 func (m *Quantile) XXX_Unmarshal(b []byte) error {
@@ -263,7 +272,7 @@ func (m *Summary) Reset()         { *m = Summary{} }
 func (m *Summary) String() string { return proto.CompactTextString(m) }
 func (*Summary) ProtoMessage()    {}
 func (*Summary) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{4}
+	return fileDescriptor_d1e5ddb18987a258, []int{4}
 }
 
 func (m *Summary) XXX_Unmarshal(b []byte) error {
@@ -316,7 +325,7 @@ func (m *Untyped) Reset()         { *m = Untyped{} }
 func (m *Untyped) String() string { return proto.CompactTextString(m) }
 func (*Untyped) ProtoMessage()    {}
 func (*Untyped) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{5}
+	return fileDescriptor_d1e5ddb18987a258, []int{5}
 }
 
 func (m *Untyped) XXX_Unmarshal(b []byte) error {
@@ -345,9 +354,34 @@ func (m *Untyped) GetValue() float64 {
 }
 
 type Histogram struct {
-	SampleCount          *uint64   `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
-	SampleSum            *float64  `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
-	Bucket               []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+	SampleCount      *uint64  `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
+	SampleSum        *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	// Buckets for the conventional histogram.
+	Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+	// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+	// then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+	// In the future, more bucket schemas may be added using numbers < -4 or > 8.
+	Schema         *int32   `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+	ZeroThreshold  *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
+	ZeroCount      *uint64  `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
+	ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
+	// Negative buckets for the native histogram.
+	NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+	// Use either "negative_delta" or "negative_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	NegativeDelta []int64   `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
+	NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
+	// Positive buckets for the native histogram.
+	PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+	// Use either "positive_delta" or "positive_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	PositiveDelta        []int64   `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
+	PositiveCount        []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
 	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
 	XXX_unrecognized     []byte    `json:"-"`
 	XXX_sizecache        int32     `json:"-"`
@@ -357,7 +391,7 @@ func (m *Histogram) Reset()         { *m = Histogram{} }
 func (m *Histogram) String() string { return proto.CompactTextString(m) }
 func (*Histogram) ProtoMessage()    {}
 func (*Histogram) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{6}
+	return fileDescriptor_d1e5ddb18987a258, []int{6}
 }
 
 func (m *Histogram) XXX_Unmarshal(b []byte) error {
@@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 {
 	return 0
 }
 
+func (m *Histogram) GetSampleCountFloat() float64 {
+	if m != nil && m.SampleCountFloat != nil {
+		return *m.SampleCountFloat
+	}
+	return 0
+}
+
 func (m *Histogram) GetSampleSum() float64 {
 	if m != nil && m.SampleSum != nil {
 		return *m.SampleSum
@@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket {
 	return nil
 }
 
+func (m *Histogram) GetSchema() int32 {
+	if m != nil && m.Schema != nil {
+		return *m.Schema
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroThreshold() float64 {
+	if m != nil && m.ZeroThreshold != nil {
+		return *m.ZeroThreshold
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCount() uint64 {
+	if m != nil && m.ZeroCount != nil {
+		return *m.ZeroCount
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCountFloat() float64 {
+	if m != nil && m.ZeroCountFloat != nil {
+		return *m.ZeroCountFloat
+	}
+	return 0
+}
+
+func (m *Histogram) GetNegativeSpan() []*BucketSpan {
+	if m != nil {
+		return m.NegativeSpan
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeDelta() []int64 {
+	if m != nil {
+		return m.NegativeDelta
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeCount() []float64 {
+	if m != nil {
+		return m.NegativeCount
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveSpan() []*BucketSpan {
+	if m != nil {
+		return m.PositiveSpan
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveDelta() []int64 {
+	if m != nil {
+		return m.PositiveDelta
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveCount() []float64 {
+	if m != nil {
+		return m.PositiveCount
+	}
+	return nil
+}
+
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
 type Bucket struct {
 	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+	CumulativeCountFloat *float64  `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
 	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
 	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
 	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
@@ -412,7 +526,7 @@ func (m *Bucket) Reset()         { *m = Bucket{} }
 func (m *Bucket) String() string { return proto.CompactTextString(m) }
 func (*Bucket) ProtoMessage()    {}
 func (*Bucket) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{7}
+	return fileDescriptor_d1e5ddb18987a258, []int{7}
 }
 
 func (m *Bucket) XXX_Unmarshal(b []byte) error {
@@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 {
 	return 0
 }
 
+func (m *Bucket) GetCumulativeCountFloat() float64 {
+	if m != nil && m.CumulativeCountFloat != nil {
+		return *m.CumulativeCountFloat
+	}
+	return 0
+}
+
 func (m *Bucket) GetUpperBound() float64 {
 	if m != nil && m.UpperBound != nil {
 		return *m.UpperBound
@@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar {
 	return nil
 }
 
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+	Offset               *int32   `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
+	Length               *uint32  `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BucketSpan) Reset()         { *m = BucketSpan{} }
+func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
+func (*BucketSpan) ProtoMessage()    {}
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1e5ddb18987a258, []int{8}
+}
+
+func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
+}
+func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
+}
+func (m *BucketSpan) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BucketSpan.Merge(m, src)
+}
+func (m *BucketSpan) XXX_Size() int {
+	return xxx_messageInfo_BucketSpan.Size(m)
+}
+func (m *BucketSpan) XXX_DiscardUnknown() {
+	xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+
+func (m *BucketSpan) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return 0
+}
+
+func (m *BucketSpan) GetLength() uint32 {
+	if m != nil && m.Length != nil {
+		return *m.Length
+	}
+	return 0
+}
+
 type Exemplar struct {
 	Label                []*LabelPair         `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
 	Value                *float64             `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
@@ -467,7 +641,7 @@ func (m *Exemplar) Reset()         { *m = Exemplar{} }
 func (m *Exemplar) String() string { return proto.CompactTextString(m) }
 func (*Exemplar) ProtoMessage()    {}
 func (*Exemplar) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{8}
+	return fileDescriptor_d1e5ddb18987a258, []int{9}
 }
 
 func (m *Exemplar) XXX_Unmarshal(b []byte) error {
@@ -526,7 +700,7 @@ func (m *Metric) Reset()         { *m = Metric{} }
 func (m *Metric) String() string { return proto.CompactTextString(m) }
 func (*Metric) ProtoMessage()    {}
 func (*Metric) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{9}
+	return fileDescriptor_d1e5ddb18987a258, []int{10}
 }
 
 func (m *Metric) XXX_Unmarshal(b []byte) error {
@@ -610,7 +784,7 @@ func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
 func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
 func (*MetricFamily) ProtoMessage()    {}
 func (*MetricFamily) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6039342a2ba47b72, []int{10}
+	return fileDescriptor_d1e5ddb18987a258, []int{11}
 }
 
 func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
@@ -669,55 +843,72 @@ func init() {
 	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
 	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
 	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+	proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
 	proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
 	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
 	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
 }
 
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
-
-var fileDescriptor_6039342a2ba47b72 = []byte{
-	// 665 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
-	0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
-	0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
-	0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
-	0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
-	0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
-	0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
-	0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
-	0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
-	0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
-	0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
-	0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
-	0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
-	0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
-	0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
-	0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
-	0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
-	0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
-	0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
-	0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
-	0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
-	0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
-	0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
-	0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
-	0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
-	0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
-	0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
-	0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
-	0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
-	0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
-	0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
-	0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
-	0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
-	0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
-	0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
-	0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
-	0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
-	0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
-	0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
-	0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
-	0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
-	0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
+func init() {
+	proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
+}
+
+var fileDescriptor_d1e5ddb18987a258 = []byte{
+	// 896 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
+	0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
+	0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
+	0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
+	0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
+	0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
+	0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
+	0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
+	0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
+	0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
+	0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
+	0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
+	0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
+	0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
+	0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
+	0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
+	0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
+	0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
+	0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
+	0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
+	0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
+	0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
+	0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
+	0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
+	0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
+	0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
+	0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
+	0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
+	0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
+	0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
+	0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
+	0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
+	0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
+	0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
+	0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
+	0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
+	0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
+	0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
+	0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
+	0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
+	0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
+	0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
+	0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
+	0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
+	0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
+	0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
+	0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
+	0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
+	0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
+	0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
+	0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
+	0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
+	0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
+	0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
+	0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
+	0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
 }
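The new Histogram fields carry the native histogram data: populated sparse buckets are described by BucketSpan values (offset and length of consecutive runs), while the per-bucket counts are delta-encoded in NegativeDelta/PositiveDelta, matching what makeBuckets in histogram.go emits. A small sketch decoding spans plus deltas back into absolute bucket indices and counts (integer counts assumed; the pointer helpers come from google.golang.org/protobuf/proto):

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

// decode expands spans plus delta-encoded counts into absolute bucket indices
// and counts, the inverse of what makeBuckets in histogram.go produces.
func decode(spans []*dto.BucketSpan, deltas []int64) map[int]int64 {
	buckets := map[int]int64{}
	idx, count, d := 0, int64(0), 0
	for _, span := range spans {
		idx += int(span.GetOffset())
		for i := uint32(0); i < span.GetLength(); i++ {
			count += deltas[d]
			d++
			buckets[idx] = count
			idx++
		}
	}
	return buckets
}

func main() {
	// Buckets 1 and 2 are populated, then a gap of three, then bucket 6.
	spans := []*dto.BucketSpan{
		{Offset: proto.Int32(1), Length: proto.Uint32(2)},
		{Offset: proto.Int32(3), Length: proto.Uint32(1)},
	}
	deltas := []int64{4, -2, 3} // absolute counts 4, 2, 5
	fmt.Println(decode(spans, deltas)) // map[1:4 2:2 6:5]
}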

+ 2 - 2
vendor/modules.txt

@@ -740,12 +740,12 @@ github.com/philhofer/fwd
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/prometheus/client_golang v1.13.0
+# github.com/prometheus/client_golang v1.14.0
 ## explicit; go 1.17
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.2.0
+# github.com/prometheus/client_model v0.3.0
 ## explicit; go 1.9
 github.com/prometheus/client_model/go
 # github.com/prometheus/common v0.37.0