Kaynağa Gözat

Merge pull request #41178 from tao12345666333/update-runc-to-v1.0.0-rc91

vendor runc library v1.0.0-rc91-48-g67169a9d
Sebastiaan van Stijn 5 yıl önce
ebeveyn
işleme
79eef6e78c
100 değiştirilmiş dosya ile 3423 ekleme ve 1391 silme
  1. 1 1
      oci/devices_linux.go
  2. 11 11
      vendor.conf
  3. 2 2
      vendor/github.com/containerd/containerd/README.md
  4. 32 19
      vendor/github.com/containerd/containerd/cio/io.go
  5. 1 1
      vendor/github.com/containerd/containerd/cio/io_unix.go
  6. 4 0
      vendor/github.com/containerd/containerd/client.go
  7. 13 0
      vendor/github.com/containerd/containerd/client_opts.go
  8. 2 0
      vendor/github.com/containerd/containerd/container.go
  9. 4 2
      vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go
  10. 1 1
      vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go
  11. 17 15
      vendor/github.com/containerd/containerd/image.go
  12. 33 8
      vendor/github.com/containerd/containerd/images/handlers.go
  13. 1 1
      vendor/github.com/containerd/containerd/images/image.go
  14. 30 1
      vendor/github.com/containerd/containerd/images/mediatypes.go
  15. 28 4
      vendor/github.com/containerd/containerd/mount/mount_linux.go
  16. 2 2
      vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
  17. 7 0
      vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
  18. 1 1
      vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
  19. 4 4
      vendor/github.com/containerd/containerd/pkg/process/init.go
  20. 1 1
      vendor/github.com/containerd/containerd/pkg/process/io.go
  21. 2 0
      vendor/github.com/containerd/containerd/pkg/process/utils.go
  22. 2 2
      vendor/github.com/containerd/containerd/platforms/cpuinfo.go
  23. 1 1
      vendor/github.com/containerd/containerd/pull.go
  24. 1 1
      vendor/github.com/containerd/containerd/remotes/docker/pusher.go
  25. 1 0
      vendor/github.com/containerd/containerd/remotes/docker/registry.go
  26. 3 0
      vendor/github.com/containerd/containerd/remotes/docker/resolver.go
  27. 15 5
      vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go
  28. 35 0
      vendor/github.com/containerd/containerd/snapshotter_opts_unix.go
  29. 9 0
      vendor/github.com/containerd/containerd/task.go
  30. 2 2
      vendor/github.com/containerd/containerd/unpacker.go
  31. 82 82
      vendor/github.com/containerd/containerd/vendor.conf
  32. 1 1
      vendor/github.com/containerd/containerd/version/version.go
  33. 1 1
      vendor/github.com/containerd/fifo/fifo.go
  34. 6 3
      vendor/github.com/containerd/fifo/handle_linux.go
  35. 1 1
      vendor/github.com/containerd/fifo/raw.go
  36. 1 1
      vendor/github.com/golang/protobuf/go.mod
  37. 7 5
      vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
  38. 71 69
      vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
  39. 5 2
      vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
  40. 2 1
      vendor/github.com/golang/protobuf/ptypes/any/any.proto
  41. 4 2
      vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
  42. 1 2
      vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
  43. 3 1
      vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
  44. 3 1
      vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
  45. 0 1
      vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
  46. 23 17
      vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
  47. 20 17
      vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
  48. 39 19
      vendor/github.com/opencontainers/runc/README.md
  49. 26 0
      vendor/github.com/opencontainers/runc/go.mod
  50. 2 3
      vendor/github.com/opencontainers/runc/libcontainer/README.md
  51. 16 39
      vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
  52. 27 0
      vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
  53. 74 283
      vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
  54. 250 0
      vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go
  55. 17 11
      vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
  56. 70 26
      vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
  57. 137 24
      vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
  58. 0 111
      vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
  59. 16 0
      vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go
  60. 5 0
      vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go
  61. 23 20
      vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go
  62. 27 1
      vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c
  63. 6 6
      vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
  64. 13 17
      vendor/github.com/opencontainers/runc/libcontainer/user/user.go
  65. 0 31
      vendor/github.com/opencontainers/runc/vendor.conf
  66. 4 3
      vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
  67. 1 1
      vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
  68. 10 6
      vendor/github.com/prometheus/client_golang/go.mod
  69. 47 3
      vendor/github.com/prometheus/client_golang/prometheus/counter.go
  70. 18 19
      vendor/github.com/prometheus/client_golang/prometheus/doc.go
  71. 1 1
      vendor/github.com/prometheus/client_golang/prometheus/gauge.go
  72. 1 1
      vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
  73. 80 30
      vendor/github.com/prometheus/client_golang/prometheus/histogram.go
  74. 12 0
      vendor/github.com/prometheus/client_golang/prometheus/observer.go
  75. 14 10
      vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
  76. 7 3
      vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
  77. 56 26
      vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
  78. 46 4
      vendor/github.com/prometheus/client_golang/prometheus/value.go
  79. 12 0
      vendor/github.com/prometheus/client_golang/prometheus/vec.go
  80. 181 87
      vendor/github.com/prometheus/client_model/go/metrics.pb.go
  81. 99 25
      vendor/github.com/prometheus/common/expfmt/encode.go
  82. 7 4
      vendor/github.com/prometheus/common/expfmt/expfmt.go
  83. 527 0
      vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
  84. 1 2
      vendor/github.com/prometheus/common/expfmt/text_create.go
  85. 2 2
      vendor/github.com/prometheus/common/go.mod
  86. 91 69
      vendor/github.com/prometheus/procfs/crypto.go
  87. 1 0
      vendor/github.com/prometheus/procfs/go.mod
  88. 62 0
      vendor/github.com/prometheus/procfs/loadavg.go
  89. 6 6
      vendor/github.com/prometheus/procfs/mountinfo.go
  90. 153 0
      vendor/github.com/prometheus/procfs/net_conntrackstat.go
  91. 59 48
      vendor/github.com/prometheus/procfs/net_softnet.go
  92. 229 0
      vendor/github.com/prometheus/procfs/net_udp.go
  93. 104 118
      vendor/github.com/prometheus/procfs/net_unix.go
  94. 20 12
      vendor/github.com/prometheus/procfs/proc_fdinfo.go
  95. 208 0
      vendor/github.com/prometheus/procfs/proc_maps.go
  96. 21 16
      vendor/github.com/prometheus/procfs/proc_status.go
  97. 89 0
      vendor/github.com/prometheus/procfs/swaps.go
  98. 9 6
      vendor/go.etcd.io/bbolt/README.md
  99. 0 3
      vendor/go.etcd.io/bbolt/bolt_386.go
  100. 0 3
      vendor/go.etcd.io/bbolt/bolt_amd64.go

+ 1 - 1
oci/devices_linux.go

@@ -30,7 +30,7 @@ func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup {
 		Type:   string(d.Type),
 		Type:   string(d.Type),
 		Major:  &d.Major,
 		Major:  &d.Major,
 		Minor:  &d.Minor,
 		Minor:  &d.Minor,
-		Access: d.Permissions,
+		Access: string(d.Permissions),
 	}
 	}
 }
 }
 
 

+ 11 - 11
vendor.conf

@@ -13,7 +13,7 @@ github.com/konsorten/go-windows-terminal-sequences  edb144dfd453055e1e49a3d8b410
 github.com/sirupsen/logrus                          60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0
 github.com/sirupsen/logrus                          60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0
 github.com/tchap/go-patricia                        a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0
 github.com/tchap/go-patricia                        a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0
 golang.org/x/net                                    0de0cce0169b09b364e001f108dc0399ea8630b3
 golang.org/x/net                                    0de0cce0169b09b364e001f108dc0399ea8630b3
-golang.org/x/sys                                    85ca7c5b95cdf1e557abb38a283d1e61a5959c31
+golang.org/x/sys                                    9dae0f8f577553e0f21298e18926efc9644c281d
 github.com/docker/go-units                          519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
 github.com/docker/go-units                          519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
 github.com/docker/go-connections                    7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
 github.com/docker/go-connections                    7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
 github.com/moby/sys                                 6154f11e6840c0d6b0dbb23f4125a6134b3013c9 # mountinfo/v0.1.3
 github.com/moby/sys                                 6154f11e6840c0d6b0dbb23f4125a6134b3013c9 # mountinfo/v0.1.3
@@ -66,7 +66,7 @@ github.com/ugorji/go                                b4c50a2b199d93b13dc15e78929c
 github.com/hashicorp/consul                         9a9cc9341bb487651a0399e3fc5e1e8a42e62dd9 # v0.5.2
 github.com/hashicorp/consul                         9a9cc9341bb487651a0399e3fc5e1e8a42e62dd9 # v0.5.2
 github.com/miekg/dns                                6c0c4e6581f8e173cc562c8b3363ab984e4ae071 # v1.1.27
 github.com/miekg/dns                                6c0c4e6581f8e173cc562c8b3363ab984e4ae071 # v1.1.27
 github.com/ishidawataru/sctp                        6e2cb1366111dcf547c13531e3a263a067715847
 github.com/ishidawataru/sctp                        6e2cb1366111dcf547c13531e3a263a067715847
-go.etcd.io/bbolt                                    a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3
+go.etcd.io/bbolt                                    232d8fc87f50244f9c808f4745759e08a304c029 # v1.3.5
 
 
 # get graph and distribution packages
 # get graph and distribution packages
 github.com/docker/distribution                      0d3efadf0154c2b8a4e7b6621fff9809655cc580
 github.com/docker/distribution                      0d3efadf0154c2b8a4e7b6621fff9809655cc580
@@ -83,8 +83,8 @@ google.golang.org/grpc                              f495f5b15ae7ccda3b38c53a1bfc
 # the containerd project first, and update both after that is merged.
 # the containerd project first, and update both after that is merged.
 # This commit does not need to match RUNC_COMMIT as it is used for helper
 # This commit does not need to match RUNC_COMMIT as it is used for helper
 # packages but should be newer or equal.
 # packages but should be newer or equal.
-github.com/opencontainers/runc                      dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
-github.com/opencontainers/runtime-spec              c4ee7d12c742ffe806cd9350b6af3b4b19faed6f # v1.0.2
+github.com/opencontainers/runc                      67169a9d43456ff0d5ae12b967acb8e366e2f181 # v1.0.0-rc91-48-g67169a9d
+github.com/opencontainers/runtime-spec              237cc4f519e2e8f9b235bacccfa8ef5a84df2875 # v1.0.3-0.20200520003142-237cc4f519e2
 github.com/opencontainers/image-spec                d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
 github.com/opencontainers/image-spec                d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
 github.com/seccomp/libseccomp-golang                689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
 github.com/seccomp/libseccomp-golang                689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
 
 
@@ -122,8 +122,8 @@ github.com/googleapis/gax-go                        317e0006254c44a0ac427cc52a0e
 google.golang.org/genproto                          3f1135a288c9a07e340ae8ba4cc6c7065a3160e8
 google.golang.org/genproto                          3f1135a288c9a07e340ae8ba4cc6c7065a3160e8
 
 
 # containerd
 # containerd
-github.com/containerd/containerd                    c80284d4b5291a351bb471bcdabb5c1d95e7a583 # master / v1.4.0-dev
-github.com/containerd/fifo                          ff969a566b00877c63489baf6e8c35d60af6142c
+github.com/containerd/containerd                    779ef60231a555f7eb9ba82b052d59b69ca2ef10 # master / v1.4.0-beta.1-150-g779ef602
+github.com/containerd/fifo                          f15a3290365b9d2627d189e619ab4008e0069caf
 github.com/containerd/continuity                    efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
 github.com/containerd/continuity                    efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
 github.com/containerd/cgroups                       318312a373405e5e91134d8063d04d59768a1bff
 github.com/containerd/cgroups                       318312a373405e5e91134d8063d04d59768a1bff
 github.com/containerd/console                       8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
 github.com/containerd/console                       8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
@@ -136,7 +136,7 @@ github.com/cilium/ebpf                              1c8d4c9ef7759622653a1d319284
 # cluster
 # cluster
 github.com/docker/swarmkit                          d6592ddefd8a5319aadff74c558b816b1a0b2590
 github.com/docker/swarmkit                          d6592ddefd8a5319aadff74c558b816b1a0b2590
 github.com/gogo/protobuf                            5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
 github.com/gogo/protobuf                            5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
-github.com/golang/protobuf                          d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
+github.com/golang/protobuf                          84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5
 github.com/cloudflare/cfssl                         5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
 github.com/cloudflare/cfssl                         5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
 github.com/fernet/fernet-go                         9eac43b88a5efb8651d24de9b68e87567e029736
 github.com/fernet/fernet-go                         9eac43b88a5efb8651d24de9b68e87567e029736
 github.com/google/certificate-transparency-go       37a384cd035e722ea46e55029093e26687138edf # v1.0.20
 github.com/google/certificate-transparency-go       37a384cd035e722ea46e55029093e26687138edf # v1.0.20
@@ -149,11 +149,11 @@ github.com/coreos/pkg                               3ac0863d7acf3bc44daf49afef89
 code.cloudfoundry.org/clock                         02e53af36e6c978af692887ed449b74026d76fec # v1.0.0
 code.cloudfoundry.org/clock                         02e53af36e6c978af692887ed449b74026d76fec # v1.0.0
 
 
 # prometheus
 # prometheus
-github.com/prometheus/client_golang                 c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
+github.com/prometheus/client_golang                 6edbbd9e560190e318cdc5b4d3e630b442858380 # v1.6.0
 github.com/beorn7/perks                             37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
 github.com/beorn7/perks                             37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
-github.com/prometheus/client_model                  d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
-github.com/prometheus/common                        287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
-github.com/prometheus/procfs                        6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
+github.com/prometheus/client_model                  7bc5445566f0fe75b15de23e6b93886e982d7bf9 # v0.2.0
+github.com/prometheus/common                        d978bcb1309602d68bb4ba69cf3f8ed900e07308 # v0.9.1
+github.com/prometheus/procfs                        46159f73e74d1cb8dc223deef9b2d049286f46b1 # v0.0.11
 github.com/matttproud/golang_protobuf_extensions    c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
 github.com/matttproud/golang_protobuf_extensions    c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
 github.com/pkg/errors                               614d223910a179a466c1767a985424175c39b465 # v0.9.1
 github.com/pkg/errors                               614d223910a179a466c1767a985424175c39b465 # v0.9.1
 github.com/grpc-ecosystem/go-grpc-prometheus        c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
 github.com/grpc-ecosystem/go-grpc-prometheus        c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0

+ 2 - 2
vendor/github.com/containerd/containerd/README.md

@@ -154,7 +154,7 @@ Taking a container object and turning it into a runnable process on a system is
 
 
 ```go
 ```go
 // create a new task
 // create a new task
-task, err := redis.NewTask(context, cio.Stdio)
+task, err := redis.NewTask(context, cio.NewCreator(cio.WithStdio))
 defer task.Delete(context)
 defer task.Delete(context)
 
 
 // the task is now running and has a pid that can be use to setup networking
 // the task is now running and has a pid that can be use to setup networking
@@ -184,7 +184,7 @@ checkpoint, err := client.Pull(context, "myregistry/checkpoints/redis:master")
 redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint))
 redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint))
 defer container.Delete(context)
 defer container.Delete(context)
 
 
-task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+task, err = redis.NewTask(context, cio.NewCreator(cio.WithStdio), containerd.WithTaskCheckpoint(checkpoint))
 defer task.Delete(context)
 defer task.Delete(context)
 
 
 err := task.Start(context)
 err := task.Start(context)

+ 32 - 19
vendor/github.com/containerd/containerd/cio/io.go

@@ -245,19 +245,11 @@ func LogURI(uri *url.URL) Creator {
 // BinaryIO forwards container STDOUT|STDERR directly to a logging binary
 // BinaryIO forwards container STDOUT|STDERR directly to a logging binary
 func BinaryIO(binary string, args map[string]string) Creator {
 func BinaryIO(binary string, args map[string]string) Creator {
 	return func(_ string) (IO, error) {
 	return func(_ string) (IO, error) {
-		binary = filepath.Clean(binary)
-		if !strings.HasPrefix(binary, "/") {
-			return nil, errors.New("absolute path needed")
-		}
-		uri := &url.URL{
-			Scheme: "binary",
-			Path:   binary,
-		}
-		q := uri.Query()
-		for k, v := range args {
-			q.Set(k, v)
+		uri, err := LogURIGenerator("binary", binary, args)
+		if err != nil {
+			return nil, err
 		}
 		}
-		uri.RawQuery = q.Encode()
+
 		res := uri.String()
 		res := uri.String()
 		return &logURI{
 		return &logURI{
 			config: Config{
 			config: Config{
@@ -272,14 +264,11 @@ func BinaryIO(binary string, args map[string]string) Creator {
 // If the log file already exists, the logs will be appended to the file.
 // If the log file already exists, the logs will be appended to the file.
 func LogFile(path string) Creator {
 func LogFile(path string) Creator {
 	return func(_ string) (IO, error) {
 	return func(_ string) (IO, error) {
-		path = filepath.Clean(path)
-		if !strings.HasPrefix(path, "/") {
-			return nil, errors.New("absolute path needed")
-		}
-		uri := &url.URL{
-			Scheme: "file",
-			Path:   path,
+		uri, err := LogURIGenerator("file", path, nil)
+		if err != nil {
+			return nil, err
 		}
 		}
+
 		res := uri.String()
 		res := uri.String()
 		return &logURI{
 		return &logURI{
 			config: Config{
 			config: Config{
@@ -290,6 +279,30 @@ func LogFile(path string) Creator {
 	}
 	}
 }
 }
 
 
+// LogURIGenerator is the helper to generate log uri with specific scheme.
+func LogURIGenerator(scheme string, path string, args map[string]string) (*url.URL, error) {
+	path = filepath.Clean(path)
+	if !strings.HasPrefix(path, "/") {
+		return nil, errors.New("absolute path needed")
+	}
+
+	uri := &url.URL{
+		Scheme: scheme,
+		Path:   path,
+	}
+
+	if len(args) == 0 {
+		return uri, nil
+	}
+
+	q := uri.Query()
+	for k, v := range args {
+		q.Set(k, v)
+	}
+	uri.RawQuery = q.Encode()
+	return uri, nil
+}
+
 type logURI struct {
 type logURI struct {
 	config Config
 	config Config
 }
 }

+ 1 - 1
vendor/github.com/containerd/containerd/cio/io_unix.go

@@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (pipes, error) {
 			}
 			}
 		}()
 		}()
 	}
 	}
-	if fifos.Stderr != "" {
+	if !fifos.Terminal && fifos.Stderr != "" {
 		if f.Stderr, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
 		if f.Stderr, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
 			return f, errors.Wrapf(err, "failed to open stderr fifo")
 			return f, errors.Wrapf(err, "failed to open stderr fifo")
 		}
 		}

+ 4 - 0
vendor/github.com/containerd/containerd/client.go

@@ -351,6 +351,10 @@ type RemoteContext struct {
 
 
 	// AllMetadata downloads all manifests and known-configuration files
 	// AllMetadata downloads all manifests and known-configuration files
 	AllMetadata bool
 	AllMetadata bool
+
+	// ChildLabelMap sets the labels used to reference child objects in the content
+	// store. By default, all GC reference labels will be set for all fetched content.
+	ChildLabelMap func(ocispec.Descriptor) []string
 }
 }
 
 
 func defaultRemoteContext() *RemoteContext {
 func defaultRemoteContext() *RemoteContext {

+ 13 - 0
vendor/github.com/containerd/containerd/client_opts.go

@@ -23,6 +23,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/containerd/containerd/snapshots"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
 )
 )
@@ -175,6 +176,18 @@ func WithPullLabels(labels map[string]string) RemoteOpt {
 	}
 	}
 }
 }
 
 
+// WithChildLabelMap sets the map function used to define the labels set
+// on referenced child content in the content store. This can be used
+// to overwrite the default GC labels or filter which labels get set
+// for content.
+// The default is `images.ChildGCLabels`.
+func WithChildLabelMap(fn func(ocispec.Descriptor) []string) RemoteOpt {
+	return func(_ *Client, c *RemoteContext) error {
+		c.ChildLabelMap = fn
+		return nil
+	}
+}
+
 // WithSchema1Conversion is used to convert Docker registry schema 1
 // WithSchema1Conversion is used to convert Docker registry schema 1
 // manifests to oci manifests on pull. Without this option schema 1
 // manifests to oci manifests on pull. Without this option schema 1
 // manifests will return a not supported error.
 // manifests will return a not supported error.

+ 2 - 0
vendor/github.com/containerd/containerd/container.go

@@ -290,6 +290,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
 		client: c.client,
 		client: c.client,
 		io:     i,
 		io:     i,
 		id:     c.id,
 		id:     c.id,
+		c:      c,
 	}
 	}
 	if info.Checkpoint != nil {
 	if info.Checkpoint != nil {
 		request.Checkpoint = info.Checkpoint
 		request.Checkpoint = info.Checkpoint
@@ -407,6 +408,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er
 		io:     i,
 		io:     i,
 		id:     response.Process.ID,
 		id:     response.Process.ID,
 		pid:    response.Process.Pid,
 		pid:    response.Process.Pid,
+		c:      c,
 	}
 	}
 	return t, nil
 	return t, nil
 }
 }

+ 4 - 2
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go

@@ -47,7 +47,7 @@ func arches() []specs.Arch {
 	}
 	}
 }
 }
 
 
-// DefaultProfile defines the whitelist for the default seccomp profile.
+// DefaultProfile defines the allowed syscalls for the default seccomp profile.
 func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 	syscalls := []specs.LinuxSyscall{
 	syscalls := []specs.LinuxSyscall{
 		{
 		{
@@ -64,6 +64,8 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 				"chmod",
 				"chmod",
 				"chown",
 				"chown",
 				"chown32",
 				"chown32",
+				"clock_adjtime",
+				"clock_adjtime64",
 				"clock_getres",
 				"clock_getres",
 				"clock_getres_time64",
 				"clock_getres_time64",
 				"clock_gettime",
 				"clock_gettime",
@@ -253,6 +255,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 				"renameat2",
 				"renameat2",
 				"restart_syscall",
 				"restart_syscall",
 				"rmdir",
 				"rmdir",
+				"rseq",
 				"rt_sigaction",
 				"rt_sigaction",
 				"rt_sigpending",
 				"rt_sigpending",
 				"rt_sigprocmask",
 				"rt_sigprocmask",
@@ -513,7 +516,6 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 					"delete_module",
 					"delete_module",
 					"init_module",
 					"init_module",
 					"finit_module",
 					"finit_module",
-					"query_module",
 				},
 				},
 				Action: specs.ActAllow,
 				Action: specs.ActAllow,
 				Args:   []specs.LinuxSeccompArg{},
 				Args:   []specs.LinuxSeccompArg{},

+ 1 - 1
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go

@@ -20,7 +20,7 @@ package seccomp
 
 
 import specs "github.com/opencontainers/runtime-spec/specs-go"
 import specs "github.com/opencontainers/runtime-spec/specs-go"
 
 
-// DefaultProfile defines the whitelist for the default seccomp profile.
+// DefaultProfile defines the allowed syscalls for the default seccomp profile.
 func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 	return &specs.LinuxSeccomp{}
 	return &specs.LinuxSeccomp{}
 }
 }

+ 17 - 15
vendor/github.com/containerd/containerd/image.go

@@ -203,24 +203,26 @@ func (i *image) Usage(ctx context.Context, opts ...UsageOpt) (int64, error) {
 				desc.Size = info.Size
 				desc.Size = info.Size
 			}
 			}
 
 
-			for k, v := range info.Labels {
-				const prefix = "containerd.io/gc.ref.snapshot."
-				if !strings.HasPrefix(k, prefix) {
-					continue
-				}
+			if config.snapshots {
+				for k, v := range info.Labels {
+					const prefix = "containerd.io/gc.ref.snapshot."
+					if !strings.HasPrefix(k, prefix) {
+						continue
+					}
 
 
-				sn := i.client.SnapshotService(k[len(prefix):])
-				if sn == nil {
-					continue
-				}
+					sn := i.client.SnapshotService(k[len(prefix):])
+					if sn == nil {
+						continue
+					}
 
 
-				u, err := sn.Usage(ctx, v)
-				if err != nil {
-					if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) {
-						return nil, err
+					u, err := sn.Usage(ctx, v)
+					if err != nil {
+						if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) {
+							return nil, err
+						}
+					} else {
+						usage += u.Size
 					}
 					}
-				} else {
-					usage += u.Size
 				}
 				}
 			}
 			}
 		}
 		}

+ 33 - 8
vendor/github.com/containerd/containerd/images/handlers.go

@@ -170,6 +170,19 @@ func ChildrenHandler(provider content.Provider) HandlerFunc {
 // the children returned by the handler and passes through the children.
 // the children returned by the handler and passes through the children.
 // Must follow a handler that returns the children to be labeled.
 // Must follow a handler that returns the children to be labeled.
 func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc {
 func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc {
+	return SetChildrenMappedLabels(manager, f, nil)
+}
+
+// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on
+// the children returned by the handler and passes through the children.
+// Must follow a handler that returns the children to be labeled.
+// The label map allows the caller to control the labels per child descriptor.
+// For returned labels, the index of the child will be appended to the end
+// except for the first index when the returned label does not end with '.'.
+func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc {
+	if labelMap == nil {
+		labelMap = ChildGCLabels
+	}
 	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 		children, err := f(ctx, desc)
 		children, err := f(ctx, desc)
 		if err != nil {
 		if err != nil {
@@ -177,14 +190,26 @@ func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc {
 		}
 		}
 
 
 		if len(children) > 0 {
 		if len(children) > 0 {
-			info := content.Info{
-				Digest: desc.Digest,
-				Labels: map[string]string{},
-			}
-			fields := []string{}
-			for i, ch := range children {
-				info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String()
-				fields = append(fields, fmt.Sprintf("labels.containerd.io/gc.ref.content.%d", i))
+			var (
+				info = content.Info{
+					Digest: desc.Digest,
+					Labels: map[string]string{},
+				}
+				fields = []string{}
+				keys   = map[string]uint{}
+			)
+			for _, ch := range children {
+				labelKeys := labelMap(ch)
+				for _, key := range labelKeys {
+					idx := keys[key]
+					keys[key] = idx + 1
+					if idx > 0 || key[len(key)-1] == '.' {
+						key = fmt.Sprintf("%s%d", key, idx)
+					}
+
+					info.Labels[key] = ch.Digest.String()
+					fields = append(fields, "labels."+key)
+				}
 			}
 			}
 
 
 			_, err := manager.Update(ctx, info, fields...)
 			_, err := manager.Update(ctx, info, fields...)

+ 1 - 1
vendor/github.com/containerd/containerd/images/image.go

@@ -362,7 +362,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 			// childless data types.
 			// childless data types.
 			return nil, nil
 			return nil, nil
 		}
 		}
-		log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
+		log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType)
 	}
 	}
 
 
 	return descs, nil
 	return descs, nil

+ 30 - 1
vendor/github.com/containerd/containerd/images/mediatypes.go

@@ -23,6 +23,7 @@ import (
 
 
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/errdefs"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
 )
 )
 
 
 // mediatype definitions for image components handled in containerd.
 // mediatype definitions for image components handled in containerd.
@@ -81,7 +82,7 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
 		}
 		}
 		return "", nil
 		return "", nil
 	default:
 	default:
-		return "", errdefs.ErrNotImplemented
+		return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType)
 	}
 	}
 }
 }
 
 
@@ -124,3 +125,31 @@ func IsKnownConfig(mt string) bool {
 	}
 	}
 	return false
 	return false
 }
 }
+
+// ChildGCLabels returns the label for a given descriptor to reference it
+func ChildGCLabels(desc ocispec.Descriptor) []string {
+	mt := desc.MediaType
+	if IsKnownConfig(mt) {
+		return []string{"containerd.io/gc.ref.content.config"}
+	}
+
+	switch mt {
+	case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		return []string{"containerd.io/gc.ref.content.m."}
+	}
+
+	if IsLayerType(mt) {
+		return []string{"containerd.io/gc.ref.content.l."}
+	}
+
+	return []string{"containerd.io/gc.ref.content."}
+}
+
+// ChildGCLabelsFilterLayers returns the labels for a given descriptor to
+// reference it, skipping layer media types
+func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string {
+	if IsLayerType(desc.MediaType) {
+		return nil
+	}
+	return ChildGCLabels(desc)
+}

+ 28 - 4
vendor/github.com/containerd/containerd/mount/mount_linux.go

@@ -363,10 +363,34 @@ func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error {
 		args = append(args, "-o", o)
 		args = append(args, "-o", o)
 	}
 	}
 	args = append(args, "-t", strings.TrimPrefix(m.Type, typePrefix))
 	args = append(args, "-t", strings.TrimPrefix(m.Type, typePrefix))
-	cmd := exec.Command(helperBinary, args...)
-	out, err := cmd.CombinedOutput()
+
+	infoBeforeMount, err := Lookup(target)
 	if err != nil {
 	if err != nil {
-		return errors.Wrapf(err, "mount helper [%s %v] failed: %q", helperBinary, args, string(out))
+		return err
 	}
 	}
-	return nil
+
+	// cmd.CombinedOutput() may intermittently return ECHILD because of our signal handling in shim.
+	// See #4387 and wait(2).
+	const retriesOnECHILD = 10
+	for i := 0; i < retriesOnECHILD; i++ {
+		cmd := exec.Command(helperBinary, args...)
+		out, err := cmd.CombinedOutput()
+		if err == nil {
+			return nil
+		}
+		if !errors.Is(err, unix.ECHILD) {
+			return errors.Wrapf(err, "mount helper [%s %v] failed: %q", helperBinary, args, string(out))
+		}
+		// We got ECHILD, we are not sure whether the mount was successful.
+		// If the mount ID has changed, we are sure we got some new mount, but still not sure it is fully completed.
+		// So we attempt to unmount the new mount before retrying.
+		infoAfterMount, err := Lookup(target)
+		if err != nil {
+			return err
+		}
+		if infoAfterMount.ID != infoBeforeMount.ID {
+			_ = unmount(target, 0)
+		}
+	}
+	return errors.Errorf("mount helper [%s %v] failed with ECHILD (retried %d times)", helperBinary, args, retriesOnECHILD)
 }
 }

+ 2 - 2
vendor/github.com/containerd/containerd/mount/mountinfo_linux.go

@@ -81,11 +81,11 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
 		p.Major, _ = strconv.Atoi(mm[0])
 		p.Major, _ = strconv.Atoi(mm[0])
 		p.Minor, _ = strconv.Atoi(mm[1])
 		p.Minor, _ = strconv.Atoi(mm[1])
 
 
-		p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
+		p.Root, err = strconv.Unquote(`"` + strings.Replace(fields[3], `"`, `\"`, -1) + `"`)
 		if err != nil {
 		if err != nil {
 			return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote root field", fields[3])
 			return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote root field", fields[3])
 		}
 		}
-		p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
+		p.Mountpoint, err = strconv.Unquote(`"` + strings.Replace(fields[4], `"`, `\"`, -1) + `"`)
 		if err != nil {
 		if err != nil {
 			return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote mount point field", fields[4])
 			return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote mount point field", fields[4])
 		}
 		}

+ 7 - 0
vendor/github.com/containerd/containerd/oci/spec_opts_unix.go

@@ -118,3 +118,10 @@ func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
 		GID:      &stat.Gid,
 		GID:      &stat.Gid,
 	}, nil
 	}, nil
 }
 }
+
+// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period
+func WithCPUCFS(quota int64, period uint64) SpecOpts {
+	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+		return nil
+	}
+}

+ 1 - 1
vendor/github.com/containerd/containerd/oci/spec_opts_windows.go

@@ -52,7 +52,7 @@ func WithWindowsIgnoreFlushesDuringBoot() SpecOpts {
 	}
 	}
 }
 }
 
 
-// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.IgnoreFlushesDuringBoot`.
+// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.Network.AllowUnqualifiedDNSQuery`.
 func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
 func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
 	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
 	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
 		if s.Windows == nil {
 		if s.Windows == nil {

+ 4 - 4
vendor/github.com/containerd/containerd/pkg/process/init.go

@@ -27,7 +27,6 @@ import (
 	"path/filepath"
 	"path/filepath"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
-	"syscall"
 	"time"
 	"time"
 
 
 	"github.com/containerd/console"
 	"github.com/containerd/console"
@@ -39,6 +38,7 @@ import (
 	google_protobuf "github.com/gogo/protobuf/types"
 	google_protobuf "github.com/gogo/protobuf/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
 )
 )
 
 
 // Init represents an initial process for a container
 // Init represents an initial process for a container
@@ -87,7 +87,7 @@ func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Ru
 		Command:       runtime,
 		Command:       runtime,
 		Log:           filepath.Join(path, "log.json"),
 		Log:           filepath.Join(path, "log.json"),
 		LogFormat:     runc.JSON,
 		LogFormat:     runc.JSON,
-		PdeathSignal:  syscall.SIGKILL,
+		PdeathSignal:  unix.SIGKILL,
 		Root:          filepath.Join(root, namespace),
 		Root:          filepath.Join(root, namespace),
 		Criu:          criu,
 		Criu:          criu,
 		SystemdCgroup: systemd,
 		SystemdCgroup: systemd,
@@ -176,7 +176,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
 }
 }
 
 
 func (p *Init) openStdin(path string) error {
 func (p *Init) openStdin(path string) error {
-	sc, err := fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+	sc, err := fifo.OpenFifo(context.Background(), path, unix.O_WRONLY|unix.O_NONBLOCK, 0)
 	if err != nil {
 	if err != nil {
 		return errors.Wrapf(err, "failed to open stdin fifo %s", path)
 		return errors.Wrapf(err, "failed to open stdin fifo %s", path)
 	}
 	}
@@ -361,7 +361,7 @@ func (p *Init) KillAll(ctx context.Context) error {
 	p.mu.Lock()
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	defer p.mu.Unlock()
 
 
-	err := p.runtime.Kill(ctx, p.id, int(syscall.SIGKILL), &runc.KillOpts{
+	err := p.runtime.Kill(ctx, p.id, int(unix.SIGKILL), &runc.KillOpts{
 		All: true,
 		All: true,
 	})
 	})
 	return p.runtimeError(err, "OCI runtime killall failed")
 	return p.runtimeError(err, "OCI runtime killall failed")

+ 1 - 1
vendor/github.com/containerd/containerd/pkg/process/io.go

@@ -381,7 +381,7 @@ func (b *binaryIO) cancel() error {
 		return result.ErrorOrNil()
 		return result.ErrorOrNil()
 	}
 	}
 
 
-	done := make(chan error)
+	done := make(chan error, 1)
 	go func() {
 	go func() {
 		done <- b.cmd.Wait()
 		done <- b.cmd.Wait()
 	}()
 	}()

+ 2 - 0
vendor/github.com/containerd/containerd/pkg/process/utils.go

@@ -137,6 +137,8 @@ func checkKillError(err error) error {
 		strings.Contains(strings.ToLower(err.Error()), "no such process") ||
 		strings.Contains(strings.ToLower(err.Error()), "no such process") ||
 		err == unix.ESRCH {
 		err == unix.ESRCH {
 		return errors.Wrapf(errdefs.ErrNotFound, "process already finished")
 		return errors.Wrapf(errdefs.ErrNotFound, "process already finished")
+	} else if strings.Contains(err.Error(), "does not exist") {
+		return errors.Wrapf(errdefs.ErrNotFound, "no such container")
 	}
 	}
 	return errors.Wrapf(err, "unknown error after kill")
 	return errors.Wrapf(err, "unknown error after kill")
 }
 }

+ 2 - 2
vendor/github.com/containerd/containerd/platforms/cpuinfo.go

@@ -74,8 +74,8 @@ func getCPUInfo(pattern string) (info string, err error) {
 }
 }
 
 
 func getCPUVariant() string {
 func getCPUVariant() string {
-	if runtime.GOOS == "windows" {
-		// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+	if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+		// Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
 		// runtime.GOARCH to determine the variants
 		// runtime.GOARCH to determine the variants
 		var variant string
 		var variant string
 		switch runtime.GOARCH {
 		switch runtime.GOARCH {

+ 1 - 1
vendor/github.com/containerd/containerd/pull.go

@@ -159,7 +159,7 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
 		// Get all the children for a descriptor
 		// Get all the children for a descriptor
 		childrenHandler := images.ChildrenHandler(store)
 		childrenHandler := images.ChildrenHandler(store)
 		// Set any children labels for that content
 		// Set any children labels for that content
-		childrenHandler = images.SetChildrenLabels(store, childrenHandler)
+		childrenHandler = images.SetChildrenMappedLabels(store, childrenHandler, rCtx.ChildLabelMap)
 		if rCtx.AllMetadata {
 		if rCtx.AllMetadata {
 			// Filter manifests by platforms but allow to handle manifest
 			// Filter manifests by platforms but allow to handle manifest
 			// and configuration for not-target platforms
 			// and configuration for not-target platforms

+ 1 - 1
vendor/github.com/containerd/containerd/remotes/docker/pusher.go

@@ -235,7 +235,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 
 
 	go func() {
 	go func() {
 		defer close(respC)
 		defer close(respC)
-		resp, err = req.do(ctx)
+		resp, err := req.do(ctx)
 		if err != nil {
 		if err != nil {
 			pr.CloseWithError(err)
 			pr.CloseWithError(err)
 			return
 			return

+ 1 - 0
vendor/github.com/containerd/containerd/remotes/docker/registry.go

@@ -70,6 +70,7 @@ type RegistryHost struct {
 	Scheme       string
 	Scheme       string
 	Path         string
 	Path         string
 	Capabilities HostCapabilities
 	Capabilities HostCapabilities
+	Header       http.Header
 }
 }
 
 
 // RegistryHosts fetches the registry hosts for a given namespace,
 // RegistryHosts fetches the registry hosts for a given namespace,

+ 3 - 0
vendor/github.com/containerd/containerd/remotes/docker/resolver.go

@@ -450,6 +450,9 @@ func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *re
 	for key, value := range r.header {
 	for key, value := range r.header {
 		header[key] = append(header[key], value...)
 		header[key] = append(header[key], value...)
 	}
 	}
+	for key, value := range host.Header {
+		header[key] = append(header[key], value...)
+	}
 	parts := append([]string{"/", host.Path, r.namespace}, ps...)
 	parts := append([]string{"/", host.Path, r.namespace}, ps...)
 	p := path.Join(parts...)
 	p := path.Join(parts...)
 	// Join strips trailing slash, re-add ending "/" if included
 	// Join strips trailing slash, re-add ending "/" if included

+ 15 - 5
vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go

@@ -324,21 +324,31 @@ func (c *Client) signalShim(ctx context.Context, sig syscall.Signal) error {
 	select {
 	select {
 	case <-ctx.Done():
 	case <-ctx.Done():
 		return ctx.Err()
 		return ctx.Err()
-	case <-c.waitForExit(pid):
+	case <-c.waitForExit(ctx, pid):
 		return nil
 		return nil
 	}
 	}
 }
 }
 
 
-func (c *Client) waitForExit(pid int) <-chan struct{} {
-	c.exitOnce.Do(func() {
+func (c *Client) waitForExit(ctx context.Context, pid int) <-chan struct{} {
+	go c.exitOnce.Do(func() {
+		defer close(c.exitCh)
+
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
+
 		for {
 		for {
 			// use kill(pid, 0) here because the shim could have been reparented
 			// use kill(pid, 0) here because the shim could have been reparented
 			// and we are no longer able to waitpid(pid, ...) on the shim
 			// and we are no longer able to waitpid(pid, ...) on the shim
 			if err := unix.Kill(pid, 0); err == unix.ESRCH {
 			if err := unix.Kill(pid, 0); err == unix.ESRCH {
-				close(c.exitCh)
 				return
 				return
 			}
 			}
-			time.Sleep(10 * time.Millisecond)
+
+			select {
+			case <-ticker.C:
+			case <-ctx.Done():
+				log.G(ctx).WithField("pid", pid).Warn("timed out while waiting for shim to exit")
+				return
+			}
 		}
 		}
 	})
 	})
 	return c.exitCh
 	return c.exitCh

+ 35 - 0
vendor/github.com/containerd/containerd/snapshotter_opts_unix.go

@@ -0,0 +1,35 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package containerd
+
+import (
+	"fmt"
+
+	"github.com/containerd/containerd/snapshots"
+)
+
+// WithRemapperLabels creates the labels used by any supporting snapshotter
+// to shift the filesystem ownership (user namespace mapping) automatically; currently
+// supported by the fuse-overlayfs snapshotter
+func WithRemapperLabels(ctrUID, hostUID, ctrGID, hostGID, length uint32) snapshots.Opt {
+	return snapshots.WithLabels(map[string]string{
+		"containerd.io/snapshot/uidmapping": fmt.Sprintf("%d:%d:%d", ctrUID, hostUID, length),
+		"containerd.io/snapshot/gidmapping": fmt.Sprintf("%d:%d:%d", ctrGID, hostGID, length),
+	})
+}

+ 9 - 0
vendor/github.com/containerd/containerd/task.go

@@ -35,6 +35,7 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
@@ -175,18 +176,26 @@ type Task interface {
 	// For the built in Linux runtime, github.com/containerd/cgroups.Metrics
 	// For the built in Linux runtime, github.com/containerd/cgroups.Metrics
 	// are returned in protobuf format
 	// are returned in protobuf format
 	Metrics(context.Context) (*types.Metric, error)
 	Metrics(context.Context) (*types.Metric, error)
+	// Spec returns the current OCI specification for the task
+	Spec(context.Context) (*oci.Spec, error)
 }
 }
 
 
 var _ = (Task)(&task{})
 var _ = (Task)(&task{})
 
 
 type task struct {
 type task struct {
 	client *Client
 	client *Client
+	c      Container
 
 
 	io  cio.IO
 	io  cio.IO
 	id  string
 	id  string
 	pid uint32
 	pid uint32
 }
 }
 
 
+// Spec returns the current OCI specification for the task
+func (t *task) Spec(ctx context.Context) (*oci.Spec, error) {
+	return t.c.Spec(ctx)
+}
+
 // ID of the task
 // ID of the task
 func (t *task) ID() string {
 func (t *task) ID() string {
 	return t.id
 	return t.id

+ 2 - 2
vendor/github.com/containerd/containerd/unpacker.go

@@ -178,13 +178,13 @@ EachLayer:
 				fetchC[i] = make(chan struct{})
 				fetchC[i] = make(chan struct{})
 			}
 			}
 
 
-			go func() {
+			go func(i int) {
 				err := u.fetch(ctx, h, layers[i:], fetchC)
 				err := u.fetch(ctx, h, layers[i:], fetchC)
 				if err != nil {
 				if err != nil {
 					fetchErr <- err
 					fetchErr <- err
 				}
 				}
 				close(fetchErr)
 				close(fetchErr)
-			}()
+			}(i)
 		}
 		}
 
 
 		select {
 		select {

+ 82 - 82
vendor/github.com/containerd/containerd/vendor.conf

@@ -1,102 +1,102 @@
-github.com/beorn7/perks                             37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
-github.com/BurntSushi/toml                          3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1
-github.com/cespare/xxhash/v2                        d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
+github.com/beorn7/perks                             v1.0.1
+github.com/BurntSushi/toml                          v0.3.1
+github.com/cespare/xxhash/v2                        v2.1.1
 github.com/containerd/btrfs                         153935315f4ab9be5bf03650a1341454b05efa5d
 github.com/containerd/btrfs                         153935315f4ab9be5bf03650a1341454b05efa5d
-github.com/containerd/cgroups                       b4448137398923af7f4918b8b2ad8249172ca7a6
-github.com/containerd/console                       8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6 # v1.0.0
-github.com/containerd/continuity                    0ec596719c75bfd42908850990acea594b7593ac
-github.com/containerd/fifo                          bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13
-github.com/containerd/go-runc                       a5c2862aed5e6358b305b0e16bfce58e0549b1cd
-github.com/containerd/ttrpc                         72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1
-github.com/containerd/typeurl                       cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1
-github.com/coreos/go-systemd/v22                    2d78030078ef61b3cae27f42ad6d0e46db51b339 # v22.0.0
-github.com/cpuguy83/go-md2man                       7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19 # v1.0.10
+github.com/containerd/cgroups                       318312a373405e5e91134d8063d04d59768a1bff
+github.com/containerd/console                       v1.0.0
+github.com/containerd/continuity                    efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
+github.com/containerd/fifo                          f15a3290365b9d2627d189e619ab4008e0069caf
+github.com/containerd/go-runc                       7016d3ce2328dd2cb1192b2076ebd565c4e8df0c
+github.com/containerd/ttrpc                         v1.0.1
+github.com/containerd/typeurl                       v1.0.1
+github.com/coreos/go-systemd/v22                    v22.1.0
+github.com/cpuguy83/go-md2man/v2                    v2.0.0
 github.com/docker/go-events                         e31b211e4f1cd09aa76fe4ac244571fab96ae47f
 github.com/docker/go-events                         e31b211e4f1cd09aa76fe4ac244571fab96ae47f
-github.com/docker/go-metrics                        b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1
-github.com/docker/go-units                          519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
-github.com/godbus/dbus/v5                           37bf87eef99d69c4f1d3528bd66e3a87dc201472 # v5.0.3
-github.com/gogo/googleapis                          01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
-github.com/gogo/protobuf                            5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
-github.com/golang/protobuf                          d23c5127dc24889085f8ccea5c9d560a57a879d8 # v1.3.3
-github.com/google/go-cmp                            3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
-github.com/google/uuid                              0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
-github.com/grpc-ecosystem/go-grpc-prometheus        c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0
-github.com/hashicorp/errwrap                        8a6fb523712970c966eefc6b39ed2c5e74880354 # v1.0.0
-github.com/hashicorp/go-multierror                  886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0
-github.com/hashicorp/golang-lru                     7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3
-github.com/imdario/mergo                            7c29201646fa3de8506f701213473dd407f19646 # v0.3.7
-github.com/konsorten/go-windows-terminal-sequences  edb144dfd453055e1e49a3d8b410a660b5a87613 # v1.0.3
-github.com/matttproud/golang_protobuf_extensions    c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1
-github.com/Microsoft/go-winio                       6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14
-github.com/Microsoft/hcsshim                        5bc557dd210ff2caf615e6e22d398123de77fc11 # v0.8.9
-github.com/opencontainers/go-digest                 c9281466c8b2f606084ac71339773efd177436e7
-github.com/opencontainers/image-spec                d60099175f88c47cd379c4738d158884749ed235 # v1.0.1
-github.com/opencontainers/runc                      dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
-github.com/opencontainers/runtime-spec              c4ee7d12c742ffe806cd9350b6af3b4b19faed6f # v1.0.2
-github.com/pkg/errors                               614d223910a179a466c1767a985424175c39b465 # v0.9.1
-github.com/prometheus/client_golang                 c42bebe5a5cddfc6b28cd639103369d8a75dfa89 # v1.3.0
-github.com/prometheus/client_model                  d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
-github.com/prometheus/common                        287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
-github.com/prometheus/procfs                        6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
-github.com/russross/blackfriday                     05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2
-github.com/sirupsen/logrus                          60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0
+github.com/docker/go-metrics                        v0.0.1
+github.com/docker/go-units                          v0.4.0
+github.com/godbus/dbus/v5                           v5.0.3
+github.com/gogo/googleapis                          v1.3.2
+github.com/gogo/protobuf                            v1.3.1
+github.com/golang/protobuf                          v1.3.5
+github.com/google/go-cmp                            v0.2.0
+github.com/google/uuid                              v1.1.1
+github.com/grpc-ecosystem/go-grpc-prometheus        v1.2.0
+github.com/hashicorp/errwrap                        v1.0.0
+github.com/hashicorp/go-multierror                  v1.0.0
+github.com/hashicorp/golang-lru                     v0.5.3
+github.com/imdario/mergo                            v0.3.7
+github.com/konsorten/go-windows-terminal-sequences  v1.0.3
+github.com/matttproud/golang_protobuf_extensions    v1.0.1
+github.com/Microsoft/go-winio                       v0.4.14
+github.com/Microsoft/hcsshim                        v0.8.9
+github.com/opencontainers/go-digest                 v1.0.0
+github.com/opencontainers/image-spec                v1.0.1
+github.com/opencontainers/runc                      67169a9d43456ff0d5ae12b967acb8e366e2f181 # v1.0.0-rc91-48-g67169a9d
+github.com/opencontainers/runtime-spec              237cc4f519e2e8f9b235bacccfa8ef5a84df2875 # v1.0.3-0.20200520003142-237cc4f519e2
+github.com/pkg/errors                               v0.9.1
+github.com/prometheus/client_golang                 v1.6.0
+github.com/prometheus/client_model                  v0.2.0
+github.com/prometheus/common                        v0.9.1
+github.com/prometheus/procfs                        v0.0.11
+github.com/russross/blackfriday/v2                  v2.0.1
+github.com/shurcooL/sanitized_anchor_name           v1.0.0
+github.com/sirupsen/logrus                          v1.6.0
 github.com/syndtr/gocapability                      d98352740cb2c55f81556b63d4a1ec64c5a319c2
 github.com/syndtr/gocapability                      d98352740cb2c55f81556b63d4a1ec64c5a319c2
-github.com/urfave/cli                               bfe2e925cfb6d44b40ad3a779165ea7e8aff9212 # v1.22.0
-go.etcd.io/bbolt                                    a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3
-go.opencensus.io                                    9c377598961b706d1542bd2d84d538b5094d596e # v0.22.0
+github.com/urfave/cli                               v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
+go.etcd.io/bbolt                                    v1.3.5
+go.opencensus.io                                    v0.22.0
 golang.org/x/net                                    f3200d17e092c607f615320ecaad13d87ad9a2b3
 golang.org/x/net                                    f3200d17e092c607f615320ecaad13d87ad9a2b3
 golang.org/x/sync                                   42b317875d0fa942474b76e1b46a6060d720ae6e
 golang.org/x/sync                                   42b317875d0fa942474b76e1b46a6060d720ae6e
-golang.org/x/sys                                    5c8b2ff67527cb88b770f693cebf3799036d8bc0
-golang.org/x/text                                   19e51611da83d6be54ddafce4a4af510cb3e9ea4
+golang.org/x/sys                                    9dae0f8f577553e0f21298e18926efc9644c281d
+golang.org/x/text                                   v0.3.3
 google.golang.org/genproto                          e50cd9704f63023d62cd06a1994b98227fc4d21a
 google.golang.org/genproto                          e50cd9704f63023d62cd06a1994b98227fc4d21a
-google.golang.org/grpc                              f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1
-gotest.tools/v3                                     bb0d8a963040ea5048dcef1a14d8f8b58a33d4b3 # v3.0.2
+google.golang.org/grpc                              v1.27.1
+gotest.tools/v3                                     v3.0.2
 
 
 # cgroups dependencies
 # cgroups dependencies
-github.com/cilium/ebpf                              4032b1d8aae306b7bb94a2a11002932caf88c644
+github.com/cilium/ebpf                              1c8d4c9ef7759622653a1d319284a44652333b28
 
 
 # cri dependencies
 # cri dependencies
-github.com/containerd/cri                           65830369b6b2b4edc454bf5cebbd9b76c1c1ac66 # master
-github.com/davecgh/go-spew                          8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
-github.com/docker/distribution                      0d3efadf0154c2b8a4e7b6621fff9809655cc580
+github.com/containerd/cri                           8448b92d237e877bed1e4aa7a0baf0dee234dbcb # master
+github.com/davecgh/go-spew                          v1.1.1
 github.com/docker/docker                            4634ce647cf2ce2c6031129ccd109e557244986f
 github.com/docker/docker                            4634ce647cf2ce2c6031129ccd109e557244986f
 github.com/docker/spdystream                        449fdfce4d962303d702fec724ef0ad181c92528
 github.com/docker/spdystream                        449fdfce4d962303d702fec724ef0ad181c92528
-github.com/emicklei/go-restful                      b993709ae1a4f6dd19cfa475232614441b11c9d5 # v2.9.5
-github.com/google/gofuzz                            db92cf7ae75e4a7a28abc005addab2b394362888 # v1.1.0
-github.com/json-iterator/go                         03217c3e97663914aec3faafde50d081f197a0a2 # v1.1.8
-github.com/modern-go/concurrent                     bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
-github.com/modern-go/reflect2                       4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
-github.com/opencontainers/selinux                   0d49ba2a6aae052c614dfe5de62a158711a6c461 # 1.5.1
-github.com/seccomp/libseccomp-golang                689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
-github.com/stretchr/testify                         221dbe5ed46703ee255b1da0dec05086f5035f62 # v1.4.0
-github.com/tchap/go-patricia                        666120de432aea38ab06bd5c818f04f4129882c9 # v2.2.6
+github.com/emicklei/go-restful                      v2.9.5
+github.com/go-logr/logr                             v0.2.0
+github.com/google/gofuzz                            v1.1.0
+github.com/json-iterator/go                         v1.1.9
+github.com/modern-go/concurrent                     1.0.3
+github.com/modern-go/reflect2                       v1.0.1
+github.com/opencontainers/selinux                   v1.6.0
+github.com/seccomp/libseccomp-golang                v0.9.1
+github.com/tchap/go-patricia                        v2.2.6
+github.com/willf/bitset                             d5bec3311243426a3c6d1b7a795f24b17c686dbb # 1.1.10+ used by selinux pkg
 golang.org/x/crypto                                 bac4c82f69751a6dd76e702d54b3ceb88adab236
 golang.org/x/crypto                                 bac4c82f69751a6dd76e702d54b3ceb88adab236
-golang.org/x/oauth2                                 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33
-golang.org/x/time                                   9d24e82272b4f38b78bc8cff74fa936d31ccd8ef
-gopkg.in/inf.v0                                     d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
-gopkg.in/yaml.v2                                    53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8
-k8s.io/api                                          d2dce8e1788e4be2be3a62b6439b3eaa087df0df # v0.18.0
-k8s.io/apimachinery                                 105e0c6d63f10531ed07f3b5a2195771a0fa444b # v0.18.0
-k8s.io/apiserver                                    5c8e895629a454efd75a453d1dea5b8142db0013 # v0.18.0
-k8s.io/client-go                                    0b19784585bd0a0ee5509855829ead81feaa2bdc # v0.18.0
-k8s.io/cri-api                                      3d1680d8d202aa12c5dc5689170c3c03a488d35b # v0.18.0
-k8s.io/klog                                         2ca9ad30301bf30a8a6e0fa2110db6b8df699a91 # v1.0.0
-k8s.io/kubernetes                                   9e991415386e4cf155a24b1da15becaa390438d8 # v1.18.0
-k8s.io/utils                                        a9aa75ae1b89e1b992c33383f48e942d97e52dae
-sigs.k8s.io/structured-merge-diff/v3                877aee05330847a873a1a8998b40e12a1e0fde25 # v3.0.0
-sigs.k8s.io/yaml                                    9fc95527decd95bb9d28cc2eab08179b2d0f6971 # v1.2.0
+golang.org/x/oauth2                                 858c2ad4c8b6c5d10852cb89079f6ca1c7309787
+golang.org/x/time                                   555d28b269f0569763d25dbe1a237ae74c6bcc82
+gopkg.in/inf.v0                                     v0.9.1
+gopkg.in/yaml.v2                                    v2.2.8
+k8s.io/api                                          v0.19.0-beta.2
+k8s.io/apimachinery                                 v0.19.0-beta.2
+k8s.io/apiserver                                    v0.19.0-beta.2
+k8s.io/client-go                                    v0.19.0-beta.2
+k8s.io/cri-api                                      v0.19.0-beta.2
+k8s.io/klog/v2                                      v2.2.0
+k8s.io/utils                                        2df71ebbae66f39338aed4cd0bb82d2212ee33cc
+sigs.k8s.io/structured-merge-diff/v3                v3.0.0
+sigs.k8s.io/yaml                                    v1.2.0
 
 
 # cni dependencies
 # cni dependencies
-github.com/containerd/go-cni                        0d360c50b10b350b6bb23863fd4dfb1c232b01c9
-github.com/containernetworking/cni                  4cfb7b568922a3c79a23e438dc52fe537fc9687e # v0.7.1
-github.com/containernetworking/plugins              9f96827c7cabb03f21d86326000c00f61e181f6a # v0.7.6
-github.com/fsnotify/fsnotify                        4bf2d1fec78374803a39307bfb8d340688f4f28e # v1.4.8
+github.com/containerd/go-cni                        v1.0.0
+github.com/containernetworking/cni                  v0.7.1
+github.com/containernetworking/plugins              v0.7.6
+github.com/fsnotify/fsnotify                        v1.4.9
 
 
 # image decrypt depedencies
 # image decrypt depedencies
-github.com/containerd/imgcrypt                      9e761ccd6069fb707ec9493435f31475b5524b38 # v1.0.1
-github.com/containers/ocicrypt                      0343cc6053fd65069df55bce6838096e09b4033a # v1.0.1 from containerd/imgcrypt
-github.com/fullsailor/pkcs7                         8306686428a5fe132eac8cb7c4848af725098bd4 #        from containers/ocicrypt
-gopkg.in/square/go-jose.v2                          730df5f748271903322feb182be83b43ebbbe27d # v2.3.1 from containers/ocicrypt
+github.com/containerd/imgcrypt                      v1.0.1
+github.com/containers/ocicrypt                      v1.0.1
+github.com/fullsailor/pkcs7                         8306686428a5fe132eac8cb7c4848af725098bd4
+gopkg.in/square/go-jose.v2                          v2.3.1
 
 
 # zfs dependencies
 # zfs dependencies
 github.com/containerd/zfs                           9abf673ca6ff9ab8d9bd776a4ceff8f6dc699c3d
 github.com/containerd/zfs                           9abf673ca6ff9ab8d9bd776a4ceff8f6dc699c3d

+ 1 - 1
vendor/github.com/containerd/containerd/version/version.go

@@ -23,7 +23,7 @@ var (
 	Package = "github.com/containerd/containerd"
 	Package = "github.com/containerd/containerd"
 
 
 	// Version holds the complete version number. Filled in at linking time.
 	// Version holds the complete version number. Filled in at linking time.
-	Version = "1.3.0+unknown"
+	Version = "1.4.0-beta.2+unknown"
 
 
 	// Revision is filled with the VCS (e.g. git) revision being used to build
 	// Revision is filled with the VCS (e.g. git) revision being used to build
 	// the program at linking time.
 	// the program at linking time.

+ 1 - 1
vendor/github.com/containerd/fifo/fifo.go

@@ -17,13 +17,13 @@
 package fifo
 package fifo
 
 
 import (
 import (
+	"context"
 	"io"
 	"io"
 	"os"
 	"os"
 	"runtime"
 	"runtime"
 	"sync"
 	"sync"
 	"syscall"
 	"syscall"
 
 
-	"context"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 

+ 6 - 3
vendor/github.com/containerd/fifo/handle_linux.go

@@ -27,6 +27,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 
+//nolint:golint
 const O_PATH = 010000000
 const O_PATH = 010000000
 
 
 type handle struct {
 type handle struct {
@@ -56,9 +57,10 @@ func getHandle(fn string) (*handle, error) {
 	h := &handle{
 	h := &handle{
 		f:    f,
 		f:    f,
 		name: fn,
 		name: fn,
-		dev:  uint64(stat.Dev),
-		ino:  stat.Ino,
-		fd:   fd,
+		//nolint:unconvert
+		dev: uint64(stat.Dev),
+		ino: stat.Ino,
+		fd:  fd,
 	}
 	}
 
 
 	// check /proc just in case
 	// check /proc just in case
@@ -83,6 +85,7 @@ func (h *handle) Path() (string, error) {
 	if err := syscall.Stat(h.procPath(), &stat); err != nil {
 	if err := syscall.Stat(h.procPath(), &stat); err != nil {
 		return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
 		return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
 	}
 	}
+	//nolint:unconvert
 	if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
 	if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
 		return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
 		return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
 	}
 	}

+ 1 - 1
vendor/github.com/containerd/fifo/raw.go

@@ -23,7 +23,7 @@ import (
 )
 )
 
 
 // SyscallConn provides raw access to the fifo's underlying filedescrptor.
 // SyscallConn provides raw access to the fifo's underlying filedescrptor.
-// See syscall.Conn for guarentees provided by this interface.
+// See syscall.Conn for guarantees provided by this interface.
 func (f *fifo) SyscallConn() (syscall.RawConn, error) {
 func (f *fifo) SyscallConn() (syscall.RawConn, error) {
 	// deterministic check for closed
 	// deterministic check for closed
 	select {
 	select {

+ 1 - 1
vendor/github.com/golang/protobuf/go.mod

@@ -1,3 +1,3 @@
 module github.com/golang/protobuf
 module github.com/golang/protobuf
 
 
-go 1.12
+go 1.9

+ 7 - 5
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go

@@ -1376,8 +1376,8 @@ type FileOptions struct {
 	// determining the namespace.
 	// determining the namespace.
 	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
 	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
 	// Use this option to change the namespace of php generated metadata classes.
 	// Use this option to change the namespace of php generated metadata classes.
-	// Default is empty. When this option is empty, the proto file name will be used
-	// for determining the namespace.
+	// Default is empty. When this option is empty, the proto file name will be
+	// used for determining the namespace.
 	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
 	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
 	// Use this option to change the package of ruby generated classes. Default
 	// Use this option to change the package of ruby generated classes. Default
 	// is empty. When this option is not set, the package name will be used for
 	// is empty. When this option is not set, the package name will be used for
@@ -1627,7 +1627,7 @@ type MessageOptions struct {
 	//
 	//
 	// Implementations may choose not to generate the map_entry=true message, but
 	// Implementations may choose not to generate the map_entry=true message, but
 	// use a native map in the target language to hold the keys and values.
 	// use a native map in the target language to hold the keys and values.
-	// The reflection APIs in such implementions still need to work as
+	// The reflection APIs in such implementations still need to work as
 	// if the field is a repeated message field.
 	// if the field is a repeated message field.
 	//
 	//
 	// NOTE: Do not set the option in .proto files. Always use the maps syntax
 	// NOTE: Do not set the option in .proto files. Always use the maps syntax
@@ -2377,7 +2377,7 @@ type SourceCodeInfo struct {
 	//   beginning of the "extend" block and is shared by all extensions within
 	//   beginning of the "extend" block and is shared by all extensions within
 	//   the block.
 	//   the block.
 	// - Just because a location's span is a subset of some other location's span
 	// - Just because a location's span is a subset of some other location's span
-	//   does not mean that it is a descendent.  For example, a "group" defines
+	//   does not mean that it is a descendant.  For example, a "group" defines
 	//   both a type and a field in a single declaration.  Thus, the locations
 	//   both a type and a field in a single declaration.  Thus, the locations
 	//   corresponding to the type and field and their components will overlap.
 	//   corresponding to the type and field and their components will overlap.
 	// - Code which tries to interpret locations should probably be designed to
 	// - Code which tries to interpret locations should probably be designed to
@@ -2718,7 +2718,9 @@ func init() {
 	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
 	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
 }
 }
 
 
-func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
+func init() {
+	proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177)
+}
 
 
 var fileDescriptor_e5baabe45344a177 = []byte{
 var fileDescriptor_e5baabe45344a177 = []byte{
 	// 2589 bytes of a gzipped FileDescriptorProto
 	// 2589 bytes of a gzipped FileDescriptorProto

+ 71 - 69
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto

@@ -40,6 +40,7 @@
 syntax = "proto2";
 syntax = "proto2";
 
 
 package google.protobuf;
 package google.protobuf;
+
 option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
 option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
 option java_package = "com.google.protobuf";
 option java_package = "com.google.protobuf";
 option java_outer_classname = "DescriptorProtos";
 option java_outer_classname = "DescriptorProtos";
@@ -59,8 +60,8 @@ message FileDescriptorSet {
 
 
 // Describes a complete .proto file.
 // Describes a complete .proto file.
 message FileDescriptorProto {
 message FileDescriptorProto {
-  optional string name = 1;       // file name, relative to root of source tree
-  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+  optional string name = 1;     // file name, relative to root of source tree
+  optional string package = 2;  // e.g. "foo", "foo.bar", etc.
 
 
   // Names of files imported by this file.
   // Names of files imported by this file.
   repeated string dependency = 3;
   repeated string dependency = 3;
@@ -100,8 +101,8 @@ message DescriptorProto {
   repeated EnumDescriptorProto enum_type = 4;
   repeated EnumDescriptorProto enum_type = 4;
 
 
   message ExtensionRange {
   message ExtensionRange {
-    optional int32 start = 1;
-    optional int32 end = 2;
+    optional int32 start = 1;  // Inclusive.
+    optional int32 end = 2;    // Exclusive.
 
 
     optional ExtensionRangeOptions options = 3;
     optional ExtensionRangeOptions options = 3;
   }
   }
@@ -115,8 +116,8 @@ message DescriptorProto {
   // fields or extension ranges in the same message. Reserved ranges may
   // fields or extension ranges in the same message. Reserved ranges may
   // not overlap.
   // not overlap.
   message ReservedRange {
   message ReservedRange {
-    optional int32 start = 1; // Inclusive.
-    optional int32 end = 2;   // Exclusive.
+    optional int32 start = 1;  // Inclusive.
+    optional int32 end = 2;    // Exclusive.
   }
   }
   repeated ReservedRange reserved_range = 9;
   repeated ReservedRange reserved_range = 9;
   // Reserved field names, which may not be used by fields in the same message.
   // Reserved field names, which may not be used by fields in the same message.
@@ -137,42 +138,42 @@ message FieldDescriptorProto {
   enum Type {
   enum Type {
     // 0 is reserved for errors.
     // 0 is reserved for errors.
     // Order is weird for historical reasons.
     // Order is weird for historical reasons.
-    TYPE_DOUBLE         = 1;
-    TYPE_FLOAT          = 2;
+    TYPE_DOUBLE = 1;
+    TYPE_FLOAT = 2;
     // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
     // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
     // negative values are likely.
     // negative values are likely.
-    TYPE_INT64          = 3;
-    TYPE_UINT64         = 4;
+    TYPE_INT64 = 3;
+    TYPE_UINT64 = 4;
     // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
     // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
     // negative values are likely.
     // negative values are likely.
-    TYPE_INT32          = 5;
-    TYPE_FIXED64        = 6;
-    TYPE_FIXED32        = 7;
-    TYPE_BOOL           = 8;
-    TYPE_STRING         = 9;
+    TYPE_INT32 = 5;
+    TYPE_FIXED64 = 6;
+    TYPE_FIXED32 = 7;
+    TYPE_BOOL = 8;
+    TYPE_STRING = 9;
     // Tag-delimited aggregate.
     // Tag-delimited aggregate.
     // Group type is deprecated and not supported in proto3. However, Proto3
     // Group type is deprecated and not supported in proto3. However, Proto3
     // implementations should still be able to parse the group wire format and
     // implementations should still be able to parse the group wire format and
     // treat group fields as unknown fields.
     // treat group fields as unknown fields.
-    TYPE_GROUP          = 10;
-    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+    TYPE_GROUP = 10;
+    TYPE_MESSAGE = 11;  // Length-delimited aggregate.
 
 
     // New in version 2.
     // New in version 2.
-    TYPE_BYTES          = 12;
-    TYPE_UINT32         = 13;
-    TYPE_ENUM           = 14;
-    TYPE_SFIXED32       = 15;
-    TYPE_SFIXED64       = 16;
-    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
-    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
-  };
+    TYPE_BYTES = 12;
+    TYPE_UINT32 = 13;
+    TYPE_ENUM = 14;
+    TYPE_SFIXED32 = 15;
+    TYPE_SFIXED64 = 16;
+    TYPE_SINT32 = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64 = 18;  // Uses ZigZag encoding.
+  }
 
 
   enum Label {
   enum Label {
     // 0 is reserved for errors
     // 0 is reserved for errors
-    LABEL_OPTIONAL      = 1;
-    LABEL_REQUIRED      = 2;
-    LABEL_REPEATED      = 3;
-  };
+    LABEL_OPTIONAL = 1;
+    LABEL_REQUIRED = 2;
+    LABEL_REPEATED = 3;
+  }
 
 
   optional string name = 1;
   optional string name = 1;
   optional int32 number = 3;
   optional int32 number = 3;
@@ -234,8 +235,8 @@ message EnumDescriptorProto {
   // is inclusive such that it can appropriately represent the entire int32
   // is inclusive such that it can appropriately represent the entire int32
   // domain.
   // domain.
   message EnumReservedRange {
   message EnumReservedRange {
-    optional int32 start = 1; // Inclusive.
-    optional int32 end = 2;   // Inclusive.
+    optional int32 start = 1;  // Inclusive.
+    optional int32 end = 2;    // Inclusive.
   }
   }
 
 
   // Range of reserved numeric values. Reserved numeric values may not be used
   // Range of reserved numeric values. Reserved numeric values may not be used
@@ -276,9 +277,9 @@ message MethodDescriptorProto {
   optional MethodOptions options = 4;
   optional MethodOptions options = 4;
 
 
   // Identifies if client streams multiple client messages
   // Identifies if client streams multiple client messages
-  optional bool client_streaming = 5 [default=false];
+  optional bool client_streaming = 5 [default = false];
   // Identifies if server streams multiple server messages
   // Identifies if server streams multiple server messages
-  optional bool server_streaming = 6 [default=false];
+  optional bool server_streaming = 6 [default = false];
 }
 }
 
 
 
 
@@ -314,7 +315,6 @@ message MethodDescriptorProto {
 //   If this turns out to be popular, a web service will be set up
 //   If this turns out to be popular, a web service will be set up
 //   to automatically assign option numbers.
 //   to automatically assign option numbers.
 
 
-
 message FileOptions {
 message FileOptions {
 
 
   // Sets the Java package where classes generated from this .proto will be
   // Sets the Java package where classes generated from this .proto will be
@@ -337,7 +337,7 @@ message FileOptions {
   // named by java_outer_classname.  However, the outer class will still be
   // named by java_outer_classname.  However, the outer class will still be
   // generated to contain the file's getDescriptor() method as well as any
   // generated to contain the file's getDescriptor() method as well as any
   // top-level extensions defined in the file.
   // top-level extensions defined in the file.
-  optional bool java_multiple_files = 10 [default=false];
+  optional bool java_multiple_files = 10 [default = false];
 
 
   // This option does nothing.
   // This option does nothing.
   optional bool java_generate_equals_and_hash = 20 [deprecated=true];
   optional bool java_generate_equals_and_hash = 20 [deprecated=true];
@@ -348,17 +348,17 @@ message FileOptions {
   // Message reflection will do the same.
   // Message reflection will do the same.
   // However, an extension field still accepts non-UTF-8 byte sequences.
   // However, an extension field still accepts non-UTF-8 byte sequences.
   // This option has no effect on when used with the lite runtime.
   // This option has no effect on when used with the lite runtime.
-  optional bool java_string_check_utf8 = 27 [default=false];
+  optional bool java_string_check_utf8 = 27 [default = false];
 
 
 
 
   // Generated classes can be optimized for speed or code size.
   // Generated classes can be optimized for speed or code size.
   enum OptimizeMode {
   enum OptimizeMode {
-    SPEED = 1;        // Generate complete code for parsing, serialization,
-                      // etc.
-    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
-    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+    SPEED = 1;         // Generate complete code for parsing, serialization,
+                       // etc.
+    CODE_SIZE = 2;     // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3;  // Generate code using MessageLite and the lite runtime.
   }
   }
-  optional OptimizeMode optimize_for = 9 [default=SPEED];
+  optional OptimizeMode optimize_for = 9 [default = SPEED];
 
 
   // Sets the Go package where structs generated from this .proto will be
   // Sets the Go package where structs generated from this .proto will be
   // placed. If omitted, the Go package will be derived from the following:
   // placed. If omitted, the Go package will be derived from the following:
@@ -369,6 +369,7 @@ message FileOptions {
 
 
 
 
 
 
+
   // Should generic services be generated in each language?  "Generic" services
   // Should generic services be generated in each language?  "Generic" services
   // are not specific to any particular RPC system.  They are generated by the
   // are not specific to any particular RPC system.  They are generated by the
   // main code generators in each language (without additional plugins).
   // main code generators in each language (without additional plugins).
@@ -379,20 +380,20 @@ message FileOptions {
   // that generate code specific to your particular RPC system.  Therefore,
   // that generate code specific to your particular RPC system.  Therefore,
   // these default to false.  Old code which depends on generic services should
   // these default to false.  Old code which depends on generic services should
   // explicitly set them to true.
   // explicitly set them to true.
-  optional bool cc_generic_services = 16 [default=false];
-  optional bool java_generic_services = 17 [default=false];
-  optional bool py_generic_services = 18 [default=false];
-  optional bool php_generic_services = 42 [default=false];
+  optional bool cc_generic_services = 16 [default = false];
+  optional bool java_generic_services = 17 [default = false];
+  optional bool py_generic_services = 18 [default = false];
+  optional bool php_generic_services = 42 [default = false];
 
 
   // Is this file deprecated?
   // Is this file deprecated?
   // Depending on the target platform, this can emit Deprecated annotations
   // Depending on the target platform, this can emit Deprecated annotations
   // for everything in the file, or it will be completely ignored; in the very
   // for everything in the file, or it will be completely ignored; in the very
   // least, this is a formalization for deprecating files.
   // least, this is a formalization for deprecating files.
-  optional bool deprecated = 23 [default=false];
+  optional bool deprecated = 23 [default = false];
 
 
   // Enables the use of arenas for the proto messages in this file. This applies
   // Enables the use of arenas for the proto messages in this file. This applies
   // only to generated classes for C++.
   // only to generated classes for C++.
-  optional bool cc_enable_arenas = 31 [default=false];
+  optional bool cc_enable_arenas = 31 [default = false];
 
 
 
 
   // Sets the objective c class prefix which is prepended to all objective c
   // Sets the objective c class prefix which is prepended to all objective c
@@ -417,10 +418,9 @@ message FileOptions {
   // determining the namespace.
   // determining the namespace.
   optional string php_namespace = 41;
   optional string php_namespace = 41;
 
 
-
   // Use this option to change the namespace of php generated metadata classes.
   // Use this option to change the namespace of php generated metadata classes.
-  // Default is empty. When this option is empty, the proto file name will be used
-  // for determining the namespace.
+  // Default is empty. When this option is empty, the proto file name will be
+  // used for determining the namespace.
   optional string php_metadata_namespace = 44;
   optional string php_metadata_namespace = 44;
 
 
   // Use this option to change the package of ruby generated classes. Default
   // Use this option to change the package of ruby generated classes. Default
@@ -428,6 +428,7 @@ message FileOptions {
   // determining the ruby package.
   // determining the ruby package.
   optional string ruby_package = 45;
   optional string ruby_package = 45;
 
 
+
   // The parser stores options it doesn't recognize here.
   // The parser stores options it doesn't recognize here.
   // See the documentation for the "Options" section above.
   // See the documentation for the "Options" section above.
   repeated UninterpretedOption uninterpreted_option = 999;
   repeated UninterpretedOption uninterpreted_option = 999;
@@ -458,18 +459,18 @@ message MessageOptions {
   //
   //
   // Because this is an option, the above two restrictions are not enforced by
   // Because this is an option, the above two restrictions are not enforced by
   // the protocol compiler.
   // the protocol compiler.
-  optional bool message_set_wire_format = 1 [default=false];
+  optional bool message_set_wire_format = 1 [default = false];
 
 
   // Disables the generation of the standard "descriptor()" accessor, which can
   // Disables the generation of the standard "descriptor()" accessor, which can
   // conflict with a field of the same name.  This is meant to make migration
   // conflict with a field of the same name.  This is meant to make migration
   // from proto1 easier; new code should avoid fields named "descriptor".
   // from proto1 easier; new code should avoid fields named "descriptor".
-  optional bool no_standard_descriptor_accessor = 2 [default=false];
+  optional bool no_standard_descriptor_accessor = 2 [default = false];
 
 
   // Is this message deprecated?
   // Is this message deprecated?
   // Depending on the target platform, this can emit Deprecated annotations
   // Depending on the target platform, this can emit Deprecated annotations
   // for the message, or it will be completely ignored; in the very least,
   // for the message, or it will be completely ignored; in the very least,
   // this is a formalization for deprecating messages.
   // this is a formalization for deprecating messages.
-  optional bool deprecated = 3 [default=false];
+  optional bool deprecated = 3 [default = false];
 
 
   // Whether the message is an automatically generated map entry type for the
   // Whether the message is an automatically generated map entry type for the
   // maps field.
   // maps field.
@@ -486,7 +487,7 @@ message MessageOptions {
   //
   //
   // Implementations may choose not to generate the map_entry=true message, but
   // Implementations may choose not to generate the map_entry=true message, but
   // use a native map in the target language to hold the keys and values.
   // use a native map in the target language to hold the keys and values.
-  // The reflection APIs in such implementions still need to work as
+  // The reflection APIs in such implementations still need to work as
   // if the field is a repeated message field.
   // if the field is a repeated message field.
   //
   //
   // NOTE: Do not set the option in .proto files. Always use the maps syntax
   // NOTE: Do not set the option in .proto files. Always use the maps syntax
@@ -497,6 +498,7 @@ message MessageOptions {
   reserved 8;  // javalite_serializable
   reserved 8;  // javalite_serializable
   reserved 9;  // javanano_as_lite
   reserved 9;  // javanano_as_lite
 
 
+
   // The parser stores options it doesn't recognize here. See above.
   // The parser stores options it doesn't recognize here. See above.
   repeated UninterpretedOption uninterpreted_option = 999;
   repeated UninterpretedOption uninterpreted_option = 999;
 
 
@@ -576,16 +578,16 @@ message FieldOptions {
   // implementation must either *always* check its required fields, or *never*
   // implementation must either *always* check its required fields, or *never*
   // check its required fields, regardless of whether or not the message has
   // check its required fields, regardless of whether or not the message has
   // been parsed.
   // been parsed.
-  optional bool lazy = 5 [default=false];
+  optional bool lazy = 5 [default = false];
 
 
   // Is this field deprecated?
   // Is this field deprecated?
   // Depending on the target platform, this can emit Deprecated annotations
   // Depending on the target platform, this can emit Deprecated annotations
   // for accessors, or it will be completely ignored; in the very least, this
   // for accessors, or it will be completely ignored; in the very least, this
   // is a formalization for deprecating fields.
   // is a formalization for deprecating fields.
-  optional bool deprecated = 3 [default=false];
+  optional bool deprecated = 3 [default = false];
 
 
   // For Google-internal migration only. Do not use.
   // For Google-internal migration only. Do not use.
-  optional bool weak = 10 [default=false];
+  optional bool weak = 10 [default = false];
 
 
 
 
   // The parser stores options it doesn't recognize here. See above.
   // The parser stores options it doesn't recognize here. See above.
@@ -615,7 +617,7 @@ message EnumOptions {
   // Depending on the target platform, this can emit Deprecated annotations
   // Depending on the target platform, this can emit Deprecated annotations
   // for the enum, or it will be completely ignored; in the very least, this
   // for the enum, or it will be completely ignored; in the very least, this
   // is a formalization for deprecating enums.
   // is a formalization for deprecating enums.
-  optional bool deprecated = 3 [default=false];
+  optional bool deprecated = 3 [default = false];
 
 
   reserved 5;  // javanano_as_lite
   reserved 5;  // javanano_as_lite
 
 
@@ -631,7 +633,7 @@ message EnumValueOptions {
   // Depending on the target platform, this can emit Deprecated annotations
   // Depending on the target platform, this can emit Deprecated annotations
   // for the enum value, or it will be completely ignored; in the very least,
   // for the enum value, or it will be completely ignored; in the very least,
   // this is a formalization for deprecating enum values.
   // this is a formalization for deprecating enum values.
-  optional bool deprecated = 1 [default=false];
+  optional bool deprecated = 1 [default = false];
 
 
   // The parser stores options it doesn't recognize here. See above.
   // The parser stores options it doesn't recognize here. See above.
   repeated UninterpretedOption uninterpreted_option = 999;
   repeated UninterpretedOption uninterpreted_option = 999;
@@ -651,7 +653,7 @@ message ServiceOptions {
   // Depending on the target platform, this can emit Deprecated annotations
   // Depending on the target platform, this can emit Deprecated annotations
   // for the service, or it will be completely ignored; in the very least,
   // for the service, or it will be completely ignored; in the very least,
   // this is a formalization for deprecating services.
   // this is a formalization for deprecating services.
-  optional bool deprecated = 33 [default=false];
+  optional bool deprecated = 33 [default = false];
 
 
   // The parser stores options it doesn't recognize here. See above.
   // The parser stores options it doesn't recognize here. See above.
   repeated UninterpretedOption uninterpreted_option = 999;
   repeated UninterpretedOption uninterpreted_option = 999;
@@ -671,18 +673,18 @@ message MethodOptions {
   // Depending on the target platform, this can emit Deprecated annotations
   // Depending on the target platform, this can emit Deprecated annotations
   // for the method, or it will be completely ignored; in the very least,
   // for the method, or it will be completely ignored; in the very least,
   // this is a formalization for deprecating methods.
   // this is a formalization for deprecating methods.
-  optional bool deprecated = 33 [default=false];
+  optional bool deprecated = 33 [default = false];
 
 
   // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
   // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
   // or neither? HTTP based RPC implementation may choose GET verb for safe
   // or neither? HTTP based RPC implementation may choose GET verb for safe
   // methods, and PUT verb for idempotent methods instead of the default POST.
   // methods, and PUT verb for idempotent methods instead of the default POST.
   enum IdempotencyLevel {
   enum IdempotencyLevel {
     IDEMPOTENCY_UNKNOWN = 0;
     IDEMPOTENCY_UNKNOWN = 0;
-    NO_SIDE_EFFECTS     = 1; // implies idempotent
-    IDEMPOTENT          = 2; // idempotent, but may have side effects
+    NO_SIDE_EFFECTS = 1;  // implies idempotent
+    IDEMPOTENT = 2;       // idempotent, but may have side effects
   }
   }
-  optional IdempotencyLevel idempotency_level =
-      34 [default=IDEMPOTENCY_UNKNOWN];
+  optional IdempotencyLevel idempotency_level = 34
+      [default = IDEMPOTENCY_UNKNOWN];
 
 
   // The parser stores options it doesn't recognize here. See above.
   // The parser stores options it doesn't recognize here. See above.
   repeated UninterpretedOption uninterpreted_option = 999;
   repeated UninterpretedOption uninterpreted_option = 999;
@@ -763,7 +765,7 @@ message SourceCodeInfo {
   //   beginning of the "extend" block and is shared by all extensions within
   //   beginning of the "extend" block and is shared by all extensions within
   //   the block.
   //   the block.
   // - Just because a location's span is a subset of some other location's span
   // - Just because a location's span is a subset of some other location's span
-  //   does not mean that it is a descendent.  For example, a "group" defines
+  //   does not mean that it is a descendant.  For example, a "group" defines
   //   both a type and a field in a single declaration.  Thus, the locations
   //   both a type and a field in a single declaration.  Thus, the locations
   //   corresponding to the type and field and their components will overlap.
   //   corresponding to the type and field and their components will overlap.
   // - Code which tries to interpret locations should probably be designed to
   // - Code which tries to interpret locations should probably be designed to
@@ -794,14 +796,14 @@ message SourceCodeInfo {
     //   [ 4, 3, 2, 7 ]
     //   [ 4, 3, 2, 7 ]
     // this path refers to the whole field declaration (from the beginning
     // this path refers to the whole field declaration (from the beginning
     // of the label to the terminating semicolon).
     // of the label to the terminating semicolon).
-    repeated int32 path = 1 [packed=true];
+    repeated int32 path = 1 [packed = true];
 
 
     // Always has exactly three or four elements: start line, start column,
     // Always has exactly three or four elements: start line, start column,
     // end line (optional, otherwise assumed same as start line), end column.
     // end line (optional, otherwise assumed same as start line), end column.
     // These are packed into a single field for efficiency.  Note that line
     // These are packed into a single field for efficiency.  Note that line
     // and column numbers are zero-based -- typically you will want to add
     // and column numbers are zero-based -- typically you will want to add
     // 1 to each before displaying to a user.
     // 1 to each before displaying to a user.
-    repeated int32 span = 2 [packed=true];
+    repeated int32 span = 2 [packed = true];
 
 
     // If this SourceCodeInfo represents a complete declaration, these are any
     // If this SourceCodeInfo represents a complete declaration, these are any
     // comments appearing before and after the declaration which appear to be
     // comments appearing before and after the declaration which appear to be
@@ -866,7 +868,7 @@ message GeneratedCodeInfo {
   message Annotation {
   message Annotation {
     // Identifies the element in the original source .proto file. This field
     // Identifies the element in the original source .proto file. This field
     // is formatted the same as SourceCodeInfo.Location.path.
     // is formatted the same as SourceCodeInfo.Location.path.
-    repeated int32 path = 1 [packed=true];
+    repeated int32 path = 1 [packed = true];
 
 
     // Identifies the filesystem path to the original source .proto.
     // Identifies the filesystem path to the original source .proto.
     optional string source_file = 2;
     optional string source_file = 2;

+ 5 - 2
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go

@@ -102,7 +102,8 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 //
 //
 type Any struct {
 type Any struct {
 	// A URL/resource name that uniquely identifies the type of the serialized
 	// A URL/resource name that uniquely identifies the type of the serialized
-	// protocol buffer message. The last segment of the URL's path must represent
+	// protocol buffer message. This string must contain at least
+	// one "/" character. The last segment of the URL's path must represent
 	// the fully qualified name of the type (as in
 	// the fully qualified name of the type (as in
 	// `path/google.protobuf.Duration`). The name should be in a canonical form
 	// `path/google.protobuf.Duration`). The name should be in a canonical form
 	// (e.g., leading "." is not accepted).
 	// (e.g., leading "." is not accepted).
@@ -181,7 +182,9 @@ func init() {
 	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
 	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
 }
 }
 
 
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+func init() {
+	proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
+}
 
 
 var fileDescriptor_b53526c13ae22eb4 = []byte{
 var fileDescriptor_b53526c13ae22eb4 = []byte{
 	// 185 bytes of a gzipped FileDescriptorProto
 	// 185 bytes of a gzipped FileDescriptorProto

+ 2 - 1
vendor/github.com/golang/protobuf/ptypes/any/any.proto

@@ -121,7 +121,8 @@ option objc_class_prefix = "GPB";
 //
 //
 message Any {
 message Any {
   // A URL/resource name that uniquely identifies the type of the serialized
   // A URL/resource name that uniquely identifies the type of the serialized
-  // protocol buffer message. The last segment of the URL's path must represent
+  // protocol buffer message. This string must contain at least
+  // one "/" character. The last segment of the URL's path must represent
   // the fully qualified name of the type (as in
   // the fully qualified name of the type (as in
   // `path/google.protobuf.Duration`). The name should be in a canonical form
   // `path/google.protobuf.Duration`). The name should be in a canonical form
   // (e.g., leading "." is not accepted).
   // (e.g., leading "." is not accepted).

+ 4 - 2
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go

@@ -41,7 +41,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 //     if (duration.seconds < 0 && duration.nanos > 0) {
 //     if (duration.seconds < 0 && duration.nanos > 0) {
 //       duration.seconds += 1;
 //       duration.seconds += 1;
 //       duration.nanos -= 1000000000;
 //       duration.nanos -= 1000000000;
-//     } else if (durations.seconds > 0 && duration.nanos < 0) {
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
 //       duration.seconds -= 1;
 //       duration.seconds -= 1;
 //       duration.nanos += 1000000000;
 //       duration.nanos += 1000000000;
 //     }
 //     }
@@ -142,7 +142,9 @@ func init() {
 	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
 	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
 }
 }
 
 
-func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+func init() {
+	proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5)
+}
 
 
 var fileDescriptor_23597b2ebd7ac6c5 = []byte{
 var fileDescriptor_23597b2ebd7ac6c5 = []byte{
 	// 190 bytes of a gzipped FileDescriptorProto
 	// 190 bytes of a gzipped FileDescriptorProto

+ 1 - 2
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto

@@ -61,7 +61,7 @@ option objc_class_prefix = "GPB";
 //     if (duration.seconds < 0 && duration.nanos > 0) {
 //     if (duration.seconds < 0 && duration.nanos > 0) {
 //       duration.seconds += 1;
 //       duration.seconds += 1;
 //       duration.nanos -= 1000000000;
 //       duration.nanos -= 1000000000;
-//     } else if (durations.seconds > 0 && duration.nanos < 0) {
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
 //       duration.seconds -= 1;
 //       duration.seconds -= 1;
 //       duration.nanos += 1000000000;
 //       duration.nanos += 1000000000;
 //     }
 //     }
@@ -101,7 +101,6 @@ option objc_class_prefix = "GPB";
 //
 //
 //
 //
 message Duration {
 message Duration {
-
   // Signed seconds of the span of time. Must be from -315,576,000,000
   // Signed seconds of the span of time. Must be from -315,576,000,000
   // to +315,576,000,000 inclusive. Note: these bounds are computed from:
   // to +315,576,000,000 inclusive. Note: these bounds are computed from:
   // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
   // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years

+ 3 - 1
vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go

@@ -66,7 +66,9 @@ func init() {
 	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
 	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
 }
 }
 
 
-func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
+func init() {
+	proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8)
+}
 
 
 var fileDescriptor_900544acb223d5b8 = []byte{
 var fileDescriptor_900544acb223d5b8 = []byte{
 	// 148 bytes of a gzipped FileDescriptorProto
 	// 148 bytes of a gzipped FileDescriptorProto

+ 3 - 1
vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go

@@ -302,7 +302,9 @@ func init() {
 	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
 	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
 }
 }
 
 
-func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
+func init() {
+	proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402)
+}
 
 
 var fileDescriptor_df322afd6c9fb402 = []byte{
 var fileDescriptor_df322afd6c9fb402 = []byte{
 	// 417 bytes of a gzipped FileDescriptorProto
 	// 417 bytes of a gzipped FileDescriptorProto

+ 0 - 1
vendor/github.com/golang/protobuf/ptypes/struct/struct.proto

@@ -40,7 +40,6 @@ option java_outer_classname = "StructProto";
 option java_multiple_files = true;
 option java_multiple_files = true;
 option objc_class_prefix = "GPB";
 option objc_class_prefix = "GPB";
 
 
-
 // `Struct` represents a structured data value, consisting of fields
 // `Struct` represents a structured data value, consisting of fields
 // which map to dynamically typed values. In some languages, `Struct`
 // which map to dynamically typed values. In some languages, `Struct`
 // might be supported by a native representation. For example, in
 // might be supported by a native representation. For example, in

+ 23 - 17
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go

@@ -20,17 +20,19 @@ var _ = math.Inf
 // proto package needs to be updated.
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
 
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from  RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
 //
 //
 // # Examples
 // # Examples
 //
 //
@@ -91,12 +93,14 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 // 01:30 UTC on January 15, 2017.
 // 01:30 UTC on January 15, 2017.
 //
 //
 // In JavaScript, one can convert a Date object to this format using the
 // In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
 // method. In Python, a standard `datetime.datetime` object can be converted
 // method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
 // ) to obtain a formatter capable of generating timestamps in this format.
 // ) to obtain a formatter capable of generating timestamps in this format.
 //
 //
 //
 //
@@ -160,7 +164,9 @@ func init() {
 	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
 	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
 }
 }
 
 
-func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+func init() {
+	proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e)
+}
 
 
 var fileDescriptor_292007bbfe81227e = []byte{
 var fileDescriptor_292007bbfe81227e = []byte{
 	// 191 bytes of a gzipped FileDescriptorProto
 	// 191 bytes of a gzipped FileDescriptorProto

+ 20 - 17
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto

@@ -40,17 +40,19 @@ option java_outer_classname = "TimestampProto";
 option java_multiple_files = true;
 option java_multiple_files = true;
 option objc_class_prefix = "GPB";
 option objc_class_prefix = "GPB";
 
 
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from  RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
 //
 //
 // # Examples
 // # Examples
 //
 //
@@ -111,17 +113,18 @@ option objc_class_prefix = "GPB";
 // 01:30 UTC on January 15, 2017.
 // 01:30 UTC on January 15, 2017.
 //
 //
 // In JavaScript, one can convert a Date object to this format using the
 // In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
 // method. In Python, a standard `datetime.datetime` object can be converted
 // method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
 // ) to obtain a formatter capable of generating timestamps in this format.
 // ) to obtain a formatter capable of generating timestamps in this format.
 //
 //
 //
 //
 message Timestamp {
 message Timestamp {
-
   // Represents seconds of UTC time since Unix epoch
   // Represents seconds of UTC time since Unix epoch
   // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
   // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
   // 9999-12-31T23:59:59Z inclusive.
   // 9999-12-31T23:59:59Z inclusive.

+ 39 - 19
vendor/github.com/opencontainers/runc/README.md

@@ -3,6 +3,7 @@
 [![Build Status](https://travis-ci.org/opencontainers/runc.svg?branch=master)](https://travis-ci.org/opencontainers/runc)
 [![Build Status](https://travis-ci.org/opencontainers/runc.svg?branch=master)](https://travis-ci.org/opencontainers/runc)
 [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runc)](https://goreportcard.com/report/github.com/opencontainers/runc)
 [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runc)](https://goreportcard.com/report/github.com/opencontainers/runc)
 [![GoDoc](https://godoc.org/github.com/opencontainers/runc?status.svg)](https://godoc.org/github.com/opencontainers/runc)
 [![GoDoc](https://godoc.org/github.com/opencontainers/runc?status.svg)](https://godoc.org/github.com/opencontainers/runc)
+[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/588/badge)](https://bestpractices.coreinfrastructure.org/projects/588)
 
 
 ## Introduction
 ## Introduction
 
 
@@ -18,22 +19,23 @@ You can find official releases of `runc` on the [release](https://github.com/ope
 
 
 Currently, the following features are not considered to be production-ready:
 Currently, the following features are not considered to be production-ready:
 
 
-* Support for cgroup v2
+* [Support for cgroup v2](./docs/cgroup-v2.md)
 
 
 ## Security
 ## Security
 
 
-The reporting process and disclosure communications are outlined in [/org/security](https://github.com/opencontainers/org/blob/master/security/).
+The reporting process and disclosure communications are outlined [here](https://github.com/opencontainers/org/blob/master/SECURITY.md).
+
+### Security Audit
+A third party security audit was performed by Cure53, you can see the full report [here](https://github.com/opencontainers/runc/blob/master/docs/Security-Audit.pdf).
 
 
 ## Building
 ## Building
 
 
 `runc` currently supports the Linux platform with various architecture support.
 `runc` currently supports the Linux platform with various architecture support.
-It must be built with Go version 1.6 or higher in order for some features to function properly.
+It must be built with Go version 1.13 or higher.
 
 
 In order to enable seccomp support you will need to install `libseccomp` on your platform.
 In order to enable seccomp support you will need to install `libseccomp` on your platform.
 > e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
 > e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
 
 
-Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.
-
 ```bash
 ```bash
 # create a 'github.com/opencontainers' in your GOPATH/src
 # create a 'github.com/opencontainers' in your GOPATH/src
 cd github.com/opencontainers
 cd github.com/opencontainers
@@ -58,20 +60,22 @@ sudo make install
 
 
 #### Build Tags
 #### Build Tags
 
 
-`runc` supports optional build tags for compiling support of various features.
-To add build tags to the make option the `BUILDTAGS` variable must be set.
+`runc` supports optional build tags for compiling support of various features,
+with some of them enabled by default (see `BUILDTAGS` in top-level `Makefile`).
+
+To change build tags from the default, set the `BUILDTAGS` variable for make,
+e.g.
 
 
 ```bash
 ```bash
 make BUILDTAGS='seccomp apparmor'
 make BUILDTAGS='seccomp apparmor'
 ```
 ```
 
 
-| Build Tag | Feature                            | Dependency  |
-|-----------|------------------------------------|-------------|
-| seccomp   | Syscall filtering                  | libseccomp  |
-| selinux   | selinux process and mount labeling | <none>      |
-| apparmor  | apparmor profile support           | <none>      |
-| ambient   | ambient capability support         | kernel 4.3  |
-| nokmem    | disable kernel memory account      | <none>      |
+| Build Tag | Feature                            | Enabled by default | Dependency |
+|-----------|------------------------------------|--------------------|------------|
+| seccomp   | Syscall filtering                  | yes                | libseccomp |
+| selinux   | selinux process and mount labeling | yes                | <none>     |
+| apparmor  | apparmor profile support           | yes                | <none>     |
+| nokmem    | disable kernel memory accounting   | no                 | <none>     |
 
 
 
 
 ### Running the test suite
 ### Running the test suite
@@ -97,17 +101,30 @@ You can run a specific integration test by setting the `TESTPATH` variable.
 # make test TESTPATH="/checkpoint.bats"
 # make test TESTPATH="/checkpoint.bats"
 ```
 ```
 
 
-You can run a test in your proxy environment by setting `DOCKER_BUILD_PROXY` and `DOCKER_RUN_PROXY` variables.
+You can run a specific rootless integration test by setting the `ROOTLESS_TESTPATH` variable.
 
 
 ```bash
 ```bash
-# make test DOCKER_BUILD_PROXY="--build-arg HTTP_PROXY=http://yourproxy/" DOCKER_RUN_PROXY="-e HTTP_PROXY=http://yourproxy/"
+# make test ROOTLESS_TESTPATH="/checkpoint.bats"
+```
+
+You can run a test using your container engine's flags by setting `CONTAINER_ENGINE_BUILD_FLAGS` and `CONTAINER_ENGINE_RUN_FLAGS` variables.
+
+```bash
+# make test CONTAINER_ENGINE_BUILD_FLAGS="--build-arg http_proxy=http://yourproxy/" CONTAINER_ENGINE_RUN_FLAGS="-e http_proxy=http://yourproxy/"
 ```
 ```
 
 
 ### Dependencies Management
 ### Dependencies Management
 
 
-`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependencies management.
-Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update
-new dependencies.
+`runc` uses [Go Modules](https://github.com/golang/go/wiki/Modules) for dependencies management.
+Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for how to add or update
+new dependencies. When updating dependencies, be sure that you are running Go `1.14` or newer.
+
+```
+# Update vendored dependencies
+make vendor
+# Verify all dependencies
+make verify-dependencies
+```
 
 
 ## Using runc
 ## Using runc
 
 
@@ -275,6 +292,9 @@ PIDFile=/run/mycontainerid.pid
 WantedBy=multi-user.target
 WantedBy=multi-user.target
 ```
 ```
 
 
+#### cgroup v2
+See [`./docs/cgroup-v2.md`](./docs/cgroup-v2.md).
+
 ## License
 ## License
 
 
 The code and docs are released under the [Apache 2.0 license](LICENSE).
 The code and docs are released under the [Apache 2.0 license](LICENSE).

+ 26 - 0
vendor/github.com/opencontainers/runc/go.mod

@@ -0,0 +1,26 @@
+module github.com/opencontainers/runc
+
+go 1.14
+
+require (
+	github.com/checkpoint-restore/go-criu/v4 v4.0.2
+	github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775
+	github.com/containerd/console v1.0.0
+	github.com/coreos/go-systemd/v22 v22.0.0
+	github.com/cyphar/filepath-securejoin v0.2.2
+	github.com/docker/go-units v0.4.0
+	github.com/godbus/dbus/v5 v5.0.3
+	github.com/golang/protobuf v1.3.5
+	github.com/moby/sys/mountinfo v0.1.3
+	github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
+	github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+	github.com/opencontainers/selinux v1.5.1
+	github.com/pkg/errors v0.9.1
+	github.com/seccomp/libseccomp-golang v0.9.1
+	github.com/sirupsen/logrus v1.6.0
+	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
+	// NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
+	github.com/urfave/cli v1.22.1
+	github.com/vishvananda/netlink v1.1.0
+	golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+)

+ 2 - 3
vendor/github.com/opencontainers/runc/libcontainer/README.md

@@ -155,8 +155,7 @@ config := &configs.Config{
 		Parent: "system",
 		Parent: "system",
 		Resources: &configs.Resources{
 		Resources: &configs.Resources{
 			MemorySwappiness: nil,
 			MemorySwappiness: nil,
-			AllowAllDevices:  nil,
-			AllowedDevices:   configs.DefaultAllowedDevices,
+			Devices:          specconv.AllowedDevices,
 		},
 		},
 	},
 	},
 	MaskPaths: []string{
 	MaskPaths: []string{
@@ -166,7 +165,7 @@ config := &configs.Config{
 	ReadonlyPaths: []string{
 	ReadonlyPaths: []string{
 		"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
 		"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
 	},
 	},
-	Devices:  configs.DefaultAutoCreatedDevices,
+	Devices:  specconv.AllowedDevices,
 	Hostname: "testing",
 	Hostname: "testing",
 	Mounts: []*configs.Mount{
 	Mounts: []*configs.Mount{
 		{
 		{

+ 16 - 39
vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go

@@ -3,8 +3,6 @@
 package cgroups
 package cgroups
 
 
 import (
 import (
-	"fmt"
-
 	"github.com/opencontainers/runc/libcontainer/configs"
 	"github.com/opencontainers/runc/libcontainer/configs"
 )
 )
 
 
@@ -27,48 +25,27 @@ type Manager interface {
 	// Destroys the cgroup set
 	// Destroys the cgroup set
 	Destroy() error
 	Destroy() error
 
 
-	// The option func SystemdCgroups() and Cgroupfs() require following attributes:
-	// 	Paths   map[string]string
-	// 	Cgroups *configs.Cgroup
-	// Paths maps cgroup subsystem to path at which it is mounted.
-	// Cgroups specifies specific cgroup settings for the various subsystems
-
-	// Returns cgroup paths to save in a state file and to be able to
-	// restore the object later.
-	GetPaths() map[string]string
-
-	// GetUnifiedPath returns the unified path when running in unified mode.
-	// The value corresponds to the all values of GetPaths() map.
-	//
-	// GetUnifiedPath returns error when running in hybrid mode as well as
-	// in legacy mode.
-	GetUnifiedPath() (string, error)
+	// Path returns a cgroup path to the specified controller/subsystem.
+	// For cgroupv2, the argument is unused and can be empty.
+	Path(string) string
 
 
 	// Sets the cgroup as configured.
 	// Sets the cgroup as configured.
 	Set(container *configs.Config) error
 	Set(container *configs.Config) error
 
 
-	// Gets the cgroup as configured.
-	GetCgroups() (*configs.Cgroup, error)
-}
-
-type NotFoundError struct {
-	Subsystem string
-}
+	// GetPaths returns cgroup path(s) to save in a state file in order to restore later.
+	//
+	// For cgroup v1, a key is cgroup subsystem name, and the value is the path
+	// to the cgroup for this subsystem.
+	//
+	// For cgroup v2 unified hierarchy, a key is "", and the value is the unified path.
+	GetPaths() map[string]string
 
 
-func (e *NotFoundError) Error() string {
-	return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
-}
+	// GetCgroups returns the cgroup data as configured.
+	GetCgroups() (*configs.Cgroup, error)
 
 
-func NewNotFoundError(sub string) error {
-	return &NotFoundError{
-		Subsystem: sub,
-	}
-}
+	// GetFreezerState retrieves the current FreezerState of the cgroup.
+	GetFreezerState() (configs.FreezerState, error)
 
 
-func IsNotFound(err error) bool {
-	if err == nil {
-		return false
-	}
-	_, ok := err.(*NotFoundError)
-	return ok
+	// Whether the cgroup path exists or not
+	Exists() bool
 }
 }

+ 27 - 0
vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go

@@ -20,6 +20,12 @@ type CpuUsage struct {
 	// Total CPU time consumed per core.
 	// Total CPU time consumed per core.
 	// Units: nanoseconds.
 	// Units: nanoseconds.
 	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
 	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+	// CPU time consumed per core in kernel mode
+	// Units: nanoseconds.
+	PercpuUsageInKernelmode []uint64 `json:"percpu_usage_in_kernelmode"`
+	// CPU time consumed per core in user mode
+	// Units: nanoseconds.
+	PercpuUsageInUsermode []uint64 `json:"percpu_usage_in_usermode"`
 	// Time spent by tasks of the cgroup in kernel mode.
 	// Time spent by tasks of the cgroup in kernel mode.
 	// Units: nanoseconds.
 	// Units: nanoseconds.
 	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
 	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
@@ -51,12 +57,33 @@ type MemoryStats struct {
 	KernelUsage MemoryData `json:"kernel_usage,omitempty"`
 	KernelUsage MemoryData `json:"kernel_usage,omitempty"`
 	// usage of kernel TCP memory
 	// usage of kernel TCP memory
 	KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
 	KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
+	// usage of memory pages by NUMA node
+	// see chapter 5.6 of memory controller documentation
+	PageUsageByNUMA PageUsageByNUMA `json:"page_usage_by_numa,omitempty"`
 	// if true, memory usage is accounted for throughout a hierarchy of cgroups.
 	// if true, memory usage is accounted for throughout a hierarchy of cgroups.
 	UseHierarchy bool `json:"use_hierarchy"`
 	UseHierarchy bool `json:"use_hierarchy"`
 
 
 	Stats map[string]uint64 `json:"stats,omitempty"`
 	Stats map[string]uint64 `json:"stats,omitempty"`
 }
 }
 
 
+type PageUsageByNUMA struct {
+	// Embedding is used as types can't be recursive.
+	PageUsageByNUMAInner
+	Hierarchical PageUsageByNUMAInner `json:"hierarchical,omitempty"`
+}
+
+type PageUsageByNUMAInner struct {
+	Total       PageStats `json:"total,omitempty"`
+	File        PageStats `json:"file,omitempty"`
+	Anon        PageStats `json:"anon,omitempty"`
+	Unevictable PageStats `json:"unevictable,omitempty"`
+}
+
+type PageStats struct {
+	Total uint64           `json:"total,omitempty"`
+	Nodes map[uint8]uint64 `json:"nodes,omitempty"`
+}
+
 type PidsStats struct {
 type PidsStats struct {
 	// number of pids in the cgroup
 	// number of pids in the cgroup
 	Current uint64 `json:"current,omitempty"`
 	Current uint64 `json:"current,omitempty"`

+ 74 - 283
vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go

@@ -4,6 +4,7 @@ package cgroups
 
 
 import (
 import (
 	"bufio"
 	"bufio"
+	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
@@ -12,7 +13,6 @@ import (
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
-	"syscall"
 	"time"
 	"time"
 
 
 	units "github.com/docker/go-units"
 	units "github.com/docker/go-units"
@@ -20,7 +20,6 @@ import (
 )
 )
 
 
 const (
 const (
-	CgroupNamePrefix  = "name="
 	CgroupProcesses   = "cgroup.procs"
 	CgroupProcesses   = "cgroup.procs"
 	unifiedMountpoint = "/sys/fs/cgroup"
 	unifiedMountpoint = "/sys/fs/cgroup"
 )
 )
@@ -40,8 +39,8 @@ var HugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"}
 // IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
 // IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
 func IsCgroup2UnifiedMode() bool {
 func IsCgroup2UnifiedMode() bool {
 	isUnifiedOnce.Do(func() {
 	isUnifiedOnce.Do(func() {
-		var st syscall.Statfs_t
-		if err := syscall.Statfs(unifiedMountpoint, &st); err != nil {
+		var st unix.Statfs_t
+		if err := unix.Statfs(unifiedMountpoint, &st); err != nil {
 			panic("cannot statfs cgroup root")
 			panic("cannot statfs cgroup root")
 		}
 		}
 		isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC
 		isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC
@@ -49,191 +48,19 @@ func IsCgroup2UnifiedMode() bool {
 	return isUnified
 	return isUnified
 }
 }
 
 
-// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
-func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) {
-	if IsCgroup2UnifiedMode() {
-		return unifiedMountpoint, nil
-	}
-	mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem)
-	return mnt, err
-}
-
-func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) {
-	// We are not using mount.GetMounts() because it's super-inefficient,
-	// parsing it directly sped up x10 times because of not using Sscanf.
-	// It was one of two major performance drawbacks in container start.
-	if !isSubsystemAvailable(subsystem) {
-		return "", "", NewNotFoundError(subsystem)
-	}
-
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return "", "", err
-	}
-	defer f.Close()
-
-	if IsCgroup2UnifiedMode() {
-		subsystem = ""
-	}
-
-	return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem)
-}
-
-func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsystem string) (string, string, error) {
-	scanner := bufio.NewScanner(reader)
-	for scanner.Scan() {
-		txt := scanner.Text()
-		fields := strings.Fields(txt)
-		if len(fields) < 9 {
-			continue
-		}
-		if strings.HasPrefix(fields[4], cgroupPath) {
-			for _, opt := range strings.Split(fields[len(fields)-1], ",") {
-				if (subsystem == "" && fields[9] == "cgroup2") || opt == subsystem {
-					return fields[4], fields[3], nil
-				}
-			}
-		}
-	}
-	if err := scanner.Err(); err != nil {
-		return "", "", err
-	}
-
-	return "", "", NewNotFoundError(subsystem)
-}
-
-func isSubsystemAvailable(subsystem string) bool {
-	if IsCgroup2UnifiedMode() {
-		controllers, err := GetAllSubsystems()
-		if err != nil {
-			return false
-		}
-		for _, c := range controllers {
-			if c == subsystem {
-				return true
-			}
-		}
-		return false
-	}
-
-	cgroups, err := ParseCgroupFile("/proc/self/cgroup")
-	if err != nil {
-		return false
-	}
-	_, avail := cgroups[subsystem]
-	return avail
-}
-
-func GetClosestMountpointAncestor(dir, mountinfo string) string {
-	deepestMountPoint := ""
-	for _, mountInfoEntry := range strings.Split(mountinfo, "\n") {
-		mountInfoParts := strings.Fields(mountInfoEntry)
-		if len(mountInfoParts) < 5 {
-			continue
-		}
-		mountPoint := mountInfoParts[4]
-		if strings.HasPrefix(mountPoint, deepestMountPoint) && strings.HasPrefix(dir, mountPoint) {
-			deepestMountPoint = mountPoint
-		}
-	}
-	return deepestMountPoint
-}
-
-func FindCgroupMountpointDir() (string, error) {
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return "", err
-	}
-	defer f.Close()
-
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() {
-		text := scanner.Text()
-		fields := strings.Split(text, " ")
-		// Safe as mountinfo encodes mountpoints with spaces as \040.
-		index := strings.Index(text, " - ")
-		postSeparatorFields := strings.Fields(text[index+3:])
-		numPostFields := len(postSeparatorFields)
-
-		// This is an error as we can't detect if the mount is for "cgroup"
-		if numPostFields == 0 {
-			return "", fmt.Errorf("Found no fields post '-' in %q", text)
-		}
-
-		if postSeparatorFields[0] == "cgroup" || postSeparatorFields[0] == "cgroup2" {
-			// Check that the mount is properly formatted.
-			if numPostFields < 3 {
-				return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
-			}
-
-			return filepath.Dir(fields[4]), nil
-		}
-	}
-	if err := scanner.Err(); err != nil {
-		return "", err
-	}
-
-	return "", NewNotFoundError("cgroup")
-}
-
 type Mount struct {
 type Mount struct {
 	Mountpoint string
 	Mountpoint string
 	Root       string
 	Root       string
 	Subsystems []string
 	Subsystems []string
 }
 }
 
 
-func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) {
-	if len(m.Subsystems) == 0 {
-		return "", fmt.Errorf("no subsystem for mount")
-	}
-
-	return getControllerPath(m.Subsystems[0], cgroups)
-}
-
-func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) {
-	res := make([]Mount, 0, len(ss))
-	scanner := bufio.NewScanner(mi)
-	numFound := 0
-	for scanner.Scan() && numFound < len(ss) {
-		txt := scanner.Text()
-		sepIdx := strings.Index(txt, " - ")
-		if sepIdx == -1 {
-			return nil, fmt.Errorf("invalid mountinfo format")
-		}
-		if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" {
-			continue
-		}
-		fields := strings.Split(txt, " ")
-		m := Mount{
-			Mountpoint: fields[4],
-			Root:       fields[3],
-		}
-		for _, opt := range strings.Split(fields[len(fields)-1], ",") {
-			seen, known := ss[opt]
-			if !known || (!all && seen) {
-				continue
-			}
-			ss[opt] = true
-			if strings.HasPrefix(opt, CgroupNamePrefix) {
-				opt = opt[len(CgroupNamePrefix):]
-			}
-			m.Subsystems = append(m.Subsystems, opt)
-			numFound++
-		}
-		if len(m.Subsystems) > 0 || all {
-			res = append(res, m)
-		}
-	}
-	if err := scanner.Err(); err != nil {
-		return nil, err
-	}
-	return res, nil
-}
-
 // GetCgroupMounts returns the mounts for the cgroup subsystems.
 // GetCgroupMounts returns the mounts for the cgroup subsystems.
 // all indicates whether to return just the first instance or all the mounts.
 // all indicates whether to return just the first instance or all the mounts.
+// This function should not be used from cgroupv2 code, as in this case
+// all the controllers are available under the constant unifiedMountpoint.
 func GetCgroupMounts(all bool) ([]Mount, error) {
 func GetCgroupMounts(all bool) ([]Mount, error) {
 	if IsCgroup2UnifiedMode() {
 	if IsCgroup2UnifiedMode() {
+		// TODO: remove cgroupv2 case once all external users are converted
 		availableControllers, err := GetAllSubsystems()
 		availableControllers, err := GetAllSubsystems()
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
@@ -246,22 +73,7 @@ func GetCgroupMounts(all bool) ([]Mount, error) {
 		return []Mount{m}, nil
 		return []Mount{m}, nil
 	}
 	}
 
 
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	allSubsystems, err := ParseCgroupFile("/proc/self/cgroup")
-	if err != nil {
-		return nil, err
-	}
-
-	allMap := make(map[string]bool)
-	for s := range allSubsystems {
-		allMap[s] = false
-	}
-	return getCgroupMountsHelper(allMap, f, all)
+	return getCgroupMountsV1(all)
 }
 }
 
 
 // GetAllSubsystems returns all the cgroup subsystems supported by the kernel
 // GetAllSubsystems returns all the cgroup subsystems supported by the kernel
@@ -305,61 +117,8 @@ func GetAllSubsystems() ([]string, error) {
 	return subsystems, nil
 	return subsystems, nil
 }
 }
 
 
-// GetOwnCgroup returns the relative path to the cgroup docker is running in.
-func GetOwnCgroup(subsystem string) (string, error) {
-	cgroups, err := ParseCgroupFile("/proc/self/cgroup")
-	if err != nil {
-		return "", err
-	}
-
-	return getControllerPath(subsystem, cgroups)
-}
-
-func GetOwnCgroupPath(subsystem string) (string, error) {
-	cgroup, err := GetOwnCgroup(subsystem)
-	if err != nil {
-		return "", err
-	}
-
-	return getCgroupPathHelper(subsystem, cgroup)
-}
-
-func GetInitCgroup(subsystem string) (string, error) {
-	cgroups, err := ParseCgroupFile("/proc/1/cgroup")
-	if err != nil {
-		return "", err
-	}
-
-	return getControllerPath(subsystem, cgroups)
-}
-
-func GetInitCgroupPath(subsystem string) (string, error) {
-	cgroup, err := GetInitCgroup(subsystem)
-	if err != nil {
-		return "", err
-	}
-
-	return getCgroupPathHelper(subsystem, cgroup)
-}
-
-func getCgroupPathHelper(subsystem, cgroup string) (string, error) {
-	mnt, root, err := FindCgroupMountpointAndRoot("", subsystem)
-	if err != nil {
-		return "", err
-	}
-
-	// This is needed for nested containers, because in /proc/self/cgroup we
-	// see paths from host, which don't exist in container.
-	relCgroup, err := filepath.Rel(root, cgroup)
-	if err != nil {
-		return "", err
-	}
-
-	return filepath.Join(mnt, relCgroup), nil
-}
-
-func readProcsFile(dir string) ([]int, error) {
-	f, err := os.Open(filepath.Join(dir, CgroupProcesses))
+func readProcsFile(file string) ([]int, error) {
+	f, err := os.Open(file)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -379,11 +138,18 @@ func readProcsFile(dir string) ([]int, error) {
 			out = append(out, pid)
 			out = append(out, pid)
 		}
 		}
 	}
 	}
-	return out, nil
+	return out, s.Err()
 }
 }
 
 
-// ParseCgroupFile parses the given cgroup file, typically from
-// /proc/<pid>/cgroup, into a map of subgroups to cgroup names.
+// ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup
+// or /proc/<pid>/cgroup, into a map of subsystems to cgroup paths, e.g.
+//   "cpu": "/user.slice/user-1000.slice"
+//   "pids": "/user.slice/user-1000.slice"
+// etc.
+//
+// Note that for cgroup v2 unified hierarchy, there are no per-controller
+// cgroup paths, so the resulting map will have a single element where the key
+// is empty string ("") and the value is the cgroup path the <pid> is in.
 func ParseCgroupFile(path string) (map[string]string, error) {
 func ParseCgroupFile(path string) (map[string]string, error) {
 	f, err := os.Open(path)
 	f, err := os.Open(path)
 	if err != nil {
 	if err != nil {
@@ -423,22 +189,6 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
 	return cgroups, nil
 	return cgroups, nil
 }
 }
 
 
-func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
-	if IsCgroup2UnifiedMode() {
-		return "/", nil
-	}
-
-	if p, ok := cgroups[subsystem]; ok {
-		return p, nil
-	}
-
-	if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok {
-		return p, nil
-	}
-
-	return "", NewNotFoundError(subsystem)
-}
-
 func PathExists(path string) bool {
 func PathExists(path string) bool {
 	if _, err := os.Stat(path); err != nil {
 	if _, err := os.Stat(path); err != nil {
 		return false
 		return false
@@ -514,8 +264,8 @@ func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) {
 }
 }
 
 
 // GetPids returns all pids, that were added to cgroup at path.
 // GetPids returns all pids, that were added to cgroup at path.
-func GetPids(path string) ([]int, error) {
-	return readProcsFile(path)
+func GetPids(dir string) ([]int, error) {
+	return readProcsFile(filepath.Join(dir, CgroupProcesses))
 }
 }
 
 
 // GetAllPids returns all pids, that were added to cgroup at path and to all its
 // GetAllPids returns all pids, that were added to cgroup at path and to all its
@@ -524,14 +274,13 @@ func GetAllPids(path string) ([]int, error) {
 	var pids []int
 	var pids []int
 	// collect pids from all sub-cgroups
 	// collect pids from all sub-cgroups
 	err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
 	err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
-		dir, file := filepath.Split(p)
-		if file != CgroupProcesses {
-			return nil
-		}
 		if iErr != nil {
 		if iErr != nil {
 			return iErr
 			return iErr
 		}
 		}
-		cPids, err := readProcsFile(dir)
+		if info.IsDir() || info.Name() != CgroupProcesses {
+			return nil
+		}
+		cPids, err := readProcsFile(p)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -568,7 +317,7 @@ func WriteCgroupProc(dir string, pid int) error {
 
 
 		// EINVAL might mean that the task being added to cgroup.procs is in state
 		// EINVAL might mean that the task being added to cgroup.procs is in state
 		// TASK_NEW. We should attempt to do so again.
 		// TASK_NEW. We should attempt to do so again.
-		if isEINVAL(err) {
+		if errors.Is(err, unix.EINVAL) {
 			time.Sleep(30 * time.Millisecond)
 			time.Sleep(30 * time.Millisecond)
 			continue
 			continue
 		}
 		}
@@ -578,11 +327,53 @@ func WriteCgroupProc(dir string, pid int) error {
 	return err
 	return err
 }
 }
 
 
-func isEINVAL(err error) bool {
-	switch err := err.(type) {
-	case *os.PathError:
-		return err.Err == unix.EINVAL
-	default:
-		return false
+// Since the OCI spec is designed for cgroup v1, in some cases
+// there is need to convert from the cgroup v1 configuration to cgroup v2
+// the formula for BlkIOWeight is y = (1 + (x - 10) * 9999 / 990)
+// convert linearly from [10-1000] to [1-10000]
+func ConvertBlkIOToCgroupV2Value(blkIoWeight uint16) uint64 {
+	if blkIoWeight == 0 {
+		return 0
+	}
+	return uint64(1 + (uint64(blkIoWeight)-10)*9999/990)
+}
+
+// Since the OCI spec is designed for cgroup v1, in some cases
+// there is need to convert from the cgroup v1 configuration to cgroup v2
+// the formula for cpuShares is y = (1 + ((x - 2) * 9999) / 262142)
+// convert from [2-262144] to [1-10000]
+// 262144 comes from Linux kernel definition "#define MAX_SHARES (1UL << 18)"
+func ConvertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 {
+	if cpuShares == 0 {
+		return 0
 	}
 	}
+	return (1 + ((cpuShares-2)*9999)/262142)
+}
+
+// ConvertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec
+// for use by cgroup v2 drivers. A conversion is needed since Resources.MemorySwap
+// is defined as memory+swap combined, while in cgroup v2 swap is a separate value.
+func ConvertMemorySwapToCgroupV2Value(memorySwap, memory int64) (int64, error) {
+	// for compatibility with cgroup1 controller, set swap to unlimited in
+	// case the memory is set to unlimited, and swap is not explicitly set,
+	// treating the request as "set both memory and swap to unlimited".
+	if memory == -1 && memorySwap == 0 {
+		return -1, nil
+	}
+	if memorySwap == -1 || memorySwap == 0 {
+		// -1 is "max", 0 is "unset", so treat as is
+		return memorySwap, nil
+	}
+	// sanity checks
+	if memory == 0 || memory == -1 {
+		return 0, errors.New("unable to set swap limit without memory limit")
+	}
+	if memory < 0 {
+		return 0, fmt.Errorf("invalid memory value: %d", memory)
+	}
+	if memorySwap < memory {
+		return 0, errors.New("memory+swap limit should be >= memory limit")
+	}
+
+	return memorySwap - memory, nil
 }
 }

+ 250 - 0
vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go

@@ -0,0 +1,250 @@
+package cgroups
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// Code in this source file are specific to cgroup v1,
+// and must not be used from any cgroup v2 code.
+
+const (
+	CgroupNamePrefix = "name="
+)
+
+var (
+	errUnified = errors.New("not implemented for cgroup v2 unified hierarchy")
+)
+
+type NotFoundError struct {
+	Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+	return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+	return &NotFoundError{
+		Subsystem: sub,
+	}
+}
+
+func IsNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*NotFoundError)
+	return ok
+}
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
+func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) {
+	if IsCgroup2UnifiedMode() {
+		return "", errUnified
+	}
+	mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem)
+	return mnt, err
+}
+
+func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) {
+	if IsCgroup2UnifiedMode() {
+		return "", "", errUnified
+	}
+
+	// We are not using mount.GetMounts() because it's super-inefficient,
+	// parsing it directly sped up x10 times because of not using Sscanf.
+	// It was one of two major performance drawbacks in container start.
+	if !isSubsystemAvailable(subsystem) {
+		return "", "", NewNotFoundError(subsystem)
+	}
+
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return "", "", err
+	}
+	defer f.Close()
+
+	return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem)
+}
+
+func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsystem string) (string, string, error) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		txt := scanner.Text()
+		fields := strings.Fields(txt)
+		if len(fields) < 9 {
+			continue
+		}
+		if strings.HasPrefix(fields[4], cgroupPath) {
+			for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+				if opt == subsystem {
+					return fields[4], fields[3], nil
+				}
+			}
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return "", "", err
+	}
+
+	return "", "", NewNotFoundError(subsystem)
+}
+
+func isSubsystemAvailable(subsystem string) bool {
+	if IsCgroup2UnifiedMode() {
+		panic("don't call isSubsystemAvailable from cgroupv2 code")
+	}
+
+	cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+	if err != nil {
+		return false
+	}
+	_, avail := cgroups[subsystem]
+	return avail
+}
+
+func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) {
+	if len(m.Subsystems) == 0 {
+		return "", fmt.Errorf("no subsystem for mount")
+	}
+
+	return getControllerPath(m.Subsystems[0], cgroups)
+}
+
+func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) {
+	res := make([]Mount, 0, len(ss))
+	scanner := bufio.NewScanner(mi)
+	numFound := 0
+	for scanner.Scan() && numFound < len(ss) {
+		txt := scanner.Text()
+		sepIdx := strings.Index(txt, " - ")
+		if sepIdx == -1 {
+			return nil, fmt.Errorf("invalid mountinfo format")
+		}
+		if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" {
+			continue
+		}
+		fields := strings.Split(txt, " ")
+		m := Mount{
+			Mountpoint: fields[4],
+			Root:       fields[3],
+		}
+		for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+			seen, known := ss[opt]
+			if !known || (!all && seen) {
+				continue
+			}
+			ss[opt] = true
+			opt = strings.TrimPrefix(opt, CgroupNamePrefix)
+			m.Subsystems = append(m.Subsystems, opt)
+			numFound++
+		}
+		if len(m.Subsystems) > 0 || all {
+			res = append(res, m)
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+func getCgroupMountsV1(all bool) ([]Mount, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	allSubsystems, err := ParseCgroupFile("/proc/self/cgroup")
+	if err != nil {
+		return nil, err
+	}
+
+	allMap := make(map[string]bool)
+	for s := range allSubsystems {
+		allMap[s] = false
+	}
+	return getCgroupMountsHelper(allMap, f, all)
+}
+
+// GetOwnCgroup returns the relative path to the cgroup docker is running in.
+func GetOwnCgroup(subsystem string) (string, error) {
+	if IsCgroup2UnifiedMode() {
+		return "", errUnified
+	}
+	cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+	if err != nil {
+		return "", err
+	}
+
+	return getControllerPath(subsystem, cgroups)
+}
+
+func GetOwnCgroupPath(subsystem string) (string, error) {
+	cgroup, err := GetOwnCgroup(subsystem)
+	if err != nil {
+		return "", err
+	}
+
+	return getCgroupPathHelper(subsystem, cgroup)
+}
+
+func GetInitCgroup(subsystem string) (string, error) {
+	if IsCgroup2UnifiedMode() {
+		return "", errUnified
+	}
+	cgroups, err := ParseCgroupFile("/proc/1/cgroup")
+	if err != nil {
+		return "", err
+	}
+
+	return getControllerPath(subsystem, cgroups)
+}
+
+func GetInitCgroupPath(subsystem string) (string, error) {
+	cgroup, err := GetInitCgroup(subsystem)
+	if err != nil {
+		return "", err
+	}
+
+	return getCgroupPathHelper(subsystem, cgroup)
+}
+
+func getCgroupPathHelper(subsystem, cgroup string) (string, error) {
+	mnt, root, err := FindCgroupMountpointAndRoot("", subsystem)
+	if err != nil {
+		return "", err
+	}
+
+	// This is needed for nested containers, because in /proc/self/cgroup we
+	// see paths from host, which don't exist in container.
+	relCgroup, err := filepath.Rel(root, cgroup)
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(mnt, relCgroup), nil
+}
+
+func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
+	if IsCgroup2UnifiedMode() {
+		return "", errUnified
+	}
+
+	if p, ok := cgroups[subsystem]; ok {
+		return p, nil
+	}
+
+	if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok {
+		return p, nil
+	}
+
+	return "", NewNotFoundError(subsystem)
+}

+ 17 - 11
vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go

@@ -1,5 +1,9 @@
 package configs
 package configs
 
 
+import (
+	systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+)
+
 type FreezerState string
 type FreezerState string
 
 
 const (
 const (
@@ -29,18 +33,16 @@ type Cgroup struct {
 
 
 	// Resources contains various cgroups settings to apply
 	// Resources contains various cgroups settings to apply
 	*Resources
 	*Resources
+
+	// SystemdProps are any additional properties for systemd,
+	// derived from org.systemd.property.xxx annotations.
+	// Ignored unless systemd is used for managing cgroups.
+	SystemdProps []systemdDbus.Property `json:"-"`
 }
 }
 
 
 type Resources struct {
 type Resources struct {
-	// If this is true allow access to any kind of device within the container.  If false, allow access only to devices explicitly listed in the allowed_devices list.
-	// Deprecated
-	AllowAllDevices *bool `json:"allow_all_devices,omitempty"`
-	// Deprecated
-	AllowedDevices []*Device `json:"allowed_devices,omitempty"`
-	// Deprecated
-	DeniedDevices []*Device `json:"denied_devices,omitempty"`
-
-	Devices []*Device `json:"devices"`
+	// Devices is the set of access rules for devices in the container.
+	Devices []*DeviceRule `json:"devices"`
 
 
 	// Memory limit (in bytes)
 	// Memory limit (in bytes)
 	Memory int64 `json:"memory"`
 	Memory int64 `json:"memory"`
@@ -125,6 +127,10 @@ type Resources struct {
 	// CpuWeight sets a proportional bandwidth limit.
 	// CpuWeight sets a proportional bandwidth limit.
 	CpuWeight uint64 `json:"cpu_weight"`
 	CpuWeight uint64 `json:"cpu_weight"`
 
 
-	// CpuMax sets she maximum bandwidth limit (format: max period).
-	CpuMax string `json:"cpu_max"`
+	// SkipDevices allows to skip configuring device permissions.
+	// Used by e.g. kubelet while creating a parent cgroup (kubepods)
+	// common for many containers.
+	//
+	// NOTE it is impossible to start a container which has this flag set.
+	SkipDevices bool `json:"skip_devices"`
 }
 }

+ 70 - 26
vendor/github.com/opencontainers/runc/libcontainer/configs/config.go

@@ -8,7 +8,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-spec/specs-go"
-
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
 )
 )
 
 
@@ -70,9 +70,10 @@ type Arg struct {
 
 
 // Syscall is a rule to match a syscall in Seccomp
 // Syscall is a rule to match a syscall in Seccomp
 type Syscall struct {
 type Syscall struct {
-	Name   string `json:"name"`
-	Action Action `json:"action"`
-	Args   []*Arg `json:"args"`
+	Name     string `json:"name"`
+	Action   Action `json:"action"`
+	ErrnoRet *uint  `json:"errnoRet"`
+	Args     []*Arg `json:"args"`
 }
 }
 
 
 // TODO Windows. Many of these fields should be factored out into those parts
 // TODO Windows. Many of these fields should be factored out into those parts
@@ -175,7 +176,7 @@ type Config struct {
 
 
 	// Hooks are a collection of actions to perform at various container lifecycle events.
 	// Hooks are a collection of actions to perform at various container lifecycle events.
 	// CommandHooks are serialized to JSON, but other hooks are not.
 	// CommandHooks are serialized to JSON, but other hooks are not.
-	Hooks *Hooks
+	Hooks Hooks
 
 
 	// Version is the version of opencontainer specification that is supported.
 	// Version is the version of opencontainer specification that is supported.
 	Version string `json:"version"`
 	Version string `json:"version"`
@@ -202,17 +203,50 @@ type Config struct {
 	RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
 	RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
 }
 }
 
 
-type Hooks struct {
+type HookName string
+type HookList []Hook
+type Hooks map[HookName]HookList
+
+const (
 	// Prestart commands are executed after the container namespaces are created,
 	// Prestart commands are executed after the container namespaces are created,
 	// but before the user supplied command is executed from init.
 	// but before the user supplied command is executed from init.
-	Prestart []Hook
+	// Note: This hook is now deprecated
+	// Prestart commands are called in the Runtime namespace.
+	Prestart HookName = "prestart"
+
+	// CreateRuntime commands MUST be called as part of the create operation after
+	// the runtime environment has been created but before the pivot_root has been executed.
+	// CreateRuntime is called immediately after the deprecated Prestart hook.
+	// CreateRuntime commands are called in the Runtime Namespace.
+	CreateRuntime = "createRuntime"
+
+	// CreateContainer commands MUST be called as part of the create operation after
+	// the runtime environment has been created but before the pivot_root has been executed.
+	// CreateContainer commands are called in the Container namespace.
+	CreateContainer = "createContainer"
+
+	// StartContainer commands MUST be called as part of the start operation and before
+	// the container process is started.
+	// StartContainer commands are called in the Container namespace.
+	StartContainer = "startContainer"
 
 
 	// Poststart commands are executed after the container init process starts.
 	// Poststart commands are executed after the container init process starts.
-	Poststart []Hook
+	// Poststart commands are called in the Runtime Namespace.
+	Poststart = "poststart"
 
 
 	// Poststop commands are executed after the container init process exits.
 	// Poststop commands are executed after the container init process exits.
-	Poststop []Hook
-}
+	// Poststop commands are called in the Runtime Namespace.
+	Poststop = "poststop"
+)
+
+// TODO move this to runtime-spec
+// See: https://github.com/opencontainers/runtime-spec/pull/1046
+const (
+	Creating = "creating"
+	Created  = "created"
+	Running  = "running"
+	Stopped  = "stopped"
+)
 
 
 type Capabilities struct {
 type Capabilities struct {
 	// Bounding is the set of capabilities checked by the kernel.
 	// Bounding is the set of capabilities checked by the kernel.
@@ -227,32 +261,39 @@ type Capabilities struct {
 	Ambient []string
 	Ambient []string
 }
 }
 
 
-func (hooks *Hooks) UnmarshalJSON(b []byte) error {
-	var state struct {
-		Prestart  []CommandHook
-		Poststart []CommandHook
-		Poststop  []CommandHook
+func (hooks HookList) RunHooks(state *specs.State) error {
+	for i, h := range hooks {
+		if err := h.Run(state); err != nil {
+			return errors.Wrapf(err, "Running hook #%d:", i)
+		}
 	}
 	}
 
 
+	return nil
+}
+
+func (hooks *Hooks) UnmarshalJSON(b []byte) error {
+	var state map[HookName][]CommandHook
+
 	if err := json.Unmarshal(b, &state); err != nil {
 	if err := json.Unmarshal(b, &state); err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	deserialize := func(shooks []CommandHook) (hooks []Hook) {
-		for _, shook := range shooks {
-			hooks = append(hooks, shook)
+	*hooks = Hooks{}
+	for n, commandHooks := range state {
+		if len(commandHooks) == 0 {
+			continue
 		}
 		}
 
 
-		return hooks
+		(*hooks)[n] = HookList{}
+		for _, h := range commandHooks {
+			(*hooks)[n] = append((*hooks)[n], h)
+		}
 	}
 	}
 
 
-	hooks.Prestart = deserialize(state.Prestart)
-	hooks.Poststart = deserialize(state.Poststart)
-	hooks.Poststop = deserialize(state.Poststop)
 	return nil
 	return nil
 }
 }
 
 
-func (hooks Hooks) MarshalJSON() ([]byte, error) {
+func (hooks *Hooks) MarshalJSON() ([]byte, error) {
 	serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
 	serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
 		for _, hook := range hooks {
 		for _, hook := range hooks {
 			switch chook := hook.(type) {
 			switch chook := hook.(type) {
@@ -267,9 +308,12 @@ func (hooks Hooks) MarshalJSON() ([]byte, error) {
 	}
 	}
 
 
 	return json.Marshal(map[string]interface{}{
 	return json.Marshal(map[string]interface{}{
-		"prestart":  serialize(hooks.Prestart),
-		"poststart": serialize(hooks.Poststart),
-		"poststop":  serialize(hooks.Poststop),
+		"prestart":        serialize((*hooks)[Prestart]),
+		"createRuntime":   serialize((*hooks)[CreateRuntime]),
+		"createContainer": serialize((*hooks)[CreateContainer]),
+		"startContainer":  serialize((*hooks)[StartContainer]),
+		"poststart":       serialize((*hooks)[Poststart]),
+		"poststop":        serialize((*hooks)[Poststop]),
 	})
 	})
 }
 }
 
 

+ 137 - 24
vendor/github.com/opencontainers/runc/libcontainer/configs/device.go

@@ -3,30 +3,19 @@ package configs
 import (
 import (
 	"fmt"
 	"fmt"
 	"os"
 	"os"
+	"strconv"
 )
 )
 
 
 const (
 const (
 	Wildcard = -1
 	Wildcard = -1
 )
 )
 
 
-// TODO Windows: This can be factored out in the future
-
 type Device struct {
 type Device struct {
-	// Device type, block, char, etc.
-	Type rune `json:"type"`
+	DeviceRule
 
 
 	// Path to the device.
 	// Path to the device.
 	Path string `json:"path"`
 	Path string `json:"path"`
 
 
-	// Major is the device's major number.
-	Major int64 `json:"major"`
-
-	// Minor is the device's minor number.
-	Minor int64 `json:"minor"`
-
-	// Cgroup permissions format, rwm.
-	Permissions string `json:"permissions"`
-
 	// FileMode permission bits for the device.
 	// FileMode permission bits for the device.
 	FileMode os.FileMode `json:"file_mode"`
 	FileMode os.FileMode `json:"file_mode"`
 
 
@@ -35,23 +24,147 @@ type Device struct {
 
 
 	// Gid of the device.
 	// Gid of the device.
 	Gid uint32 `json:"gid"`
 	Gid uint32 `json:"gid"`
+}
 
 
-	// Write the file to the allowed list
-	Allow bool `json:"allow"`
+// DevicePermissions is a cgroupv1-style string to represent device access. It
+// has to be a string for backward compatibility reasons, hence why it has
+// methods to do set operations.
+type DevicePermissions string
+
+const (
+	deviceRead uint = (1 << iota)
+	deviceWrite
+	deviceMknod
+)
+
+func (p DevicePermissions) toSet() uint {
+	var set uint
+	for _, perm := range p {
+		switch perm {
+		case 'r':
+			set |= deviceRead
+		case 'w':
+			set |= deviceWrite
+		case 'm':
+			set |= deviceMknod
+		}
+	}
+	return set
+}
+
+func fromSet(set uint) DevicePermissions {
+	var perm string
+	if set&deviceRead == deviceRead {
+		perm += "r"
+	}
+	if set&deviceWrite == deviceWrite {
+		perm += "w"
+	}
+	if set&deviceMknod == deviceMknod {
+		perm += "m"
+	}
+	return DevicePermissions(perm)
+}
+
+// Union returns the union of the two sets of DevicePermissions.
+func (p DevicePermissions) Union(o DevicePermissions) DevicePermissions {
+	lhs := p.toSet()
+	rhs := o.toSet()
+	return fromSet(lhs | rhs)
+}
+
+// Difference returns the set difference of the two sets of DevicePermissions.
+// In set notation, A.Difference(B) gives you A\B.
+func (p DevicePermissions) Difference(o DevicePermissions) DevicePermissions {
+	lhs := p.toSet()
+	rhs := o.toSet()
+	return fromSet(lhs &^ rhs)
+}
+
+// Intersection computes the intersection of the two sets of DevicePermissions.
+func (p DevicePermissions) Intersection(o DevicePermissions) DevicePermissions {
+	lhs := p.toSet()
+	rhs := o.toSet()
+	return fromSet(lhs & rhs)
 }
 }
 
 
-func (d *Device) CgroupString() string {
-	return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
+// IsEmpty returns whether the set of permissions in a DevicePermissions is
+// empty.
+func (p DevicePermissions) IsEmpty() bool {
+	return p == DevicePermissions("")
 }
 }
 
 
-func (d *Device) Mkdev() int {
-	return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
+// IsValid returns whether the set of permissions is a subset of valid
+// permissions (namely, {r,w,m}).
+func (p DevicePermissions) IsValid() bool {
+	return p == fromSet(p.toSet())
 }
 }
 
 
-// deviceNumberString converts the device number to a string return result.
-func deviceNumberString(number int64) string {
-	if number == Wildcard {
-		return "*"
+type DeviceType rune
+
+const (
+	WildcardDevice DeviceType = 'a'
+	BlockDevice    DeviceType = 'b'
+	CharDevice     DeviceType = 'c' // or 'u'
+	FifoDevice     DeviceType = 'p'
+)
+
+func (t DeviceType) IsValid() bool {
+	switch t {
+	case WildcardDevice, BlockDevice, CharDevice, FifoDevice:
+		return true
+	default:
+		return false
+	}
+}
+
+func (t DeviceType) CanMknod() bool {
+	switch t {
+	case BlockDevice, CharDevice, FifoDevice:
+		return true
+	default:
+		return false
+	}
+}
+
+func (t DeviceType) CanCgroup() bool {
+	switch t {
+	case WildcardDevice, BlockDevice, CharDevice:
+		return true
+	default:
+		return false
+	}
+}
+
+type DeviceRule struct {
+	// Type of device ('c' for char, 'b' for block). If set to 'a', this rule
+	// acts as a wildcard and all fields other than Allow are ignored.
+	Type DeviceType `json:"type"`
+
+	// Major is the device's major number.
+	Major int64 `json:"major"`
+
+	// Minor is the device's minor number.
+	Minor int64 `json:"minor"`
+
+	// Permissions is the set of permissions that this rule applies to (in the
+	// cgroupv1 format -- any combination of "rwm").
+	Permissions DevicePermissions `json:"permissions"`
+
+	// Allow specifies whether this rule is allowed.
+	Allow bool `json:"allow"`
+}
+
+func (d *DeviceRule) CgroupString() string {
+	var (
+		major = strconv.FormatInt(d.Major, 10)
+		minor = strconv.FormatInt(d.Minor, 10)
+	)
+	if d.Major == Wildcard {
+		major = "*"
+	}
+	if d.Minor == Wildcard {
+		minor = "*"
 	}
 	}
-	return fmt.Sprint(number)
+	return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions)
 }
 }

+ 0 - 111
vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go

@@ -1,111 +0,0 @@
-// +build linux
-
-package configs
-
-var (
-	// DefaultSimpleDevices are devices that are to be both allowed and created.
-	DefaultSimpleDevices = []*Device{
-		// /dev/null and zero
-		{
-			Path:        "/dev/null",
-			Type:        'c',
-			Major:       1,
-			Minor:       3,
-			Permissions: "rwm",
-			FileMode:    0666,
-		},
-		{
-			Path:        "/dev/zero",
-			Type:        'c',
-			Major:       1,
-			Minor:       5,
-			Permissions: "rwm",
-			FileMode:    0666,
-		},
-
-		{
-			Path:        "/dev/full",
-			Type:        'c',
-			Major:       1,
-			Minor:       7,
-			Permissions: "rwm",
-			FileMode:    0666,
-		},
-
-		// consoles and ttys
-		{
-			Path:        "/dev/tty",
-			Type:        'c',
-			Major:       5,
-			Minor:       0,
-			Permissions: "rwm",
-			FileMode:    0666,
-		},
-
-		// /dev/urandom,/dev/random
-		{
-			Path:        "/dev/urandom",
-			Type:        'c',
-			Major:       1,
-			Minor:       9,
-			Permissions: "rwm",
-			FileMode:    0666,
-		},
-		{
-			Path:        "/dev/random",
-			Type:        'c',
-			Major:       1,
-			Minor:       8,
-			Permissions: "rwm",
-			FileMode:    0666,
-		},
-	}
-	DefaultAllowedDevices = append([]*Device{
-		// allow mknod for any device
-		{
-			Type:        'c',
-			Major:       Wildcard,
-			Minor:       Wildcard,
-			Permissions: "m",
-		},
-		{
-			Type:        'b',
-			Major:       Wildcard,
-			Minor:       Wildcard,
-			Permissions: "m",
-		},
-
-		{
-			Path:        "/dev/console",
-			Type:        'c',
-			Major:       5,
-			Minor:       1,
-			Permissions: "rwm",
-		},
-		// /dev/pts/ - pts namespaces are "coming soon"
-		{
-			Path:        "",
-			Type:        'c',
-			Major:       136,
-			Minor:       Wildcard,
-			Permissions: "rwm",
-		},
-		{
-			Path:        "",
-			Type:        'c',
-			Major:       5,
-			Minor:       2,
-			Permissions: "rwm",
-		},
-
-		// tuntap
-		{
-			Path:        "",
-			Type:        'c',
-			Major:       10,
-			Minor:       200,
-			Permissions: "rwm",
-		},
-	}, DefaultSimpleDevices...)
-	DefaultAutoCreatedDevices = append([]*Device{}, DefaultSimpleDevices...)
-)

+ 16 - 0
vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go

@@ -0,0 +1,16 @@
+// +build !windows
+
+package configs
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+func (d *DeviceRule) Mkdev() (uint64, error) {
+	if d.Major == Wildcard || d.Minor == Wildcard {
+		return 0, errors.New("cannot mkdev() device with wildcards")
+	}
+	return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil
+}

+ 5 - 0
vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go

@@ -0,0 +1,5 @@
+package configs
+
+func (d *DeviceRule) Mkdev() (uint64, error) {
+	return 0, nil
+}

+ 23 - 20
vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go

@@ -31,33 +31,33 @@ func DeviceFromPath(path, permissions string) (*configs.Device, error) {
 	}
 	}
 
 
 	var (
 	var (
+		devType   configs.DeviceType
+		mode      = stat.Mode
 		devNumber = uint64(stat.Rdev)
 		devNumber = uint64(stat.Rdev)
 		major     = unix.Major(devNumber)
 		major     = unix.Major(devNumber)
 		minor     = unix.Minor(devNumber)
 		minor     = unix.Minor(devNumber)
 	)
 	)
-	if major == 0 {
+	switch mode & unix.S_IFMT {
+	case unix.S_IFBLK:
+		devType = configs.BlockDevice
+	case unix.S_IFCHR:
+		devType = configs.CharDevice
+	case unix.S_IFIFO:
+		devType = configs.FifoDevice
+	default:
 		return nil, ErrNotADevice
 		return nil, ErrNotADevice
 	}
 	}
-
-	var (
-		devType rune
-		mode    = stat.Mode
-	)
-	switch {
-	case mode&unix.S_IFBLK == unix.S_IFBLK:
-		devType = 'b'
-	case mode&unix.S_IFCHR == unix.S_IFCHR:
-		devType = 'c'
-	}
 	return &configs.Device{
 	return &configs.Device{
-		Type:        devType,
-		Path:        path,
-		Major:       int64(major),
-		Minor:       int64(minor),
-		Permissions: permissions,
-		FileMode:    os.FileMode(mode),
-		Uid:         stat.Uid,
-		Gid:         stat.Gid,
+		DeviceRule: configs.DeviceRule{
+			Type:        devType,
+			Major:       int64(major),
+			Minor:       int64(minor),
+			Permissions: configs.DevicePermissions(permissions),
+		},
+		Path:     path,
+		FileMode: os.FileMode(mode),
+		Uid:      stat.Uid,
+		Gid:      stat.Gid,
 	}, nil
 	}, nil
 }
 }
 
 
@@ -104,6 +104,9 @@ func GetDevices(path string) ([]*configs.Device, error) {
 			}
 			}
 			return nil, err
 			return nil, err
 		}
 		}
+		if device.Type == configs.FifoDevice {
+			continue
+		}
 		out = append(out, device)
 		out = append(out, device)
 	}
 	}
 	return out, nil
 	return out, nil

+ 27 - 1
vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c

@@ -1,7 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
 /*
 /*
  * Copyright (C) 2019 Aleksa Sarai <cyphar@cyphar.com>
  * Copyright (C) 2019 Aleksa Sarai <cyphar@cyphar.com>
  * Copyright (C) 2019 SUSE LLC
  * Copyright (C) 2019 SUSE LLC
  *
  *
+ * This work is dual licensed under the following licenses. You may use,
+ * redistribute, and/or modify the work under the conditions of either (or
+ * both) licenses.
+ *
+ * === Apache-2.0 ===
+ *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  * You may obtain a copy of the License at
@@ -13,6 +20,23 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License.
  * limitations under the License.
+ *
+ * === LGPL-2.1-or-later ===
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <https://www.gnu.org/licenses/>.
+ *
  */
  */
 
 
 #define _GNU_SOURCE
 #define _GNU_SOURCE
@@ -95,8 +119,10 @@ static int is_self_cloned(void)
 	struct statfs fsbuf = {};
 	struct statfs fsbuf = {};
 
 
 	fd = open("/proc/self/exe", O_RDONLY|O_CLOEXEC);
 	fd = open("/proc/self/exe", O_RDONLY|O_CLOEXEC);
-	if (fd < 0)
+	if (fd < 0) {
+		fprintf(stderr, "you have no read access to runc binary file\n");
 		return -ENOTRECOVERABLE;
 		return -ENOTRECOVERABLE;
+	}
 
 
 	/*
 	/*
 	 * Is the binary a fully-sealed memfd? We don't need CLONED_BINARY_ENV for
 	 * Is the binary a fully-sealed memfd? We don't need CLONED_BINARY_ENV for

+ 6 - 6
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c

@@ -714,12 +714,12 @@ void nsexec(void)
 			 * ready, so we can receive all possible error codes
 			 * ready, so we can receive all possible error codes
 			 * generated by children.
 			 * generated by children.
 			 */
 			 */
+			syncfd = sync_child_pipe[1];
+			close(sync_child_pipe[0]);
+
 			while (!ready) {
 			while (!ready) {
 				enum sync_t s;
 				enum sync_t s;
 
 
-				syncfd = sync_child_pipe[1];
-				close(sync_child_pipe[0]);
-
 				if (read(syncfd, &s, sizeof(s)) != sizeof(s))
 				if (read(syncfd, &s, sizeof(s)) != sizeof(s))
 					bail("failed to sync with child: next state");
 					bail("failed to sync with child: next state");
 
 
@@ -789,13 +789,13 @@ void nsexec(void)
 
 
 			/* Now sync with grandchild. */
 			/* Now sync with grandchild. */
 
 
+			syncfd = sync_grandchild_pipe[1];
+			close(sync_grandchild_pipe[0]);
+
 			ready = false;
 			ready = false;
 			while (!ready) {
 			while (!ready) {
 				enum sync_t s;
 				enum sync_t s;
 
 
-				syncfd = sync_grandchild_pipe[1];
-				close(sync_grandchild_pipe[0]);
-
 				s = SYNC_GRANDCHILD;
 				s = SYNC_GRANDCHILD;
 				if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
 				if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
 					kill(child, SIGKILL);
 					kill(child, SIGKILL);

+ 13 - 17
vendor/github.com/opencontainers/runc/libcontainer/user/user.go

@@ -60,7 +60,7 @@ type Group struct {
 
 
 // groupFromOS converts an os/user.(*Group) to local Group
 // groupFromOS converts an os/user.(*Group) to local Group
 //
 //
-// (This does not include Pass, Shell or Gecos)
+// (This does not include Pass or List)
 func groupFromOS(g *user.Group) (Group, error) {
 func groupFromOS(g *user.Group) (Group, error) {
 	newGroup := Group{
 	newGroup := Group{
 		Name: g.Name,
 		Name: g.Name,
@@ -162,10 +162,6 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
 	)
 	)
 
 
 	for s.Scan() {
 	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return nil, err
-		}
-
 		line := strings.TrimSpace(s.Text())
 		line := strings.TrimSpace(s.Text())
 		if line == "" {
 		if line == "" {
 			continue
 			continue
@@ -183,6 +179,9 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
 			out = append(out, p)
 			out = append(out, p)
 		}
 		}
 	}
 	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
 
 
 	return out, nil
 	return out, nil
 }
 }
@@ -221,10 +220,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
 	)
 	)
 
 
 	for s.Scan() {
 	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return nil, err
-		}
-
 		text := s.Text()
 		text := s.Text()
 		if text == "" {
 		if text == "" {
 			continue
 			continue
@@ -242,6 +237,9 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
 			out = append(out, p)
 			out = append(out, p)
 		}
 		}
 	}
 	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
 
 
 	return out, nil
 	return out, nil
 }
 }
@@ -532,10 +530,6 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
 	)
 	)
 
 
 	for s.Scan() {
 	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return nil, err
-		}
-
 		line := strings.TrimSpace(s.Text())
 		line := strings.TrimSpace(s.Text())
 		if line == "" {
 		if line == "" {
 			continue
 			continue
@@ -549,6 +543,9 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
 			out = append(out, p)
 			out = append(out, p)
 		}
 		}
 	}
 	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
 
 
 	return out, nil
 	return out, nil
 }
 }
@@ -586,10 +583,6 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
 	)
 	)
 
 
 	for s.Scan() {
 	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return nil, err
-		}
-
 		line := strings.TrimSpace(s.Text())
 		line := strings.TrimSpace(s.Text())
 		if line == "" {
 		if line == "" {
 			continue
 			continue
@@ -603,6 +596,9 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
 			out = append(out, p)
 			out = append(out, p)
 		}
 		}
 	}
 	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
 
 
 	return out, nil
 	return out, nil
 }
 }

+ 0 - 31
vendor/github.com/opencontainers/runc/vendor.conf

@@ -1,31 +0,0 @@
-# OCI runtime-spec. When updating this, make sure you use a version tag rather
-# than a commit ID so it's much more obvious what version of the spec we are
-# using.
-github.com/opencontainers/runtime-spec  29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
-
-# Core libcontainer functionality.
-github.com/checkpoint-restore/go-criu   17b0214f6c48980c45dc47ecb0cfd6d9e02df723 # v3.11
-github.com/mrunalp/fileutils            7d4729fb36185a7c1719923406c9d40e54fb93c7
-github.com/opencontainers/selinux       5215b1806f52b1fcc2070a8826c542c9d33cd3cf # v1.3.0 (+ CVE-2019-16884)
-github.com/seccomp/libseccomp-golang    689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
-github.com/sirupsen/logrus              8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1
-github.com/syndtr/gocapability          d98352740cb2c55f81556b63d4a1ec64c5a319c2
-github.com/vishvananda/netlink          1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270
-
-# systemd integration.
-github.com/coreos/go-systemd            95778dfbb74eb7e4dbaf43bf7d71809650ef8076 # v19
-github.com/godbus/dbus                  2ff6f7ffd60f0f2410b3105864bdd12c7894f844 # v5.0.1
-github.com/golang/protobuf              925541529c1fa6821df4e44ce2723319eb2be768 # v1.0.0
-
-# Command-line interface.
-github.com/cyphar/filepath-securejoin   a261ee33d7a517f054effbf451841abaafe3e0fd # v0.2.2
-github.com/docker/go-units              47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
-github.com/urfave/cli                   cfb38830724cc34fedffe9a2a29fb54fa9169cd1 # v1.20.0
-golang.org/x/sys                        9eafafc0a87e0fd0aeeba439a4573537970c44c7 https://github.com/golang/sys
-
-# console dependencies
-github.com/containerd/console           0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
-github.com/pkg/errors                   ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1
-
-# ebpf dependencies
-github.com/cilium/ebpf                  95b36a581eed7b0f127306ed1d16cc0ddc06cf67

+ 4 - 3
vendor/github.com/opencontainers/runtime-spec/specs-go/config.go

@@ -667,9 +667,10 @@ type LinuxSeccompArg struct {
 
 
 // LinuxSyscall is used to match a syscall in Seccomp
 // LinuxSyscall is used to match a syscall in Seccomp
 type LinuxSyscall struct {
 type LinuxSyscall struct {
-	Names  []string           `json:"names"`
-	Action LinuxSeccompAction `json:"action"`
-	Args   []LinuxSeccompArg  `json:"args,omitempty"`
+	Names    []string           `json:"names"`
+	Action   LinuxSeccompAction `json:"action"`
+	ErrnoRet *uint              `json:"errnoRet,omitempty"`
+	Args     []LinuxSeccompArg  `json:"args,omitempty"`
 }
 }
 
 
 // LinuxIntelRdt has container runtime resource constraints for Intel RDT
 // LinuxIntelRdt has container runtime resource constraints for Intel RDT

+ 1 - 1
vendor/github.com/opencontainers/runtime-spec/specs-go/version.go

@@ -11,7 +11,7 @@ const (
 	VersionPatch = 2
 	VersionPatch = 2
 
 
 	// VersionDev indicates development branch. Releases will be empty string.
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = ""
+	VersionDev = "-dev"
 )
 )
 
 
 // Version is the specification version that the package types support.
 // Version is the specification version that the package types support.

+ 10 - 6
vendor/github.com/prometheus/client_golang/go.mod

@@ -3,12 +3,16 @@ module github.com/prometheus/client_golang
 require (
 require (
 	github.com/beorn7/perks v1.0.1
 	github.com/beorn7/perks v1.0.1
 	github.com/cespare/xxhash/v2 v2.1.1
 	github.com/cespare/xxhash/v2 v2.1.1
-	github.com/golang/protobuf v1.3.2
-	github.com/json-iterator/go v1.1.8
-	github.com/prometheus/client_model v0.1.0
-	github.com/prometheus/common v0.7.0
-	github.com/prometheus/procfs v0.0.8
-	golang.org/x/sys v0.0.0-20191220142924-d4481acd189f
+	github.com/golang/protobuf v1.4.0
+	github.com/json-iterator/go v1.1.9
+	github.com/kr/pretty v0.1.0 // indirect
+	github.com/prometheus/client_model v0.2.0
+	github.com/prometheus/common v0.9.1
+	github.com/prometheus/procfs v0.0.11
+	github.com/stretchr/testify v1.4.0 // indirect
+	golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
+	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
+	gopkg.in/yaml.v2 v2.2.5 // indirect
 )
 )
 
 
 go 1.11
 go 1.11

+ 47 - 3
vendor/github.com/prometheus/client_golang/prometheus/counter.go

@@ -17,6 +17,7 @@ import (
 	"errors"
 	"errors"
 	"math"
 	"math"
 	"sync/atomic"
 	"sync/atomic"
+	"time"
 
 
 	dto "github.com/prometheus/client_model/go"
 	dto "github.com/prometheus/client_model/go"
 )
 )
@@ -42,11 +43,27 @@ type Counter interface {
 	Add(float64)
 	Add(float64)
 }
 }
 
 
+// ExemplarAdder is implemented by Counters that offer the option of adding a
+// value to the Counter together with an exemplar. Its AddWithExemplar method
+// works like the Add method of the Counter interface but also replaces the
+// currently saved exemplar (if any) with a new one, created from the provided
+// value, the current time as timestamp, and the provided labels. Empty Labels
+// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
+// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
+// of the provided labels are invalid, or if the provided labels contain more
+// than 64 runes in total.
+type ExemplarAdder interface {
+	AddWithExemplar(value float64, exemplar Labels)
+}
+
 // CounterOpts is an alias for Opts. See there for doc comments.
 // CounterOpts is an alias for Opts. See there for doc comments.
 type CounterOpts Opts
 type CounterOpts Opts
 
 
 // NewCounter creates a new Counter based on the provided CounterOpts.
 // NewCounter creates a new Counter based on the provided CounterOpts.
 //
 //
+// The returned implementation also implements ExemplarAdder. It is safe to
+// perform the corresponding type assertion.
+//
 // The returned implementation tracks the counter value in two separate
 // The returned implementation tracks the counter value in two separate
 // variables, a float64 and a uint64. The latter is used to track calls of the
 // variables, a float64 and a uint64. The latter is used to track calls of the
 // Inc method and calls of the Add method with a value that can be represented
 // Inc method and calls of the Add method with a value that can be represented
@@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter {
 		nil,
 		nil,
 		opts.ConstLabels,
 		opts.ConstLabels,
 	)
 	)
-	result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
 	result.init(result) // Init self-collection.
 	result.init(result) // Init self-collection.
 	return result
 	return result
 }
 }
@@ -78,6 +95,9 @@ type counter struct {
 	desc *Desc
 	desc *Desc
 
 
 	labelPairs []*dto.LabelPair
 	labelPairs []*dto.LabelPair
+	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
+
+	now func() time.Time // To mock out time.Now() for testing.
 }
 }
 
 
 func (c *counter) Desc() *Desc {
 func (c *counter) Desc() *Desc {
@@ -88,6 +108,7 @@ func (c *counter) Add(v float64) {
 	if v < 0 {
 	if v < 0 {
 		panic(errors.New("counter cannot decrease in value"))
 		panic(errors.New("counter cannot decrease in value"))
 	}
 	}
+
 	ival := uint64(v)
 	ival := uint64(v)
 	if float64(ival) == v {
 	if float64(ival) == v {
 		atomic.AddUint64(&c.valInt, ival)
 		atomic.AddUint64(&c.valInt, ival)
@@ -103,6 +124,11 @@ func (c *counter) Add(v float64) {
 	}
 	}
 }
 }
 
 
+func (c *counter) AddWithExemplar(v float64, e Labels) {
+	c.Add(v)
+	c.updateExemplar(v, e)
+}
+
 func (c *counter) Inc() {
 func (c *counter) Inc() {
 	atomic.AddUint64(&c.valInt, 1)
 	atomic.AddUint64(&c.valInt, 1)
 }
 }
@@ -112,7 +138,23 @@ func (c *counter) Write(out *dto.Metric) error {
 	ival := atomic.LoadUint64(&c.valInt)
 	ival := atomic.LoadUint64(&c.valInt)
 	val := fval + float64(ival)
 	val := fval + float64(ival)
 
 
-	return populateMetric(CounterValue, val, c.labelPairs, out)
+	var exemplar *dto.Exemplar
+	if e := c.exemplar.Load(); e != nil {
+		exemplar = e.(*dto.Exemplar)
+	}
+
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+}
+
+func (c *counter) updateExemplar(v float64, l Labels) {
+	if l == nil {
+		return
+	}
+	e, err := newExemplar(v, c.now(), l)
+	if err != nil {
+		panic(err)
+	}
+	c.exemplar.Store(e)
 }
 }
 
 
 // CounterVec is a Collector that bundles a set of Counters that all share the
 // CounterVec is a Collector that bundles a set of Counters that all share the
@@ -138,7 +180,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 			if len(lvs) != len(desc.variableLabels) {
 			if len(lvs) != len(desc.variableLabels) {
 				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
 				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
 			}
 			}
-			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
 			result.init(result) // Init self-collection.
 			result.init(result) // Init self-collection.
 			return result
 			return result
 		}),
 		}),
@@ -267,6 +309,8 @@ type CounterFunc interface {
 // provided function must be concurrency-safe. The function should also honor
 // provided function must be concurrency-safe. The function should also honor
 // the contract for a Counter (values only go up, not down), but compliance will
 // the contract for a Counter (values only go up, not down), but compliance will
 // not be checked.
 // not be checked.
+//
+// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
 func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
 func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
 	return newValueFunc(NewDesc(
 	return newValueFunc(NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),

+ 18 - 19
vendor/github.com/prometheus/client_golang/prometheus/doc.go

@@ -84,25 +84,21 @@
 // of those four metric types can be found in the Prometheus docs:
 // of those four metric types can be found in the Prometheus docs:
 // https://prometheus.io/docs/concepts/metric_types/
 // https://prometheus.io/docs/concepts/metric_types/
 //
 //
-// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
-// Prometheus server not to assume anything about its type.
-//
-// In addition to the fundamental metric types Gauge, Counter, Summary,
-// Histogram, and Untyped, a very important part of the Prometheus data model is
-// the partitioning of samples along dimensions called labels, which results in
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
 // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
 // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// HistogramVec, and UntypedVec.
+// and HistogramVec.
 //
 //
 // While only the fundamental metric types implement the Metric interface, both
 // While only the fundamental metric types implement the Metric interface, both
 // the metrics and their vector versions implement the Collector interface. A
 // the metrics and their vector versions implement the Collector interface. A
 // Collector manages the collection of a number of Metrics, but for convenience,
 // Collector manages the collection of a number of Metrics, but for convenience,
-// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
-// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
-// SummaryVec, HistogramVec, and UntypedVec are not.
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
+// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec are not.
 //
 //
 // To create instances of Metrics and their vector versions, you need a suitable
 // To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
-// UntypedOpts.
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
 //
 //
 // Custom Collectors and constant Metrics
 // Custom Collectors and constant Metrics
 //
 //
@@ -118,13 +114,16 @@
 // existing numbers into Prometheus Metrics during collection. An own
 // existing numbers into Prometheus Metrics during collection. An own
 // implementation of the Collector interface is perfect for that. You can create
 // implementation of the Collector interface is perfect for that. You can create
 // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
 // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
-// NewConstSummary (and their respective Must… versions). That will happen in
-// the Collect method. The Describe method has to return separate Desc
-// instances, representative of the “throw-away” metrics to be created later.
-// NewDesc comes in handy to create those Desc instances. Alternatively, you
-// could return no Desc at all, which will mark the Collector “unchecked”.  No
-// checks are performed at registration time, but metric consistency will still
-// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// NewConstSummary (and their respective Must… versions). NewConstMetric is used
+// for all metric types with just a float64 as their value: Counter, Gauge, and
+// a special “type” called Untyped. Use the latter if you are not sure if the
+// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
+// happens in the Collect method. The Describe method has to return separate
+// Desc instances, representative of the “throw-away” metrics to be created
+// later.  NewDesc comes in handy to create those Desc instances. Alternatively,
+// you could return no Desc at all, which will mark the Collector “unchecked”.
+// No checks are performed at registration time, but metric consistency will
+// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
 // errors. Thus, with unchecked Collectors, the responsibility to not collect
 // errors. Thus, with unchecked Collectors, the responsibility to not collect
 // metrics that lead to inconsistencies in the total scrape result lies with the
 // metrics that lead to inconsistencies in the total scrape result lies with the
 // implementer of the Collector. While this is not a desirable state, it is
 // implementer of the Collector. While this is not a desirable state, it is

+ 1 - 1
vendor/github.com/prometheus/client_golang/prometheus/gauge.go

@@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) {
 
 
 func (g *gauge) Write(out *dto.Metric) error {
 func (g *gauge) Write(out *dto.Metric) error {
 	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
 	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
-	return populateMetric(GaugeValue, val, g.labelPairs, out)
+	return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
 }
 }
 
 
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same

+ 1 - 1
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go

@@ -73,7 +73,7 @@ func NewGoCollector() Collector {
 			nil, nil),
 			nil, nil),
 		gcDesc: NewDesc(
 		gcDesc: NewDesc(
 			"go_gc_duration_seconds",
 			"go_gc_duration_seconds",
-			"A summary of the GC invocation durations.",
+			"A summary of the pause duration of garbage collection cycles.",
 			nil, nil),
 			nil, nil),
 		goInfoDesc: NewDesc(
 		goInfoDesc: NewDesc(
 			"go_info",
 			"go_info",

+ 80 - 30
vendor/github.com/prometheus/client_golang/prometheus/histogram.go

@@ -20,6 +20,7 @@ import (
 	"sort"
 	"sort"
 	"sync"
 	"sync"
 	"sync/atomic"
 	"sync/atomic"
+	"time"
 
 
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/proto"
 
 
@@ -151,6 +152,10 @@ type HistogramOpts struct {
 
 
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
 // panics if the buckets in HistogramOpts are not in strictly increasing order.
 // panics if the buckets in HistogramOpts are not in strictly increasing order.
+//
+// The returned implementation also implements ExemplarObserver. It is safe to
+// perform the corresponding type assertion. Exemplars are tracked separately
+// for each bucket.
 func NewHistogram(opts HistogramOpts) Histogram {
 func NewHistogram(opts HistogramOpts) Histogram {
 	return newHistogram(
 	return newHistogram(
 		NewDesc(
 		NewDesc(
@@ -188,6 +193,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		upperBounds: opts.Buckets,
 		upperBounds: opts.Buckets,
 		labelPairs:  makeLabelPairs(desc, labelValues),
 		labelPairs:  makeLabelPairs(desc, labelValues),
 		counts:      [2]*histogramCounts{{}, {}},
 		counts:      [2]*histogramCounts{{}, {}},
+		now:         time.Now,
 	}
 	}
 	for i, upperBound := range h.upperBounds {
 	for i, upperBound := range h.upperBounds {
 		if i < len(h.upperBounds)-1 {
 		if i < len(h.upperBounds)-1 {
@@ -205,9 +211,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		}
 		}
 	}
 	}
 	// Finally we know the final length of h.upperBounds and can make buckets
 	// Finally we know the final length of h.upperBounds and can make buckets
-	// for both counts:
+	// for both counts as well as exemplars:
 	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
 	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
 	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
 	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 
 
 	h.init(h) // Init self-collection.
 	h.init(h) // Init self-collection.
 	return h
 	return h
@@ -254,6 +261,9 @@ type histogram struct {
 
 
 	upperBounds []float64
 	upperBounds []float64
 	labelPairs  []*dto.LabelPair
 	labelPairs  []*dto.LabelPair
+	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+
+	now func() time.Time // To mock out time.Now() for testing.
 }
 }
 
 
 func (h *histogram) Desc() *Desc {
 func (h *histogram) Desc() *Desc {
@@ -261,36 +271,13 @@ func (h *histogram) Desc() *Desc {
 }
 }
 
 
 func (h *histogram) Observe(v float64) {
 func (h *histogram) Observe(v float64) {
-	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
-	// slightly faster than the binary search. If we really care, we could
-	// switch from one search strategy to the other depending on the number
-	// of buckets.
-	//
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
-	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
-	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
-	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
-	i := sort.SearchFloat64s(h.upperBounds, v)
-
-	// We increment h.countAndHotIdx so that the counter in the lower
-	// 63 bits gets incremented. At the same time, we get the new value
-	// back, which we can use to find the currently-hot counts.
-	n := atomic.AddUint64(&h.countAndHotIdx, 1)
-	hotCounts := h.counts[n>>63]
+	h.observe(v, h.findBucket(v))
+}
 
 
-	if i < len(h.upperBounds) {
-		atomic.AddUint64(&hotCounts.buckets[i], 1)
-	}
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
-		}
-	}
-	// Increment count last as we take it as a signal that the observation
-	// is complete.
-	atomic.AddUint64(&hotCounts.count, 1)
+func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
+	i := h.findBucket(v)
+	h.observe(v, i)
+	h.updateExemplar(v, i, e)
 }
 }
 
 
 func (h *histogram) Write(out *dto.Metric) error {
 func (h *histogram) Write(out *dto.Metric) error {
@@ -329,6 +316,18 @@ func (h *histogram) Write(out *dto.Metric) error {
 			CumulativeCount: proto.Uint64(cumCount),
 			CumulativeCount: proto.Uint64(cumCount),
 			UpperBound:      proto.Float64(upperBound),
 			UpperBound:      proto.Float64(upperBound),
 		}
 		}
+		if e := h.exemplars[i].Load(); e != nil {
+			his.Bucket[i].Exemplar = e.(*dto.Exemplar)
+		}
+	}
+	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
+	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
+		b := &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(math.Inf(1)),
+			Exemplar:        e.(*dto.Exemplar),
+		}
+		his.Bucket = append(his.Bucket, b)
 	}
 	}
 
 
 	out.Histogram = his
 	out.Histogram = his
@@ -352,6 +351,57 @@ func (h *histogram) Write(out *dto.Metric) error {
 	return nil
 	return nil
 }
 }
 
 
+// findBucket returns the index of the bucket for the provided value, or
+// len(h.upperBounds) for the +Inf bucket.
+func (h *histogram) findBucket(v float64) int {
+	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
+	// slightly faster than the binary search. If we really care, we could
+	// switch from one search strategy to the other depending on the number
+	// of buckets.
+	//
+	// Microbenchmarks (BenchmarkHistogramNoLabels):
+	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	return sort.SearchFloat64s(h.upperBounds, v)
+}
+
+// observe is the implementation for Observe without the findBucket part.
+func (h *histogram) observe(v float64, bucket int) {
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1)
+	hotCounts := h.counts[n>>63]
+
+	if bucket < len(h.upperBounds) {
+		atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+	}
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+// updateExemplar replaces the exemplar for the provided bucket. With empty
+// labels, it's a no-op. It panics if any of the labels is invalid.
+func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
+	if l == nil {
+		return
+	}
+	e, err := newExemplar(v, h.now(), l)
+	if err != nil {
+		panic(err)
+	}
+	h.exemplars[bucket].Store(e)
+}
+
 // HistogramVec is a Collector that bundles a set of Histograms that all share the
 // HistogramVec is a Collector that bundles a set of Histograms that all share the
 // same Desc, but have different values for their variable labels. This is used
 // same Desc, but have different values for their variable labels. This is used
 // if you want to count the same thing partitioned by various dimensions
 // if you want to count the same thing partitioned by various dimensions

+ 12 - 0
vendor/github.com/prometheus/client_golang/prometheus/observer.go

@@ -50,3 +50,15 @@ type ObserverVec interface {
 
 
 	Collector
 	Collector
 }
 }
+
+// ExemplarObserver is implemented by Observers that offer the option of
+// observing a value together with an exemplar. Its ObserveWithExemplar method
+// works like the Observe method of an Observer but also replaces the currently
+// saved exemplar (if any) with a new one, created from the provided value, the
+// current time as timestamp, and the provided Labels. Empty Labels will lead to
+// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
+// left in place. ObserveWithExemplar panics if any of the provided labels are
+// invalid or if the provided labels contain more than 64 runes in total.
+type ExemplarObserver interface {
+	ObserveWithExemplar(value float64, exemplar Labels)
+}

+ 14 - 10
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go

@@ -33,18 +33,22 @@ var (
 )
 )
 
 
 type processMemoryCounters struct {
 type processMemoryCounters struct {
-	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
+	// System interface description
+	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
+
+	// Refer to the Golang internal implementation
+	// https://golang.org/src/internal/syscall/windows/psapi_windows.go
 	_                          uint32
 	_                          uint32
 	PageFaultCount             uint32
 	PageFaultCount             uint32
-	PeakWorkingSetSize         uint64
-	WorkingSetSize             uint64
-	QuotaPeakPagedPoolUsage    uint64
-	QuotaPagedPoolUsage        uint64
-	QuotaPeakNonPagedPoolUsage uint64
-	QuotaNonPagedPoolUsage     uint64
-	PagefileUsage              uint64
-	PeakPagefileUsage          uint64
-	PrivateUsage               uint64
+	PeakWorkingSetSize         uintptr
+	WorkingSetSize             uintptr
+	QuotaPeakPagedPoolUsage    uintptr
+	QuotaPagedPoolUsage        uintptr
+	QuotaPeakNonPagedPoolUsage uintptr
+	QuotaNonPagedPoolUsage     uintptr
+	PagefileUsage              uintptr
+	PeakPagefileUsage          uintptr
+	PrivateUsage               uintptr
 }
 }
 
 
 func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
 func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {

+ 7 - 3
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go

@@ -53,12 +53,16 @@ func (r *responseWriterDelegator) Written() int64 {
 }
 }
 
 
 func (r *responseWriterDelegator) WriteHeader(code int) {
 func (r *responseWriterDelegator) WriteHeader(code int) {
+	if r.observeWriteHeader != nil && !r.wroteHeader {
+		// Only call observeWriteHeader for the 1st time. It's a bug if
+		// WriteHeader is called more than once, but we want to protect
+		// against it here. Note that we still delegate the WriteHeader
+		// to the original ResponseWriter to not mask the bug from it.
+		r.observeWriteHeader(code)
+	}
 	r.status = code
 	r.status = code
 	r.wroteHeader = true
 	r.wroteHeader = true
 	r.ResponseWriter.WriteHeader(code)
 	r.ResponseWriter.WriteHeader(code)
-	if r.observeWriteHeader != nil {
-		r.observeWriteHeader(code)
-	}
 }
 }
 
 
 func (r *responseWriterDelegator) Write(b []byte) (int, error) {
 func (r *responseWriterDelegator) Write(b []byte) (int, error) {

+ 56 - 26
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go

@@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 			}
 			}
 		}
 		}
 
 
-		contentType := expfmt.Negotiate(req.Header)
+		var contentType expfmt.Format
+		if opts.EnableOpenMetrics {
+			contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
+		} else {
+			contentType = expfmt.Negotiate(req.Header)
+		}
 		header := rsp.Header()
 		header := rsp.Header()
 		header.Set(contentTypeHeader, string(contentType))
 		header.Set(contentTypeHeader, string(contentType))
 
 
@@ -162,28 +167,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 
 
 		enc := expfmt.NewEncoder(w, contentType)
 		enc := expfmt.NewEncoder(w, contentType)
 
 
-		var lastErr error
-		for _, mf := range mfs {
-			if err := enc.Encode(mf); err != nil {
-				lastErr = err
-				if opts.ErrorLog != nil {
-					opts.ErrorLog.Println("error encoding and sending metric family:", err)
-				}
-				errCnt.WithLabelValues("encoding").Inc()
-				switch opts.ErrorHandling {
-				case PanicOnError:
-					panic(err)
-				case ContinueOnError:
-					// Handled later.
-				case HTTPErrorOnError:
-					httpError(rsp, err)
-					return
-				}
+		// handleError handles the error according to opts.ErrorHandling
+		// and returns true if we have to abort after the handling.
+		handleError := func(err error) bool {
+			if err == nil {
+				return false
+			}
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error encoding and sending metric family:", err)
+			}
+			errCnt.WithLabelValues("encoding").Inc()
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case HTTPErrorOnError:
+				// We cannot really send an HTTP error at this
+				// point because we most likely have written
+				// something to rsp already. But at least we can
+				// stop sending.
+				return true
 			}
 			}
+			// Do nothing in all other cases, including ContinueOnError.
+			return false
 		}
 		}
 
 
-		if lastErr != nil {
-			httpError(rsp, lastErr)
+		for _, mf := range mfs {
+			if handleError(enc.Encode(mf)) {
+				return
+			}
+		}
+		if closer, ok := enc.(expfmt.Closer); ok {
+			// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
+			if handleError(closer.Close()) {
+				return
+			}
 		}
 		}
 	})
 	})
 
 
@@ -255,7 +272,12 @@ type HandlerErrorHandling int
 // errors are encountered.
 // errors are encountered.
 const (
 const (
 	// Serve an HTTP status code 500 upon the first error
 	// Serve an HTTP status code 500 upon the first error
-	// encountered. Report the error message in the body.
+	// encountered. Report the error message in the body. Note that HTTP
+	// errors cannot be served anymore once the beginning of a regular
+	// payload has been sent. Thus, in the (unlikely) case that encoding the
+	// payload into the negotiated wire format fails, serving the response
+	// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
+	// those errors.
 	HTTPErrorOnError HandlerErrorHandling = iota
 	HTTPErrorOnError HandlerErrorHandling = iota
 	// Ignore errors and try to serve as many metrics as possible.  However,
 	// Ignore errors and try to serve as many metrics as possible.  However,
 	// if no metrics can be served, serve an HTTP status code 500 and the
 	// if no metrics can be served, serve an HTTP status code 500 and the
@@ -318,6 +340,16 @@ type HandlerOpts struct {
 	// away). Until the implementation is improved, it is recommended to
 	// away). Until the implementation is improved, it is recommended to
 	// implement a separate timeout in potentially slow Collectors.
 	// implement a separate timeout in potentially slow Collectors.
 	Timeout time.Duration
 	Timeout time.Duration
+	// If true, the experimental OpenMetrics encoding is added to the
+	// possible options during content negotiation. Note that Prometheus
+	// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
+	// the only way to transmit exemplars. However, the move to OpenMetrics
+	// is not completely transparent. Most notably, the values of "quantile"
+	// labels of Summaries and "le" labels of Histograms are formatted with
+	// a trailing ".0" if they would otherwise look like integer numbers
+	// (which changes the identity of the resulting series on the Prometheus
+	// server).
+	EnableOpenMetrics bool
 }
 }
 
 
 // gzipAccepted returns whether the client will accept gzip-encoded content.
 // gzipAccepted returns whether the client will accept gzip-encoded content.
@@ -334,11 +366,9 @@ func gzipAccepted(header http.Header) bool {
 }
 }
 
 
 // httpError removes any content-encoding header and then calls http.Error with
 // httpError removes any content-encoding header and then calls http.Error with
-// the provided error and http.StatusInternalServerErrer. Error contents is
-// supposed to be uncompressed plain text. However, same as with a plain
-// http.Error, any header settings will be void if the header has already been
-// sent. The error message will still be written to the writer, but it will
-// probably be of limited use.
+// the provided error and http.StatusInternalServerError. Error contents is
+// supposed to be uncompressed plain text. Same as with a plain http.Error, this
+// must not be called if the header or any payload has already been sent.
 func httpError(rsp http.ResponseWriter, err error) {
 func httpError(rsp http.ResponseWriter, err error) {
 	rsp.Header().Del(contentEncodingHeader)
 	rsp.Header().Del(contentEncodingHeader)
 	http.Error(
 	http.Error(

+ 46 - 4
vendor/github.com/prometheus/client_golang/prometheus/value.go

@@ -16,8 +16,11 @@ package prometheus
 import (
 import (
 	"fmt"
 	"fmt"
 	"sort"
 	"sort"
+	"time"
+	"unicode/utf8"
 
 
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
 
 
 	dto "github.com/prometheus/client_model/go"
 	dto "github.com/prometheus/client_model/go"
 )
 )
@@ -25,7 +28,8 @@ import (
 // ValueType is an enumeration of metric types that represent a simple value.
 // ValueType is an enumeration of metric types that represent a simple value.
 type ValueType int
 type ValueType int
 
 
-// Possible values for the ValueType enum.
+// Possible values for the ValueType enum. Use UntypedValue to mark a metric
+// with an unknown type.
 const (
 const (
 	_ ValueType = iota
 	_ ValueType = iota
 	CounterValue
 	CounterValue
@@ -69,7 +73,7 @@ func (v *valueFunc) Desc() *Desc {
 }
 }
 
 
 func (v *valueFunc) Write(out *dto.Metric) error {
 func (v *valueFunc) Write(out *dto.Metric) error {
-	return populateMetric(v.valType, v.function(), v.labelPairs, out)
+	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
 }
 }
 
 
 // NewConstMetric returns a metric with one fixed value that cannot be
 // NewConstMetric returns a metric with one fixed value that cannot be
@@ -116,19 +120,20 @@ func (m *constMetric) Desc() *Desc {
 }
 }
 
 
 func (m *constMetric) Write(out *dto.Metric) error {
 func (m *constMetric) Write(out *dto.Metric) error {
-	return populateMetric(m.valType, m.val, m.labelPairs, out)
+	return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
 }
 }
 
 
 func populateMetric(
 func populateMetric(
 	t ValueType,
 	t ValueType,
 	v float64,
 	v float64,
 	labelPairs []*dto.LabelPair,
 	labelPairs []*dto.LabelPair,
+	e *dto.Exemplar,
 	m *dto.Metric,
 	m *dto.Metric,
 ) error {
 ) error {
 	m.Label = labelPairs
 	m.Label = labelPairs
 	switch t {
 	switch t {
 	case CounterValue:
 	case CounterValue:
-		m.Counter = &dto.Counter{Value: proto.Float64(v)}
+		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
 	case GaugeValue:
 	case GaugeValue:
 		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
 		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
 	case UntypedValue:
 	case UntypedValue:
@@ -160,3 +165,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
 	sort.Sort(labelPairSorter(labelPairs))
 	sort.Sort(labelPairSorter(labelPairs))
 	return labelPairs
 	return labelPairs
 }
 }
+
+// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
+const ExemplarMaxRunes = 64
+
+// newExemplar creates a new dto.Exemplar from the provided values. An error is
+// returned if any of the label names or values are invalid or if the total
+// number of runes in the label names and values exceeds ExemplarMaxRunes.
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
+	e := &dto.Exemplar{}
+	e.Value = proto.Float64(value)
+	tsProto, err := ptypes.TimestampProto(ts)
+	if err != nil {
+		return nil, err
+	}
+	e.Timestamp = tsProto
+	labelPairs := make([]*dto.LabelPair, 0, len(l))
+	var runes int
+	for name, value := range l {
+		if !checkLabelName(name) {
+			return nil, fmt.Errorf("exemplar label name %q is invalid", name)
+		}
+		runes += utf8.RuneCountInString(name)
+		if !utf8.ValidString(value) {
+			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
+		}
+		runes += utf8.RuneCountInString(value)
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(name),
+			Value: proto.String(value),
+		})
+	}
+	if runes > ExemplarMaxRunes {
+		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
+	}
+	e.Label = labelPairs
+	return e, nil
+}

+ 12 - 0
vendor/github.com/prometheus/client_golang/prometheus/vec.go

@@ -91,6 +91,18 @@ func (m *metricVec) Delete(labels Labels) bool {
 	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
 	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
 }
 }
 
 
+// Without explicit forwarding of Describe, Collect, Reset, those methods won't
+// show up in GoDoc.
+
+// Describe implements Collector.
+func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
+
+// Collect implements Collector.
+func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
+
+// Reset deletes all metrics in this vector.
+func (m *metricVec) Reset() { m.metricMap.Reset() }
+
 func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
 func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
 	var (
 	var (
 		newCurry []curriedLabelValue
 		newCurry []curriedLabelValue

+ 181 - 87
vendor/github.com/prometheus/client_model/go/metrics.pb.go

@@ -1,11 +1,14 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: metrics.proto
 // source: metrics.proto
 
 
-package io_prometheus_client // import "github.com/prometheus/client_model/go"
+package io_prometheus_client
 
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	math "math"
+)
 
 
 // Reference imports to suppress errors if they are not otherwise used.
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = proto.Marshal
@@ -16,7 +19,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
 // proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
 
 type MetricType int32
 type MetricType int32
 
 
@@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{
 	3: "UNTYPED",
 	3: "UNTYPED",
 	4: "HISTOGRAM",
 	4: "HISTOGRAM",
 }
 }
+
 var MetricType_value = map[string]int32{
 var MetricType_value = map[string]int32{
 	"COUNTER":   0,
 	"COUNTER":   0,
 	"GAUGE":     1,
 	"GAUGE":     1,
@@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType {
 	*p = x
 	*p = x
 	return p
 	return p
 }
 }
+
 func (x MetricType) String() string {
 func (x MetricType) String() string {
 	return proto.EnumName(MetricType_name, int32(x))
 	return proto.EnumName(MetricType_name, int32(x))
 }
 }
+
 func (x *MetricType) UnmarshalJSON(data []byte) error {
 func (x *MetricType) UnmarshalJSON(data []byte) error {
 	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
 	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
 	if err != nil {
 	if err != nil {
@@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
 	*x = MetricType(value)
 	*x = MetricType(value)
 	return nil
 	return nil
 }
 }
+
 func (MetricType) EnumDescriptor() ([]byte, []int) {
 func (MetricType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+	return fileDescriptor_6039342a2ba47b72, []int{0}
 }
 }
 
 
 type LabelPair struct {
 type LabelPair struct {
@@ -75,16 +82,17 @@ func (m *LabelPair) Reset()         { *m = LabelPair{} }
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
 func (m *LabelPair) String() string { return proto.CompactTextString(m) }
 func (*LabelPair) ProtoMessage()    {}
 func (*LabelPair) ProtoMessage()    {}
 func (*LabelPair) Descriptor() ([]byte, []int) {
 func (*LabelPair) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+	return fileDescriptor_6039342a2ba47b72, []int{0}
 }
 }
+
 func (m *LabelPair) XXX_Unmarshal(b []byte) error {
 func (m *LabelPair) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
 	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
 }
 }
 func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
 	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
 }
 }
-func (dst *LabelPair) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelPair.Merge(dst, src)
+func (m *LabelPair) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelPair.Merge(m, src)
 }
 }
 func (m *LabelPair) XXX_Size() int {
 func (m *LabelPair) XXX_Size() int {
 	return xxx_messageInfo_LabelPair.Size(m)
 	return xxx_messageInfo_LabelPair.Size(m)
@@ -120,16 +128,17 @@ func (m *Gauge) Reset()         { *m = Gauge{} }
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
 func (m *Gauge) String() string { return proto.CompactTextString(m) }
 func (*Gauge) ProtoMessage()    {}
 func (*Gauge) ProtoMessage()    {}
 func (*Gauge) Descriptor() ([]byte, []int) {
 func (*Gauge) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+	return fileDescriptor_6039342a2ba47b72, []int{1}
 }
 }
+
 func (m *Gauge) XXX_Unmarshal(b []byte) error {
 func (m *Gauge) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Gauge.Unmarshal(m, b)
 	return xxx_messageInfo_Gauge.Unmarshal(m, b)
 }
 }
 func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
 }
 }
-func (dst *Gauge) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Gauge.Merge(dst, src)
+func (m *Gauge) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Gauge.Merge(m, src)
 }
 }
 func (m *Gauge) XXX_Size() int {
 func (m *Gauge) XXX_Size() int {
 	return xxx_messageInfo_Gauge.Size(m)
 	return xxx_messageInfo_Gauge.Size(m)
@@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 {
 }
 }
 
 
 type Counter struct {
 type Counter struct {
-	Value                *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	Value                *float64  `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	Exemplar             *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 }
 
 
 func (m *Counter) Reset()         { *m = Counter{} }
 func (m *Counter) Reset()         { *m = Counter{} }
 func (m *Counter) String() string { return proto.CompactTextString(m) }
 func (m *Counter) String() string { return proto.CompactTextString(m) }
 func (*Counter) ProtoMessage()    {}
 func (*Counter) ProtoMessage()    {}
 func (*Counter) Descriptor() ([]byte, []int) {
 func (*Counter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
+	return fileDescriptor_6039342a2ba47b72, []int{2}
 }
 }
+
 func (m *Counter) XXX_Unmarshal(b []byte) error {
 func (m *Counter) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Counter.Unmarshal(m, b)
 	return xxx_messageInfo_Counter.Unmarshal(m, b)
 }
 }
 func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
 }
 }
-func (dst *Counter) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Counter.Merge(dst, src)
+func (m *Counter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Counter.Merge(m, src)
 }
 }
 func (m *Counter) XXX_Size() int {
 func (m *Counter) XXX_Size() int {
 	return xxx_messageInfo_Counter.Size(m)
 	return xxx_messageInfo_Counter.Size(m)
@@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 {
 	return 0
 	return 0
 }
 }
 
 
+func (m *Counter) GetExemplar() *Exemplar {
+	if m != nil {
+		return m.Exemplar
+	}
+	return nil
+}
+
 type Quantile struct {
 type Quantile struct {
 	Quantile             *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
 	Quantile             *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
 	Value                *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
 	Value                *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
@@ -197,16 +215,17 @@ func (m *Quantile) Reset()         { *m = Quantile{} }
 func (m *Quantile) String() string { return proto.CompactTextString(m) }
 func (m *Quantile) String() string { return proto.CompactTextString(m) }
 func (*Quantile) ProtoMessage()    {}
 func (*Quantile) ProtoMessage()    {}
 func (*Quantile) Descriptor() ([]byte, []int) {
 func (*Quantile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
+	return fileDescriptor_6039342a2ba47b72, []int{3}
 }
 }
+
 func (m *Quantile) XXX_Unmarshal(b []byte) error {
 func (m *Quantile) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Quantile.Unmarshal(m, b)
 	return xxx_messageInfo_Quantile.Unmarshal(m, b)
 }
 }
 func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
 }
 }
-func (dst *Quantile) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Quantile.Merge(dst, src)
+func (m *Quantile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Quantile.Merge(m, src)
 }
 }
 func (m *Quantile) XXX_Size() int {
 func (m *Quantile) XXX_Size() int {
 	return xxx_messageInfo_Quantile.Size(m)
 	return xxx_messageInfo_Quantile.Size(m)
@@ -244,16 +263,17 @@ func (m *Summary) Reset()         { *m = Summary{} }
 func (m *Summary) String() string { return proto.CompactTextString(m) }
 func (m *Summary) String() string { return proto.CompactTextString(m) }
 func (*Summary) ProtoMessage()    {}
 func (*Summary) ProtoMessage()    {}
 func (*Summary) Descriptor() ([]byte, []int) {
 func (*Summary) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
+	return fileDescriptor_6039342a2ba47b72, []int{4}
 }
 }
+
 func (m *Summary) XXX_Unmarshal(b []byte) error {
 func (m *Summary) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Summary.Unmarshal(m, b)
 	return xxx_messageInfo_Summary.Unmarshal(m, b)
 }
 }
 func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
 }
 }
-func (dst *Summary) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Summary.Merge(dst, src)
+func (m *Summary) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Summary.Merge(m, src)
 }
 }
 func (m *Summary) XXX_Size() int {
 func (m *Summary) XXX_Size() int {
 	return xxx_messageInfo_Summary.Size(m)
 	return xxx_messageInfo_Summary.Size(m)
@@ -296,16 +316,17 @@ func (m *Untyped) Reset()         { *m = Untyped{} }
 func (m *Untyped) String() string { return proto.CompactTextString(m) }
 func (m *Untyped) String() string { return proto.CompactTextString(m) }
 func (*Untyped) ProtoMessage()    {}
 func (*Untyped) ProtoMessage()    {}
 func (*Untyped) Descriptor() ([]byte, []int) {
 func (*Untyped) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
+	return fileDescriptor_6039342a2ba47b72, []int{5}
 }
 }
+
 func (m *Untyped) XXX_Unmarshal(b []byte) error {
 func (m *Untyped) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Untyped.Unmarshal(m, b)
 	return xxx_messageInfo_Untyped.Unmarshal(m, b)
 }
 }
 func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
 }
 }
-func (dst *Untyped) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Untyped.Merge(dst, src)
+func (m *Untyped) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Untyped.Merge(m, src)
 }
 }
 func (m *Untyped) XXX_Size() int {
 func (m *Untyped) XXX_Size() int {
 	return xxx_messageInfo_Untyped.Size(m)
 	return xxx_messageInfo_Untyped.Size(m)
@@ -336,16 +357,17 @@ func (m *Histogram) Reset()         { *m = Histogram{} }
 func (m *Histogram) String() string { return proto.CompactTextString(m) }
 func (m *Histogram) String() string { return proto.CompactTextString(m) }
 func (*Histogram) ProtoMessage()    {}
 func (*Histogram) ProtoMessage()    {}
 func (*Histogram) Descriptor() ([]byte, []int) {
 func (*Histogram) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
+	return fileDescriptor_6039342a2ba47b72, []int{6}
 }
 }
+
 func (m *Histogram) XXX_Unmarshal(b []byte) error {
 func (m *Histogram) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Histogram.Unmarshal(m, b)
 	return xxx_messageInfo_Histogram.Unmarshal(m, b)
 }
 }
 func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
 }
 }
-func (dst *Histogram) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Histogram.Merge(dst, src)
+func (m *Histogram) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Histogram.Merge(m, src)
 }
 }
 func (m *Histogram) XXX_Size() int {
 func (m *Histogram) XXX_Size() int {
 	return xxx_messageInfo_Histogram.Size(m)
 	return xxx_messageInfo_Histogram.Size(m)
@@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket {
 }
 }
 
 
 type Bucket struct {
 type Bucket struct {
-	CumulativeCount      *uint64  `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
-	UpperBound           *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 }
 
 
 func (m *Bucket) Reset()         { *m = Bucket{} }
 func (m *Bucket) Reset()         { *m = Bucket{} }
 func (m *Bucket) String() string { return proto.CompactTextString(m) }
 func (m *Bucket) String() string { return proto.CompactTextString(m) }
 func (*Bucket) ProtoMessage()    {}
 func (*Bucket) ProtoMessage()    {}
 func (*Bucket) Descriptor() ([]byte, []int) {
 func (*Bucket) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
+	return fileDescriptor_6039342a2ba47b72, []int{7}
 }
 }
+
 func (m *Bucket) XXX_Unmarshal(b []byte) error {
 func (m *Bucket) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Bucket.Unmarshal(m, b)
 	return xxx_messageInfo_Bucket.Unmarshal(m, b)
 }
 }
 func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
 }
 }
-func (dst *Bucket) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Bucket.Merge(dst, src)
+func (m *Bucket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Bucket.Merge(m, src)
 }
 }
 func (m *Bucket) XXX_Size() int {
 func (m *Bucket) XXX_Size() int {
 	return xxx_messageInfo_Bucket.Size(m)
 	return xxx_messageInfo_Bucket.Size(m)
@@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 {
 	return 0
 	return 0
 }
 }
 
 
+func (m *Bucket) GetExemplar() *Exemplar {
+	if m != nil {
+		return m.Exemplar
+	}
+	return nil
+}
+
+type Exemplar struct {
+	Label                []*LabelPair         `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Value                *float64             `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+	Timestamp            *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *Exemplar) Reset()         { *m = Exemplar{} }
+func (m *Exemplar) String() string { return proto.CompactTextString(m) }
+func (*Exemplar) ProtoMessage()    {}
+func (*Exemplar) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6039342a2ba47b72, []int{8}
+}
+
+func (m *Exemplar) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Exemplar.Unmarshal(m, b)
+}
+func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
+}
+func (m *Exemplar) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Exemplar.Merge(m, src)
+}
+func (m *Exemplar) XXX_Size() int {
+	return xxx_messageInfo_Exemplar.Size(m)
+}
+func (m *Exemplar) XXX_DiscardUnknown() {
+	xxx_messageInfo_Exemplar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+
+func (m *Exemplar) GetLabel() []*LabelPair {
+	if m != nil {
+		return m.Label
+	}
+	return nil
+}
+
+func (m *Exemplar) GetValue() float64 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
+	if m != nil {
+		return m.Timestamp
+	}
+	return nil
+}
+
 type Metric struct {
 type Metric struct {
 	Label                []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
 	Label                []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
 	Gauge                *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
 	Gauge                *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
@@ -440,16 +526,17 @@ func (m *Metric) Reset()         { *m = Metric{} }
 func (m *Metric) String() string { return proto.CompactTextString(m) }
 func (m *Metric) String() string { return proto.CompactTextString(m) }
 func (*Metric) ProtoMessage()    {}
 func (*Metric) ProtoMessage()    {}
 func (*Metric) Descriptor() ([]byte, []int) {
 func (*Metric) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+	return fileDescriptor_6039342a2ba47b72, []int{9}
 }
 }
+
 func (m *Metric) XXX_Unmarshal(b []byte) error {
 func (m *Metric) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Metric.Unmarshal(m, b)
 	return xxx_messageInfo_Metric.Unmarshal(m, b)
 }
 }
 func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
 	return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
 }
 }
-func (dst *Metric) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metric.Merge(dst, src)
+func (m *Metric) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metric.Merge(m, src)
 }
 }
 func (m *Metric) XXX_Size() int {
 func (m *Metric) XXX_Size() int {
 	return xxx_messageInfo_Metric.Size(m)
 	return xxx_messageInfo_Metric.Size(m)
@@ -523,16 +610,17 @@ func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
 func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
 func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
 func (*MetricFamily) ProtoMessage()    {}
 func (*MetricFamily) ProtoMessage()    {}
 func (*MetricFamily) Descriptor() ([]byte, []int) {
 func (*MetricFamily) Descriptor() ([]byte, []int) {
-	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
+	return fileDescriptor_6039342a2ba47b72, []int{10}
 }
 }
+
 func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
 func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
 	return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
 }
 }
 func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
 	return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
 }
 }
-func (dst *MetricFamily) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MetricFamily.Merge(dst, src)
+func (m *MetricFamily) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricFamily.Merge(m, src)
 }
 }
 func (m *MetricFamily) XXX_Size() int {
 func (m *MetricFamily) XXX_Size() int {
 	return xxx_messageInfo_MetricFamily.Size(m)
 	return xxx_messageInfo_MetricFamily.Size(m)
@@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric {
 }
 }
 
 
 func init() {
 func init() {
+	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
 	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
 	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
 	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
 	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
 	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
 	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
@@ -580,50 +669,55 @@ func init() {
 	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
 	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
 	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
 	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
 	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
 	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+	proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
 	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
 	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
 	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
 	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
-	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
 }
 }
 
 
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
-
-var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
-	// 591 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
-	0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
-	0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
-	0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
-	0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
-	0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
-	0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
-	0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
-	0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
-	0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
-	0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
-	0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
-	0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
-	0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
-	0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
-	0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
-	0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
-	0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
-	0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
-	0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
-	0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
-	0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
-	0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
-	0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
-	0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
-	0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
-	0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
-	0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
-	0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
-	0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
-	0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
-	0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
-	0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
-	0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
-	0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
-	0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
-	0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
+
+var fileDescriptor_6039342a2ba47b72 = []byte{
+	// 665 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
+	0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
+	0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
+	0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
+	0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
+	0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
+	0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
+	0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
+	0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
+	0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
+	0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
+	0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
+	0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
+	0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
+	0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
+	0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
+	0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
+	0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
+	0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
+	0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
+	0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
+	0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
+	0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
+	0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
+	0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
+	0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
+	0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
+	0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
+	0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
+	0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
+	0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
+	0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
+	0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
+	0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
+	0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
+	0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
+	0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
+	0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
+	0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
+	0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
+	0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
+	0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
 }
 }

+ 99 - 25
vendor/github.com/prometheus/common/expfmt/encode.go

@@ -30,17 +30,38 @@ type Encoder interface {
 	Encode(*dto.MetricFamily) error
 	Encode(*dto.MetricFamily) error
 }
 }
 
 
-type encoder func(*dto.MetricFamily) error
+// Closer is implemented by Encoders that need to be closed to finalize
+// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
+//
+// Note that all Encoder implementations returned from this package implement
+// Closer, too, even if the Close call is a no-op. This happens in preparation
+// for adding a Close method to the Encoder interface directly in a (mildly
+// breaking) release in the future.
+type Closer interface {
+	Close() error
+}
+
+type encoderCloser struct {
+	encode func(*dto.MetricFamily) error
+	close  func() error
+}
 
 
-func (e encoder) Encode(v *dto.MetricFamily) error {
-	return e(v)
+func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
+	return ec.encode(v)
 }
 }
 
 
-// Negotiate returns the Content-Type based on the given Accept header.
-// If no appropriate accepted type is found, FmtText is returned.
+func (ec encoderCloser) Close() error {
+	return ec.close()
+}
+
+// Negotiate returns the Content-Type based on the given Accept header. If no
+// appropriate accepted type is found, FmtText is returned (which is the
+// Prometheus text format). This function will never negotiate FmtOpenMetrics,
+// as the support is still experimental. To include the option to negotiate
+// FmtOpenMetrics, use NegotiateOpenMetrics.
 func Negotiate(h http.Header) Format {
 func Negotiate(h http.Header) Format {
 	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
 	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
-		// Check for protocol buffer
+		ver := ac.Params["version"]
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			switch ac.Params["encoding"] {
 			case "delimited":
 			case "delimited":
@@ -51,38 +72,91 @@ func Negotiate(h http.Header) Format {
 				return FmtProtoCompact
 				return FmtProtoCompact
 			}
 			}
 		}
 		}
-		// Check for text format.
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+			return FmtText
+		}
+	}
+	return FmtText
+}
+
+// NegotiateIncludingOpenMetrics works like Negotiate but includes
+// FmtOpenMetrics as an option for the result. Note that this function is
+// temporary and will disappear once FmtOpenMetrics is fully supported and as
+// such may be negotiated by the normal Negotiate function.
+func NegotiateIncludingOpenMetrics(h http.Header) Format {
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
 		ver := ac.Params["version"]
 		ver := ac.Params["version"]
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+			switch ac.Params["encoding"] {
+			case "delimited":
+				return FmtProtoDelim
+			case "text":
+				return FmtProtoText
+			case "compact-text":
+				return FmtProtoCompact
+			}
+		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
 			return FmtText
 			return FmtText
 		}
 		}
+		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
+			return FmtOpenMetrics
+		}
 	}
 	}
 	return FmtText
 	return FmtText
 }
 }
 
 
-// NewEncoder returns a new encoder based on content type negotiation.
+// NewEncoder returns a new encoder based on content type negotiation. All
+// Encoder implementations returned by NewEncoder also implement Closer, and
+// callers should always call the Close method. It is currently only required
+// for FmtOpenMetrics, but a future (breaking) release will add the Close method
+// to the Encoder interface directly. The current version of the Encoder
+// interface is kept for backwards compatibility.
 func NewEncoder(w io.Writer, format Format) Encoder {
 func NewEncoder(w io.Writer, format Format) Encoder {
 	switch format {
 	switch format {
 	case FmtProtoDelim:
 	case FmtProtoDelim:
-		return encoder(func(v *dto.MetricFamily) error {
-			_, err := pbutil.WriteDelimited(w, v)
-			return err
-		})
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := pbutil.WriteDelimited(w, v)
+				return err
+			},
+			close: func() error { return nil },
+		}
 	case FmtProtoCompact:
 	case FmtProtoCompact:
-		return encoder(func(v *dto.MetricFamily) error {
-			_, err := fmt.Fprintln(w, v.String())
-			return err
-		})
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := fmt.Fprintln(w, v.String())
+				return err
+			},
+			close: func() error { return nil },
+		}
 	case FmtProtoText:
 	case FmtProtoText:
-		return encoder(func(v *dto.MetricFamily) error {
-			_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
-			return err
-		})
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+				return err
+			},
+			close: func() error { return nil },
+		}
 	case FmtText:
 	case FmtText:
-		return encoder(func(v *dto.MetricFamily) error {
-			_, err := MetricFamilyToText(w, v)
-			return err
-		})
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := MetricFamilyToText(w, v)
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case FmtOpenMetrics:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := MetricFamilyToOpenMetrics(w, v)
+				return err
+			},
+			close: func() error {
+				_, err := FinalizeOpenMetrics(w)
+				return err
+			},
+		}
 	}
 	}
-	panic("expfmt.NewEncoder: unknown format")
+	panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
 }
 }

+ 7 - 4
vendor/github.com/prometheus/common/expfmt/expfmt.go

@@ -19,10 +19,12 @@ type Format string
 
 
 // Constants to assemble the Content-Type values for the different wire protocols.
 // Constants to assemble the Content-Type values for the different wire protocols.
 const (
 const (
-	TextVersion   = "0.0.4"
-	ProtoType     = `application/vnd.google.protobuf`
-	ProtoProtocol = `io.prometheus.client.MetricFamily`
-	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
+	TextVersion        = "0.0.4"
+	ProtoType          = `application/vnd.google.protobuf`
+	ProtoProtocol      = `io.prometheus.client.MetricFamily`
+	ProtoFmt           = ProtoType + "; proto=" + ProtoProtocol + ";"
+	OpenMetricsType    = `application/openmetrics-text`
+	OpenMetricsVersion = "0.0.1"
 
 
 	// The Content-Type values for the different wire protocols.
 	// The Content-Type values for the different wire protocols.
 	FmtUnknown      Format = `<unknown>`
 	FmtUnknown      Format = `<unknown>`
@@ -30,6 +32,7 @@ const (
 	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
 	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
 	FmtProtoText    Format = ProtoFmt + ` encoding=text`
 	FmtProtoText    Format = ProtoFmt + ` encoding=text`
 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+	FmtOpenMetrics  Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
 )
 )
 
 
 const (
 const (

+ 527 - 0
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go

@@ -0,0 +1,527 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/ptypes"
+	"github.com/prometheus/common/model"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
+// OpenMetrics text format and writes the resulting lines to 'out'. It returns
+// the number of bytes written and any error encountered. The output will have
+// the same order as the input, no further sorting is performed. Furthermore,
+// this function assumes the input is already sanitized and does not perform any
+// sanity checks. If the input contains duplicate metrics or invalid metric or
+// label names, the conversion will result in invalid text format output.
+//
+// This function fulfills the type 'expfmt.encoder'.
+//
+// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
+// on individual metric families, it is the responsibility of the caller to
+// append this line to 'out' once all metric families have been written.
+// Conveniently, this can be done by calling FinalizeOpenMetrics.
+//
+// The output should be fully OpenMetrics compliant. However, there are a few
+// missing features and peculiarities to avoid complications when switching from
+// Prometheus to OpenMetrics or vice versa:
+//
+// - Counters are expected to have the `_total` suffix in their metric name. In
+//   the output, the suffix will be truncated from the `# TYPE` and `# HELP`
+//   line. A counter with a missing `_total` suffix is not an error. However,
+//   its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+//   output.
+//
+// - No support for the following (optional) features: `# UNIT` line, `_created`
+//   line, info type, stateset type, gaugehistogram type.
+//
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+//   exemplars that are larger than allowed by the OpenMetrics specification).
+//
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+//   with a `NaN` value.)
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+	name := in.GetName()
+	if name == "" {
+		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Try the interface upgrade. If it doesn't work, we'll use a
+	// bufio.Writer from the sync.Pool.
+	w, ok := out.(enhancedWriter)
+	if !ok {
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
+		w = b
+		defer func() {
+			bErr := b.Flush()
+			if err == nil {
+				err = bErr
+			}
+			bufPool.Put(b)
+		}()
+	}
+
+	var (
+		n          int
+		metricType = in.GetType()
+		shortName  = name
+	)
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
+		shortName = name[:len(name)-6]
+	}
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err = w.WriteString("# HELP ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = w.WriteString(shortName)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Help, true)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+	n, err = w.WriteString("# TYPE ")
+	written += n
+	if err != nil {
+		return
+	}
+	n, err = w.WriteString(shortName)
+	written += n
+	if err != nil {
+		return
+	}
+	switch metricType {
+	case dto.MetricType_COUNTER:
+		if strings.HasSuffix(name, "_total") {
+			n, err = w.WriteString(" counter\n")
+		} else {
+			n, err = w.WriteString(" unknown\n")
+		}
+	case dto.MetricType_GAUGE:
+		n, err = w.WriteString(" gauge\n")
+	case dto.MetricType_SUMMARY:
+		n, err = w.WriteString(" summary\n")
+	case dto.MetricType_UNTYPED:
+		n, err = w.WriteString(" unknown\n")
+	case dto.MetricType_HISTOGRAM:
+		n, err = w.WriteString(" histogram\n")
+	default:
+		return written, fmt.Errorf("unknown metric type %s", metricType.String())
+	}
+	written += n
+	if err != nil {
+		return
+	}
+
+	// Finally the samples, one line for each.
+	for _, metric := range in.Metric {
+		switch metricType {
+		case dto.MetricType_COUNTER:
+			if metric.Counter == nil {
+				return written, fmt.Errorf(
+					"expected counter in metric %s %s", name, metric,
+				)
+			}
+			// Note that we have ensured above that either the name
+			// ends on `_total` or that the rendered type is
+			// `unknown`. Therefore, no `_total` must be added here.
+			n, err = writeOpenMetricsSample(
+				w, name, "", metric, "", 0,
+				metric.Counter.GetValue(), 0, false,
+				metric.Counter.Exemplar,
+			)
+		case dto.MetricType_GAUGE:
+			if metric.Gauge == nil {
+				return written, fmt.Errorf(
+					"expected gauge in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, name, "", metric, "", 0,
+				metric.Gauge.GetValue(), 0, false,
+				nil,
+			)
+		case dto.MetricType_UNTYPED:
+			if metric.Untyped == nil {
+				return written, fmt.Errorf(
+					"expected untyped in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, name, "", metric, "", 0,
+				metric.Untyped.GetValue(), 0, false,
+				nil,
+			)
+		case dto.MetricType_SUMMARY:
+			if metric.Summary == nil {
+				return written, fmt.Errorf(
+					"expected summary in metric %s %s", name, metric,
+				)
+			}
+			for _, q := range metric.Summary.Quantile {
+				n, err = writeOpenMetricsSample(
+					w, name, "", metric,
+					model.QuantileLabel, q.GetQuantile(),
+					q.GetValue(), 0, false,
+					nil,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeOpenMetricsSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Summary.GetSampleSum(), 0, false,
+				nil,
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeOpenMetricsSample(
+				w, name, "_count", metric, "", 0,
+				0, metric.Summary.GetSampleCount(), true,
+				nil,
+			)
+		case dto.MetricType_HISTOGRAM:
+			if metric.Histogram == nil {
+				return written, fmt.Errorf(
+					"expected histogram in metric %s %s", name, metric,
+				)
+			}
+			infSeen := false
+			for _, b := range metric.Histogram.Bucket {
+				n, err = writeOpenMetricsSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, b.GetUpperBound(),
+					0, b.GetCumulativeCount(), true,
+					b.Exemplar,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+				if math.IsInf(b.GetUpperBound(), +1) {
+					infSeen = true
+				}
+			}
+			if !infSeen {
+				n, err = writeOpenMetricsSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, math.Inf(+1),
+					0, metric.Histogram.GetSampleCount(), true,
+					nil,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeOpenMetricsSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Histogram.GetSampleSum(), 0, false,
+				nil,
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeOpenMetricsSample(
+				w, name, "_count", metric, "", 0,
+				0, metric.Histogram.GetSampleCount(), true,
+				nil,
+			)
+		default:
+			return written, fmt.Errorf(
+				"unexpected type in metric %s %s", name, metric,
+			)
+		}
+		written += n
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
+func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
+	return w.Write([]byte("# EOF\n"))
+}
+
+// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
+// w, given the metric name, the metric proto message itself, optionally an
+// additional label name with a float64 value (use empty string as label name if
+// not required), the value (optionally as float64 or uint64, determined by
+// useIntValue), and optionally an exemplar (use nil if not required). The
+// function returns the number of bytes written and any error encountered.
+func writeOpenMetricsSample(
+	w enhancedWriter,
+	name, suffix string,
+	metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	floatValue float64, intValue uint64, useIntValue bool,
+	exemplar *dto.Exemplar,
+) (int, error) {
+	var written int
+	n, err := w.WriteString(name)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if suffix != "" {
+		n, err = w.WriteString(suffix)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	n, err = writeOpenMetricsLabelPairs(
+		w, metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	if useIntValue {
+		n, err = writeUint(w, intValue)
+	} else {
+		n, err = writeOpenMetricsFloat(w, floatValue)
+	}
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if metric.TimestampMs != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		// TODO(beorn7): Format this directly without converting to a float first.
+		n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	if exemplar != nil {
+		n, err = writeExemplar(w, exemplar)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
+// in OpenMetrics style.
+func writeOpenMetricsLabelPairs(
+	w enhancedWriter,
+	in []*dto.LabelPair,
+	additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+	if len(in) == 0 && additionalLabelName == "" {
+		return 0, nil
+	}
+	var (
+		written   int
+		separator byte = '{'
+	)
+	for _, lp := range in {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(lp.GetName())
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeEscapedString(w, lp.GetValue(), true)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+		separator = ','
+	}
+	if additionalLabelName != "" {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(additionalLabelName)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeOpenMetricsFloat(w, additionalLabelValue)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+	}
+	err := w.WriteByte('}')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
+// function returns the number of bytes written and any error encountered.
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
+	written := 0
+	n, err := w.WriteString(" # ")
+	written += n
+	if err != nil {
+		return written, err
+	}
+	n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	n, err = writeOpenMetricsFloat(w, e.GetValue())
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if e.Timestamp != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		ts, err := ptypes.Timestamp((*e).Timestamp)
+		if err != nil {
+			return written, err
+		}
+		// TODO(beorn7): Format this directly from components of ts to
+		// avoid overflow/underflow and precision issues of the float
+		// conversion.
+		n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	return written, nil
+}
+
+// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
+// number would otherwise contain neither a "." nor an "e".
+func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
+	switch {
+	case f == 1:
+		return w.WriteString("1.0")
+	case f == 0:
+		return w.WriteString("0.0")
+	case f == -1:
+		return w.WriteString("-1.0")
+	case math.IsNaN(f):
+		return w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		return w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		return w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		if !bytes.ContainsAny(*bp, "e.") {
+			*bp = append(*bp, '.', '0')
+		}
+		written, err := w.Write(*bp)
+		numBufPool.Put(bp)
+		return written, err
+	}
+}
+
+// writeUint is like writeInt just for uint64.
+func writeUint(w enhancedWriter, u uint64) (int, error) {
+	bp := numBufPool.Get().(*[]byte)
+	*bp = strconv.AppendUint((*bp)[:0], u, 10)
+	written, err := w.Write(*bp)
+	numBufPool.Put(bp)
+	return written, err
+}

+ 1 - 2
vendor/github.com/prometheus/common/expfmt/text_create.go

@@ -423,9 +423,8 @@ var (
 func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
 func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
 	if includeDoubleQuote {
 	if includeDoubleQuote {
 		return quotedEscaper.WriteString(w, v)
 		return quotedEscaper.WriteString(w, v)
-	} else {
-		return escaper.WriteString(w, v)
 	}
 	}
+	return escaper.WriteString(w, v)
 }
 }
 
 
 // writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
 // writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes

+ 2 - 2
vendor/github.com/prometheus/common/go.mod

@@ -11,12 +11,12 @@ require (
 	github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
 	github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
 	github.com/pkg/errors v0.8.1
 	github.com/pkg/errors v0.8.1
 	github.com/prometheus/client_golang v1.0.0
 	github.com/prometheus/client_golang v1.0.0
-	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
+	github.com/prometheus/client_model v0.2.0
 	github.com/sirupsen/logrus v1.4.2
 	github.com/sirupsen/logrus v1.4.2
 	golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect
 	golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect
 	golang.org/x/sys v0.0.0-20190422165155-953cdadca894
 	golang.org/x/sys v0.0.0-20190422165155-953cdadca894
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
-	gopkg.in/yaml.v2 v2.2.2
+	gopkg.in/yaml.v2 v2.2.4
 )
 )
 
 
 go 1.11
 go 1.11

+ 91 - 69
vendor/github.com/prometheus/procfs/crypto.go

@@ -14,10 +14,10 @@
 package procfs
 package procfs
 
 
 import (
 import (
+	"bufio"
 	"bytes"
 	"bytes"
 	"fmt"
 	"fmt"
-	"io/ioutil"
-	"strconv"
+	"io"
 	"strings"
 	"strings"
 
 
 	"github.com/prometheus/procfs/internal/util"
 	"github.com/prometheus/procfs/internal/util"
@@ -52,80 +52,102 @@ type Crypto struct {
 // structs containing the relevant info.  More information available here:
 // structs containing the relevant info.  More information available here:
 // https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
 // https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
 func (fs FS) Crypto() ([]Crypto, error) {
 func (fs FS) Crypto() ([]Crypto, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
+	path := fs.proc.Path("crypto")
+	b, err := util.ReadFileNoStat(path)
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
+		return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
 	}
 	}
-	crypto, err := parseCrypto(data)
+
+	crypto, err := parseCrypto(bytes.NewReader(b))
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
+		return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
 	}
 	}
+
 	return crypto, nil
 	return crypto, nil
 }
 }
 
 
-func parseCrypto(cryptoData []byte) ([]Crypto, error) {
-	crypto := []Crypto{}
-
-	cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
-
-	for _, block := range cryptoBlocks {
-		var newCryptoElem Crypto
-
-		lines := strings.Split(string(block), "\n")
-		for _, line := range lines {
-			if strings.TrimSpace(line) == "" || line[0] == ' ' {
-				continue
-			}
-			fields := strings.Split(line, ":")
-			key := strings.TrimSpace(fields[0])
-			value := strings.TrimSpace(fields[1])
-			vp := util.NewValueParser(value)
-
-			switch strings.TrimSpace(key) {
-			case "async":
-				b, err := strconv.ParseBool(value)
-				if err == nil {
-					newCryptoElem.Async = b
-				}
-			case "blocksize":
-				newCryptoElem.Blocksize = vp.PUInt64()
-			case "chunksize":
-				newCryptoElem.Chunksize = vp.PUInt64()
-			case "digestsize":
-				newCryptoElem.Digestsize = vp.PUInt64()
-			case "driver":
-				newCryptoElem.Driver = value
-			case "geniv":
-				newCryptoElem.Geniv = value
-			case "internal":
-				newCryptoElem.Internal = value
-			case "ivsize":
-				newCryptoElem.Ivsize = vp.PUInt64()
-			case "maxauthsize":
-				newCryptoElem.Maxauthsize = vp.PUInt64()
-			case "max keysize":
-				newCryptoElem.MaxKeysize = vp.PUInt64()
-			case "min keysize":
-				newCryptoElem.MinKeysize = vp.PUInt64()
-			case "module":
-				newCryptoElem.Module = value
-			case "name":
-				newCryptoElem.Name = value
-			case "priority":
-				newCryptoElem.Priority = vp.PInt64()
-			case "refcnt":
-				newCryptoElem.Refcnt = vp.PInt64()
-			case "seedsize":
-				newCryptoElem.Seedsize = vp.PUInt64()
-			case "selftest":
-				newCryptoElem.Selftest = value
-			case "type":
-				newCryptoElem.Type = value
-			case "walksize":
-				newCryptoElem.Walksize = vp.PUInt64()
-			}
+// parseCrypto parses a /proc/crypto stream into Crypto elements.
+func parseCrypto(r io.Reader) ([]Crypto, error) {
+	var out []Crypto
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		text := s.Text()
+		switch {
+		case strings.HasPrefix(text, "name"):
+			// Each crypto element begins with its name.
+			out = append(out, Crypto{})
+		case text == "":
+			continue
+		}
+
+		kv := strings.Split(text, ":")
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("malformed crypto line: %q", text)
+		}
+
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
+
+		// Parse the key/value pair into the currently focused element.
+		c := &out[len(out)-1]
+		if err := c.parseKV(k, v); err != nil {
+			return nil, err
 		}
 		}
-		crypto = append(crypto, newCryptoElem)
 	}
 	}
-	return crypto, nil
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+// parseKV parses a key/value pair into the appropriate field of c.
+func (c *Crypto) parseKV(k, v string) error {
+	vp := util.NewValueParser(v)
+
+	switch k {
+	case "async":
+		// Interpret literal yes as true.
+		c.Async = v == "yes"
+	case "blocksize":
+		c.Blocksize = vp.PUInt64()
+	case "chunksize":
+		c.Chunksize = vp.PUInt64()
+	case "digestsize":
+		c.Digestsize = vp.PUInt64()
+	case "driver":
+		c.Driver = v
+	case "geniv":
+		c.Geniv = v
+	case "internal":
+		c.Internal = v
+	case "ivsize":
+		c.Ivsize = vp.PUInt64()
+	case "maxauthsize":
+		c.Maxauthsize = vp.PUInt64()
+	case "max keysize":
+		c.MaxKeysize = vp.PUInt64()
+	case "min keysize":
+		c.MinKeysize = vp.PUInt64()
+	case "module":
+		c.Module = v
+	case "name":
+		c.Name = v
+	case "priority":
+		c.Priority = vp.PInt64()
+	case "refcnt":
+		c.Refcnt = vp.PInt64()
+	case "seedsize":
+		c.Seedsize = vp.PUInt64()
+	case "selftest":
+		c.Selftest = v
+	case "type":
+		c.Type = v
+	case "walksize":
+		c.Walksize = vp.PUInt64()
+	}
+
+	return vp.Err()
 }
 }

+ 1 - 0
vendor/github.com/prometheus/procfs/go.mod

@@ -5,4 +5,5 @@ go 1.12
 require (
 require (
 	github.com/google/go-cmp v0.3.1
 	github.com/google/go-cmp v0.3.1
 	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
 	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
+	golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
 )
 )

+ 62 - 0
vendor/github.com/prometheus/procfs/loadavg.go

@@ -0,0 +1,62 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// LoadAvg represents an entry in /proc/loadavg
+type LoadAvg struct {
+	Load1  float64
+	Load5  float64
+	Load15 float64
+}
+
+// LoadAvg returns loadavg from /proc.
+func (fs FS) LoadAvg() (*LoadAvg, error) {
+	path := fs.proc.Path("loadavg")
+
+	data, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, err
+	}
+	return parseLoad(data)
+}
+
+// Parse /proc loadavg and return 1m, 5m and 15m.
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
+	loads := make([]float64, 3)
+	parts := strings.Fields(string(loadavgBytes))
+	if len(parts) < 3 {
+		return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
+	}
+
+	var err error
+	for i, load := range parts[0:3] {
+		loads[i], err = strconv.ParseFloat(load, 64)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
+		}
+	}
+	return &LoadAvg{
+		Load1:  loads[0],
+		Load5:  loads[1],
+		Load15: loads[2],
+	}, nil
+}

+ 6 - 6
vendor/github.com/prometheus/procfs/mountinfo.go

@@ -29,10 +29,10 @@ import (
 // is described in the following man page.
 // is described in the following man page.
 // http://man7.org/linux/man-pages/man5/proc.5.html
 // http://man7.org/linux/man-pages/man5/proc.5.html
 type MountInfo struct {
 type MountInfo struct {
-	// Unique Id for the mount
-	MountId int
-	// The Id of the parent mount
-	ParentId int
+	// Unique ID for the mount
+	MountID int
+	// The ID of the parent mount
+	ParentID int
 	// The value of `st_dev` for the files on this FS
 	// The value of `st_dev` for the files on this FS
 	MajorMinorVer string
 	MajorMinorVer string
 	// The pathname of the directory in the FS that forms
 	// The pathname of the directory in the FS that forms
@@ -96,11 +96,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
 		SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
 		SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
 	}
 	}
 
 
-	mount.MountId, err = strconv.Atoi(mountInfo[0])
+	mount.MountID, err = strconv.Atoi(mountInfo[0])
 	if err != nil {
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse mount ID")
 		return nil, fmt.Errorf("failed to parse mount ID")
 	}
 	}
-	mount.ParentId, err = strconv.Atoi(mountInfo[1])
+	mount.ParentID, err = strconv.Atoi(mountInfo[1])
 	if err != nil {
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse parent ID")
 		return nil, fmt.Errorf("failed to parse parent ID")
 	}
 	}

+ 153 - 0
vendor/github.com/prometheus/procfs/net_conntrackstat.go

@@ -0,0 +1,153 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
+// and contains netfilter conntrack statistics at one CPU core
+type ConntrackStatEntry struct {
+	Entries       uint64
+	Found         uint64
+	Invalid       uint64
+	Ignore        uint64
+	Insert        uint64
+	InsertFailed  uint64
+	Drop          uint64
+	EarlyDrop     uint64
+	SearchRestart uint64
+}
+
+// Retrieves netfilter's conntrack statistics, split by CPU cores
+func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
+	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
+}
+
+// Parses a slice of ConntrackStatEntries from the given filepath
+func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
+	// This file is small and can be read with one syscall.
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		// Do not wrap this error so the caller can detect os.IsNotExist and
+		// similar conditions.
+		return nil, err
+	}
+
+	stat, err := parseConntrackStat(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
+	}
+
+	return stat, nil
+}
+
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
+func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
+	var entries []ConntrackStatEntry
+
+	scanner := bufio.NewScanner(r)
+	scanner.Scan()
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		conntrackEntry, err := parseConntrackStatEntry(fields)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, *conntrackEntry)
+	}
+
+	return entries, nil
+}
+
+// Parses a ConntrackStatEntry from given array of fields
+func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
+	if len(fields) != 17 {
+		return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
+	}
+	entry := &ConntrackStatEntry{}
+
+	entries, err := parseConntrackStatField(fields[0])
+	if err != nil {
+		return nil, err
+	}
+	entry.Entries = entries
+
+	found, err := parseConntrackStatField(fields[2])
+	if err != nil {
+		return nil, err
+	}
+	entry.Found = found
+
+	invalid, err := parseConntrackStatField(fields[4])
+	if err != nil {
+		return nil, err
+	}
+	entry.Invalid = invalid
+
+	ignore, err := parseConntrackStatField(fields[5])
+	if err != nil {
+		return nil, err
+	}
+	entry.Ignore = ignore
+
+	insert, err := parseConntrackStatField(fields[8])
+	if err != nil {
+		return nil, err
+	}
+	entry.Insert = insert
+
+	insertFailed, err := parseConntrackStatField(fields[9])
+	if err != nil {
+		return nil, err
+	}
+	entry.InsertFailed = insertFailed
+
+	drop, err := parseConntrackStatField(fields[10])
+	if err != nil {
+		return nil, err
+	}
+	entry.Drop = drop
+
+	earlyDrop, err := parseConntrackStatField(fields[11])
+	if err != nil {
+		return nil, err
+	}
+	entry.EarlyDrop = earlyDrop
+
+	searchRestart, err := parseConntrackStatField(fields[16])
+	if err != nil {
+		return nil, err
+	}
+	entry.SearchRestart = searchRestart
+
+	return entry, nil
+}
+
+// Parses a uint64 from given hex in string
+func parseConntrackStatField(field string) (uint64, error) {
+	val, err := strconv.ParseUint(field, 16, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
+	}
+	return val, err
+}

+ 59 - 48
vendor/github.com/prometheus/procfs/net_softnet.go

@@ -14,78 +14,89 @@
 package procfs
 package procfs
 
 
 import (
 import (
+	"bufio"
+	"bytes"
 	"fmt"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
 )
 )
 
 
 // For the proc file format details,
 // For the proc file format details,
-// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
+// See:
+// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
+// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
 
 
-// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
-type SoftnetEntry struct {
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat
+type SoftnetStat struct {
 	// Number of processed packets
 	// Number of processed packets
-	Processed uint
+	Processed uint32
 	// Number of dropped packets
 	// Number of dropped packets
-	Dropped uint
+	Dropped uint32
 	// Number of times processing packets ran out of quota
 	// Number of times processing packets ran out of quota
-	TimeSqueezed uint
+	TimeSqueezed uint32
 }
 }
 
 
-// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
-// and then return a slice of SoftnetEntry's.
-func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
+var softNetProcFile = "net/softnet_stat"
+
+// NetSoftnetStat reads data from /proc/net/softnet_stat.
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
+	if err != nil {
+		return nil, err
+	}
+
+	entries, err := parseSoftnet(bytes.NewReader(b))
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
+		return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
 	}
 	}
 
 
-	return parseSoftnetEntries(data)
+	return entries, nil
 }
 }
 
 
-func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
-	lines := strings.Split(string(data), "\n")
-	entries := make([]SoftnetEntry, 0)
-	var err error
-	const (
-		expectedColumns = 11
-	)
-	for _, line := range lines {
-		columns := strings.Fields(line)
+func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
+	const minColumns = 9
+
+	s := bufio.NewScanner(r)
+
+	var stats []SoftnetStat
+	for s.Scan() {
+		columns := strings.Fields(s.Text())
 		width := len(columns)
 		width := len(columns)
-		if width == 0 {
-			continue
-		}
-		if width != expectedColumns {
-			return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
+
+		if width < minColumns {
+			return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
 		}
 		}
-		var entry SoftnetEntry
-		if entry, err = parseSoftnetEntry(columns); err != nil {
-			return []SoftnetEntry{}, err
+
+		// We only parse the first three columns at the moment.
+		us, err := parseHexUint32s(columns[0:3])
+		if err != nil {
+			return nil, err
 		}
 		}
-		entries = append(entries, entry)
+
+		stats = append(stats, SoftnetStat{
+			Processed:    us[0],
+			Dropped:      us[1],
+			TimeSqueezed: us[2],
+		})
 	}
 	}
 
 
-	return entries, nil
+	return stats, nil
 }
 }
 
 
-func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
-	var err error
-	var processed, dropped, timeSqueezed uint64
-	if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
-		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
-	}
-	if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
-		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
-	}
-	if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
-		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
+func parseHexUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 16, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
 	}
 	}
-	return SoftnetEntry{
-		Processed:    uint(processed),
-		Dropped:      uint(dropped),
-		TimeSqueezed: uint(timeSqueezed),
-	}, nil
+
+	return us, nil
 }
 }

+ 229 - 0
vendor/github.com/prometheus/procfs/net_udp.go

@@ -0,0 +1,229 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	// readLimit is used by io.LimitReader while reading the content of the
+	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
+	// as each line represents a single used socket.
+	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
+	// With e.g. 150 Byte per line and the maximum number of 65535,
+	// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
+	readLimit = 4294967296 // Byte -> 4 GiB
+)
+
+type (
+	// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
+	NetUDP []*netUDPLine
+
+	// NetUDPSummary provides already computed values like the total queue lengths or
+	// the total number of used sockets. In contrast to NetUDP it does not collect
+	// the parsed lines into a slice.
+	NetUDPSummary struct {
+		// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
+		TxQueueLength uint64
+		// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
+		RxQueueLength uint64
+		// UsedSockets shows the total number of parsed lines representing the
+		// number of used sockets.
+		UsedSockets uint64
+	}
+
+	// netUDPLine represents the fields parsed from a single line
+	// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
+	// For the proc file format details, see https://linux.die.net/man/5/proc.
+	netUDPLine struct {
+		Sl        uint64
+		LocalAddr net.IP
+		LocalPort uint64
+		RemAddr   net.IP
+		RemPort   uint64
+		St        uint64
+		TxQueue   uint64
+		RxQueue   uint64
+		UID       uint64
+	}
+)
+
+// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp.
+func (fs FS) NetUDP() (NetUDP, error) {
+	return newNetUDP(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp6.
+func (fs FS) NetUDP6() (NetUDP, error) {
+	return newNetUDP(fs.proc.Path("net/udp6"))
+}
+
+// NetUDPSummary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp.
+func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
+	return newNetUDPSummary(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6Summary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp6.
+func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
+	return newNetUDPSummary(fs.proc.Path("net/udp6"))
+}
+
+// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
+func newNetUDP(file string) (NetUDP, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	netUDP := NetUDP{}
+
+	lr := io.LimitReader(f, readLimit)
+	s := bufio.NewScanner(lr)
+	s.Scan() // skip first line with headers
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		line, err := parseNetUDPLine(fields)
+		if err != nil {
+			return nil, err
+		}
+		netUDP = append(netUDP, line)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return netUDP, nil
+}
+
+// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
+func newNetUDPSummary(file string) (*NetUDPSummary, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	netUDPSummary := &NetUDPSummary{}
+
+	lr := io.LimitReader(f, readLimit)
+	s := bufio.NewScanner(lr)
+	s.Scan() // skip first line with headers
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		line, err := parseNetUDPLine(fields)
+		if err != nil {
+			return nil, err
+		}
+		netUDPSummary.TxQueueLength += line.TxQueue
+		netUDPSummary.RxQueueLength += line.RxQueue
+		netUDPSummary.UsedSockets++
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return netUDPSummary, nil
+}
+
+// parseNetUDPLine parses a single line, represented by a list of fields.
+func parseNetUDPLine(fields []string) (*netUDPLine, error) {
+	line := &netUDPLine{}
+	if len(fields) < 8 {
+		return nil, fmt.Errorf(
+			"cannot parse net udp socket line as it has less then 8 columns: %s",
+			strings.Join(fields, " "),
+		)
+	}
+	var err error // parse error
+
+	// sl
+	s := strings.Split(fields[0], ":")
+	if len(s) != 2 {
+		return nil, fmt.Errorf(
+			"cannot parse sl field in udp socket line: %s", fields[0])
+	}
+
+	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
+		return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
+	}
+	// local_address
+	l := strings.Split(fields[1], ":")
+	if len(l) != 2 {
+		return nil, fmt.Errorf(
+			"cannot parse local_address field in udp socket line: %s", fields[1])
+	}
+	if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
+		return nil, fmt.Errorf(
+			"cannot parse local_address value in udp socket line: %s", err)
+	}
+	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
+		return nil, fmt.Errorf(
+			"cannot parse local_address port value in udp socket line: %s", err)
+	}
+
+	// remote_address
+	r := strings.Split(fields[2], ":")
+	if len(r) != 2 {
+		return nil, fmt.Errorf(
+			"cannot parse rem_address field in udp socket line: %s", fields[1])
+	}
+	if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
+		return nil, fmt.Errorf(
+			"cannot parse rem_address value in udp socket line: %s", err)
+	}
+	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
+		return nil, fmt.Errorf(
+			"cannot parse rem_address port value in udp socket line: %s", err)
+	}
+
+	// st
+	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
+		return nil, fmt.Errorf(
+			"cannot parse st value in udp socket line: %s", err)
+	}
+
+	// tx_queue and rx_queue
+	q := strings.Split(fields[4], ":")
+	if len(q) != 2 {
+		return nil, fmt.Errorf(
+			"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
+			fields[4],
+		)
+	}
+	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
+		return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
+	}
+	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
+		return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
+	}
+
+	// uid
+	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
+		return nil, fmt.Errorf(
+			"cannot parse uid value in udp socket line: %s", err)
+	}
+
+	return line, nil
+}

+ 104 - 118
vendor/github.com/prometheus/procfs/net_unix.go

@@ -15,7 +15,6 @@ package procfs
 
 
 import (
 import (
 	"bufio"
 	"bufio"
-	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"os"
 	"os"
@@ -27,25 +26,15 @@ import (
 // see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
 // see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
 // and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
 // and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
 
 
-const (
-	netUnixKernelPtrIdx = iota
-	netUnixRefCountIdx
-	_
-	netUnixFlagsIdx
-	netUnixTypeIdx
-	netUnixStateIdx
-	netUnixInodeIdx
-
-	// Inode and Path are optional.
-	netUnixStaticFieldsCnt = 6
-)
-
+// Constants for the various /proc/net/unix enumerations.
+// TODO: match against x/sys/unix or similar?
 const (
 const (
 	netUnixTypeStream    = 1
 	netUnixTypeStream    = 1
 	netUnixTypeDgram     = 2
 	netUnixTypeDgram     = 2
 	netUnixTypeSeqpacket = 5
 	netUnixTypeSeqpacket = 5
 
 
-	netUnixFlagListen = 1 << 16
+	netUnixFlagDefault = 0
+	netUnixFlagListen  = 1 << 16
 
 
 	netUnixStateUnconnected  = 1
 	netUnixStateUnconnected  = 1
 	netUnixStateConnecting   = 2
 	netUnixStateConnecting   = 2
@@ -53,129 +42,127 @@ const (
 	netUnixStateDisconnected = 4
 	netUnixStateDisconnected = 4
 )
 )
 
 
-var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
+// NetUNIXType is the type of the type field.
+type NetUNIXType uint64
 
 
-// NetUnixType is the type of the type field.
-type NetUnixType uint64
+// NetUNIXFlags is the type of the flags field.
+type NetUNIXFlags uint64
 
 
-// NetUnixFlags is the type of the flags field.
-type NetUnixFlags uint64
+// NetUNIXState is the type of the state field.
+type NetUNIXState uint64
 
 
-// NetUnixState is the type of the state field.
-type NetUnixState uint64
-
-// NetUnixLine represents a line of /proc/net/unix.
-type NetUnixLine struct {
+// NetUNIXLine represents a line of /proc/net/unix.
+type NetUNIXLine struct {
 	KernelPtr string
 	KernelPtr string
 	RefCount  uint64
 	RefCount  uint64
 	Protocol  uint64
 	Protocol  uint64
-	Flags     NetUnixFlags
-	Type      NetUnixType
-	State     NetUnixState
+	Flags     NetUNIXFlags
+	Type      NetUNIXType
+	State     NetUNIXState
 	Inode     uint64
 	Inode     uint64
 	Path      string
 	Path      string
 }
 }
 
 
-// NetUnix holds the data read from /proc/net/unix.
-type NetUnix struct {
-	Rows []*NetUnixLine
+// NetUNIX holds the data read from /proc/net/unix.
+type NetUNIX struct {
+	Rows []*NetUNIXLine
 }
 }
 
 
-// NewNetUnix returns data read from /proc/net/unix.
-func NewNetUnix() (*NetUnix, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	return fs.NewNetUnix()
+// NetUNIX returns data read from /proc/net/unix.
+func (fs FS) NetUNIX() (*NetUNIX, error) {
+	return readNetUNIX(fs.proc.Path("net/unix"))
 }
 }
 
 
-// NewNetUnix returns data read from /proc/net/unix.
-func (fs FS) NewNetUnix() (*NetUnix, error) {
-	return NewNetUnixByPath(fs.proc.Path("net/unix"))
-}
-
-// NewNetUnixByPath returns data read from /proc/net/unix by file path.
-// It might returns an error with partial parsed data, if an error occur after some data parsed.
-func NewNetUnixByPath(path string) (*NetUnix, error) {
-	f, err := os.Open(path)
+// readNetUNIX reads data in /proc/net/unix format from the specified file.
+func readNetUNIX(file string) (*NetUNIX, error) {
+	// This file could be quite large and a streaming read is desirable versus
+	// reading the entire contents at once.
+	f, err := os.Open(file)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	defer f.Close()
 	defer f.Close()
-	return NewNetUnixByReader(f)
+
+	return parseNetUNIX(f)
 }
 }
 
 
-// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
-// It might returns an error with partial parsed data, if an error occur after some data parsed.
-func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
-	nu := &NetUnix{
-		Rows: make([]*NetUnixLine, 0, 32),
-	}
-	scanner := bufio.NewScanner(reader)
-	// Omit the header line.
-	scanner.Scan()
-	header := scanner.Text()
+// parseNetUNIX creates a NetUnix structure from the incoming stream.
+func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
+	// Begin scanning by checking for the existence of Inode.
+	s := bufio.NewScanner(r)
+	s.Scan()
+
 	// From the man page of proc(5), it does not contain an Inode field,
 	// From the man page of proc(5), it does not contain an Inode field,
-	// but in actually it exists.
-	// This code works for both cases.
-	hasInode := strings.Contains(header, "Inode")
+	// but in actually it exists. This code works for both cases.
+	hasInode := strings.Contains(s.Text(), "Inode")
 
 
-	minFieldsCnt := netUnixStaticFieldsCnt
+	// Expect a minimum number of fields, but Inode and Path are optional:
+	// Num       RefCount Protocol Flags    Type St Inode Path
+	minFields := 6
 	if hasInode {
 	if hasInode {
-		minFieldsCnt++
+		minFields++
 	}
 	}
-	for scanner.Scan() {
-		line := scanner.Text()
-		item, err := nu.parseLine(line, hasInode, minFieldsCnt)
+
+	var nu NetUNIX
+	for s.Scan() {
+		line := s.Text()
+		item, err := nu.parseLine(line, hasInode, minFields)
 		if err != nil {
 		if err != nil {
-			return nu, err
+			return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
 		}
 		}
+
 		nu.Rows = append(nu.Rows, item)
 		nu.Rows = append(nu.Rows, item)
 	}
 	}
 
 
-	return nu, scanner.Err()
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
+	}
+
+	return &nu, nil
 }
 }
 
 
-func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
 	fields := strings.Fields(line)
 	fields := strings.Fields(line)
-	fieldsLen := len(fields)
-	if fieldsLen < minFieldsCnt {
-		return nil, fmt.Errorf(
-			"Parse Unix domain failed: expect at least %d fields but got %d",
-			minFieldsCnt, fieldsLen)
-	}
-	kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
-	if err != nil {
-		return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
+
+	l := len(fields)
+	if l < min {
+		return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
 	}
 	}
-	users, err := u.parseUsers(fields[netUnixRefCountIdx])
+
+	// Field offsets are as follows:
+	// Num       RefCount Protocol Flags    Type St Inode Path
+
+	kernelPtr := strings.TrimSuffix(fields[0], ":")
+
+	users, err := u.parseUsers(fields[1])
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
+		return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
 	}
 	}
-	flags, err := u.parseFlags(fields[netUnixFlagsIdx])
+
+	flags, err := u.parseFlags(fields[3])
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
+		return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
 	}
 	}
-	typ, err := u.parseType(fields[netUnixTypeIdx])
+
+	typ, err := u.parseType(fields[4])
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
+		return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
 	}
 	}
-	state, err := u.parseState(fields[netUnixStateIdx])
+
+	state, err := u.parseState(fields[5])
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
+		return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
 	}
 	}
+
 	var inode uint64
 	var inode uint64
 	if hasInode {
 	if hasInode {
-		inodeStr := fields[netUnixInodeIdx]
-		inode, err = u.parseInode(inodeStr)
+		inode, err = u.parseInode(fields[6])
 		if err != nil {
 		if err != nil {
-			return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
+			return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
 		}
 		}
 	}
 	}
 
 
-	nuLine := &NetUnixLine{
+	n := &NetUNIXLine{
 		KernelPtr: kernelPtr,
 		KernelPtr: kernelPtr,
 		RefCount:  users,
 		RefCount:  users,
 		Type:      typ,
 		Type:      typ,
@@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
 	}
 	}
 
 
 	// Path field is optional.
 	// Path field is optional.
-	if fieldsLen > minFieldsCnt {
-		pathIdx := netUnixInodeIdx + 1
+	if l > min {
+		// Path occurs at either index 6 or 7 depending on whether inode is
+		// already present.
+		pathIdx := 7
 		if !hasInode {
 		if !hasInode {
 			pathIdx--
 			pathIdx--
 		}
 		}
-		nuLine.Path = fields[pathIdx]
-	}
-
-	return nuLine, nil
-}
 
 
-func (u NetUnix) parseKernelPtr(str string) (string, error) {
-	if !strings.HasSuffix(str, ":") {
-		return "", errInvalidKernelPtrFmt
+		n.Path = fields[pathIdx]
 	}
 	}
-	return str[:len(str)-1], nil
+
+	return n, nil
 }
 }
 
 
-func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
-	return strconv.ParseUint(hexStr, 16, 32)
+func (u NetUNIX) parseUsers(s string) (uint64, error) {
+	return strconv.ParseUint(s, 16, 32)
 }
 }
 
 
-func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
-	typ, err := strconv.ParseUint(hexStr, 16, 16)
+func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
+	typ, err := strconv.ParseUint(s, 16, 16)
 	if err != nil {
 	if err != nil {
 		return 0, err
 		return 0, err
 	}
 	}
-	return NetUnixType(typ), nil
+
+	return NetUNIXType(typ), nil
 }
 }
 
 
-func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
-	flags, err := strconv.ParseUint(hexStr, 16, 32)
+func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
+	flags, err := strconv.ParseUint(s, 16, 32)
 	if err != nil {
 	if err != nil {
 		return 0, err
 		return 0, err
 	}
 	}
-	return NetUnixFlags(flags), nil
+
+	return NetUNIXFlags(flags), nil
 }
 }
 
 
-func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
-	st, err := strconv.ParseInt(hexStr, 16, 8)
+func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
+	st, err := strconv.ParseInt(s, 16, 8)
 	if err != nil {
 	if err != nil {
 		return 0, err
 		return 0, err
 	}
 	}
-	return NetUnixState(st), nil
+
+	return NetUNIXState(st), nil
 }
 }
 
 
-func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
-	return strconv.ParseUint(inodeStr, 10, 64)
+func (u NetUNIX) parseInode(s string) (uint64, error) {
+	return strconv.ParseUint(s, 10, 64)
 }
 }
 
 
-func (t NetUnixType) String() string {
+func (t NetUNIXType) String() string {
 	switch t {
 	switch t {
 	case netUnixTypeStream:
 	case netUnixTypeStream:
 		return "stream"
 		return "stream"
@@ -247,7 +233,7 @@ func (t NetUnixType) String() string {
 	return "unknown"
 	return "unknown"
 }
 }
 
 
-func (f NetUnixFlags) String() string {
+func (f NetUNIXFlags) String() string {
 	switch f {
 	switch f {
 	case netUnixFlagListen:
 	case netUnixFlagListen:
 		return "listen"
 		return "listen"
@@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string {
 	}
 	}
 }
 }
 
 
-func (s NetUnixState) String() string {
+func (s NetUNIXState) String() string {
 	switch s {
 	switch s {
 	case netUnixStateUnconnected:
 	case netUnixStateUnconnected:
 		return "unconnected"
 		return "unconnected"

+ 20 - 12
vendor/github.com/prometheus/procfs/proc_fdinfo.go

@@ -16,6 +16,7 @@ package procfs
 import (
 import (
 	"bufio"
 	"bufio"
 	"bytes"
 	"bytes"
+	"errors"
 	"regexp"
 	"regexp"
 
 
 	"github.com/prometheus/procfs/internal/util"
 	"github.com/prometheus/procfs/internal/util"
@@ -23,10 +24,11 @@ import (
 
 
 // Regexp variables
 // Regexp variables
 var (
 var (
-	rPos     = regexp.MustCompile(`^pos:\s+(\d+)$`)
-	rFlags   = regexp.MustCompile(`^flags:\s+(\d+)$`)
-	rMntID   = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
-	rInotify = regexp.MustCompile(`^inotify`)
+	rPos          = regexp.MustCompile(`^pos:\s+(\d+)$`)
+	rFlags        = regexp.MustCompile(`^flags:\s+(\d+)$`)
+	rMntID        = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+	rInotify      = regexp.MustCompile(`^inotify`)
+	rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
 )
 )
 
 
 // ProcFDInfo contains represents file descriptor information.
 // ProcFDInfo contains represents file descriptor information.
@@ -96,15 +98,21 @@ type InotifyInfo struct {
 
 
 // InotifyInfo constructor. Only available on kernel 3.8+.
 // InotifyInfo constructor. Only available on kernel 3.8+.
 func parseInotifyInfo(line string) (*InotifyInfo, error) {
 func parseInotifyInfo(line string) (*InotifyInfo, error) {
-	r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
-	m := r.FindStringSubmatch(line)
-	i := &InotifyInfo{
-		WD:   m[1],
-		Ino:  m[2],
-		Sdev: m[3],
-		Mask: m[4],
+	m := rInotifyParts.FindStringSubmatch(line)
+	if len(m) >= 4 {
+		var mask string
+		if len(m) == 5 {
+			mask = m[4]
+		}
+		i := &InotifyInfo{
+			WD:   m[1],
+			Ino:  m[2],
+			Sdev: m[3],
+			Mask: mask,
+		}
+		return i, nil
 	}
 	}
-	return i, nil
+	return nil, errors.New("invalid inode entry: " + line)
 }
 }
 
 
 // ProcFDInfos represents a list of ProcFDInfo structs.
 // ProcFDInfos represents a list of ProcFDInfo structs.

+ 208 - 0
vendor/github.com/prometheus/procfs/proc_maps.go

@@ -0,0 +1,208 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+type ProcMapPermissions struct {
+	// mapping has the [R]ead flag set
+	Read bool
+	// mapping has the [W]rite flag set
+	Write bool
+	// mapping has the [X]ecutable flag set
+	Execute bool
+	// mapping has the [S]hared flag set
+	Shared bool
+	// mapping is marked as [P]rivate (copy on write)
+	Private bool
+}
+
+// ProcMap contains the process memory-mappings of the process,
+// read from /proc/[pid]/maps
+type ProcMap struct {
+	// The start address of current mapping.
+	StartAddr uintptr
+	// The end address of the current mapping
+	EndAddr uintptr
+	// The permissions for this mapping
+	Perms *ProcMapPermissions
+	// The current offset into the file/fd (e.g., shared libs)
+	Offset int64
+	// Device owner of this mapping (major:minor) in Mkdev format.
+	Dev uint64
+	// The inode of the device above
+	Inode uint64
+	// The file or psuedofile (or empty==anonymous)
+	Pathname string
+}
+
+// parseDevice parses the device token of a line and converts it to a dev_t
+// (mkdev) like structure.
+func parseDevice(s string) (uint64, error) {
+	toks := strings.Split(s, ":")
+	if len(toks) < 2 {
+		return 0, fmt.Errorf("unexpected number of fields")
+	}
+
+	major, err := strconv.ParseUint(toks[0], 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	minor, err := strconv.ParseUint(toks[1], 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	return unix.Mkdev(uint32(major), uint32(minor)), nil
+}
+
+// parseAddress just converts a hex-string to a uintptr
+func parseAddress(s string) (uintptr, error) {
+	a, err := strconv.ParseUint(s, 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	return uintptr(a), nil
+}
+
+// parseAddresses parses the start-end address
+func parseAddresses(s string) (uintptr, uintptr, error) {
+	toks := strings.Split(s, "-")
+	if len(toks) < 2 {
+		return 0, 0, fmt.Errorf("invalid address")
+	}
+
+	saddr, err := parseAddress(toks[0])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	eaddr, err := parseAddress(toks[1])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	return saddr, eaddr, nil
+}
+
+// parsePermissions parses a token and returns any that are set.
+func parsePermissions(s string) (*ProcMapPermissions, error) {
+	if len(s) < 4 {
+		return nil, fmt.Errorf("invalid permissions token")
+	}
+
+	perms := ProcMapPermissions{}
+	for _, ch := range s {
+		switch ch {
+		case 'r':
+			perms.Read = true
+		case 'w':
+			perms.Write = true
+		case 'x':
+			perms.Execute = true
+		case 'p':
+			perms.Private = true
+		case 's':
+			perms.Shared = true
+		}
+	}
+
+	return &perms, nil
+}
+
+// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
+// buffer.
+func parseProcMap(text string) (*ProcMap, error) {
+	fields := strings.Fields(text)
+	if len(fields) < 5 {
+		return nil, fmt.Errorf("truncated procmap entry")
+	}
+
+	saddr, eaddr, err := parseAddresses(fields[0])
+	if err != nil {
+		return nil, err
+	}
+
+	perms, err := parsePermissions(fields[1])
+	if err != nil {
+		return nil, err
+	}
+
+	offset, err := strconv.ParseInt(fields[2], 16, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	device, err := parseDevice(fields[3])
+	if err != nil {
+		return nil, err
+	}
+
+	inode, err := strconv.ParseUint(fields[4], 10, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	pathname := ""
+
+	if len(fields) >= 5 {
+		pathname = strings.Join(fields[5:], " ")
+	}
+
+	return &ProcMap{
+		StartAddr: saddr,
+		EndAddr:   eaddr,
+		Perms:     perms,
+		Offset:    offset,
+		Dev:       device,
+		Inode:     inode,
+		Pathname:  pathname,
+	}, nil
+}
+
+// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
+// process.
+func (p Proc) ProcMaps() ([]*ProcMap, error) {
+	file, err := os.Open(p.path("maps"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	maps := []*ProcMap{}
+	scan := bufio.NewScanner(file)
+
+	for scan.Scan() {
+		m, err := parseProcMap(scan.Text())
+		if err != nil {
+			return nil, err
+		}
+
+		maps = append(maps, m)
+	}
+
+	return maps, nil
+}

+ 21 - 16
vendor/github.com/prometheus/procfs/proc_status.go

@@ -33,37 +33,37 @@ type ProcStatus struct {
 	TGID int
 	TGID int
 
 
 	// Peak virtual memory size.
 	// Peak virtual memory size.
-	VmPeak uint64
+	VmPeak uint64 // nolint:golint
 	// Virtual memory size.
 	// Virtual memory size.
-	VmSize uint64
+	VmSize uint64 // nolint:golint
 	// Locked memory size.
 	// Locked memory size.
-	VmLck uint64
+	VmLck uint64 // nolint:golint
 	// Pinned memory size.
 	// Pinned memory size.
-	VmPin uint64
+	VmPin uint64 // nolint:golint
 	// Peak resident set size.
 	// Peak resident set size.
-	VmHWM uint64
+	VmHWM uint64 // nolint:golint
 	// Resident set size (sum of RssAnnon RssFile and RssShmem).
 	// Resident set size (sum of RssAnnon RssFile and RssShmem).
-	VmRSS uint64
+	VmRSS uint64 // nolint:golint
 	// Size of resident anonymous memory.
 	// Size of resident anonymous memory.
-	RssAnon uint64
+	RssAnon uint64 // nolint:golint
 	// Size of resident file mappings.
 	// Size of resident file mappings.
-	RssFile uint64
+	RssFile uint64 // nolint:golint
 	// Size of resident shared memory.
 	// Size of resident shared memory.
-	RssShmem uint64
+	RssShmem uint64 // nolint:golint
 	// Size of data segments.
 	// Size of data segments.
-	VmData uint64
+	VmData uint64 // nolint:golint
 	// Size of stack segments.
 	// Size of stack segments.
-	VmStk uint64
+	VmStk uint64 // nolint:golint
 	// Size of text segments.
 	// Size of text segments.
-	VmExe uint64
+	VmExe uint64 // nolint:golint
 	// Shared library code size.
 	// Shared library code size.
-	VmLib uint64
+	VmLib uint64 // nolint:golint
 	// Page table entries size.
 	// Page table entries size.
-	VmPTE uint64
+	VmPTE uint64 // nolint:golint
 	// Size of second-level page tables.
 	// Size of second-level page tables.
-	VmPMD uint64
+	VmPMD uint64 // nolint:golint
 	// Swapped-out virtual memory size by anonymous private.
 	// Swapped-out virtual memory size by anonymous private.
-	VmSwap uint64
+	VmSwap uint64 // nolint:golint
 	// Size of hugetlb memory portions
 	// Size of hugetlb memory portions
 	HugetlbPages uint64
 	HugetlbPages uint64
 
 
@@ -71,6 +71,9 @@ type ProcStatus struct {
 	VoluntaryCtxtSwitches uint64
 	VoluntaryCtxtSwitches uint64
 	// Number of involuntary context switches.
 	// Number of involuntary context switches.
 	NonVoluntaryCtxtSwitches uint64
 	NonVoluntaryCtxtSwitches uint64
+
+	// UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs))
+	UIDs [4]string
 }
 }
 
 
 // NewStatus returns the current status information of the process.
 // NewStatus returns the current status information of the process.
@@ -114,6 +117,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
 		s.TGID = int(vUint)
 		s.TGID = int(vUint)
 	case "Name":
 	case "Name":
 		s.Name = vString
 		s.Name = vString
+	case "Uid":
+		copy(s.UIDs[:], strings.Split(vString, "\t"))
 	case "VmPeak":
 	case "VmPeak":
 		s.VmPeak = vUintBytes
 		s.VmPeak = vUintBytes
 	case "VmSize":
 	case "VmSize":

+ 89 - 0
vendor/github.com/prometheus/procfs/swaps.go

@@ -0,0 +1,89 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Swap represents an entry in /proc/swaps.
+type Swap struct {
+	Filename string
+	Type     string
+	Size     int
+	Used     int
+	Priority int
+}
+
+// Swaps returns a slice of all configured swap devices on the system.
+func (fs FS) Swaps() ([]*Swap, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
+	if err != nil {
+		return nil, err
+	}
+	return parseSwaps(data)
+}
+
+func parseSwaps(info []byte) ([]*Swap, error) {
+	swaps := []*Swap{}
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	scanner.Scan() // ignore header line
+	for scanner.Scan() {
+		swapString := scanner.Text()
+		parsedSwap, err := parseSwapString(swapString)
+		if err != nil {
+			return nil, err
+		}
+		swaps = append(swaps, parsedSwap)
+	}
+
+	err := scanner.Err()
+	return swaps, err
+}
+
+func parseSwapString(swapString string) (*Swap, error) {
+	var err error
+
+	swapFields := strings.Fields(swapString)
+	swapLength := len(swapFields)
+	if swapLength < 5 {
+		return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
+	}
+
+	swap := &Swap{
+		Filename: swapFields[0],
+		Type:     swapFields[1],
+	}
+
+	swap.Size, err = strconv.Atoi(swapFields[2])
+	if err != nil {
+		return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
+	}
+	swap.Used, err = strconv.Atoi(swapFields[3])
+	if err != nil {
+		return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
+	}
+	swap.Priority, err = strconv.Atoi(swapFields[4])
+	if err != nil {
+		return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
+	}
+
+	return swap, nil
+}

+ 9 - 6
vendor/go.etcd.io/bbolt/README.md

@@ -152,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start
 a transaction for each one or use locking to ensure only one goroutine accesses
 a transaction for each one or use locking to ensure only one goroutine accesses
 a transaction at a time. Creating transaction from the `DB` is thread safe.
 a transaction at a time. Creating transaction from the `DB` is thread safe.
 
 
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
-
+Transactions should not depend on one another and generally shouldn't be opened
+simultaneously in the same goroutine. This can cause a deadlock as the read-write
+transaction needs to periodically re-map the data file but it cannot do so while
+any read-only transaction is open. Even a nested read-only transaction can cause
+a deadlock, as the child transaction can block the parent transaction from releasing
+its resources.
 
 
 #### Read-write transactions
 #### Read-write transactions
 
 
@@ -275,7 +276,7 @@ should be writable.
 ### Using buckets
 ### Using buckets
 
 
 Buckets are collections of key/value pairs within the database. All keys in a
 Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
 function:
 function:
 
 
 ```go
 ```go
@@ -923,6 +924,7 @@ Below is a list of public, open source projects that use Bolt:
 * [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
 * [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
 * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
 * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
 * [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
 * [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
 * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
 * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
 * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
 * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
 * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
 * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
@@ -935,6 +937,7 @@ Below is a list of public, open source projects that use Bolt:
 * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
 * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
 * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
 * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
 * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
 * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
 * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
 * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
 * [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
 * [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
 * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
 * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.

+ 0 - 3
vendor/go.etcd.io/bbolt/bolt_386.go

@@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
 
 
 // maxAllocSize is the size used when creating array pointers.
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false

+ 0 - 3
vendor/go.etcd.io/bbolt/bolt_amd64.go

@@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 
 // maxAllocSize is the size used when creating array pointers.
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false

Bu fark içinde çok fazla dosya değişikliği olduğu için bazı dosyalar gösterilmiyor