Browse source

Merge pull request #168 from thaJeztah/18.09_backport_bump_golang_1.11

[18.09 backport] Bump Golang to 1.11.11
Kirill Kolyshkin 6 years ago
parent
commit
241a7fc265
100 changed files with 6963 additions and 4500 deletions
  1. Dockerfile (+1 -4)
  2. Dockerfile.e2e (+2 -2)
  3. Dockerfile.simple (+1 -13)
  4. Dockerfile.windows (+1 -1)
  5. api/types/strslice/strslice_test.go (+2 -2)
  6. daemon/daemon.go (+1 -1)
  7. daemon/logger/gelf/gelf_test.go (+1 -1)
  8. daemon/logger/splunk/splunk_test.go (+7 -7)
  9. hack/dockerfile/install/vndr.installer (+1 -1)
  10. hack/make.ps1 (+1 -1)
  11. hack/validate/vendor (+1 -5)
  12. integration/build/build_test.go (+12 -12)
  13. libcontainerd/client_local_windows.go (+8 -8)
  14. migrate/v1/migratev1_test.go (+2 -2)
  15. opts/hosts_test.go (+12 -12)
  16. pkg/authorization/api_test.go (+1 -1)
  17. pkg/authorization/authz_unix_test.go (+1 -1)
  18. reference/store_test.go (+5 -5)
  19. registry/service_v2.go (+2 -2)
  20. runconfig/hostconfig_test.go (+8 -8)
  21. vendor.conf (+8 -14)
  22. vendor/archive/tar/LICENSE (+0 -27)
  23. vendor/archive/tar/README.md (+0 -27)
  24. vendor/archive/tar/common.go (+0 -720)
  25. vendor/archive/tar/format.go (+0 -303)
  26. vendor/archive/tar/reader.go (+0 -855)
  27. vendor/archive/tar/stat_actime1.go (+0 -20)
  28. vendor/archive/tar/stat_actime2.go (+0 -20)
  29. vendor/archive/tar/stat_unix.go (+0 -76)
  30. vendor/archive/tar/strconv.go (+0 -326)
  31. vendor/archive/tar/writer.go (+0 -644)
  32. vendor/github.com/beorn7/perks/quantile/stream.go (+29 -5)
  33. vendor/github.com/coreos/etcd/README.md (+33 -7)
  34. vendor/github.com/coreos/etcd/client/README.md (+1 -1)
  35. vendor/github.com/coreos/etcd/client/auth_role.go (+1 -2)
  36. vendor/github.com/coreos/etcd/client/auth_user.go (+1 -2)
  37. vendor/github.com/coreos/etcd/client/client.go (+20 -13)
  38. vendor/github.com/coreos/etcd/client/doc.go (+2 -2)
  39. vendor/github.com/coreos/etcd/client/keys.generated.go (+4243 -299)
  40. vendor/github.com/coreos/etcd/client/keys.go (+2 -3)
  41. vendor/github.com/coreos/etcd/client/members.go (+2 -3)
  42. vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go (+1 -1)
  43. vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go (+22 -0)
  44. vendor/github.com/coreos/etcd/pkg/srv/srv.go (+2 -1)
  45. vendor/github.com/coreos/etcd/pkg/types/set.go (+2 -2)
  46. vendor/github.com/coreos/etcd/raft/README.md (+6 -6)
  47. vendor/github.com/coreos/etcd/raft/node.go (+4 -2)
  48. vendor/github.com/coreos/etcd/raft/progress.go (+6 -1)
  49. vendor/github.com/coreos/etcd/raft/raft.go (+212 -63)
  50. vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go (+193 -89)
  51. vendor/github.com/coreos/etcd/raft/raftpb/raft.proto (+6 -4)
  52. vendor/github.com/coreos/etcd/raft/rawnode.go (+3 -1)
  53. vendor/github.com/coreos/etcd/raft/read_only.go (+1 -1)
  54. vendor/github.com/coreos/etcd/raft/status.go (+17 -5)
  55. vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go (+3 -20)
  56. vendor/github.com/coreos/etcd/version/version.go (+1 -1)
  57. vendor/github.com/coreos/etcd/wal/decoder.go (+6 -3)
  58. vendor/github.com/coreos/etcd/wal/encoder.go (+1 -1)
  59. vendor/github.com/coreos/etcd/wal/file_pipeline.go (+1 -1)
  60. vendor/github.com/coreos/etcd/wal/wal.go (+48 -1)
  61. vendor/github.com/coreos/etcd/wal/wal_unix.go (+0 -44)
  62. vendor/github.com/coreos/etcd/wal/wal_windows.go (+0 -41)
  63. vendor/github.com/coreos/etcd/wal/walpb/record.pb.go (+3 -20)
  64. vendor/github.com/prometheus/client_golang/NOTICE (+0 -5)
  65. vendor/github.com/prometheus/client_golang/prometheus/README.md (+1 -53)
  66. vendor/github.com/prometheus/client_golang/prometheus/collector.go (+26 -26)
  67. vendor/github.com/prometheus/client_golang/prometheus/counter.go (+14 -15)
  68. vendor/github.com/prometheus/client_golang/prometheus/desc.go (+13 -0)
  69. vendor/github.com/prometheus/client_golang/prometheus/doc.go (+131 -61)
  70. vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go (+16 -16)
  71. vendor/github.com/prometheus/client_golang/prometheus/gauge.go (+4 -8)
  72. vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (+1 -1)
  73. vendor/github.com/prometheus/client_golang/prometheus/histogram.go (+10 -14)
  74. vendor/github.com/prometheus/client_golang/prometheus/http.go (+112 -3)
  75. vendor/github.com/prometheus/client_golang/prometheus/metric.go (+17 -17)
  76. vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (+2 -2)
  77. vendor/github.com/prometheus/client_golang/prometheus/push.go (+0 -65)
  78. vendor/github.com/prometheus/client_golang/prometheus/registry.go (+517 -313)
  79. vendor/github.com/prometheus/client_golang/prometheus/summary.go (+12 -16)
  80. vendor/github.com/prometheus/client_golang/prometheus/untyped.go (+4 -8)
  81. vendor/github.com/prometheus/client_golang/prometheus/value.go (+4 -4)
  82. vendor/github.com/prometheus/client_golang/prometheus/vec.go (+199 -44)
  83. vendor/github.com/prometheus/common/README.md (+2 -2)
  84. vendor/github.com/prometheus/common/expfmt/decode.go (+32 -15)
  85. vendor/github.com/prometheus/common/expfmt/expfmt.go (+5 -7)
  86. vendor/github.com/prometheus/common/expfmt/text_create.go (+5 -2)
  87. vendor/github.com/prometheus/common/expfmt/text_parse.go (+7 -3)
  88. vendor/github.com/prometheus/common/model/labels.go (+8 -4)
  89. vendor/github.com/prometheus/common/model/labelset.go (+1 -1)
  90. vendor/github.com/prometheus/common/model/metric.go (+8 -3)
  91. vendor/github.com/prometheus/common/model/silence.go (+2 -2)
  92. vendor/github.com/prometheus/common/model/time.go (+16 -1)
  93. vendor/github.com/prometheus/common/model/value.go (+19 -6)
  94. vendor/github.com/prometheus/procfs/README.md (+1 -0)
  95. vendor/github.com/prometheus/procfs/buddyinfo.go (+95 -0)
  96. vendor/github.com/prometheus/procfs/fs.go (+49 -0)
  97. vendor/github.com/prometheus/procfs/internal/util/parse.go (+46 -0)
  98. vendor/github.com/prometheus/procfs/ipvs.go (+52 -17)
  99. vendor/github.com/prometheus/procfs/mdstat.go (+13 -0)
  100. vendor/github.com/prometheus/procfs/mountstats.go (+569 -0)

Dockerfile (+1 -4)

@@ -24,10 +24,7 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM golang:1.10.8 AS base
-# FIXME(vdemeester) this is kept for other script depending on it to not fail right away
-# Remove this once the other scripts uses something else to detect the version
-ENV GO_VERSION 1.10.8
+FROM golang:1.11.11 AS base
 # allow replacing httpredir or deb mirror
 ARG APT_MIRROR=deb.debian.org
 RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list

Dockerfile.e2e (+2 -2)

@@ -1,5 +1,5 @@
 ## Step 1: Build tests
-FROM golang:1.10.8-alpine3.7 as builder
+FROM golang:1.11.11-alpine3.9 as builder
 
 RUN apk add --update \
     bash \
@@ -40,7 +40,7 @@ RUN hack/make.sh build-integration-test-binary
 RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \;
 
 ## Step 2: Generate testing image
-FROM alpine:3.7 as runner
+FROM alpine:3.9 as runner
 
 # GNU tar is used for generating the emptyfs image
 RUN apk add --update \

Dockerfile.simple (+1 -13)

@@ -5,7 +5,7 @@
 
 # This represents the bare minimum required to build and test Docker.
 
-FROM debian:stretch
+FROM golang:1.11.11-stretch
 
 # allow replacing httpredir or deb mirror
 ARG APT_MIRROR=deb.debian.org
@@ -37,18 +37,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 		vim-common \
 	&& rm -rf /var/lib/apt/lists/*
 
-# Install Go
-# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
-#            will need updating, to avoid errors. Ping #docker-maintainers on IRC
-#            with a heads-up.
-# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.10.8
-RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
-	| tar -xzC /usr/local
-ENV PATH /go/bin:/usr/local/go/bin:$PATH
-ENV GOPATH /go
-ENV CGO_LDFLAGS -L/lib
-
 # Install runc, containerd, tini and docker-proxy
 # Please edit hack/dockerfile/install/<name>.installer to update them.
 COPY hack/dockerfile/install hack/dockerfile/install

Dockerfile.windows (+1 -1)

@@ -161,7 +161,7 @@ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPref
 # Environment variable notes:
 #  - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
 #  - FROM_DOCKERFILE is used for detection of building within a container.
-ENV GO_VERSION=1.10.8 `
+ENV GO_VERSION=1.11.11 `
     GIT_VERSION=2.11.1 `
     GOPATH=C:\go `
     FROM_DOCKERFILE=1

api/types/strslice/strslice_test.go (+2 -2)

@@ -29,8 +29,8 @@ func TestStrSliceMarshalJSON(t *testing.T) {
 
 func TestStrSliceUnmarshalJSON(t *testing.T) {
 	parts := map[string][]string{
-		"":   {"default", "values"},
-		"[]": {},
+		"":                        {"default", "values"},
+		"[]":                      {},
 		`["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
 	}
 	for json, expectedParts := range parts {
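
Note: most of the Go-file changes in this backport are whitespace-only. Go 1.11 ships a gofmt with a revised alignment heuristic for composite literals, so bumping the toolchain means re-running gofmt, which re-pads map entries around an unusually long key (as above) or splits them out of alignment (as in migrate/v1/migratev1_test.go below). A minimal sketch of the effect, reproducing the map from this test exactly as the new gofmt lays it out:

```go
package main

import "fmt"

func main() {
	// Go 1.11 gofmt pads every key so the values line up with the
	// longest entry, `["/bin/sh","-c","echo"]`; Go 1.10 kept the two
	// short keys in their own, narrower alignment block.
	parts := map[string][]string{
		"":                        {"default", "values"},
		"[]":                      {},
		`["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
	}
	fmt.Println(len(parts))
}
```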

daemon/daemon.go (+1 -1)

@@ -848,7 +848,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 	for operatingSystem, gd := range d.graphDrivers {
 		layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
-			Root: config.Root,
+			Root:                      config.Root,
 			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
 			GraphDriver:               gd,
 			GraphDriverOptions:        config.GraphOptions,

daemon/logger/gelf/gelf_test.go (+1 -1)

@@ -156,7 +156,7 @@ func TestNewGELFTCPWriter(t *testing.T) {
 			"gelf-address":             url,
 			"gelf-tcp-max-reconnect":   "0",
 			"gelf-tcp-reconnect-delay": "0",
-			"tag": "{{.ID}}",
+			"tag":                      "{{.ID}}",
 		},
 		ContainerID: "12345678901234567890",
 	}

daemon/logger/splunk/splunk_test.go (+7 -7)

@@ -30,10 +30,10 @@ func TestValidateLogOpt(t *testing.T) {
 		splunkVerifyConnectionKey:     "true",
 		splunkGzipCompressionKey:      "true",
 		splunkGzipCompressionLevelKey: "1",
-		envKey:      "a",
-		envRegexKey: "^foo",
-		labelsKey:   "b",
-		tagKey:      "c",
+		envKey:                        "a",
+		envRegexKey:                   "^foo",
+		labelsKey:                     "b",
+		tagKey:                        "c",
 	})
 	if err != nil {
 		t.Fatal(err)
@@ -251,9 +251,9 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) {
 			splunkIndexKey:           "myindex",
 			splunkFormatKey:          splunkFormatInline,
 			splunkGzipCompressionKey: "true",
-			tagKey:      "{{.ImageName}}/{{.Name}}",
-			labelsKey:   "a",
-			envRegexKey: "^foo",
+			tagKey:                   "{{.ImageName}}/{{.Name}}",
+			labelsKey:                "a",
+			envRegexKey:              "^foo",
 		},
 		ContainerID:        "containeriid",
 		ContainerName:      "/container_name",

hack/dockerfile/install/vndr.installer (+1 -1)

@@ -1,6 +1,6 @@
 #!/bin/sh
 
-VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
+VNDR_COMMIT=81cb8916aad3c8d06193f008dba3e16f82851f52
 
 install_vndr() {
 	echo "Install vndr version $VNDR_COMMIT"

hack/make.ps1 (+1 -1)

@@ -134,7 +134,7 @@ Function Check-InContainer() {
 # outside of a container where it may be out of date with master.
 Function Verify-GoVersion() {
     Try {
-        $goVersionDockerfile=(Select-String -Path ".\Dockerfile" -Pattern "^FROM golang:").ToString().Split(" ")[1].SubString(7)
+        $goVersionDockerfile=(Select-String -Path ".\Dockerfile" -Pattern "^FROM golang:").ToString().Split(" ")[1].SubString(7) -replace '\.0$',''
         $goVersionInstalled=(go version).ToString().Split(" ")[2].SubString(2)
     }
     Catch [Exception] {
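
The added `-replace '\.0$',''` appears to normalize patch-zero releases: the Go toolchain reports version 1.11.0 as plain `go1.11`, so a Dockerfile pinned to `golang:1.11.0` would otherwise never match the installed toolchain's self-reported version. A rough Go equivalent of the same comparison, for illustration only (the `normalize` helper is hypothetical, not part of the repo):

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// normalize drops a trailing ".0" so "1.11.0" compares equal to the
// "1.11" that a patch-zero Go toolchain reports for itself.
func normalize(v string) string {
	return strings.TrimSuffix(v, ".0")
}

func main() {
	dockerfileVersion := normalize("1.11.11") // parsed from `FROM golang:1.11.11`
	installed := strings.TrimPrefix(runtime.Version(), "go")
	fmt.Println(dockerfileVersion == installed)
}
```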

hack/validate/vendor (+1 -5)

@@ -9,11 +9,7 @@ validate_vendor_diff(){
 	unset IFS
 
 	if [ ${#files[@]} -gt 0 ]; then
-		# Remove vendor/ first so  that anything not included in vendor.conf will
-		# cause the validation to fail. archive/tar is a special case, see vendor.conf
-		# for details.
-		ls -d vendor/* | grep -v vendor/archive | xargs rm -rf
-		# run vndr to recreate vendor/
+		# recreate vendor/
 		vndr
 		# check if any files have changed
 		diffs="$(git status --porcelain -- vendor 2>/dev/null)"

integration/build/build_test.go (+12 -12)

@@ -38,8 +38,8 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) {
 			RUN exit 0
 			RUN exit 0`,
 			numberOfIntermediateContainers: 2,
-			rm:      false,
-			forceRm: false,
+			rm:                             false,
+			forceRm:                        false,
 		},
 		{
 			name: "successful build with remove",
@@ -47,8 +47,8 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) {
 			RUN exit 0
 			RUN exit 0`,
 			numberOfIntermediateContainers: 0,
-			rm:      true,
-			forceRm: false,
+			rm:                             true,
+			forceRm:                        false,
 		},
 		{
 			name: "successful build with remove and force remove",
@@ -56,8 +56,8 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) {
 			RUN exit 0
 			RUN exit 0`,
 			numberOfIntermediateContainers: 0,
-			rm:      true,
-			forceRm: true,
+			rm:                             true,
+			forceRm:                        true,
 		},
 		{
 			name: "failed build with no removal",
@@ -65,8 +65,8 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) {
 			RUN exit 0
 			RUN exit 1`,
 			numberOfIntermediateContainers: 2,
-			rm:      false,
-			forceRm: false,
+			rm:                             false,
+			forceRm:                        false,
 		},
 		{
 			name: "failed build with remove",
@@ -74,8 +74,8 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) {
 			RUN exit 0
 			RUN exit 1`,
 			numberOfIntermediateContainers: 1,
-			rm:      true,
-			forceRm: false,
+			rm:                             true,
+			forceRm:                        false,
 		},
 		{
 			name: "failed build with remove and force remove",
@@ -83,8 +83,8 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) {
 			RUN exit 0
 			RUN exit 1`,
 			numberOfIntermediateContainers: 0,
-			rm:      true,
-			forceRm: true,
+			rm:                             true,
+			forceRm:                        true,
 		},
 	}
 

libcontainerd/client_local_windows.go (+8 -8)

@@ -168,9 +168,9 @@ func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeO
 func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
 	logger := c.logger.WithField("container", id)
 	configuration := &hcsshim.ContainerConfig{
-		SystemType: "Container",
-		Name:       id,
-		Owner:      defaultOwner,
+		SystemType:              "Container",
+		Name:                    id,
+		Owner:                   defaultOwner,
 		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
 		HostName:                spec.Hostname,
 		HvPartition:             false,
@@ -377,11 +377,11 @@ func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interfa
 	}
 
 	configuration := &hcsshim.ContainerConfig{
-		HvPartition:   true,
-		Name:          id,
-		SystemType:    "container",
-		ContainerType: "linux",
-		Owner:         defaultOwner,
+		HvPartition:                 true,
+		Name:                        id,
+		SystemType:                  "container",
+		ContainerType:               "linux",
+		Owner:                       defaultOwner,
 		TerminateOnLastHandleClosed: true,
 	}
 

migrate/v1/migratev1_test.go (+2 -2)

@@ -40,9 +40,9 @@ func TestMigrateRefs(t *testing.T) {
 	}
 
 	expected := map[string]string{
-		"docker.io/library/busybox:latest":                                                                  "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9",
+		"docker.io/library/busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9",
 		"docker.io/library/busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9",
-		"docker.io/library/registry:2":                                                                      "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
+		"docker.io/library/registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
 	}
 
 	if !reflect.DeepEqual(expected, ta.refs) {

opts/hosts_test.go (+12 -12)

@@ -69,18 +69,18 @@ func TestParseDockerDaemonHost(t *testing.T) {
 		"[::1]:5555/path":             "tcp://[::1]:5555/path",
 		"[0:0:0:0:0:0:0:1]:":          "tcp://[0:0:0:0:0:0:0:1]:2375",
 		"[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path",
-		":6666":                   fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost),
-		":6666/path":              fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost),
-		"tcp://":                  DefaultTCPHost,
-		"tcp://:7777":             fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost),
-		"tcp://:7777/path":        fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost),
-		"unix:///run/docker.sock": "unix:///run/docker.sock",
-		"unix://":                 "unix://" + DefaultUnixSocket,
-		"fd://":                   "fd://",
-		"fd://something":          "fd://something",
-		"localhost:":              "tcp://localhost:2375",
-		"localhost:5555":          "tcp://localhost:5555",
-		"localhost:5555/path":     "tcp://localhost:5555/path",
+		":6666":                       fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost),
+		":6666/path":                  fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost),
+		"tcp://":                      DefaultTCPHost,
+		"tcp://:7777":                 fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost),
+		"tcp://:7777/path":            fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost),
+		"unix:///run/docker.sock":     "unix:///run/docker.sock",
+		"unix://":                     "unix://" + DefaultUnixSocket,
+		"fd://":                       "fd://",
+		"fd://something":              "fd://something",
+		"localhost:":                  "tcp://localhost:2375",
+		"localhost:5555":              "tcp://localhost:5555",
+		"localhost:5555/path":         "tcp://localhost:5555/path",
 	}
 	for invalidAddr, expectedError := range invalids {
 		if addr, err := parseDaemonHost(invalidAddr); err == nil || err.Error() != expectedError {

pkg/authorization/api_test.go (+1 -1)

@@ -17,7 +17,7 @@ import (
 
 func TestPeerCertificateMarshalJSON(t *testing.T) {
 	template := &x509.Certificate{
-		IsCA: true,
+		IsCA:                  true,
 		BasicConstraintsValid: true,
 		SubjectKeyId:          []byte{1, 2, 3},
 		SerialNumber:          big.NewInt(1234),

pkg/authorization/authz_unix_test.go (+1 -1)

@@ -144,7 +144,7 @@ func TestDrainBody(t *testing.T) {
 		length             int // length is the message length send to drainBody
 		expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called
 	}{
-		{10, 10}, // Small message size
+		{10, 10},                           // Small message size
 		{maxBodySize - 1, maxBodySize - 1}, // Max message size
 		{maxBodySize * 2, 0},               // Large message size (skip copying body)
 

reference/store_test.go (+5 -5)

@@ -16,11 +16,11 @@ import (
 
 var (
 	saveLoadTestCases = map[string]digest.Digest{
-		"registry:5000/foobar:HEAD":                                                        "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6",
-		"registry:5000/foobar:alternate":                                                   "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793",
-		"registry:5000/foobar:latest":                                                      "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b",
-		"registry:5000/foobar:master":                                                      "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc",
-		"jess/hollywood:latest":                                                            "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe",
+		"registry:5000/foobar:HEAD":      "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6",
+		"registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793",
+		"registry:5000/foobar:latest":    "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b",
+		"registry:5000/foobar:master":    "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc",
+		"jess/hollywood:latest":          "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe",
 		"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c",
 		"busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c",
 	}

registry/service_v2.go (+2 -2)

@@ -57,7 +57,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp
 				Scheme: "https",
 				Host:   hostname,
 			},
-			Version: APIVersion2,
+			Version:                        APIVersion2,
 			AllowNondistributableArtifacts: ana,
 			TrimHostname:                   true,
 			TLSConfig:                      tlsConfig,
@@ -70,7 +70,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp
 				Scheme: "http",
 				Host:   hostname,
 			},
-			Version: APIVersion2,
+			Version:                        APIVersion2,
 			AllowNondistributableArtifacts: ana,
 			TrimHostname:                   true,
 			// used to check if supposed to be secure via InsecureSkipVerify

runconfig/hostconfig_test.go (+8 -8)

@@ -22,20 +22,20 @@ func TestNetworkModeTest(t *testing.T) {
 		"something:weird":          {true, false, false, false, false, false},
 		"bridge":                   {true, true, false, false, false, false},
 		DefaultDaemonNetworkMode(): {true, true, false, false, false, false},
-		"host":           {false, false, true, false, false, false},
-		"container:name": {false, false, false, true, false, false},
-		"none":           {true, false, false, false, true, false},
-		"default":        {true, false, false, false, false, true},
+		"host":                     {false, false, true, false, false, false},
+		"container:name":           {false, false, false, true, false, false},
+		"none":                     {true, false, false, false, true, false},
+		"default":                  {true, false, false, false, false, true},
 	}
 	networkModeNames := map[container.NetworkMode]string{
 		"":                         "",
 		"something:weird":          "something:weird",
 		"bridge":                   "bridge",
 		DefaultDaemonNetworkMode(): "bridge",
-		"host":           "host",
-		"container:name": "container",
-		"none":           "none",
-		"default":        "default",
+		"host":                     "host",
+		"container:name":           "container",
+		"none":                     "none",
+		"default":                  "default",
 	}
 	for networkMode, state := range networkModes {
 		if networkMode.IsPrivate() != state[0] {

vendor.conf (+8 -14)

@@ -55,9 +55,9 @@ github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-github.com/coreos/etcd v3.2.1
+github.com/coreos/etcd v3.3.9
 github.com/coreos/go-semver v0.2.0
-github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
+github.com/ugorji/go v1.1.1
 github.com/hashicorp/consul v0.5.2
 github.com/miekg/dns v1.0.7
 github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
@@ -140,13 +140,13 @@ golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650
 github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
 github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
 github.com/hashicorp/golang-lru 0fb14efe8c47ae851c0034ed7a448854d3d34cf3
-github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
+github.com/coreos/pkg v3
 github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
-github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
-github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/prometheus/client_golang v0.8.0
+github.com/beorn7/perks 3a771d992973f24aa725d07868b467d1ddfceaf
+github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2
+github.com/prometheus/common 7600349dcfe1abd18d72d3a1770870d9800a7801
+github.com/prometheus/procfs 7d6f385de8bea29190f15ba9931442a0eaef9af7
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d # v0.8.0
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
@@ -161,9 +161,3 @@ github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.
 github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
 
 github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
-
-
-# archive/tar (for Go 1.10, see https://github.com/golang/go/issues/24787)
-# mkdir -p ./vendor/archive
-# git clone -b go-1.10 --depth=1 git@github.com:kolyshkin/go-tar.git ./vendor/archive/tar
-# vndr # to clean up test files

vendor/archive/tar/LICENSE (+0 -27)

@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/archive/tar/README.md (+0 -27)

@@ -1,27 +0,0 @@
-This is a fork of Go 1.10 `archive/tar` package from the official
-[repo](https://github.com/golang/go/tree/release-branch.go1.10/src/archive/tar),
-with a partial [revert](https://github.com/kolyshkin/go-tar/commit/d651d6e45972363e9bb62b8e9d876df440b31628)
-of upstream [commit 0564e304a6ea](https://github.com/golang/go/commit/0564e304a6ea394a42929060c588469dbd6f32af).
-It is suggested as a replacement to the original package included with Go 1.10
-in case you want to build a static Linux/glibc binary that works, and
-can't afford to use `CGO_ENABLED=0`.
-
-## Details
-
-Using Go 1.10 [archive/tar](https://golang.org/pkg/archive/tar/) from a static binary
-compiled with glibc on Linux can result in a panic upon calling
-[`tar.FileInfoHeader()`](https://golang.org/pkg/archive/tar/#FileInfoHeader).
-This is a major regression in Go 1.10, filed as
-[Go issue #24787](https://github.com/golang/go/issues/24787).
-
-The above issue is caused by an unfortunate combination of:
-1. glibc way of dynamic loading of nss libraries even for a static build;
-2. Go `os/user` package hard-coded reliance on libc to resolve user/group IDs to names (unless CGO is disabled).
-
-While glibc can probably not be fixed and is not considered a bug per se,
-the `os/user` issue is documented (see [Go issue #23265](https://github.com/golang/go/issues/23265))
-and already fixed by [Go commit 62f0127d81](https://github.com/golang/go/commit/62f0127d8104d8266d9a3fb5a87e2f09ec8b6f5b).
-The fix is expected to make its way to Go 1.11, and requires `osusergo` build tag
-to be used for a static build.
-
-This repository serves as a temporary workaround until the above fix is available.
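
The fork can be deleted because Go 1.11 contains the os/user fix this README refers to: built with the `osusergo` tag, user/group lookups use a pure-Go implementation instead of calling into glibc, so a statically linked binary no longer crashes in `tar.FileInfoHeader`. A minimal sketch of the call that triggered the Go 1.10 regression (assuming a Linux/glibc toolchain):

```go
package main

import (
	"archive/tar"
	"fmt"
	"os"
)

func main() {
	fi, err := os.Stat(os.Args[0])
	if err != nil {
		panic(err)
	}
	// On Unix, FileInfoHeader resolves Uname/Gname through os/user.
	// With Go 1.10 that meant a libc call, which could crash a static
	// glibc binary (golang/go#24787); Go 1.11 built with -tags osusergo
	// uses a pure-Go lookup instead.
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Name, hdr.Uname, hdr.Gname)
}
```

Per the README above, the static build under Go 1.11 then needs only the build tag, e.g. something like `CGO_ENABLED=1 go build -tags osusergo -ldflags '-extldflags "-static"'`.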

vendor/archive/tar/common.go (+0 -720)

@@ -1,720 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tar implements access to tar archives.
-//
-// Tape archives (tar) are a file format for storing a sequence of files that
-// can be read and written in a streaming manner.
-// This package aims to cover most variations of the format,
-// including those produced by GNU and BSD tar tools.
-package tar
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"os"
-	"path"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
-// architectures. If a large value is encountered when decoding, the result
-// stored in Header will be the truncated version.
-
-var (
-	ErrHeader          = errors.New("archive/tar: invalid tar header")
-	ErrWriteTooLong    = errors.New("archive/tar: write too long")
-	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
-	ErrWriteAfterClose = errors.New("archive/tar: write after close")
-	errMissData        = errors.New("archive/tar: sparse file references non-existent data")
-	errUnrefData       = errors.New("archive/tar: sparse file contains unreferenced data")
-	errWriteHole       = errors.New("archive/tar: write non-NUL byte in sparse hole")
-)
-
-type headerError []string
-
-func (he headerError) Error() string {
-	const prefix = "archive/tar: cannot encode header"
-	var ss []string
-	for _, s := range he {
-		if s != "" {
-			ss = append(ss, s)
-		}
-	}
-	if len(ss) == 0 {
-		return prefix
-	}
-	return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and "))
-}
-
-// Type flags for Header.Typeflag.
-const (
-	// Type '0' indicates a regular file.
-	TypeReg  = '0'
-	TypeRegA = '\x00' // For legacy support; use TypeReg instead
-
-	// Type '1' to '6' are header-only flags and may not have a data body.
-	TypeLink    = '1' // Hard link
-	TypeSymlink = '2' // Symbolic link
-	TypeChar    = '3' // Character device node
-	TypeBlock   = '4' // Block device node
-	TypeDir     = '5' // Directory
-	TypeFifo    = '6' // FIFO node
-
-	// Type '7' is reserved.
-	TypeCont = '7'
-
-	// Type 'x' is used by the PAX format to store key-value records that
-	// are only relevant to the next file.
-	// This package transparently handles these types.
-	TypeXHeader = 'x'
-
-	// Type 'g' is used by the PAX format to store key-value records that
-	// are relevant to all subsequent files.
-	// This package only supports parsing and composing such headers,
-	// but does not currently support persisting the global state across files.
-	TypeXGlobalHeader = 'g'
-
-	// Type 'S' indicates a sparse file in the GNU format.
-	TypeGNUSparse = 'S'
-
-	// Types 'L' and 'K' are used by the GNU format for a meta file
-	// used to store the path or link name for the next file.
-	// This package transparently handles these types.
-	TypeGNULongName = 'L'
-	TypeGNULongLink = 'K'
-)
-
-// Keywords for PAX extended header records.
-const (
-	paxNone     = "" // Indicates that no PAX key is suitable
-	paxPath     = "path"
-	paxLinkpath = "linkpath"
-	paxSize     = "size"
-	paxUid      = "uid"
-	paxGid      = "gid"
-	paxUname    = "uname"
-	paxGname    = "gname"
-	paxMtime    = "mtime"
-	paxAtime    = "atime"
-	paxCtime    = "ctime"   // Removed from later revision of PAX spec, but was valid
-	paxCharset  = "charset" // Currently unused
-	paxComment  = "comment" // Currently unused
-
-	paxSchilyXattr = "SCHILY.xattr."
-
-	// Keywords for GNU sparse files in a PAX extended header.
-	paxGNUSparse          = "GNU.sparse."
-	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
-	paxGNUSparseOffset    = "GNU.sparse.offset"
-	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
-	paxGNUSparseMap       = "GNU.sparse.map"
-	paxGNUSparseName      = "GNU.sparse.name"
-	paxGNUSparseMajor     = "GNU.sparse.major"
-	paxGNUSparseMinor     = "GNU.sparse.minor"
-	paxGNUSparseSize      = "GNU.sparse.size"
-	paxGNUSparseRealSize  = "GNU.sparse.realsize"
-)
-
-// basicKeys is a set of the PAX keys for which we have built-in support.
-// This does not contain "charset" or "comment", which are both PAX-specific,
-// so adding them as first-class features of Header is unlikely.
-// Users can use the PAXRecords field to set it themselves.
-var basicKeys = map[string]bool{
-	paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
-	paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
-}
-
-// A Header represents a single header in a tar archive.
-// Some fields may not be populated.
-//
-// For forward compatibility, users that retrieve a Header from Reader.Next,
-// mutate it in some ways, and then pass it back to Writer.WriteHeader
-// should do so by creating a new Header and copying the fields
-// that they are interested in preserving.
-type Header struct {
-	Typeflag byte // Type of header entry (should be TypeReg for most files)
-
-	Name     string // Name of file entry
-	Linkname string // Target name of link (valid for TypeLink or TypeSymlink)
-
-	Size  int64  // Logical file size in bytes
-	Mode  int64  // Permission and mode bits
-	Uid   int    // User ID of owner
-	Gid   int    // Group ID of owner
-	Uname string // User name of owner
-	Gname string // Group name of owner
-
-	// If the Format is unspecified, then Writer.WriteHeader rounds ModTime
-	// to the nearest second and ignores the AccessTime and ChangeTime fields.
-	//
-	// To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
-	// To use sub-second resolution, specify the Format as PAX.
-	ModTime    time.Time // Modification time
-	AccessTime time.Time // Access time (requires either PAX or GNU support)
-	ChangeTime time.Time // Change time (requires either PAX or GNU support)
-
-	Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
-	Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
-
-	// Xattrs stores extended attributes as PAX records under the
-	// "SCHILY.xattr." namespace.
-	//
-	// The following are semantically equivalent:
-	//  h.Xattrs[key] = value
-	//  h.PAXRecords["SCHILY.xattr."+key] = value
-	//
-	// When Writer.WriteHeader is called, the contents of Xattrs will take
-	// precedence over those in PAXRecords.
-	//
-	// Deprecated: Use PAXRecords instead.
-	Xattrs map[string]string
-
-	// PAXRecords is a map of PAX extended header records.
-	//
-	// User-defined records should have keys of the following form:
-	//	VENDOR.keyword
-	// Where VENDOR is some namespace in all uppercase, and keyword may
-	// not contain the '=' character (e.g., "GOLANG.pkg.version").
-	// The key and value should be non-empty UTF-8 strings.
-	//
-	// When Writer.WriteHeader is called, PAX records derived from the
-	// the other fields in Header take precedence over PAXRecords.
-	PAXRecords map[string]string
-
-	// Format specifies the format of the tar header.
-	//
-	// This is set by Reader.Next as a best-effort guess at the format.
-	// Since the Reader liberally reads some non-compliant files,
-	// it is possible for this to be FormatUnknown.
-	//
-	// If the format is unspecified when Writer.WriteHeader is called,
-	// then it uses the first format (in the order of USTAR, PAX, GNU)
-	// capable of encoding this Header (see Format).
-	Format Format
-}
-
-// sparseEntry represents a Length-sized fragment at Offset in the file.
-type sparseEntry struct{ Offset, Length int64 }
-
-func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
-
-// A sparse file can be represented as either a sparseDatas or a sparseHoles.
-// As long as the total size is known, they are equivalent and one can be
-// converted to the other form and back. The various tar formats with sparse
-// file support represent sparse files in the sparseDatas form. That is, they
-// specify the fragments in the file that has data, and treat everything else as
-// having zero bytes. As such, the encoding and decoding logic in this package
-// deals with sparseDatas.
-//
-// However, the external API uses sparseHoles instead of sparseDatas because the
-// zero value of sparseHoles logically represents a normal file (i.e., there are
-// no holes in it). On the other hand, the zero value of sparseDatas implies
-// that the file has no data in it, which is rather odd.
-//
-// As an example, if the underlying raw file contains the 10-byte data:
-//	var compactFile = "abcdefgh"
-//
-// And the sparse map has the following entries:
-//	var spd sparseDatas = []sparseEntry{
-//		{Offset: 2,  Length: 5},  // Data fragment for 2..6
-//		{Offset: 18, Length: 3},  // Data fragment for 18..20
-//	}
-//	var sph sparseHoles = []sparseEntry{
-//		{Offset: 0,  Length: 2},  // Hole fragment for 0..1
-//		{Offset: 7,  Length: 11}, // Hole fragment for 7..17
-//		{Offset: 21, Length: 4},  // Hole fragment for 21..24
-//	}
-//
-// Then the content of the resulting sparse file with a Header.Size of 25 is:
-//	var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
-type (
-	sparseDatas []sparseEntry
-	sparseHoles []sparseEntry
-)
-
-// validateSparseEntries reports whether sp is a valid sparse map.
-// It does not matter whether sp represents data fragments or hole fragments.
-func validateSparseEntries(sp []sparseEntry, size int64) bool {
-	// Validate all sparse entries. These are the same checks as performed by
-	// the BSD tar utility.
-	if size < 0 {
-		return false
-	}
-	var pre sparseEntry
-	for _, cur := range sp {
-		switch {
-		case cur.Offset < 0 || cur.Length < 0:
-			return false // Negative values are never okay
-		case cur.Offset > math.MaxInt64-cur.Length:
-			return false // Integer overflow with large length
-		case cur.endOffset() > size:
-			return false // Region extends beyond the actual size
-		case pre.endOffset() > cur.Offset:
-			return false // Regions cannot overlap and must be in order
-		}
-		pre = cur
-	}
-	return true
-}
-
-// alignSparseEntries mutates src and returns dst where each fragment's
-// starting offset is aligned up to the nearest block edge, and each
-// ending offset is aligned down to the nearest block edge.
-//
-// Even though the Go tar Reader and the BSD tar utility can handle entries
-// with arbitrary offsets and lengths, the GNU tar utility can only handle
-// offsets and lengths that are multiples of blockSize.
-func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
-	dst := src[:0]
-	for _, s := range src {
-		pos, end := s.Offset, s.endOffset()
-		pos += blockPadding(+pos) // Round-up to nearest blockSize
-		if end != size {
-			end -= blockPadding(-end) // Round-down to nearest blockSize
-		}
-		if pos < end {
-			dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
-		}
-	}
-	return dst
-}
-
-// invertSparseEntries converts a sparse map from one form to the other.
-// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
-// The input must have been already validated.
-//
-// This function mutates src and returns a normalized map where:
-//	* adjacent fragments are coalesced together
-//	* only the last fragment may be empty
-//	* the endOffset of the last fragment is the total size
-func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
-	dst := src[:0]
-	var pre sparseEntry
-	for _, cur := range src {
-		if cur.Length == 0 {
-			continue // Skip empty fragments
-		}
-		pre.Length = cur.Offset - pre.Offset
-		if pre.Length > 0 {
-			dst = append(dst, pre) // Only add non-empty fragments
-		}
-		pre.Offset = cur.endOffset()
-	}
-	pre.Length = size - pre.Offset // Possibly the only empty fragment
-	return append(dst, pre)
-}
-
-// fileState tracks the number of logical (includes sparse holes) and physical
-// (actual in tar archive) bytes remaining for the current file.
-//
-// Invariant: LogicalRemaining >= PhysicalRemaining
-type fileState interface {
-	LogicalRemaining() int64
-	PhysicalRemaining() int64
-}
-
-// allowedFormats determines which formats can be used.
-// The value returned is the logical OR of multiple possible formats.
-// If the value is FormatUnknown, then the input Header cannot be encoded
-// and an error is returned explaining why.
-//
-// As a by-product of checking the fields, this function returns paxHdrs, which
-// contain all fields that could not be directly encoded.
-// A value receiver ensures that this method does not mutate the source Header.
-func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
-	format = FormatUSTAR | FormatPAX | FormatGNU
-	paxHdrs = make(map[string]string)
-
-	var whyNoUSTAR, whyNoPAX, whyNoGNU string
-	var preferPAX bool // Prefer PAX over USTAR
-	verifyString := func(s string, size int, name, paxKey string) {
-		// NUL-terminator is optional for path and linkpath.
-		// Technically, it is required for uname and gname,
-		// but neither GNU nor BSD tar checks for it.
-		tooLong := len(s) > size
-		allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
-		if hasNUL(s) || (tooLong && !allowLongGNU) {
-			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
-			format.mustNotBe(FormatGNU)
-		}
-		if !isASCII(s) || tooLong {
-			canSplitUSTAR := paxKey == paxPath
-			if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
-				whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
-				format.mustNotBe(FormatUSTAR)
-			}
-			if paxKey == paxNone {
-				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
-				format.mustNotBe(FormatPAX)
-			} else {
-				paxHdrs[paxKey] = s
-			}
-		}
-		if v, ok := h.PAXRecords[paxKey]; ok && v == s {
-			paxHdrs[paxKey] = v
-		}
-	}
-	verifyNumeric := func(n int64, size int, name, paxKey string) {
-		if !fitsInBase256(size, n) {
-			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
-			format.mustNotBe(FormatGNU)
-		}
-		if !fitsInOctal(size, n) {
-			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
-			format.mustNotBe(FormatUSTAR)
-			if paxKey == paxNone {
-				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
-				format.mustNotBe(FormatPAX)
-			} else {
-				paxHdrs[paxKey] = strconv.FormatInt(n, 10)
-			}
-		}
-		if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
-			paxHdrs[paxKey] = v
-		}
-	}
-	verifyTime := func(ts time.Time, size int, name, paxKey string) {
-		if ts.IsZero() {
-			return // Always okay
-		}
-		if !fitsInBase256(size, ts.Unix()) {
-			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
-			format.mustNotBe(FormatGNU)
-		}
-		isMtime := paxKey == paxMtime
-		fitsOctal := fitsInOctal(size, ts.Unix())
-		if (isMtime && !fitsOctal) || !isMtime {
-			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
-			format.mustNotBe(FormatUSTAR)
-		}
-		needsNano := ts.Nanosecond() != 0
-		if !isMtime || !fitsOctal || needsNano {
-			preferPAX = true // USTAR may truncate sub-second measurements
-			if paxKey == paxNone {
-				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
-				format.mustNotBe(FormatPAX)
-			} else {
-				paxHdrs[paxKey] = formatPAXTime(ts)
-			}
-		}
-		if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
-			paxHdrs[paxKey] = v
-		}
-	}
-
-	// Check basic fields.
-	var blk block
-	v7 := blk.V7()
-	ustar := blk.USTAR()
-	gnu := blk.GNU()
-	verifyString(h.Name, len(v7.Name()), "Name", paxPath)
-	verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath)
-	verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname)
-	verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname)
-	verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone)
-	verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid)
-	verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid)
-	verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize)
-	verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone)
-	verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone)
-	verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime)
-	verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime)
-	verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime)
-
-	// Check for header-only types.
-	var whyOnlyPAX, whyOnlyGNU string
-	switch h.Typeflag {
-	case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
-		// Exclude TypeLink and TypeSymlink, since they may reference directories.
-		if strings.HasSuffix(h.Name, "/") {
-			return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
-		}
-	case TypeXHeader, TypeGNULongName, TypeGNULongLink:
-		return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
-	case TypeXGlobalHeader:
-		h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}
-		if !reflect.DeepEqual(h, h2) {
-			return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"}
-		}
-		whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
-		format.mayOnlyBe(FormatPAX)
-	}
-	if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
-		return FormatUnknown, nil, headerError{"negative size on header-only type"}
-	}
-
-	// Check PAX records.
-	if len(h.Xattrs) > 0 {
-		for k, v := range h.Xattrs {
-			paxHdrs[paxSchilyXattr+k] = v
-		}
-		whyOnlyPAX = "only PAX supports Xattrs"
-		format.mayOnlyBe(FormatPAX)
-	}
-	if len(h.PAXRecords) > 0 {
-		for k, v := range h.PAXRecords {
-			switch _, exists := paxHdrs[k]; {
-			case exists:
-				continue // Do not overwrite existing records
-			case h.Typeflag == TypeXGlobalHeader:
-				paxHdrs[k] = v // Copy all records
-			case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
-				paxHdrs[k] = v // Ignore local records that may conflict
-			}
-		}
-		whyOnlyPAX = "only PAX supports PAXRecords"
-		format.mayOnlyBe(FormatPAX)
-	}
-	for k, v := range paxHdrs {
-		if !validPAXRecord(k, v) {
-			return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
-		}
-	}
-
-	// TODO(dsnet): Re-enable this when adding sparse support.
-	// See https://golang.org/issue/22735
-	/*
-		// Check sparse files.
-		if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
-			if isHeaderOnlyType(h.Typeflag) {
-				return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
-			}
-			if !validateSparseEntries(h.SparseHoles, h.Size) {
-				return FormatUnknown, nil, headerError{"invalid sparse holes"}
-			}
-			if h.Typeflag == TypeGNUSparse {
-				whyOnlyGNU = "only GNU supports TypeGNUSparse"
-				format.mayOnlyBe(FormatGNU)
-			} else {
-				whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
-				format.mustNotBe(FormatGNU)
-			}
-			whyNoUSTAR = "USTAR does not support sparse files"
-			format.mustNotBe(FormatUSTAR)
-		}
-	*/
-
-	// Check desired format.
-	if wantFormat := h.Format; wantFormat != FormatUnknown {
-		if wantFormat.has(FormatPAX) && !preferPAX {
-			wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
-		}
-		format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
-	}
-	if format == FormatUnknown {
-		switch h.Format {
-		case FormatUSTAR:
-			err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
-		case FormatPAX:
-			err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
-		case FormatGNU:
-			err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
-		default:
-			err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
-		}
-	}
-	return format, paxHdrs, err
-}
-
-// FileInfo returns an os.FileInfo for the Header.
-func (h *Header) FileInfo() os.FileInfo {
-	return headerFileInfo{h}
-}
-
-// headerFileInfo implements os.FileInfo.
-type headerFileInfo struct {
-	h *Header
-}
-
-func (fi headerFileInfo) Size() int64        { return fi.h.Size }
-func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
-func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
-func (fi headerFileInfo) Sys() interface{}   { return fi.h }
-
-// Name returns the base name of the file.
-func (fi headerFileInfo) Name() string {
-	if fi.IsDir() {
-		return path.Base(path.Clean(fi.h.Name))
-	}
-	return path.Base(fi.h.Name)
-}
-
-// Mode returns the permission and mode bits for the headerFileInfo.
-func (fi headerFileInfo) Mode() (mode os.FileMode) {
-	// Set file permission bits.
-	mode = os.FileMode(fi.h.Mode).Perm()
-
-	// Set setuid, setgid and sticky bits.
-	if fi.h.Mode&c_ISUID != 0 {
-		mode |= os.ModeSetuid
-	}
-	if fi.h.Mode&c_ISGID != 0 {
-		mode |= os.ModeSetgid
-	}
-	if fi.h.Mode&c_ISVTX != 0 {
-		mode |= os.ModeSticky
-	}
-
-	// Set file mode bits; clear perm, setuid, setgid, and sticky bits.
-	switch m := os.FileMode(fi.h.Mode) &^ 07777; m {
-	case c_ISDIR:
-		mode |= os.ModeDir
-	case c_ISFIFO:
-		mode |= os.ModeNamedPipe
-	case c_ISLNK:
-		mode |= os.ModeSymlink
-	case c_ISBLK:
-		mode |= os.ModeDevice
-	case c_ISCHR:
-		mode |= os.ModeDevice
-		mode |= os.ModeCharDevice
-	case c_ISSOCK:
-		mode |= os.ModeSocket
-	}
-
-	switch fi.h.Typeflag {
-	case TypeSymlink:
-		mode |= os.ModeSymlink
-	case TypeChar:
-		mode |= os.ModeDevice
-		mode |= os.ModeCharDevice
-	case TypeBlock:
-		mode |= os.ModeDevice
-	case TypeDir:
-		mode |= os.ModeDir
-	case TypeFifo:
-		mode |= os.ModeNamedPipe
-	}
-
-	return mode
-}
-
-// sysStat, if non-nil, populates h from system-dependent fields of fi.
-var sysStat func(fi os.FileInfo, h *Header) error
-
-const (
-	// Mode constants from the USTAR spec:
-	// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
-	c_ISUID = 04000 // Set uid
-	c_ISGID = 02000 // Set gid
-	c_ISVTX = 01000 // Save text (sticky bit)
-
-	// Common Unix mode constants; these are not defined in any common tar standard.
-	// Header.FileInfo understands these, but FileInfoHeader will never produce these.
-	c_ISDIR  = 040000  // Directory
-	c_ISFIFO = 010000  // FIFO
-	c_ISREG  = 0100000 // Regular file
-	c_ISLNK  = 0120000 // Symbolic link
-	c_ISBLK  = 060000  // Block special file
-	c_ISCHR  = 020000  // Character special file
-	c_ISSOCK = 0140000 // Socket
-)
-
-// FileInfoHeader creates a partially-populated Header from fi.
-// If fi describes a symlink, FileInfoHeader records link as the link target.
-// If fi describes a directory, a slash is appended to the name.
-//
-// Since os.FileInfo's Name method only returns the base name of
-// the file it describes, it may be necessary to modify Header.Name
-// to provide the full path name of the file.
-func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
-	if fi == nil {
-		return nil, errors.New("archive/tar: FileInfo is nil")
-	}
-	fm := fi.Mode()
-	h := &Header{
-		Name:    fi.Name(),
-		ModTime: fi.ModTime(),
-		Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
-	}
-	switch {
-	case fm.IsRegular():
-		h.Typeflag = TypeReg
-		h.Size = fi.Size()
-	case fi.IsDir():
-		h.Typeflag = TypeDir
-		h.Name += "/"
-	case fm&os.ModeSymlink != 0:
-		h.Typeflag = TypeSymlink
-		h.Linkname = link
-	case fm&os.ModeDevice != 0:
-		if fm&os.ModeCharDevice != 0 {
-			h.Typeflag = TypeChar
-		} else {
-			h.Typeflag = TypeBlock
-		}
-	case fm&os.ModeNamedPipe != 0:
-		h.Typeflag = TypeFifo
-	case fm&os.ModeSocket != 0:
-		return nil, fmt.Errorf("archive/tar: sockets not supported")
-	default:
-		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
-	}
-	if fm&os.ModeSetuid != 0 {
-		h.Mode |= c_ISUID
-	}
-	if fm&os.ModeSetgid != 0 {
-		h.Mode |= c_ISGID
-	}
-	if fm&os.ModeSticky != 0 {
-		h.Mode |= c_ISVTX
-	}
-	// If possible, populate additional fields from OS-specific
-	// FileInfo fields.
-	if sys, ok := fi.Sys().(*Header); ok {
-		// This FileInfo came from a Header (not the OS). Use the
-		// original Header to populate all remaining fields.
-		h.Uid = sys.Uid
-		h.Gid = sys.Gid
-		h.Uname = sys.Uname
-		h.Gname = sys.Gname
-		h.AccessTime = sys.AccessTime
-		h.ChangeTime = sys.ChangeTime
-		if sys.Xattrs != nil {
-			h.Xattrs = make(map[string]string)
-			for k, v := range sys.Xattrs {
-				h.Xattrs[k] = v
-			}
-		}
-		if sys.Typeflag == TypeLink {
-			// hard link
-			h.Typeflag = TypeLink
-			h.Size = 0
-			h.Linkname = sys.Linkname
-		}
-		if sys.PAXRecords != nil {
-			h.PAXRecords = make(map[string]string)
-			for k, v := range sys.PAXRecords {
-				h.PAXRecords[k] = v
-			}
-		}
-	}
-	if sysStat != nil {
-		return h, sysStat(fi, h)
-	}
-	return h, nil
-}
-
-// isHeaderOnlyType checks if the given type flag is of the type that has no
-// data section even if a size is specified.
-func isHeaderOnlyType(flag byte) bool {
-	switch flag {
-	case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
-		return true
-	default:
-		return false
-	}
-}
-
-func min(a, b int64) int64 {
-	if a < b {
-		return a
-	}
-	return b
-}

vendor/archive/tar/format.go (+0 -303)

@@ -1,303 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import "strings"
-
-// Format represents the tar archive format.
-//
-// The original tar format was introduced in Unix V7.
-// Since then, there have been multiple competing formats attempting to
-// standardize or extend the V7 format to overcome its limitations.
-// The most common formats are the USTAR, PAX, and GNU formats,
-// each with their own advantages and limitations.
-//
-// The following table captures the capabilities of each format:
-//
-//	                  |  USTAR |       PAX |       GNU
-//	------------------+--------+-----------+----------
-//	Name              |   256B | unlimited | unlimited
-//	Linkname          |   100B | unlimited | unlimited
-//	Size              | uint33 | unlimited |    uint89
-//	Mode              | uint21 |    uint21 |    uint57
-//	Uid/Gid           | uint21 | unlimited |    uint57
-//	Uname/Gname       |    32B | unlimited |       32B
-//	ModTime           | uint33 | unlimited |     int89
-//	AccessTime        |    n/a | unlimited |     int89
-//	ChangeTime        |    n/a | unlimited |     int89
-//	Devmajor/Devminor | uint21 |    uint21 |    uint57
-//	------------------+--------+-----------+----------
-//	string encoding   |  ASCII |     UTF-8 |    binary
-//	sub-second times  |     no |       yes |        no
-//	sparse files      |     no |       yes |       yes
-//
-// The table's upper portion shows the Header fields, where each format reports
-// the maximum number of bytes allowed for each string field and
-// the integer type used to store each numeric field
-// (where timestamps are stored as the number of seconds since the Unix epoch).
-//
-// The table's lower portion shows specialized features of each format,
-// such as supported string encodings, support for sub-second timestamps,
-// or support for sparse files.
-//
-// The Writer currently provides no support for sparse files.
-type Format int
-
-// Constants to identify various tar formats.
-const (
-	// Deliberately hide the meaning of constants from public API.
-	_ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...
-
-	// FormatUnknown indicates that the format is unknown.
-	FormatUnknown
-
-	// The format of the original Unix V7 tar tool prior to standardization.
-	formatV7
-
-	// FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
-	//
-	// While this format is compatible with most tar readers,
-	// the format has several limitations making it unsuitable for some usages.
-	// Most notably, it cannot support sparse files, files larger than 8GiB,
-	// filenames larger than 256 characters, and non-ASCII filenames.
-	//
-	// Reference:
-	//	http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
-	FormatUSTAR
-
-	// FormatPAX represents the PAX header format defined in POSIX.1-2001.
-	//
-	// PAX extends USTAR by writing a special file with Typeflag TypeXHeader
-	// preceding the original header. This file contains a set of key-value
-	// records, which are used to overcome USTAR's shortcomings, in addition to
-	// providing the ability to have sub-second resolution for timestamps.
-	//
-	// Some newer formats add their own extensions to PAX by defining their
-	// own keys and assigning certain semantic meaning to the associated values.
-	// For example, sparse file support in PAX is implemented using keys
-	// defined by the GNU manual (e.g., "GNU.sparse.map").
-	//
-	// Reference:
-	//	http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
-	FormatPAX
-
-	// FormatGNU represents the GNU header format.
-	//
-	// The GNU header format is older than the USTAR and PAX standards and
-	// is not compatible with them. The GNU format supports
-	// arbitrary file sizes, filenames of arbitrary encoding and length,
-	// sparse files, and other features.
-	//
-	// It is recommended that PAX be chosen over GNU unless the target
-	// application can only parse GNU formatted archives.
-	//
-	// Reference:
-	//	http://www.gnu.org/software/tar/manual/html_node/Standard.html
-	FormatGNU
-
-	// Schily's tar format, which is incompatible with USTAR.
-	// This does not cover STAR extensions to the PAX format; these fall under
-	// the PAX format.
-	formatSTAR
-
-	formatMax
-)
-
-func (f Format) has(f2 Format) bool   { return f&f2 != 0 }
-func (f *Format) mayBe(f2 Format)     { *f |= f2 }
-func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }
-func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
-
-var formatNames = map[Format]string{
-	formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
-}
-
-func (f Format) String() string {
-	var ss []string
-	for f2 := Format(1); f2 < formatMax; f2 <<= 1 {
-		if f.has(f2) {
-			ss = append(ss, formatNames[f2])
-		}
-	}
-	switch len(ss) {
-	case 0:
-		return "<unknown>"
-	case 1:
-		return ss[0]
-	default:
-		return "(" + strings.Join(ss, " | ") + ")"
-	}
-}
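These flag values survive into the exported API (tar.Format and its constants have been public since Go 1.10), so the bit-set behavior of String can be observed directly. A minimal sketch using only the public archive/tar package:

package main

import (
	"archive/tar"
	"fmt"
)

func main() {
	// A reader that has narrowed an archive down to USTAR-or-PAX reports both bits.
	f := tar.FormatUSTAR | tar.FormatPAX
	fmt.Println(f)             // (USTAR | PAX)
	fmt.Println(tar.FormatGNU) // GNU
}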
-
-// Magics used to identify various formats.
-const (
-	magicGNU, versionGNU     = "ustar ", " \x00"
-	magicUSTAR, versionUSTAR = "ustar\x00", "00"
-	trailerSTAR              = "tar\x00"
-)
-
-// Size constants from various tar specifications.
-const (
-	blockSize  = 512 // Size of each block in a tar stream
-	nameSize   = 100 // Max length of the name field in USTAR format
-	prefixSize = 155 // Max length of the prefix field in USTAR format
-)
-
-// blockPadding computes the number of bytes needed to pad offset up to the
-// nearest block edge where 0 <= n < blockSize.
-func blockPadding(offset int64) (n int64) {
-	return -offset & (blockSize - 1)
-}
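The -offset & (blockSize - 1) trick relies on blockSize being a power of two: negating offset in two's complement and masking the low nine bits yields the distance to the next 512-byte boundary. A standalone check of the arithmetic:

package main

import "fmt"

func main() {
	pad := func(offset int64) int64 { return -offset & 511 } // blockSize - 1
	for _, off := range []int64{0, 1, 511, 512, 1000} {
		fmt.Printf("offset %4d -> pad %3d\n", off, pad(off)) // 0, 511, 1, 0, 24
	}
}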
-
-var zeroBlock block
-
-type block [blockSize]byte
-
-// Convert block to any number of formats.
-func (b *block) V7() *headerV7       { return (*headerV7)(b) }
-func (b *block) GNU() *headerGNU     { return (*headerGNU)(b) }
-func (b *block) STAR() *headerSTAR   { return (*headerSTAR)(b) }
-func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
-func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) }
-
-// GetFormat checks that the block is a valid tar header based on the checksum.
-// It then attempts to guess the specific format based on magic values.
-// If the checksum fails, then FormatUnknown is returned.
-func (b *block) GetFormat() Format {
-	// Verify checksum.
-	var p parser
-	value := p.parseOctal(b.V7().Chksum())
-	chksum1, chksum2 := b.ComputeChecksum()
-	if p.err != nil || (value != chksum1 && value != chksum2) {
-		return FormatUnknown
-	}
-
-	// Guess the magic values.
-	magic := string(b.USTAR().Magic())
-	version := string(b.USTAR().Version())
-	trailer := string(b.STAR().Trailer())
-	switch {
-	case magic == magicUSTAR && trailer == trailerSTAR:
-		return formatSTAR
-	case magic == magicUSTAR:
-		return FormatUSTAR | FormatPAX
-	case magic == magicGNU && version == versionGNU:
-		return FormatGNU
-	default:
-		return formatV7
-	}
-}
-
-// SetFormat writes the magic values necessary for specified format
-// and then updates the checksum accordingly.
-func (b *block) SetFormat(format Format) {
-	// Set the magic values.
-	switch {
-	case format.has(formatV7):
-		// Do nothing.
-	case format.has(FormatGNU):
-		copy(b.GNU().Magic(), magicGNU)
-		copy(b.GNU().Version(), versionGNU)
-	case format.has(formatSTAR):
-		copy(b.STAR().Magic(), magicUSTAR)
-		copy(b.STAR().Version(), versionUSTAR)
-		copy(b.STAR().Trailer(), trailerSTAR)
-	case format.has(FormatUSTAR | FormatPAX):
-		copy(b.USTAR().Magic(), magicUSTAR)
-		copy(b.USTAR().Version(), versionUSTAR)
-	default:
-		panic("invalid format")
-	}
-
-	// Update checksum.
-	// This field is special in that it is terminated by a NULL then space.
-	var f formatter
-	field := b.V7().Chksum()
-	chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
-	f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
-	field[7] = ' '
-}
-
-// ComputeChecksum computes the checksum for the header block.
-// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
-// signed byte values.
-// We compute and return both.
-func (b *block) ComputeChecksum() (unsigned, signed int64) {
-	for i, c := range b {
-		if 148 <= i && i < 156 {
-			c = ' ' // Treat the checksum field itself as all spaces.
-		}
-		unsigned += int64(c)
-		signed += int64(int8(c))
-	}
-	return unsigned, signed
-}
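The bounds quoted in SetFormat follow directly from this sum: an all-NUL block contributes only the eight implied spaces of the checksum field (8 × 0x20 = 256), while a block of 0xff bytes tops out at 504 × 255 + 256 = 128776. A sketch of the same computation outside the package:

package main

import "fmt"

func main() {
	sum := func(blk [512]byte) int64 {
		var s int64
		for i, c := range blk {
			if 148 <= i && i < 156 {
				c = ' ' // the checksum field counts as spaces
			}
			s += int64(c)
		}
		return s
	}

	var lo, hi [512]byte // lo stays all NULs
	for i := range hi {
		hi[i] = 0xff
	}
	fmt.Println(sum(lo), sum(hi)) // 256 128776
}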
-
-// Reset clears the block with all zeros.
-func (b *block) Reset() {
-	*b = block{}
-}
-
-type headerV7 [blockSize]byte
-
-func (h *headerV7) Name() []byte     { return h[000:][:100] }
-func (h *headerV7) Mode() []byte     { return h[100:][:8] }
-func (h *headerV7) UID() []byte      { return h[108:][:8] }
-func (h *headerV7) GID() []byte      { return h[116:][:8] }
-func (h *headerV7) Size() []byte     { return h[124:][:12] }
-func (h *headerV7) ModTime() []byte  { return h[136:][:12] }
-func (h *headerV7) Chksum() []byte   { return h[148:][:8] }
-func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
-func (h *headerV7) LinkName() []byte { return h[157:][:100] }
-
-type headerGNU [blockSize]byte
-
-func (h *headerGNU) V7() *headerV7       { return (*headerV7)(h) }
-func (h *headerGNU) Magic() []byte       { return h[257:][:6] }
-func (h *headerGNU) Version() []byte     { return h[263:][:2] }
-func (h *headerGNU) UserName() []byte    { return h[265:][:32] }
-func (h *headerGNU) GroupName() []byte   { return h[297:][:32] }
-func (h *headerGNU) DevMajor() []byte    { return h[329:][:8] }
-func (h *headerGNU) DevMinor() []byte    { return h[337:][:8] }
-func (h *headerGNU) AccessTime() []byte  { return h[345:][:12] }
-func (h *headerGNU) ChangeTime() []byte  { return h[357:][:12] }
-func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) }
-func (h *headerGNU) RealSize() []byte    { return h[483:][:12] }
-
-type headerSTAR [blockSize]byte
-
-func (h *headerSTAR) V7() *headerV7      { return (*headerV7)(h) }
-func (h *headerSTAR) Magic() []byte      { return h[257:][:6] }
-func (h *headerSTAR) Version() []byte    { return h[263:][:2] }
-func (h *headerSTAR) UserName() []byte   { return h[265:][:32] }
-func (h *headerSTAR) GroupName() []byte  { return h[297:][:32] }
-func (h *headerSTAR) DevMajor() []byte   { return h[329:][:8] }
-func (h *headerSTAR) DevMinor() []byte   { return h[337:][:8] }
-func (h *headerSTAR) Prefix() []byte     { return h[345:][:131] }
-func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
-func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
-func (h *headerSTAR) Trailer() []byte    { return h[508:][:4] }
-
-type headerUSTAR [blockSize]byte
-
-func (h *headerUSTAR) V7() *headerV7     { return (*headerV7)(h) }
-func (h *headerUSTAR) Magic() []byte     { return h[257:][:6] }
-func (h *headerUSTAR) Version() []byte   { return h[263:][:2] }
-func (h *headerUSTAR) UserName() []byte  { return h[265:][:32] }
-func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
-func (h *headerUSTAR) DevMajor() []byte  { return h[329:][:8] }
-func (h *headerUSTAR) DevMinor() []byte  { return h[337:][:8] }
-func (h *headerUSTAR) Prefix() []byte    { return h[345:][:155] }
-
-type sparseArray []byte
-
-func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) }
-func (s sparseArray) IsExtended() []byte     { return s[24*s.MaxEntries():][:1] }
-func (s sparseArray) MaxEntries() int        { return len(s) / 24 }
-
-type sparseElem []byte
-
-func (s sparseElem) Offset() []byte { return s[00:][:12] }
-func (s sparseElem) Length() []byte { return s[12:][:12] }

+ 0 - 855
vendor/archive/tar/reader.go

@@ -1,855 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
-	"bytes"
-	"io"
-	"io/ioutil"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Reader provides sequential access to the contents of a tar archive.
-// Reader.Next advances to the next file in the archive (including the first),
-// and then Reader can be treated as an io.Reader to access the file's data.
-type Reader struct {
-	r    io.Reader
-	pad  int64      // Amount of padding (ignored) after current file entry
-	curr fileReader // Reader for current file entry
-	blk  block      // Buffer to use as temporary local storage
-
-	// err is a persistent error.
-	// It is only the responsibility of every exported method of Reader to
-	// ensure that this error is sticky.
-	err error
-}
-
-type fileReader interface {
-	io.Reader
-	fileState
-
-	WriteTo(io.Writer) (int64, error)
-}
-
-// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader {
-	return &Reader{r: r, curr: &regFileReader{r, 0}}
-}
-
-// Next advances to the next entry in the tar archive.
-// The Header.Size determines how many bytes can be read for the next file.
-// Any remaining data in the current file is automatically discarded.
-//
-// io.EOF is returned at the end of the input.
-func (tr *Reader) Next() (*Header, error) {
-	if tr.err != nil {
-		return nil, tr.err
-	}
-	hdr, err := tr.next()
-	tr.err = err
-	return hdr, err
-}
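Callers drive everything below through Next and Read alone. A minimal sketch of the usual loop over an archive (the file name is hypothetical):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

func main() {
	f, err := os.Open("example.tar") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // two zero blocks: end of archive
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name, hdr.Size)
		io.Copy(ioutil.Discard, tr) // optional: Next discards leftovers anyway
	}
}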
-
-func (tr *Reader) next() (*Header, error) {
-	var paxHdrs map[string]string
-	var gnuLongName, gnuLongLink string
-
-	// Externally, Next iterates through the tar archive as if it is a series of
-	// files. Internally, the tar format often uses fake "files" to add
-	// metadata that describes the next file. These metadata "files" should not
-	// normally be visible to the outside. As such, this loop iterates through
-	// one or more "header files" until it finds a "normal file".
-	format := FormatUSTAR | FormatPAX | FormatGNU
-loop:
-	for {
-		// Discard the remainder of the file and any padding.
-		if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil {
-			return nil, err
-		}
-		if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
-			return nil, err
-		}
-		tr.pad = 0
-
-		hdr, rawHdr, err := tr.readHeader()
-		if err != nil {
-			return nil, err
-		}
-		if err := tr.handleRegularFile(hdr); err != nil {
-			return nil, err
-		}
-		format.mayOnlyBe(hdr.Format)
-
-		// Check for PAX/GNU special headers and files.
-		switch hdr.Typeflag {
-		case TypeXHeader, TypeXGlobalHeader:
-			format.mayOnlyBe(FormatPAX)
-			paxHdrs, err = parsePAX(tr)
-			if err != nil {
-				return nil, err
-			}
-			if hdr.Typeflag == TypeXGlobalHeader {
-				mergePAX(hdr, paxHdrs)
-				return &Header{
-					Name:       hdr.Name,
-					Typeflag:   hdr.Typeflag,
-					Xattrs:     hdr.Xattrs,
-					PAXRecords: hdr.PAXRecords,
-					Format:     format,
-				}, nil
-			}
-			continue loop // This is a meta header affecting the next header
-		case TypeGNULongName, TypeGNULongLink:
-			format.mayOnlyBe(FormatGNU)
-			realname, err := ioutil.ReadAll(tr)
-			if err != nil {
-				return nil, err
-			}
-
-			var p parser
-			switch hdr.Typeflag {
-			case TypeGNULongName:
-				gnuLongName = p.parseString(realname)
-			case TypeGNULongLink:
-				gnuLongLink = p.parseString(realname)
-			}
-			continue loop // This is a meta header affecting the next header
-		default:
-			// The old GNU sparse format is handled here since it is technically
-			// just a regular file with additional attributes.
-
-			if err := mergePAX(hdr, paxHdrs); err != nil {
-				return nil, err
-			}
-			if gnuLongName != "" {
-				hdr.Name = gnuLongName
-			}
-			if gnuLongLink != "" {
-				hdr.Linkname = gnuLongLink
-			}
-			if hdr.Typeflag == TypeRegA && strings.HasSuffix(hdr.Name, "/") {
-				hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
-			}
-
-			// The extended headers may have updated the size.
-			// Thus, set up the regFileReader again after merging PAX headers.
-			if err := tr.handleRegularFile(hdr); err != nil {
-				return nil, err
-			}
-
-			// Sparse formats rely on being able to read from the logical data
-			// section; there must be a preceding call to handleRegularFile.
-			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
-				return nil, err
-			}
-
-			// Set the final guess at the format.
-			if format.has(FormatUSTAR) && format.has(FormatPAX) {
-				format.mayOnlyBe(FormatUSTAR)
-			}
-			hdr.Format = format
-			return hdr, nil // This is a file, so stop
-		}
-	}
-}
-
-// handleRegularFile sets up the current file reader and padding such that it
-// can only read the following logical data section. It will properly handle
-// special headers that contain no data section.
-func (tr *Reader) handleRegularFile(hdr *Header) error {
-	nb := hdr.Size
-	if isHeaderOnlyType(hdr.Typeflag) {
-		nb = 0
-	}
-	if nb < 0 {
-		return ErrHeader
-	}
-
-	tr.pad = blockPadding(nb)
-	tr.curr = &regFileReader{r: tr.r, nb: nb}
-	return nil
-}
-
-// handleSparseFile checks if the current file is a sparse format of any type
-// and sets the curr reader appropriately.
-func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
-	var spd sparseDatas
-	var err error
-	if hdr.Typeflag == TypeGNUSparse {
-		spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
-	} else {
-		spd, err = tr.readGNUSparsePAXHeaders(hdr)
-	}
-
-	// If sp is non-nil, then this is a sparse file.
-	// Note that it is possible for len(sp) == 0.
-	if err == nil && spd != nil {
-		if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
-			return ErrHeader
-		}
-		sph := invertSparseEntries(spd, hdr.Size)
-		tr.curr = &sparseFileReader{tr.curr, sph, 0}
-	}
-	return err
-}
-
-// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
-// If they are found, then this function reads the sparse map and returns it.
-// This assumes that 0.0 headers have already been converted to 0.1 headers
-// by the PAX header parsing logic.
-func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
-	// Identify the version of GNU headers.
-	var is1x0 bool
-	major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
-	switch {
-	case major == "0" && (minor == "0" || minor == "1"):
-		is1x0 = false
-	case major == "1" && minor == "0":
-		is1x0 = true
-	case major != "" || minor != "":
-		return nil, nil // Unknown GNU sparse PAX version
-	case hdr.PAXRecords[paxGNUSparseMap] != "":
-		is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
-	default:
-		return nil, nil // Not a PAX format GNU sparse file.
-	}
-	hdr.Format.mayOnlyBe(FormatPAX)
-
-	// Update hdr from GNU sparse PAX headers.
-	if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
-		hdr.Name = name
-	}
-	size := hdr.PAXRecords[paxGNUSparseSize]
-	if size == "" {
-		size = hdr.PAXRecords[paxGNUSparseRealSize]
-	}
-	if size != "" {
-		n, err := strconv.ParseInt(size, 10, 64)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		hdr.Size = n
-	}
-
-	// Read the sparse map according to the appropriate format.
-	if is1x0 {
-		return readGNUSparseMap1x0(tr.curr)
-	}
-	return readGNUSparseMap0x1(hdr.PAXRecords)
-}
-
-// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
-func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
-	for k, v := range paxHdrs {
-		if v == "" {
-			continue // Keep the original USTAR value
-		}
-		var id64 int64
-		switch k {
-		case paxPath:
-			hdr.Name = v
-		case paxLinkpath:
-			hdr.Linkname = v
-		case paxUname:
-			hdr.Uname = v
-		case paxGname:
-			hdr.Gname = v
-		case paxUid:
-			id64, err = strconv.ParseInt(v, 10, 64)
-			hdr.Uid = int(id64) // Integer overflow possible
-		case paxGid:
-			id64, err = strconv.ParseInt(v, 10, 64)
-			hdr.Gid = int(id64) // Integer overflow possible
-		case paxAtime:
-			hdr.AccessTime, err = parsePAXTime(v)
-		case paxMtime:
-			hdr.ModTime, err = parsePAXTime(v)
-		case paxCtime:
-			hdr.ChangeTime, err = parsePAXTime(v)
-		case paxSize:
-			hdr.Size, err = strconv.ParseInt(v, 10, 64)
-		default:
-			if strings.HasPrefix(k, paxSchilyXattr) {
-				if hdr.Xattrs == nil {
-					hdr.Xattrs = make(map[string]string)
-				}
-				hdr.Xattrs[k[len(paxSchilyXattr):]] = v
-			}
-		}
-		if err != nil {
-			return ErrHeader
-		}
-	}
-	hdr.PAXRecords = paxHdrs
-	return nil
-}
-
-// parsePAX parses PAX headers.
-// If an extended header (type 'x') is invalid, ErrHeader is returned.
-func parsePAX(r io.Reader) (map[string]string, error) {
-	buf, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, err
-	}
-	sbuf := string(buf)
-
-	// For GNU PAX sparse format 0.0 support.
-	// This function transforms the sparse format 0.0 headers into format 0.1
-	// headers since 0.0 headers were not PAX compliant.
-	var sparseMap []string
-
-	paxHdrs := make(map[string]string)
-	for len(sbuf) > 0 {
-		key, value, residual, err := parsePAXRecord(sbuf)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		sbuf = residual
-
-		switch key {
-		case paxGNUSparseOffset, paxGNUSparseNumBytes:
-			// Validate sparse header order and value.
-			if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
-				(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
-				strings.Contains(value, ",") {
-				return nil, ErrHeader
-			}
-			sparseMap = append(sparseMap, value)
-		default:
-			paxHdrs[key] = value
-		}
-	}
-	if len(sparseMap) > 0 {
-		paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
-	}
-	return paxHdrs, nil
-}
-
-// readHeader reads the next block header and assumes that the underlying reader
-// is already aligned to a block boundary. It returns the raw block of the
-// header in case further processing is required.
-//
-// The err will be set to io.EOF only when one of the following occurs:
-//	* Exactly 0 bytes are read and EOF is hit.
-//	* Exactly 1 block of zeros is read and EOF is hit.
-//	* At least 2 blocks of zeros are read.
-func (tr *Reader) readHeader() (*Header, *block, error) {
-	// Two blocks of zero bytes marks the end of the archive.
-	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
-		return nil, nil, err // EOF is okay here; exactly 0 bytes read
-	}
-	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
-		if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
-			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
-		}
-		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
-			return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
-		}
-		return nil, nil, ErrHeader // Zero block and then non-zero block
-	}
-
-	// Verify the header matches a known format.
-	format := tr.blk.GetFormat()
-	if format == FormatUnknown {
-		return nil, nil, ErrHeader
-	}
-
-	var p parser
-	hdr := new(Header)
-
-	// Unpack the V7 header.
-	v7 := tr.blk.V7()
-	hdr.Typeflag = v7.TypeFlag()[0]
-	hdr.Name = p.parseString(v7.Name())
-	hdr.Linkname = p.parseString(v7.LinkName())
-	hdr.Size = p.parseNumeric(v7.Size())
-	hdr.Mode = p.parseNumeric(v7.Mode())
-	hdr.Uid = int(p.parseNumeric(v7.UID()))
-	hdr.Gid = int(p.parseNumeric(v7.GID()))
-	hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
-
-	// Unpack format specific fields.
-	if format > formatV7 {
-		ustar := tr.blk.USTAR()
-		hdr.Uname = p.parseString(ustar.UserName())
-		hdr.Gname = p.parseString(ustar.GroupName())
-		hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
-		hdr.Devminor = p.parseNumeric(ustar.DevMinor())
-
-		var prefix string
-		switch {
-		case format.has(FormatUSTAR | FormatPAX):
-			hdr.Format = format
-			ustar := tr.blk.USTAR()
-			prefix = p.parseString(ustar.Prefix())
-
-			// For Format detection, check if block is properly formatted since
-			// the parser is more liberal than what USTAR actually permits.
-			notASCII := func(r rune) bool { return r >= 0x80 }
-			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
-				hdr.Format = FormatUnknown // Non-ASCII characters in block.
-			}
-			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
-			if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
-				nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
-				hdr.Format = FormatUnknown // Numeric fields must end in NUL
-			}
-		case format.has(formatSTAR):
-			star := tr.blk.STAR()
-			prefix = p.parseString(star.Prefix())
-			hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
-			hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
-		case format.has(FormatGNU):
-			hdr.Format = format
-			var p2 parser
-			gnu := tr.blk.GNU()
-			if b := gnu.AccessTime(); b[0] != 0 {
-				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
-			}
-			if b := gnu.ChangeTime(); b[0] != 0 {
-				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
-			}
-
-			// Prior to Go1.8, the Writer had a bug where it would output
-			// an invalid tar file in certain rare situations because the logic
-			// incorrectly believed that the old GNU format had a prefix field.
-			// This is wrong and leads to an output file that mangles the
-			// atime and ctime fields, which are often left unused.
-			//
-			// In order to continue reading tar files created by former, buggy
-			// versions of Go, we skeptically parse the atime and ctime fields.
-			// If we are unable to parse them and the prefix field looks like
-			// an ASCII string, then we fall back on the pre-Go1.8 behavior
-			// of treating these fields as the USTAR prefix field.
-			//
-			// Note that this will not use the fallback logic for all possible
-			// files generated by a pre-Go1.8 toolchain. If the generated file
-			// happened to have a prefix field that parses as valid
-			// atime and ctime fields (e.g., when they are valid octal strings),
-			// then it is impossible to distinguish between a valid GNU file
-			// and an invalid pre-Go1.8 file.
-			//
-			// See https://golang.org/issues/12594
-			// See https://golang.org/issues/21005
-			if p2.err != nil {
-				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
-				ustar := tr.blk.USTAR()
-				if s := p.parseString(ustar.Prefix()); isASCII(s) {
-					prefix = s
-				}
-				hdr.Format = FormatUnknown // Buggy file is not GNU
-			}
-		}
-		if len(prefix) > 0 {
-			hdr.Name = prefix + "/" + hdr.Name
-		}
-	}
-	return hdr, &tr.blk, p.err
-}
-
-// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
-// The sparse map is stored in the tar header if it's small enough.
-// If it's larger than four entries, then one or more extension headers are used
-// to store the rest of the sparse map.
-//
-// The Header.Size does not reflect the size of any extended headers used.
-// Thus, this function will read from the raw io.Reader to fetch extra headers.
-// This method mutates blk in the process.
-func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
-	// Make sure that the input format is GNU.
-	// Unfortunately, the STAR format also has a sparse header format that uses
-	// the same type flag but has a completely different layout.
-	if blk.GetFormat() != FormatGNU {
-		return nil, ErrHeader
-	}
-	hdr.Format.mayOnlyBe(FormatGNU)
-
-	var p parser
-	hdr.Size = p.parseNumeric(blk.GNU().RealSize())
-	if p.err != nil {
-		return nil, p.err
-	}
-	s := blk.GNU().Sparse()
-	spd := make(sparseDatas, 0, s.MaxEntries())
-	for {
-		for i := 0; i < s.MaxEntries(); i++ {
-			// This termination condition is identical to GNU and BSD tar.
-			if s.Entry(i).Offset()[0] == 0x00 {
-				break // Don't return, need to process extended headers (even if empty)
-			}
-			offset := p.parseNumeric(s.Entry(i).Offset())
-			length := p.parseNumeric(s.Entry(i).Length())
-			if p.err != nil {
-				return nil, p.err
-			}
-			spd = append(spd, sparseEntry{Offset: offset, Length: length})
-		}
-
-		if s.IsExtended()[0] > 0 {
-			// There are more entries. Read an extension header and parse its entries.
-			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
-				return nil, err
-			}
-			s = blk.Sparse()
-			continue
-		}
-		return spd, nil // Done
-	}
-}
-
-// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
-// version 1.0. The format of the sparse map consists of a series of
-// newline-terminated numeric fields. The first field is the number of entries
-// and is always present. Following this are the entries, consisting of two
-// fields (offset, length). This function must stop reading at the end
-// boundary of the block containing the last newline.
-//
-// Note that the GNU manual says that numeric values should be encoded in octal
-// format. However, the GNU tar utility itself outputs these values in decimal.
-// As such, this library treats values as being encoded in decimal.
-func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
-	var (
-		cntNewline int64
-		buf        bytes.Buffer
-		blk        block
-	)
-
-	// feedTokens copies data in blocks from r into buf until there are
-	// at least n newlines in buf. It will not read more blocks than needed.
-	feedTokens := func(n int64) error {
-		for cntNewline < n {
-			if _, err := mustReadFull(r, blk[:]); err != nil {
-				return err
-			}
-			buf.Write(blk[:])
-			for _, c := range blk {
-				if c == '\n' {
-					cntNewline++
-				}
-			}
-		}
-		return nil
-	}
-
-	// nextToken gets the next token delimited by a newline. This assumes that
-	// at least one newline exists in the buffer.
-	nextToken := func() string {
-		cntNewline--
-		tok, _ := buf.ReadString('\n')
-		return strings.TrimRight(tok, "\n")
-	}
-
-	// Parse for the number of entries.
-	// Use integer overflow resistant math to check this.
-	if err := feedTokens(1); err != nil {
-		return nil, err
-	}
-	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
-	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
-		return nil, ErrHeader
-	}
-
-	// Parse for all member entries.
-	// numEntries is trusted after this since a potential attacker must have
-	// committed resources proportional to what this library used.
-	if err := feedTokens(2 * numEntries); err != nil {
-		return nil, err
-	}
-	spd := make(sparseDatas, 0, numEntries)
-	for i := int64(0); i < numEntries; i++ {
-		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
-		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
-		if err1 != nil || err2 != nil {
-			return nil, ErrHeader
-		}
-		spd = append(spd, sparseEntry{Offset: offset, Length: length})
-	}
-	return spd, nil
-}
-
-// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
-// version 0.1. The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
-	// Get number of entries.
-	// Use integer overflow resistant math to check this.
-	numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
-	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
-	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
-		return nil, ErrHeader
-	}
-
-	// There should be two numbers in sparseMap for each entry.
-	sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
-	if len(sparseMap) == 1 && sparseMap[0] == "" {
-		sparseMap = sparseMap[:0]
-	}
-	if int64(len(sparseMap)) != 2*numEntries {
-		return nil, ErrHeader
-	}
-
-	// Loop through the entries in the sparse map.
-	// numEntries is trusted now.
-	spd := make(sparseDatas, 0, numEntries)
-	for len(sparseMap) >= 2 {
-		offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
-		length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
-		if err1 != nil || err2 != nil {
-			return nil, ErrHeader
-		}
-		spd = append(spd, sparseEntry{Offset: offset, Length: length})
-		sparseMap = sparseMap[2:]
-	}
-	return spd, nil
-}
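A 0.1 sparse map arrives as a single comma-separated PAX value of offset/length pairs; decoding one by hand is a sketch of the loop above (the map literal is illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	sparseMap := "0,2,1048576,3" // two data fragments: (0, 2) and (1048576, 3)
	fields := strings.Split(sparseMap, ",")
	for len(fields) >= 2 {
		offset, _ := strconv.ParseInt(fields[0], 10, 64)
		length, _ := strconv.ParseInt(fields[1], 10, 64)
		fmt.Printf("data at offset %d, length %d\n", offset, length)
		fields = fields[2:]
	}
}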
-
-// Read reads from the current file in the tar archive.
-// It returns (0, io.EOF) when it reaches the end of that file,
-// until Next is called to advance to the next file.
-//
-// If the current file is sparse, then the regions marked as a hole
-// are read back as NUL-bytes.
-//
-// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
-// the Header.Size claims.
-func (tr *Reader) Read(b []byte) (int, error) {
-	if tr.err != nil {
-		return 0, tr.err
-	}
-	n, err := tr.curr.Read(b)
-	if err != nil && err != io.EOF {
-		tr.err = err
-	}
-	return n, err
-}
-
-// writeTo writes the content of the current file to w.
-// The number of bytes written matches the number of remaining bytes in the current file.
-//
-// If the current file is sparse and w is an io.WriteSeeker,
-// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
-// assuming that skipped regions are filled with NULs.
-// This always writes the last byte to ensure w is the right size.
-//
-// TODO(dsnet): Re-export this when adding sparse file support.
-// See https://golang.org/issue/22735
-func (tr *Reader) writeTo(w io.Writer) (int64, error) {
-	if tr.err != nil {
-		return 0, tr.err
-	}
-	n, err := tr.curr.WriteTo(w)
-	if err != nil {
-		tr.err = err
-	}
-	return n, err
-}
-
-// regFileReader is a fileReader for reading data from a regular file entry.
-type regFileReader struct {
-	r  io.Reader // Underlying Reader
-	nb int64     // Number of remaining bytes to read
-}
-
-func (fr *regFileReader) Read(b []byte) (n int, err error) {
-	if int64(len(b)) > fr.nb {
-		b = b[:fr.nb]
-	}
-	if len(b) > 0 {
-		n, err = fr.r.Read(b)
-		fr.nb -= int64(n)
-	}
-	switch {
-	case err == io.EOF && fr.nb > 0:
-		return n, io.ErrUnexpectedEOF
-	case err == nil && fr.nb == 0:
-		return n, io.EOF
-	default:
-		return n, err
-	}
-}
-
-func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
-	return io.Copy(w, struct{ io.Reader }{fr})
-}
-
-func (fr regFileReader) LogicalRemaining() int64 {
-	return fr.nb
-}
-
-func (fr regFileReader) PhysicalRemaining() int64 {
-	return fr.nb
-}
-
-// sparseFileReader is a fileReader for reading data from a sparse file entry.
-type sparseFileReader struct {
-	fr  fileReader  // Underlying fileReader
-	sp  sparseHoles // Normalized list of sparse holes
-	pos int64       // Current position in sparse file
-}
-
-func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
-	finished := int64(len(b)) >= sr.LogicalRemaining()
-	if finished {
-		b = b[:sr.LogicalRemaining()]
-	}
-
-	b0 := b
-	endPos := sr.pos + int64(len(b))
-	for endPos > sr.pos && err == nil {
-		var nf int // Bytes read in fragment
-		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
-		if sr.pos < holeStart { // In a data fragment
-			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
-			nf, err = tryReadFull(sr.fr, bf)
-		} else { // In a hole fragment
-			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
-			nf, err = tryReadFull(zeroReader{}, bf)
-		}
-		b = b[nf:]
-		sr.pos += int64(nf)
-		if sr.pos >= holeEnd && len(sr.sp) > 1 {
-			sr.sp = sr.sp[1:] // Ensure last fragment always remains
-		}
-	}
-
-	n = len(b0) - len(b)
-	switch {
-	case err == io.EOF:
-		return n, errMissData // Less data in dense file than sparse file
-	case err != nil:
-		return n, err
-	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
-		return n, errUnrefData // More data in dense file than sparse file
-	case finished:
-		return n, io.EOF
-	default:
-		return n, nil
-	}
-}
-
-func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
-	ws, ok := w.(io.WriteSeeker)
-	if ok {
-		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
-			ok = false // Not all io.Seeker can really seek
-		}
-	}
-	if !ok {
-		return io.Copy(w, struct{ io.Reader }{sr})
-	}
-
-	var writeLastByte bool
-	pos0 := sr.pos
-	for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
-		var nf int64 // Size of fragment
-		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
-		if sr.pos < holeStart { // In a data fragment
-			nf = holeStart - sr.pos
-			nf, err = io.CopyN(ws, sr.fr, nf)
-		} else { // In a hole fragment
-			nf = holeEnd - sr.pos
-			if sr.PhysicalRemaining() == 0 {
-				writeLastByte = true
-				nf--
-			}
-			_, err = ws.Seek(nf, io.SeekCurrent)
-		}
-		sr.pos += nf
-		if sr.pos >= holeEnd && len(sr.sp) > 1 {
-			sr.sp = sr.sp[1:] // Ensure last fragment always remains
-		}
-	}
-
-	// If the last fragment is a hole, then seek to 1-byte before EOF, and
-	// write a single byte to ensure the file is the right size.
-	if writeLastByte && err == nil {
-		_, err = ws.Write([]byte{0})
-		sr.pos++
-	}
-
-	n = sr.pos - pos0
-	switch {
-	case err == io.EOF:
-		return n, errMissData // Less data in dense file than sparse file
-	case err != nil:
-		return n, err
-	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
-		return n, errUnrefData // More data in dense file than sparse file
-	default:
-		return n, nil
-	}
-}
-
-func (sr sparseFileReader) LogicalRemaining() int64 {
-	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
-}
-func (sr sparseFileReader) PhysicalRemaining() int64 {
-	return sr.fr.PhysicalRemaining()
-}
-
-type zeroReader struct{}
-
-func (zeroReader) Read(b []byte) (int, error) {
-	for i := range b {
-		b[i] = 0
-	}
-	return len(b), nil
-}
-
-// mustReadFull is like io.ReadFull except it returns
-// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
-func mustReadFull(r io.Reader, b []byte) (int, error) {
-	n, err := tryReadFull(r, b)
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	}
-	return n, err
-}
-
-// tryReadFull is like io.ReadFull except it returns
-// io.EOF when it is hit before len(b) bytes are read.
-func tryReadFull(r io.Reader, b []byte) (n int, err error) {
-	for len(b) > n && err == nil {
-		var nn int
-		nn, err = r.Read(b[n:])
-		n += nn
-	}
-	if len(b) == n && err == io.EOF {
-		err = nil
-	}
-	return n, err
-}
-
-// discard skips n bytes in r, reporting an error if unable to do so.
-func discard(r io.Reader, n int64) error {
-	// If possible, Seek to the last byte before the end of the data section.
-	// Do this because Seek is often lazy about reporting errors; this will mask
-	// the fact that the stream may be truncated. We can rely on the
-	// io.CopyN done shortly afterwards to trigger any IO errors.
-	var seekSkipped int64 // Number of bytes skipped via Seek
-	if sr, ok := r.(io.Seeker); ok && n > 1 {
-		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
-		// io.Seeker, but calling Seek always returns an error and performs
-		// no action. Thus, we try an innocent seek to the current position
-		// to see if Seek is really supported.
-		pos1, err := sr.Seek(0, io.SeekCurrent)
-		if pos1 >= 0 && err == nil {
-			// Seek seems supported, so perform the real Seek.
-			pos2, err := sr.Seek(n-1, io.SeekCurrent)
-			if pos2 < 0 || err != nil {
-				return err
-			}
-			seekSkipped = pos2 - pos1
-		}
-	}
-
-	copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped)
-	if err == io.EOF && seekSkipped+copySkipped < n {
-		err = io.ErrUnexpectedEOF
-	}
-	return err
-}

+ 0 - 20
vendor/archive/tar/stat_actime1.go

@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux dragonfly openbsd solaris
-
-package tar
-
-import (
-	"syscall"
-	"time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Atim.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Ctim.Unix())
-}

+ 0 - 20
vendor/archive/tar/stat_actime2.go

@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin freebsd netbsd
-
-package tar
-
-import (
-	"syscall"
-	"time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Atimespec.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Ctimespec.Unix())
-}

+ 0 - 76
vendor/archive/tar/stat_unix.go

@@ -1,76 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin dragonfly freebsd openbsd netbsd solaris
-
-package tar
-
-import (
-	"os"
-	"runtime"
-	"syscall"
-)
-
-func init() {
-	sysStat = statUnix
-}
-
-func statUnix(fi os.FileInfo, h *Header) error {
-	sys, ok := fi.Sys().(*syscall.Stat_t)
-	if !ok {
-		return nil
-	}
-	h.Uid = int(sys.Uid)
-	h.Gid = int(sys.Gid)
-
-	// TODO(bradfitz): populate username & group.  os/user
-	// doesn't cache LookupId lookups, and lacks group
-	// lookup functions.
-	h.AccessTime = statAtime(sys)
-	h.ChangeTime = statCtime(sys)
-
-	// Best effort at populating Devmajor and Devminor.
-	if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
-		dev := uint64(sys.Rdev) // May be int32 or uint32
-		switch runtime.GOOS {
-		case "linux":
-			// Copied from golang.org/x/sys/unix/dev_linux.go.
-			major := uint32((dev & 0x00000000000fff00) >> 8)
-			major |= uint32((dev & 0xfffff00000000000) >> 32)
-			minor := uint32((dev & 0x00000000000000ff) >> 0)
-			minor |= uint32((dev & 0x00000ffffff00000) >> 12)
-			h.Devmajor, h.Devminor = int64(major), int64(minor)
-		case "darwin":
-			// Copied from golang.org/x/sys/unix/dev_darwin.go.
-			major := uint32((dev >> 24) & 0xff)
-			minor := uint32(dev & 0xffffff)
-			h.Devmajor, h.Devminor = int64(major), int64(minor)
-		case "dragonfly":
-			// Copied from golang.org/x/sys/unix/dev_dragonfly.go.
-			major := uint32((dev >> 8) & 0xff)
-			minor := uint32(dev & 0xffff00ff)
-			h.Devmajor, h.Devminor = int64(major), int64(minor)
-		case "freebsd":
-			// Copied from golang.org/x/sys/unix/dev_freebsd.go.
-			major := uint32((dev >> 8) & 0xff)
-			minor := uint32(dev & 0xffff00ff)
-			h.Devmajor, h.Devminor = int64(major), int64(minor)
-		case "netbsd":
-			// Copied from golang.org/x/sys/unix/dev_netbsd.go.
-			major := uint32((dev & 0x000fff00) >> 8)
-			minor := uint32((dev & 0x000000ff) >> 0)
-			minor |= uint32((dev & 0xfff00000) >> 12)
-			h.Devmajor, h.Devminor = int64(major), int64(minor)
-		case "openbsd":
-			// Copied from golang.org/x/sys/unix/dev_openbsd.go.
-			major := uint32((dev & 0x0000ff00) >> 8)
-			minor := uint32((dev & 0x000000ff) >> 0)
-			minor |= uint32((dev & 0xffff0000) >> 8)
-			h.Devmajor, h.Devminor = int64(major), int64(minor)
-		default:
-			// TODO: Implement solaris (see https://golang.org/issue/8106)
-		}
-	}
-	return nil
-}
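As a concrete case of the Linux branch above: /dev/sda3 is conventionally major 8, minor 3, which packs into a dev_t of 0x803; the same masks recover the pair (the device number is illustrative):

package main

import "fmt"

func main() {
	dev := uint64(0x803) // e.g. /dev/sda3: major 8, minor 3
	major := uint32((dev & 0x00000000000fff00) >> 8)
	major |= uint32((dev & 0xfffff00000000000) >> 32)
	minor := uint32(dev & 0x00000000000000ff)
	minor |= uint32((dev & 0x00000ffffff00000) >> 12)
	fmt.Println(major, minor) // 8 3
}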

+ 0 - 326
vendor/archive/tar/strconv.go

@@ -1,326 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// hasNUL reports whether the NUL character exists within s.
-func hasNUL(s string) bool {
-	return strings.IndexByte(s, 0) >= 0
-}
-
-// isASCII reports whether the input is an ASCII C-style string.
-func isASCII(s string) bool {
-	for _, c := range s {
-		if c >= 0x80 || c == 0x00 {
-			return false
-		}
-	}
-	return true
-}
-
-// toASCII converts the input to an ASCII C-style string.
-// This is a best-effort conversion, so invalid characters are dropped.
-func toASCII(s string) string {
-	if isASCII(s) {
-		return s
-	}
-	b := make([]byte, 0, len(s))
-	for _, c := range s {
-		if c < 0x80 && c != 0x00 {
-			b = append(b, byte(c))
-		}
-	}
-	return string(b)
-}
-
-type parser struct {
-	err error // Last error seen
-}
-
-type formatter struct {
-	err error // Last error seen
-}
-
-// parseString parses bytes as a NUL-terminated C-style string.
-// If a NUL byte is not found then the whole slice is returned as a string.
-func (*parser) parseString(b []byte) string {
-	if i := bytes.IndexByte(b, 0); i >= 0 {
-		return string(b[:i])
-	}
-	return string(b)
-}
-
-// formatString copies s into b, NUL-terminating if possible.
-func (f *formatter) formatString(b []byte, s string) {
-	if len(s) > len(b) {
-		f.err = ErrFieldTooLong
-	}
-	copy(b, s)
-	if len(s) < len(b) {
-		b[len(s)] = 0
-	}
-
-	// Some buggy readers treat regular files with a trailing slash
-	// in the V7 path field as a directory even though the full path
-	// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
-	if len(s) > len(b) && b[len(b)-1] == '/' {
-		n := len(strings.TrimRight(s[:len(b)], "/"))
-		b[n] = 0 // Replace trailing slash with NUL terminator
-	}
-}
-
-// fitsInBase256 reports whether x can be encoded into n bytes using base-256
-// encoding. Unlike octal encoding, base-256 encoding does not require that the
-// string ends with a NUL character. Thus, all n bytes are available for output.
-//
-// If operating in binary mode, this assumes strict GNU binary mode, which means
-// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
-// equivalent to the sign bit in two's complement form.
-func fitsInBase256(n int, x int64) bool {
-	binBits := uint(n-1) * 8
-	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
-}
-
-// parseNumeric parses the input as being encoded in either base-256 or octal.
-// This function may return negative numbers.
-// If parsing fails or an integer overflow occurs, err will be set.
-func (p *parser) parseNumeric(b []byte) int64 {
-	// Check for base-256 (binary) format first.
-	// If the first bit is set, then all following bits constitute a two's
-	// complement encoded number in big-endian byte order.
-	if len(b) > 0 && b[0]&0x80 != 0 {
-		// Handling negative numbers relies on the following identity:
-		//	-a-1 == ^a
-		//
-		// If the number is negative, we use an inversion mask to invert the
-		// data bytes and treat the value as an unsigned number.
-		var inv byte // 0x00 if positive or zero, 0xff if negative
-		if b[0]&0x40 != 0 {
-			inv = 0xff
-		}
-
-		var x uint64
-		for i, c := range b {
-			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
-			if i == 0 {
-				c &= 0x7f // Ignore signal bit in first byte
-			}
-			if (x >> 56) > 0 {
-				p.err = ErrHeader // Integer overflow
-				return 0
-			}
-			x = x<<8 | uint64(c)
-		}
-		if (x >> 63) > 0 {
-			p.err = ErrHeader // Integer overflow
-			return 0
-		}
-		if inv == 0xff {
-			return ^int64(x)
-		}
-		return int64(x)
-	}
-
-	// Normal case is base-8 (octal) format.
-	return p.parseOctal(b)
-}
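For example, a size of 8GiB (1<<33) is one past what a 12-byte octal field can hold (the NUL terminator leaves 11 octal digits, i.e. 33 bits), so writers fall back to base-256. A sketch of the encoding that the binary branch of formatNumeric produces:

package main

import "fmt"

func main() {
	x := int64(1) << 33 // 8GiB: just past the 12-byte octal limit of 2^33 - 1
	b := make([]byte, 12)
	for i := len(b) - 1; i >= 0; i-- { // big-endian two's complement
		b[i] = byte(x)
		x >>= 8
	}
	b[0] |= 0x80 // high bit of the first byte marks base-256
	fmt.Printf("% x\n", b) // 80 00 00 00 00 00 00 02 00 00 00 00
}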
-
-// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
-// Otherwise it will attempt to use base-256 (binary) encoding.
-func (f *formatter) formatNumeric(b []byte, x int64) {
-	if fitsInOctal(len(b), x) {
-		f.formatOctal(b, x)
-		return
-	}
-
-	if fitsInBase256(len(b), x) {
-		for i := len(b) - 1; i >= 0; i-- {
-			b[i] = byte(x)
-			x >>= 8
-		}
-		b[0] |= 0x80 // Highest bit indicates binary format
-		return
-	}
-
-	f.formatOctal(b, 0) // Last resort, just write zero
-	f.err = ErrFieldTooLong
-}
-
-func (p *parser) parseOctal(b []byte) int64 {
-	// Because unused fields are filled with NULs, we need
-	// to skip leading NULs. Fields may also be padded with
-	// spaces or NULs.
-	// So we remove leading and trailing NULs and spaces to
-	// be sure.
-	b = bytes.Trim(b, " \x00")
-
-	if len(b) == 0 {
-		return 0
-	}
-	x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
-	if perr != nil {
-		p.err = ErrHeader
-	}
-	return int64(x)
-}
-
-func (f *formatter) formatOctal(b []byte, x int64) {
-	if !fitsInOctal(len(b), x) {
-		x = 0 // Last resort, just write zero
-		f.err = ErrFieldTooLong
-	}
-
-	s := strconv.FormatInt(x, 8)
-	// Add leading zeros, but leave room for a NUL.
-	if n := len(b) - len(s) - 1; n > 0 {
-		s = strings.Repeat("0", n) + s
-	}
-	f.formatString(b, s)
-}
-
-// fitsInOctal reports whether the integer x fits in a field n-bytes long
-// using octal encoding with the appropriate NUL terminator.
-func fitsInOctal(n int, x int64) bool {
-	octBits := uint(n-1) * 3
-	return x >= 0 && (n >= 22 || x < 1<<octBits)
-}
-
-// parsePAXTime takes a string of the form %d.%d as described in the PAX
-// specification. Note that this implementation allows for negative timestamps,
-// which the PAX specification permits but which is not always portable.
-func parsePAXTime(s string) (time.Time, error) {
-	const maxNanoSecondDigits = 9
-
-	// Split string into seconds and sub-seconds parts.
-	ss, sn := s, ""
-	if pos := strings.IndexByte(s, '.'); pos >= 0 {
-		ss, sn = s[:pos], s[pos+1:]
-	}
-
-	// Parse the seconds.
-	secs, err := strconv.ParseInt(ss, 10, 64)
-	if err != nil {
-		return time.Time{}, ErrHeader
-	}
-	if len(sn) == 0 {
-		return time.Unix(secs, 0), nil // No sub-second values
-	}
-
-	// Parse the nanoseconds.
-	if strings.Trim(sn, "0123456789") != "" {
-		return time.Time{}, ErrHeader
-	}
-	if len(sn) < maxNanoSecondDigits {
-		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
-	} else {
-		sn = sn[:maxNanoSecondDigits] // Right truncate
-	}
-	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
-	if len(ss) > 0 && ss[0] == '-' {
-		return time.Unix(secs, -1*nsecs), nil // Negative correction
-	}
-	return time.Unix(secs, nsecs), nil
-}
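So a PAX value such as "1350244992.023960108" splits at the dot, the fraction is padded or truncated to nine digits, and both halves feed time.Unix. A quick check with an illustrative timestamp:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Seconds before the dot, nanoseconds after; this fraction is already
	// nine digits, so no padding is needed.
	t := time.Unix(1350244992, 23960108)
	fmt.Println(t.UTC()) // 2012-10-14 20:03:12.023960108 +0000 UTC
}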
-
-// formatPAXTime converts ts into a time of the form %d.%d as described in the
-// PAX specification. This function supports negative timestamps.
-func formatPAXTime(ts time.Time) (s string) {
-	secs, nsecs := ts.Unix(), ts.Nanosecond()
-	if nsecs == 0 {
-		return strconv.FormatInt(secs, 10)
-	}
-
-	// If seconds is negative, then perform correction.
-	sign := ""
-	if secs < 0 {
-		sign = "-"             // Remember sign
-		secs = -(secs + 1)     // Add a second to secs
-		nsecs = -(nsecs - 1E9) // Take that second away from nsecs
-	}
-	return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
-}
-
-// parsePAXRecord parses the input PAX record string into a key-value pair.
-// If parsing is successful, it will slice off the currently read record and
-// return the remainder as r.
-func parsePAXRecord(s string) (k, v, r string, err error) {
-	// The size field ends at the first space.
-	sp := strings.IndexByte(s, ' ')
-	if sp == -1 {
-		return "", "", s, ErrHeader
-	}
-
-	// Parse the first token as a decimal integer.
-	n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
-	if perr != nil || n < 5 || int64(len(s)) < n {
-		return "", "", s, ErrHeader
-	}
-
-	// Extract everything between the space and the final newline.
-	rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
-	if nl != "\n" {
-		return "", "", s, ErrHeader
-	}
-
-	// The first equals separates the key from the value.
-	eq := strings.IndexByte(rec, '=')
-	if eq == -1 {
-		return "", "", s, ErrHeader
-	}
-	k, v = rec[:eq], rec[eq+1:]
-
-	if !validPAXRecord(k, v) {
-		return "", "", s, ErrHeader
-	}
-	return k, v, rem, nil
-}
-
-// formatPAXRecord formats a single PAX record, prefixing it with the
-// appropriate length.
-func formatPAXRecord(k, v string) (string, error) {
-	if !validPAXRecord(k, v) {
-		return "", ErrHeader
-	}
-
-	const padding = 3 // Extra padding for ' ', '=', and '\n'
-	size := len(k) + len(v) + padding
-	size += len(strconv.Itoa(size))
-	record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
-
-	// Final adjustment if adding size field increased the record size.
-	if len(record) != size {
-		size = len(record)
-		record = strconv.Itoa(size) + " " + k + "=" + v + "\n"
-	}
-	return record, nil
-}
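The length prefix counts itself, which is why a second pass may be needed when adding the field widens the record. A standalone sketch of the same fixed-point computation:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	k, v := "mtime", "1350244992.023960108"
	const padding = 3 // ' ', '=', '\n'
	size := len(k) + len(v) + padding
	size += len(strconv.Itoa(size)) // the length field counts itself
	record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
	if len(record) != size { // widening the field changed its own width
		record = strconv.Itoa(len(record)) + " " + k + "=" + v + "\n"
	}
	fmt.Printf("%q\n", record) // "30 mtime=1350244992.023960108\n"
}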
-
-// validPAXRecord reports whether the key-value pair is valid where each
-// record is formatted as:
-//	"%d %s=%s\n" % (size, key, value)
-//
-// Keys and values should be UTF-8, but the number of bad writers out there
-// forces us to be more liberal.
-// Thus, we reject only keys that contain NUL, and reject NULs in values
-// only for the PAX versions of the USTAR string fields.
-// The key must not contain an '=' character.
-func validPAXRecord(k, v string) bool {
-	if k == "" || strings.IndexByte(k, '=') >= 0 {
-		return false
-	}
-	switch k {
-	case paxPath, paxLinkpath, paxUname, paxGname:
-		return !hasNUL(v)
-	default:
-		return !hasNUL(k)
-	}
-}

+ 0 - 644
vendor/archive/tar/writer.go

@@ -1,644 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"path"
-	"sort"
-	"strings"
-	"time"
-)
-
-// Writer provides sequential writing of a tar archive.
-// Write.WriteHeader begins a new file with the provided Header,
-// and then Writer can be treated as an io.Writer to supply that file's data.
-type Writer struct {
-	w    io.Writer
-	pad  int64      // Amount of padding to write after current file entry
-	curr fileWriter // Writer for current file entry
-	hdr  Header     // Shallow copy of Header that is safe for mutations
-	blk  block      // Buffer to use as temporary local storage
-
-	// err is a persistent error.
-	// It is only the responsibility of every exported method of Writer to
-	// ensure that this error is sticky.
-	err error
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer {
-	return &Writer{w: w, curr: &regFileWriter{w, 0}}
-}
-
-type fileWriter interface {
-	io.Writer
-	fileState
-
-	ReadFrom(io.Reader) (int64, error)
-}
-
-// Flush finishes writing the current file's block padding.
-// The current file must be fully written before Flush can be called.
-//
-// This is unnecessary as the next call to WriteHeader or Close
-// will implicitly flush out the file's padding.
-func (tw *Writer) Flush() error {
-	if tw.err != nil {
-		return tw.err
-	}
-	if nb := tw.curr.LogicalRemaining(); nb > 0 {
-		return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
-	}
-	if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
-		return tw.err
-	}
-	tw.pad = 0
-	return nil
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// The Header.Size determines how many bytes can be written for the next file.
-// If the current file is not fully written, then this returns an error.
-// This implicitly flushes any padding necessary before writing the header.
-func (tw *Writer) WriteHeader(hdr *Header) error {
-	if err := tw.Flush(); err != nil {
-		return err
-	}
-	tw.hdr = *hdr // Shallow copy of Header
-
-	// Round ModTime and ignore AccessTime and ChangeTime unless
-	// the format is explicitly chosen.
-	// This ensures nominal usage of WriteHeader (without specifying the format)
-	// does not always result in the PAX format being chosen, which
-	// causes a 1KiB increase to every header.
-	if tw.hdr.Format == FormatUnknown {
-		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
-		tw.hdr.AccessTime = time.Time{}
-		tw.hdr.ChangeTime = time.Time{}
-	}
-
-	allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
-	switch {
-	case allowedFormats.has(FormatUSTAR):
-		tw.err = tw.writeUSTARHeader(&tw.hdr)
-		return tw.err
-	case allowedFormats.has(FormatPAX):
-		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
-		return tw.err
-	case allowedFormats.has(FormatGNU):
-		tw.err = tw.writeGNUHeader(&tw.hdr)
-		return tw.err
-	default:
-		return err // Non-fatal error
-	}
-}
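The rounding rule above is visible through the public API: with Format left as FormatUnknown, sub-second precision and AccessTime vanish on a round trip. A minimal sketch (field values are illustrative):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"time"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	err := tw.WriteHeader(&tar.Header{
		Name:       "a.txt",
		Mode:       0644,
		Typeflag:   tar.TypeReg,
		ModTime:    time.Unix(1350244992, 23960108),
		AccessTime: time.Now(), // dropped: no format explicitly chosen
	})
	if err != nil {
		panic(err)
	}
	tw.Close()

	hdr, _ := tar.NewReader(&buf).Next()
	fmt.Println(hdr.ModTime.Unix(), hdr.AccessTime.IsZero()) // 1350244992 true
}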
-
-func (tw *Writer) writeUSTARHeader(hdr *Header) error {
-	// Check if we can use USTAR prefix/suffix splitting.
-	var namePrefix string
-	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
-		namePrefix, hdr.Name = prefix, suffix
-	}
-
-	// Pack the main header.
-	var f formatter
-	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
-	f.formatString(blk.USTAR().Prefix(), namePrefix)
-	blk.SetFormat(FormatUSTAR)
-	if f.err != nil {
-		return f.err // Should never happen since header is validated
-	}
-	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
-}
-
-func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
-	realName, realSize := hdr.Name, hdr.Size
-
-	// TODO(dsnet): Re-enable this when adding sparse support.
-	// See https://golang.org/issue/22735
-	/*
-		// Handle sparse files.
-		var spd sparseDatas
-		var spb []byte
-		if len(hdr.SparseHoles) > 0 {
-			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
-			sph = alignSparseEntries(sph, hdr.Size)
-			spd = invertSparseEntries(sph, hdr.Size)
-
-			// Format the sparse map.
-			hdr.Size = 0 // Replace with encoded size
-			spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
-			for _, s := range spd {
-				hdr.Size += s.Length
-				spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
-				spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
-			}
-			pad := blockPadding(int64(len(spb)))
-			spb = append(spb, zeroBlock[:pad]...)
-			hdr.Size += int64(len(spb)) // Accounts for encoded sparse map
-
-			// Add and modify appropriate PAX records.
-			dir, file := path.Split(realName)
-			hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
-			paxHdrs[paxGNUSparseMajor] = "1"
-			paxHdrs[paxGNUSparseMinor] = "0"
-			paxHdrs[paxGNUSparseName] = realName
-			paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
-			paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
-			delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
-		}
-	*/
-	_ = realSize
-
-	// Write PAX records to the output.
-	isGlobal := hdr.Typeflag == TypeXGlobalHeader
-	if len(paxHdrs) > 0 || isGlobal {
-		// Sort keys for deterministic ordering.
-		var keys []string
-		for k := range paxHdrs {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-
-		// Write each record to a buffer.
-		var buf bytes.Buffer
-		for _, k := range keys {
-			rec, err := formatPAXRecord(k, paxHdrs[k])
-			if err != nil {
-				return err
-			}
-			buf.WriteString(rec)
-		}
-
-		// Write the extended header file.
-		var name string
-		var flag byte
-		if isGlobal {
-			name = realName
-			if name == "" {
-				name = "GlobalHead.0.0"
-			}
-			flag = TypeXGlobalHeader
-		} else {
-			dir, file := path.Split(realName)
-			name = path.Join(dir, "PaxHeaders.0", file)
-			flag = TypeXHeader
-		}
-		data := buf.String()
-		if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
-			return err // Global headers return here
-		}
-	}
-
-	// Pack the main header.
-	var f formatter // Ignore errors since they are expected
-	fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
-	blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
-	blk.SetFormat(FormatPAX)
-	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
-		return err
-	}
-
-	// TODO(dsnet): Re-enable this when adding sparse support.
-	// See https://golang.org/issue/22735
-	/*
-		// Write the sparse map and setup the sparse writer if necessary.
-		if len(spd) > 0 {
-			// Use tw.curr since the sparse map is accounted for in hdr.Size.
-			if _, err := tw.curr.Write(spb); err != nil {
-				return err
-			}
-			tw.curr = &sparseFileWriter{tw.curr, spd, 0}
-		}
-	*/
-	return nil
-}
-
-func (tw *Writer) writeGNUHeader(hdr *Header) error {
-	// Use long-link files if Name or Linkname exceeds the field size.
-	const longName = "././@LongLink"
-	if len(hdr.Name) > nameSize {
-		data := hdr.Name + "\x00"
-		if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
-			return err
-		}
-	}
-	if len(hdr.Linkname) > nameSize {
-		data := hdr.Linkname + "\x00"
-		if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
-			return err
-		}
-	}
-
-	// Pack the main header.
-	var f formatter // Ignore errors since they are expected
-	var spd sparseDatas
-	var spb []byte
-	blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
-	if !hdr.AccessTime.IsZero() {
-		f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
-	}
-	if !hdr.ChangeTime.IsZero() {
-		f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
-	}
-	// TODO(dsnet): Re-enable this when adding sparse support.
-	// See https://golang.org/issue/22735
-	/*
-		if hdr.Typeflag == TypeGNUSparse {
-			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
-			sph = alignSparseEntries(sph, hdr.Size)
-			spd = invertSparseEntries(sph, hdr.Size)
-
-			// Format the sparse map.
-			formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
-				for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
-					f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
-					f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
-					sp = sp[1:]
-				}
-				if len(sp) > 0 {
-					sa.IsExtended()[0] = 1
-				}
-				return sp
-			}
-			sp2 := formatSPD(spd, blk.GNU().Sparse())
-			for len(sp2) > 0 {
-				var spHdr block
-				sp2 = formatSPD(sp2, spHdr.Sparse())
-				spb = append(spb, spHdr[:]...)
-			}
-
-			// Update size fields in the header block.
-			realSize := hdr.Size
-			hdr.Size = 0 // Encoded size; does not account for encoded sparse map
-			for _, s := range spd {
-				hdr.Size += s.Length
-			}
-			copy(blk.V7().Size(), zeroBlock[:]) // Reset field
-			f.formatNumeric(blk.V7().Size(), hdr.Size)
-			f.formatNumeric(blk.GNU().RealSize(), realSize)
-		}
-	*/
-	blk.SetFormat(FormatGNU)
-	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
-		return err
-	}
-
-	// Write the extended sparse map and setup the sparse writer if necessary.
-	if len(spd) > 0 {
-		// Use tw.w since the sparse map is not accounted for in hdr.Size.
-		if _, err := tw.w.Write(spb); err != nil {
-			return err
-		}
-		tw.curr = &sparseFileWriter{tw.curr, spd, 0}
-	}
-	return nil
-}
-
-type (
-	stringFormatter func([]byte, string)
-	numberFormatter func([]byte, int64)
-)
-
-// templateV7Plus fills out the V7 fields of a block using values from hdr.
-// It also fills out fields (uname, gname, devmajor, devminor) that are
-// shared in the USTAR, PAX, and GNU formats using the provided formatters.
-//
-// The block returned is only valid until the next call to
-// templateV7Plus or writeRawFile.
-func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
-	tw.blk.Reset()
-
-	modTime := hdr.ModTime
-	if modTime.IsZero() {
-		modTime = time.Unix(0, 0)
-	}
-
-	v7 := tw.blk.V7()
-	v7.TypeFlag()[0] = hdr.Typeflag
-	fmtStr(v7.Name(), hdr.Name)
-	fmtStr(v7.LinkName(), hdr.Linkname)
-	fmtNum(v7.Mode(), hdr.Mode)
-	fmtNum(v7.UID(), int64(hdr.Uid))
-	fmtNum(v7.GID(), int64(hdr.Gid))
-	fmtNum(v7.Size(), hdr.Size)
-	fmtNum(v7.ModTime(), modTime.Unix())
-
-	ustar := tw.blk.USTAR()
-	fmtStr(ustar.UserName(), hdr.Uname)
-	fmtStr(ustar.GroupName(), hdr.Gname)
-	fmtNum(ustar.DevMajor(), hdr.Devmajor)
-	fmtNum(ustar.DevMinor(), hdr.Devminor)
-
-	return &tw.blk
-}
-
-// writeRawFile writes a minimal file with the given name and flag type.
-// It uses format to encode the header format and will write data as the body.
-// It uses default values for all of the other fields (as BSD and GNU tar does).
-func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
-	tw.blk.Reset()
-
-	// Best effort for the filename.
-	name = toASCII(name)
-	if len(name) > nameSize {
-		name = name[:nameSize]
-	}
-	name = strings.TrimRight(name, "/")
-
-	var f formatter
-	v7 := tw.blk.V7()
-	v7.TypeFlag()[0] = flag
-	f.formatString(v7.Name(), name)
-	f.formatOctal(v7.Mode(), 0)
-	f.formatOctal(v7.UID(), 0)
-	f.formatOctal(v7.GID(), 0)
-	f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
-	f.formatOctal(v7.ModTime(), 0)
-	tw.blk.SetFormat(format)
-	if f.err != nil {
-		return f.err // Only occurs if size condition is violated
-	}
-
-	// Write the header and data.
-	if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
-		return err
-	}
-	_, err := io.WriteString(tw, data)
-	return err
-}
-
-// writeRawHeader writes the value of blk, regardless of its value.
-// It sets up the Writer such that it can accept a file of the given size.
-// If the flag is a special header-only flag, then the size is treated as zero.
-func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
-	if err := tw.Flush(); err != nil {
-		return err
-	}
-	if _, err := tw.w.Write(blk[:]); err != nil {
-		return err
-	}
-	if isHeaderOnlyType(flag) {
-		size = 0
-	}
-	tw.curr = &regFileWriter{tw.w, size}
-	tw.pad = blockPadding(size)
-	return nil
-}
-
-// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
-// If the path is not splittable, then it will return ("", "", false).
-func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
-	length := len(name)
-	if length <= nameSize || !isASCII(name) {
-		return "", "", false
-	} else if length > prefixSize+1 {
-		length = prefixSize + 1
-	} else if name[length-1] == '/' {
-		length--
-	}
-
-	i := strings.LastIndex(name[:length], "/")
-	nlen := len(name) - i - 1 // nlen is length of suffix
-	plen := i                 // plen is length of prefix
-	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
-		return "", "", false
-	}
-	return name[:i], name[i+1:], true
-}
-
-// Write writes to the current file in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// Header.Size bytes are written after WriteHeader.
-//
-// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
-// of what the Header.Size claims.
-func (tw *Writer) Write(b []byte) (int, error) {
-	if tw.err != nil {
-		return 0, tw.err
-	}
-	n, err := tw.curr.Write(b)
-	if err != nil && err != ErrWriteTooLong {
-		tw.err = err
-	}
-	return n, err
-}
-
-// readFrom populates the content of the current file by reading from r.
-// The bytes read must match the number of remaining bytes in the current file.
-//
-// If the current file is sparse and r is an io.ReadSeeker,
-// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
-// assuming that skipped regions are all NULs.
-// This always reads the last byte to ensure r is the right size.
-//
-// TODO(dsnet): Re-export this when adding sparse file support.
-// See https://golang.org/issue/22735
-func (tw *Writer) readFrom(r io.Reader) (int64, error) {
-	if tw.err != nil {
-		return 0, tw.err
-	}
-	n, err := tw.curr.ReadFrom(r)
-	if err != nil && err != ErrWriteTooLong {
-		tw.err = err
-	}
-	return n, err
-}
-
-// Close closes the tar archive by flushing the padding, and writing the footer.
-// If the current file (from a prior call to WriteHeader) is not fully written,
-// then this returns an error.
-func (tw *Writer) Close() error {
-	if tw.err == ErrWriteAfterClose {
-		return nil
-	}
-	if tw.err != nil {
-		return tw.err
-	}
-
-	// Trailer: two zero blocks.
-	err := tw.Flush()
-	for i := 0; i < 2 && err == nil; i++ {
-		_, err = tw.w.Write(zeroBlock[:])
-	}
-
-	// Ensure all future actions are invalid.
-	tw.err = ErrWriteAfterClose
-	return err // Report IO errors
-}
-
-// regFileWriter is a fileWriter for writing data to a regular file entry.
-type regFileWriter struct {
-	w  io.Writer // Underlying Writer
-	nb int64     // Number of remaining bytes to write
-}
-
-func (fw *regFileWriter) Write(b []byte) (n int, err error) {
-	overwrite := int64(len(b)) > fw.nb
-	if overwrite {
-		b = b[:fw.nb]
-	}
-	if len(b) > 0 {
-		n, err = fw.w.Write(b)
-		fw.nb -= int64(n)
-	}
-	switch {
-	case err != nil:
-		return n, err
-	case overwrite:
-		return n, ErrWriteTooLong
-	default:
-		return n, nil
-	}
-}
-
-func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
-	return io.Copy(struct{ io.Writer }{fw}, r)
-}
-
-func (fw regFileWriter) LogicalRemaining() int64 {
-	return fw.nb
-}
-func (fw regFileWriter) PhysicalRemaining() int64 {
-	return fw.nb
-}
-
-// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
-type sparseFileWriter struct {
-	fw  fileWriter  // Underlying fileWriter
-	sp  sparseDatas // Normalized list of data fragments
-	pos int64       // Current position in sparse file
-}
-
-func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
-	overwrite := int64(len(b)) > sw.LogicalRemaining()
-	if overwrite {
-		b = b[:sw.LogicalRemaining()]
-	}
-
-	b0 := b
-	endPos := sw.pos + int64(len(b))
-	for endPos > sw.pos && err == nil {
-		var nf int // Bytes written in fragment
-		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
-		if sw.pos < dataStart { // In a hole fragment
-			bf := b[:min(int64(len(b)), dataStart-sw.pos)]
-			nf, err = zeroWriter{}.Write(bf)
-		} else { // In a data fragment
-			bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
-			nf, err = sw.fw.Write(bf)
-		}
-		b = b[nf:]
-		sw.pos += int64(nf)
-		if sw.pos >= dataEnd && len(sw.sp) > 1 {
-			sw.sp = sw.sp[1:] // Ensure last fragment always remains
-		}
-	}
-
-	n = len(b0) - len(b)
-	switch {
-	case err == ErrWriteTooLong:
-		return n, errMissData // Not possible; implies bug in validation logic
-	case err != nil:
-		return n, err
-	case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
-		return n, errUnrefData // Not possible; implies bug in validation logic
-	case overwrite:
-		return n, ErrWriteTooLong
-	default:
-		return n, nil
-	}
-}
-
-func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
-	rs, ok := r.(io.ReadSeeker)
-	if ok {
-		if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
-			ok = false // Not all io.Seeker can really seek
-		}
-	}
-	if !ok {
-		return io.Copy(struct{ io.Writer }{sw}, r)
-	}
-
-	var readLastByte bool
-	pos0 := sw.pos
-	for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
-		var nf int64 // Size of fragment
-		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
-		if sw.pos < dataStart { // In a hole fragment
-			nf = dataStart - sw.pos
-			if sw.PhysicalRemaining() == 0 {
-				readLastByte = true
-				nf--
-			}
-			_, err = rs.Seek(nf, io.SeekCurrent)
-		} else { // In a data fragment
-			nf = dataEnd - sw.pos
-			nf, err = io.CopyN(sw.fw, rs, nf)
-		}
-		sw.pos += nf
-		if sw.pos >= dataEnd && len(sw.sp) > 1 {
-			sw.sp = sw.sp[1:] // Ensure last fragment always remains
-		}
-	}
-
-	// If the last fragment is a hole, then seek to 1-byte before EOF, and
-	// read a single byte to ensure the file is the right size.
-	if readLastByte && err == nil {
-		_, err = mustReadFull(rs, []byte{0})
-		sw.pos++
-	}
-
-	n = sw.pos - pos0
-	switch {
-	case err == io.EOF:
-		return n, io.ErrUnexpectedEOF
-	case err == ErrWriteTooLong:
-		return n, errMissData // Not possible; implies bug in validation logic
-	case err != nil:
-		return n, err
-	case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
-		return n, errUnrefData // Not possible; implies bug in validation logic
-	default:
-		return n, ensureEOF(rs)
-	}
-}
-
-func (sw sparseFileWriter) LogicalRemaining() int64 {
-	return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
-}
-func (sw sparseFileWriter) PhysicalRemaining() int64 {
-	return sw.fw.PhysicalRemaining()
-}
-
-// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
-type zeroWriter struct{}
-
-func (zeroWriter) Write(b []byte) (int, error) {
-	for i, c := range b {
-		if c != 0 {
-			return i, errWriteHole
-		}
-	}
-	return len(b), nil
-}
-
-// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
-func ensureEOF(r io.Reader) error {
-	n, err := tryReadFull(r, []byte{0})
-	switch {
-	case n > 0:
-		return ErrWriteTooLong
-	case err == io.EOF:
-		return nil
-	default:
-		return err
-	}
-}

+ 29 - 5
vendor/github.com/beorn7/perks/quantile/stream.go

@@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
 // is guaranteed to be within (Quantile±Epsilon).
 //
 // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
 	ƒ := func(s *stream, r float64) float64 {
 		var m = math.MaxFloat64
 		var f float64
-		for quantile, epsilon := range targets {
-			if quantile*s.n <= r {
-				f = (2 * epsilon * r) / quantile
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
 			} else {
-				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
 			}
 			if f < m {
 				m = f
@@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
 	return newStream(ƒ)
 }
 
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
 // Stream computes quantiles for a stream of float64s. It is not thread-safe by
 // design. Take care when using across multiple goroutines.
 type Stream struct {

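For context, here is how the changed hot path gets exercised. This is a minimal sketch against the package's public API (`NewTargeted`, `Insert`, `Query`), not code from this repository; note that a `Stream` is not thread-safe, so concurrent writers need their own locking.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile, each with its own
	// allowed error. The invariant function ƒ built by NewTargeted
	// runs repeatedly while compressing batched samples, which is
	// why iterating a slice of targets instead of a map saves CPU.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.99: 0.001,
	})
	for i := 0; i < 100000; i++ {
		s.Insert(rand.Float64())
	}
	fmt.Println("p50:", s.Query(0.50))
	fmt.Println("p99:", s.Query(0.99))
}
```
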
+ 33 - 7
vendor/github.com/coreos/etcd/README.md

@@ -1,9 +1,12 @@
 # etcd
 
-[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/etcd)](https://goreportcard.com/report/github.com/coreos/etcd)
-[![Build Status](https://travis-ci.org/coreos/etcd.svg?branch=master)](https://travis-ci.org/coreos/etcd)
-[![Build Status](https://semaphoreci.com/api/v1/coreos/etcd/branches/master/shields_badge.svg)](https://semaphoreci.com/coreos/etcd)
-[![Docker Repository on Quay.io](https://quay.io/repository/coreos/etcd-git/status "Docker Repository on Quay.io")](https://quay.io/repository/coreos/etcd-git)
+[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/etcd?style=flat-square)](https://goreportcard.com/report/github.com/coreos/etcd)
+[![Coverage](https://codecov.io/gh/coreos/etcd/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/etcd)
+[![Build Status Travis](https://img.shields.io/travis/coreos/etcdlabs.svg?style=flat-square&&branch=master)](https://travis-ci.org/coreos/etcd)
+[![Build Status Semaphore](https://semaphoreci.com/api/v1/coreos/etcd/branches/master/shields_badge.svg)](https://semaphoreci.com/coreos/etcd)
+[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd)
+[![Releases](https://img.shields.io/github/release/coreos/etcd/all.svg?style=flat-square)](https://github.com/coreos/etcd/releases)
+[![LICENSE](https://img.shields.io/github/license/coreos/etcd.svg?style=flat-square)](https://github.com/coreos/etcd/blob/master/LICENSE)
 
 **Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries.
 
@@ -33,13 +36,21 @@ See [etcdctl][etcdctl] for a simple command line client.
 [etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
 [etcd-tests]: http://dash.etcd.io
 
+## Community meetings
+
+etcd contributors and maintainers hold bi-weekly meetings at 11:00 AM (USA Pacific) on Tuesdays. The meeting schedule is available in [iCalendar][rfc5545] format [here](meeting.ics). Anyone is welcome to join via [Zoom][zoom] or audio-only: +1 669 900 6833. An initial agenda is posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or agendas.
+
+[rfc5545]: https://tools.ietf.org/html/rfc5545
+[zoom]: https://coreos.zoom.us/j/854793406
+[shared-meeting-notes]: https://docs.google.com/document/d/1DbVXOHvd9scFsSmL2oNg4YGOHJdXqtx583DmeVWrB_M/edit#
+
 ## Getting started
 
 ### Getting etcd
 
 The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, [rkt][rkt], and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
 
-For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `master` branch. This first needs [*Go*](https://golang.org/) installed (version 1.8+ is required). All development occurs on `master`, including new features and bug fixes. Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
+For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `master` branch. This first needs [*Go*](https://golang.org/) installed (version 1.9+ is required). All development occurs on `master`, including new features and bug fixes. Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
 
 [rkt]: https://github.com/rkt/rkt/releases/
 [github-release]: https://github.com/coreos/etcd/releases/
@@ -48,7 +59,22 @@ For those wanting to try the very latest version, [build the latest version of e
 
 ### Running etcd
 
-First start a single-member cluster of etcd:
+First start a single-member cluster of etcd.
+
+If etcd is installed using the [pre-built release binaries][github-release], run it from the installation location as below:
+
+```sh
+/tmp/etcd-download-test/etcd
+```
+The etcd command can simply be run as `etcd` once it has been moved onto the system path, as below:
+
+```sh
+mv /tmp/etcd-download-test/etcd /usr/local/bin/
+
+etcd
+```
+
+If etcd was [built from the master branch][dl-build], run it as below:
 
 ```sh
 ./bin/etcd
@@ -87,7 +113,7 @@ Our [Procfile script](./Procfile) will set up a local example cluster. Start it
 goreman start
 ```
 
-This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and etcd proxy `proxy`, which runs locally and composes a cluster.
+This will bring up 3 etcd members `infra1`, `infra2` and `infra3`, plus an etcd `grpc-proxy`, all running locally to compose a cluster.
 
 Every cluster member and proxy accepts key value reads and key value writes.
 

+ 1 - 1
vendor/github.com/coreos/etcd/client/README.md

@@ -25,8 +25,8 @@ package main
 import (
 	"log"
 	"time"
+	"context"
 
-	"golang.org/x/net/context"
 	"github.com/coreos/etcd/client"
 )
 

+ 1 - 2
vendor/github.com/coreos/etcd/client/auth_role.go

@@ -16,11 +16,10 @@ package client
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"net/http"
 	"net/url"
-
-	"golang.org/x/net/context"
 )
 
 type Role struct {

+ 1 - 2
vendor/github.com/coreos/etcd/client/auth_user.go

@@ -16,12 +16,11 @@ package client
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"net/http"
 	"net/url"
 	"path"
-
-	"golang.org/x/net/context"
 )
 
 var (

+ 20 - 13
vendor/github.com/coreos/etcd/client/client.go

@@ -15,6 +15,7 @@
 package client
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -29,8 +30,6 @@ import (
 	"time"
 
 	"github.com/coreos/etcd/version"
-
-	"golang.org/x/net/context"
 )
 
 var (
@@ -372,12 +371,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
 			if err == context.Canceled || err == context.DeadlineExceeded {
 				return nil, nil, err
 			}
-			if isOneShot {
-				return nil, nil, err
-			}
-			continue
-		}
-		if resp.StatusCode/100 == 5 {
+		} else if resp.StatusCode/100 == 5 {
 			switch resp.StatusCode {
 			case http.StatusInternalServerError, http.StatusServiceUnavailable:
 				// TODO: make sure this is a no leader response
@@ -385,10 +379,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
 			default:
 				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
 			}
-			if isOneShot {
-				return nil, nil, cerr.Errors[0]
+			err = cerr.Errors[0]
+		}
+		if err != nil {
+			if !isOneShot {
+				continue
 			}
-			continue
+			c.Lock()
+			c.pinned = (k + 1) % leps
+			c.Unlock()
+			return nil, nil, err
 		}
 		if k != pinned {
 			c.Lock()
@@ -670,8 +670,15 @@ func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
 }
 
 func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
-	p := r.Perm(len(eps))
-	neps := make([]url.URL, len(eps))
+	// copied from rand.Rand.Perm as implemented in Go 1.9 and earlier
+	n := len(eps)
+	p := make([]int, n)
+	for i := 0; i < n; i++ {
+		j := r.Intn(i + 1)
+		p[i] = p[j]
+		p[j] = i
+	}
+	neps := make([]url.URL, n)
 	for i, k := range p {
 		neps[i] = eps[k]
 	}

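The hand-rolled permutation above pins the shuffle to the algorithm `rand.Rand.Perm` used through Go 1.9, so endpoint ordering stays reproducible for a given seed across Go releases. A standalone sketch of the same inside-out Fisher-Yates construction (assumed names, not the vendored code):

```go
package main

import (
	"fmt"
	"math/rand"
	"net/url"
)

// shuffle reorders eps using the inside-out Fisher-Yates permutation
// that rand.Rand.Perm used through Go 1.9, so the result for a given
// seed does not depend on which Go version this is built with.
func shuffle(r *rand.Rand, eps []url.URL) []url.URL {
	n := len(eps)
	p := make([]int, n)
	for i := 0; i < n; i++ {
		j := r.Intn(i + 1)
		p[i] = p[j]
		p[j] = i
	}
	neps := make([]url.URL, n)
	for i, k := range p {
		neps[i] = eps[k]
	}
	return neps
}

func main() {
	eps := []url.URL{{Host: "a:2379"}, {Host: "b:2379"}, {Host: "c:2379"}}
	r := rand.New(rand.NewSource(42))
	for _, ep := range shuffle(r, eps) {
		fmt.Println(ep.Host)
	}
}
```
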
+ 2 - 2
vendor/github.com/coreos/etcd/client/doc.go

@@ -19,9 +19,9 @@ Create a Config and exchange it for a Client:
 
 	import (
 		"net/http"
+		"context"
 
 		"github.com/coreos/etcd/client"
-		"golang.org/x/net/context"
 	)
 
 	cfg := client.Config{
@@ -59,7 +59,7 @@ Use a custom context to set timeouts on your operations:
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
-	// set a new key, ignoring it's previous state
+	// set a new key, ignoring its previous state
 	_, err := kAPI.Set(ctx, "/ping", "pong", nil)
 	if err != nil {
 		if err == context.DeadlineExceeded {

The file diff has been suppressed because it is too large
+ 4243 - 299
vendor/github.com/coreos/etcd/client/keys.generated.go


+ 2 - 3
vendor/github.com/coreos/etcd/client/keys.go

@@ -17,6 +17,7 @@ package client
 //go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -28,7 +29,6 @@ import (
 
 	"github.com/coreos/etcd/pkg/pathutil"
 	"github.com/ugorji/go/codec"
-	"golang.org/x/net/context"
 )
 
 const (
@@ -653,8 +653,7 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp
 	default:
 		err = unmarshalFailedKeysResponse(body)
 	}
-
-	return
+	return res, err
 }
 
 func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {

+ 2 - 3
vendor/github.com/coreos/etcd/client/members.go

@@ -16,14 +16,13 @@ package client
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"net/http"
 	"net/url"
 	"path"
 
-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/pkg/types"
 )
 
@@ -44,7 +43,7 @@ type Member struct {
 	PeerURLs []string `json:"peerURLs"`
 
 	// ClientURLs represents the HTTP(S) endpoints on which this Member
-	// serves it's client-facing APIs.
+	// serves its client-facing APIs.
 	ClientURLs []string `json:"clientURLs"`
 }
 

+ 1 - 1
vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go

@@ -121,5 +121,5 @@ func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.O
 			err = syscall.EINVAL
 		}
 	}
-	return
+	return err
 }

+ 22 - 0
vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go

@@ -30,6 +30,8 @@ func preallocExtend(f *os.File, sizeInBytes int64) error {
 }
 
 func preallocFixed(f *os.File, sizeInBytes int64) error {
+	// allocate all requested space or no space at all
+	// TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
 	fstore := &syscall.Fstore_t{
 		Flags:   syscall.F_ALLOCATEALL,
 		Posmode: syscall.F_PEOFPOSMODE,
@@ -39,5 +41,25 @@ func preallocFixed(f *os.File, sizeInBytes int64) error {
 	if errno == 0 || errno == syscall.ENOTSUP {
 		return nil
 	}
+
+	// wrong argument to fallocate syscall
+	if errno == syscall.EINVAL {
+		// filesystem "st_blocks" are allocated in the units of
+		// "Allocation Block Size" (run "diskutil info /" command)
+		var stat syscall.Stat_t
+		syscall.Fstat(int(f.Fd()), &stat)
+
+	// syscall.Statfs_t.Bsize is the "optimal transfer block size"
+	// and matches the 4,096-byte filesystem block size reported by
+	// recent OS X kernels
+		var statfs syscall.Statfs_t
+		syscall.Fstatfs(int(f.Fd()), &statfs)
+		blockSize := int64(statfs.Bsize)
+
+		if stat.Blocks*blockSize >= sizeInBytes {
+			// enough blocks are already allocated
+			return nil
+		}
+	}
 	return errno
 }

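A usage sketch for this preallocation path; it assumes the package's exported `Preallocate(f, sizeInBytes, extendFile)` wrapper, which dispatches to the platform-specific helpers above:

```go
package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	f, err := os.Create("/tmp/wal-segment")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Reserve 64 MiB up front so later appends don't stall on block
	// allocation; extendFile=true also grows the file to that size.
	// On Darwin this reaches preallocFixed above, where ENOTSUP and
	// the EINVAL "already allocated" case are treated as success.
	if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
		log.Fatal(err)
	}
}
```
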
+ 2 - 1
vendor/github.com/coreos/etcd/pkg/srv/srv.go

@@ -71,9 +71,10 @@ func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error)
 			// SRV records have a trailing dot but URL shouldn't.
 			shortHost := strings.TrimSuffix(srv.Target, ".")
 			urlHost := net.JoinHostPort(shortHost, port)
-			stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
 			if ok && url.Scheme != scheme {
 				err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
+			} else {
+				stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
 			}
 		}
 		if len(stringParts) == 0 {

+ 2 - 2
vendor/github.com/coreos/etcd/pkg/types/set.go

@@ -61,7 +61,7 @@ func (us *unsafeSet) Remove(value string) {
 // Contains returns whether the set contains the given value
 func (us *unsafeSet) Contains(value string) (exists bool) {
 	_, exists = us.d[value]
-	return
+	return exists
 }
 
 // ContainsAll returns whether the set contains all given values
@@ -94,7 +94,7 @@ func (us *unsafeSet) Values() (values []string) {
 	for val := range us.d {
 		values = append(values, val)
 	}
-	return
+	return values
 }
 
 // Copy creates a new Set containing the values of the first

+ 6 - 6
vendor/github.com/coreos/etcd/raft/README.md

@@ -25,12 +25,12 @@ This raft implementation is a full feature implementation of Raft protocol. Feat
 - Membership changes
 - Leadership transfer extension
 - Efficient linearizable read-only queries served by both the leader and followers
- - leader checks with quorum and bypasses Raft log before processing read-only queries
- - followers asks leader to get a safe read index before processing read-only queries
+  - leader checks with quorum and bypasses Raft log before processing read-only queries
+  - followers ask the leader for a safe read index before processing read-only queries
 - More efficient lease-based linearizable read-only queries served by both the leader and followers
- - leader bypasses Raft log and processing read-only queries locally
- - followers asks leader to get a safe read index before processing read-only queries
- - this approach relies on the clock of the all the machines in raft group
+  - leader bypasses Raft log and processes read-only queries locally
+  - followers ask the leader for a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group
 
 This raft implementation also includes a few optional enhancements:
 
@@ -112,7 +112,7 @@ After creating a Node, the user has a few responsibilities:
 
 First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
 
-1. Write HardState, Entries, and Snapshot to persistent storage if they are not empty. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
+1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
 
 2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
 
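A sketch of steps 1 and 2 against the public raft API (not code from this repository; `send` stands in for the application's transport, and error handling is elided):

```go
package raftexample

import (
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

// run drains Ready batches in the order the README prescribes:
// persist Entries, then HardState and Snapshot, and only then send.
func run(n raft.Node, storage *raft.MemoryStorage, send func([]raftpb.Message)) {
	for rd := range n.Ready() {
		storage.Append(rd.Entries) // step 1: entries first
		if !raft.IsEmptyHardState(rd.HardState) {
			storage.SetHardState(rd.HardState)
		}
		if !raft.IsEmptySnap(rd.Snapshot) {
			storage.ApplySnapshot(rd.Snapshot)
		}
		send(rd.Messages) // step 2: safe only after persisting
		n.Advance()
	}
}
```
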

+ 4 - 2
vendor/github.com/coreos/etcd/raft/node.go

@@ -15,10 +15,10 @@
 package raft
 
 import (
+	"context"
 	"errors"
 
 	pb "github.com/coreos/etcd/raft/raftpb"
-	"golang.org/x/net/context"
 )
 
 type SnapshotStatus int
@@ -319,7 +319,7 @@ func (n *node) run(r *raft) {
 			r.Step(m)
 		case m := <-n.recvc:
 			// filter out response message from unknown From.
-			if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m.Type) {
+			if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
 				r.Step(m) // raft never returns an error
 			}
 		case cc := <-n.confc:
@@ -334,6 +334,8 @@ func (n *node) run(r *raft) {
 			switch cc.Type {
 			case pb.ConfChangeAddNode:
 				r.addNode(cc.NodeID)
+			case pb.ConfChangeAddLearnerNode:
+				r.addLearner(cc.NodeID)
 			case pb.ConfChangeRemoveNode:
 				// block incoming proposal when local node is
 				// removed

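The new `ConfChangeAddLearnerNode` case is reached by proposing a conf change of that type; a minimal sketch (assuming an already-running `raft.Node`):

```go
package raftexample

import (
	"context"

	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

// addLearner proposes node id as a learner. Once the entry commits and
// the application hands it back via n.ApplyConfChange, the
// ConfChangeAddLearnerNode case above calls r.addLearner.
func addLearner(ctx context.Context, n raft.Node, id uint64) error {
	cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddLearnerNode, NodeID: id}
	return n.ProposeConfChange(ctx, cc)
}
```
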
+ 6 - 1
vendor/github.com/coreos/etcd/raft/progress.go

@@ -48,6 +48,7 @@ type Progress struct {
 	// When in ProgressStateSnapshot, leader should have sent out snapshot
 	// before and stops sending any replication message.
 	State ProgressStateType
+
 	// Paused is used in ProgressStateProbe.
 	// When Paused is true, raft should pause sending replication message to this peer.
 	Paused bool
@@ -76,6 +77,9 @@ type Progress struct {
 	// be freed by calling inflights.freeTo with the index of the last
 	// received entry.
 	ins *inflights
+
+	// IsLearner is true if this progress is tracked for a learner.
+	IsLearner bool
 }
 
 func (pr *Progress) resetState(state ProgressStateType) {
@@ -243,7 +247,8 @@ func (in *inflights) freeTo(to uint64) {
 		return
 	}
 
-	i, idx := 0, in.start
+	idx := in.start
+	var i int
 	for i = 0; i < in.count; i++ {
 		if to < in.buffer[idx] { // found the first large inflight
 			break

+ 212 - 63
vendor/github.com/coreos/etcd/raft/raft.go

@@ -116,6 +116,10 @@ type Config struct {
 	// used for testing right now.
 	peers []uint64
 
+	// learners contains the IDs of all learner nodes (including self if the local node is a learner) in the raft cluster.
+	// Learners only receive entries from the leader node. They do not vote or promote themselves.
+	learners []uint64
+
 	// ElectionTick is the number of Node.Tick invocations that must pass between
 	// elections. That is, if a follower does not receive any message from the
 	// leader of current term before ElectionTick has elapsed, it will become
@@ -171,11 +175,22 @@ type Config struct {
 	// If the clock drift is unbounded, leader might keep the lease longer than it
 	// should (clock can move backward/pause without any bound). ReadIndex is not safe
 	// in that case.
+	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
 	ReadOnlyOption ReadOnlyOption
 
 	// Logger is the logger used for raft log. For multinode which can host
 	// multiple raft group, each raft group can have its own logger
 	Logger Logger
+
+	// DisableProposalForwarding set to true means that followers will drop
+	// proposals, rather than forwarding them to the leader. One use case for
+	// this feature would be in a situation where the Raft leader is used to
+	// compute the data of a proposal, for example, adding a timestamp from a
+	// hybrid logical clock to data in a monotonically increasing way. Forwarding
+	// should be disabled to prevent a follower with an inaccurate hybrid
+	// logical clock from assigning the timestamp and then forwarding the data
+	// to the leader.
+	DisableProposalForwarding bool
 }
 
 func (c *Config) validate() error {
@@ -203,6 +218,10 @@ func (c *Config) validate() error {
 		c.Logger = raftLogger
 	}
 
+	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
+		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
+	}
+
 	return nil
 }
 
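To illustrate the validation added above: a Config that selects lease-based reads must also enable CheckQuorum, or construction fails. A sketch with assumed tick and size values:

```go
package raftexample

import "github.com/coreos/etcd/raft"

func newConfig(id uint64, storage *raft.MemoryStorage) *raft.Config {
	return &raft.Config{
		ID:              id,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
		// Lease-based reads trust the leader's clock for a lease
		// window, so CheckQuorum must be on or validate() errors.
		CheckQuorum:    true,
		ReadOnlyOption: raft.ReadOnlyLeaseBased,
	}
}
```
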
@@ -220,9 +239,13 @@ type raft struct {
 	maxInflight int
 	maxMsgSize  uint64
 	prs         map[uint64]*Progress
+	learnerPrs  map[uint64]*Progress
 
 	state StateType
 
+	// isLearner is true if the local raft node is a learner.
+	isLearner bool
+
 	votes map[uint64]bool
 
 	msgs []pb.Message
@@ -256,6 +279,7 @@ type raft struct {
 	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
 	// when raft changes its state to follower or candidate.
 	randomizedElectionTimeout int
+	disableProposalForwarding bool
 
 	tick func()
 	step stepFunc
@@ -273,32 +297,47 @@ func newRaft(c *Config) *raft {
 		panic(err) // TODO(bdarnell)
 	}
 	peers := c.peers
-	if len(cs.Nodes) > 0 {
-		if len(peers) > 0 {
+	learners := c.learners
+	if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
+		if len(peers) > 0 || len(learners) > 0 {
 			// TODO(bdarnell): the peers argument is always nil except in
 			// tests; the argument should be removed and these tests should be
 			// updated to specify their nodes through a snapshot.
-			panic("cannot specify both newRaft(peers) and ConfState.Nodes)")
+			panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
 		}
 		peers = cs.Nodes
+		learners = cs.Learners
 	}
 	r := &raft{
-		id:               c.ID,
-		lead:             None,
-		raftLog:          raftlog,
-		maxMsgSize:       c.MaxSizePerMsg,
-		maxInflight:      c.MaxInflightMsgs,
-		prs:              make(map[uint64]*Progress),
-		electionTimeout:  c.ElectionTick,
-		heartbeatTimeout: c.HeartbeatTick,
-		logger:           c.Logger,
-		checkQuorum:      c.CheckQuorum,
-		preVote:          c.PreVote,
-		readOnly:         newReadOnly(c.ReadOnlyOption),
+		id:                        c.ID,
+		lead:                      None,
+		isLearner:                 false,
+		raftLog:                   raftlog,
+		maxMsgSize:                c.MaxSizePerMsg,
+		maxInflight:               c.MaxInflightMsgs,
+		prs:                       make(map[uint64]*Progress),
+		learnerPrs:                make(map[uint64]*Progress),
+		electionTimeout:           c.ElectionTick,
+		heartbeatTimeout:          c.HeartbeatTick,
+		logger:                    c.Logger,
+		checkQuorum:               c.CheckQuorum,
+		preVote:                   c.PreVote,
+		readOnly:                  newReadOnly(c.ReadOnlyOption),
+		disableProposalForwarding: c.DisableProposalForwarding,
 	}
 	for _, p := range peers {
 		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
 	}
+	for _, p := range learners {
+		if _, ok := r.prs[p]; ok {
+			panic(fmt.Sprintf("node %x is in both learner and peer list", p))
+		}
+		r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
+		if r.id == p {
+			r.isLearner = true
+		}
+	}
+
 	if !isHardStateEqual(hs, emptyState) {
 		r.loadState(hs)
 	}
@@ -332,10 +371,13 @@ func (r *raft) hardState() pb.HardState {
 func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
 
 func (r *raft) nodes() []uint64 {
-	nodes := make([]uint64, 0, len(r.prs))
+	nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
 	for id := range r.prs {
 		nodes = append(nodes, id)
 	}
+	for id := range r.learnerPrs {
+		nodes = append(nodes, id)
+	}
 	sort.Sort(uint64Slice(nodes))
 	return nodes
 }
@@ -343,10 +385,20 @@ func (r *raft) nodes() []uint64 {
 // send persists state to stable storage and then sends to its mailbox.
 func (r *raft) send(m pb.Message) {
 	m.From = r.id
-	if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
 		if m.Term == 0 {
-			// PreVote RPCs are sent at a term other than our actual term, so the code
-			// that sends these messages is responsible for setting the term.
+			// All {pre-,}campaign messages need to have the term set when
+			// sending.
+			// - MsgVote: m.Term is the term the node is campaigning for,
+			//   non-zero as we increment the term when campaigning.
+			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
+			//   granted, non-zero for the same reason MsgVote is
+			// - MsgPreVote: m.Term is the term the node will campaign,
+			//   non-zero as we use m.Term to indicate the next term we'll be
+			//   campaigning for
+			// - MsgPreVoteResp: m.Term is the term received in the original
+			//   MsgPreVote if the pre-vote was granted, non-zero for the
+			//   same reasons MsgPreVote is
 			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
 		}
 	} else {
@@ -364,9 +416,17 @@ func (r *raft) send(m pb.Message) {
 	r.msgs = append(r.msgs, m)
 }
 
+func (r *raft) getProgress(id uint64) *Progress {
+	if pr, ok := r.prs[id]; ok {
+		return pr
+	}
+
+	return r.learnerPrs[id]
+}
+
 // sendAppend sends RPC, with entries to the given peer.
 func (r *raft) sendAppend(to uint64) {
-	pr := r.prs[to]
+	pr := r.getProgress(to)
 	if pr.IsPaused() {
 		return
 	}
@@ -431,7 +491,7 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
 	// or it might not have all the committed entries.
 	// The leader MUST NOT forward the follower's commit to
 	// an unmatched index.
-	commit := min(r.prs[to].Match, r.raftLog.committed)
+	commit := min(r.getProgress(to).Match, r.raftLog.committed)
 	m := pb.Message{
 		To:      to,
 		Type:    pb.MsgHeartbeat,
@@ -442,15 +502,26 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
 	r.send(m)
 }
 
+func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
+	for id, pr := range r.prs {
+		f(id, pr)
+	}
+
+	for id, pr := range r.learnerPrs {
+		f(id, pr)
+	}
+}
+
 // bcastAppend sends RPC, with entries to all peers that are not up-to-date
 // according to the progress recorded in r.prs.
 func (r *raft) bcastAppend() {
-	for id := range r.prs {
+	r.forEachProgress(func(id uint64, _ *Progress) {
 		if id == r.id {
-			continue
+			return
 		}
+
 		r.sendAppend(id)
-	}
+	})
 }
 
 // bcastHeartbeat sends RPC, without entries to all the peers.
@@ -464,12 +535,12 @@ func (r *raft) bcastHeartbeat() {
 }
 
 func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
-	for id := range r.prs {
+	r.forEachProgress(func(id uint64, _ *Progress) {
 		if id == r.id {
-			continue
+			return
 		}
 		r.sendHeartbeat(id, ctx)
-	}
+	})
 }
 
 // maybeCommit attempts to advance the commit index. Returns true if
@@ -478,8 +549,8 @@ func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
 func (r *raft) maybeCommit() bool {
 	// TODO(bmizerany): optimize.. Currently naive
 	mis := make(uint64Slice, 0, len(r.prs))
-	for id := range r.prs {
-		mis = append(mis, r.prs[id].Match)
+	for _, p := range r.prs {
+		mis = append(mis, p.Match)
 	}
 	sort.Sort(sort.Reverse(mis))
 	mci := mis[r.quorum()-1]
@@ -500,12 +571,13 @@ func (r *raft) reset(term uint64) {
 	r.abortLeaderTransfer()
 
 	r.votes = make(map[uint64]bool)
-	for id := range r.prs {
-		r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)}
+	r.forEachProgress(func(id uint64, pr *Progress) {
+		*pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
 		if id == r.id {
-			r.prs[id].Match = r.raftLog.lastIndex()
+			pr.Match = r.raftLog.lastIndex()
 		}
-	}
+	})
+
 	r.pendingConf = false
 	r.readOnly = newReadOnly(r.readOnly.option)
 }
@@ -517,7 +589,7 @@ func (r *raft) appendEntry(es ...pb.Entry) {
 		es[i].Index = li + 1 + uint64(i)
 	}
 	r.raftLog.append(es...)
-	r.prs[r.id].maybeUpdate(r.raftLog.lastIndex())
+	r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
 	// Regardless of maybeCommit's return, our caller will call bcastAppend.
 	r.maybeCommit()
 }
@@ -589,6 +661,7 @@ func (r *raft) becomePreCandidate() {
 	// but doesn't change anything else. In particular it does not increase
 	// r.Term or change r.Vote.
 	r.step = stepCandidate
+	r.votes = make(map[uint64]bool)
 	r.tick = r.tickElection
 	r.state = StatePreCandidate
 	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
@@ -682,7 +755,6 @@ func (r *raft) Step(m pb.Message) error {
 	case m.Term == 0:
 		// local message
 	case m.Term > r.Term:
-		lead := m.From
 		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
 			force := bytes.Equal(m.Context, []byte(campaignTransfer))
 			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
@@ -693,7 +765,6 @@ func (r *raft) Step(m pb.Message) error {
 					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
 				return nil
 			}
-			lead = None
 		}
 		switch {
 		case m.Type == pb.MsgPreVote:
@@ -707,7 +778,11 @@ func (r *raft) Step(m pb.Message) error {
 		default:
 			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
 				r.id, r.Term, m.Type, m.From, m.Term)
-			r.becomeFollower(m.Term, lead)
+			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
+				r.becomeFollower(m.Term, m.From)
+			} else {
+				r.becomeFollower(m.Term, None)
+			}
 		}
 
 	case m.Term < r.Term:
@@ -757,12 +832,27 @@ func (r *raft) Step(m pb.Message) error {
 		}
 
 	case pb.MsgVote, pb.MsgPreVote:
+		if r.isLearner {
+			// TODO: learner may need to vote, in case of node down when confchange.
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			return nil
+		}
 		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
 		// always equal r.Term.
 		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
 			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
 				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type)})
+			// When responding to Msg{Pre,}Vote messages we include the term
+			// from the message, not the local term. To see why consider the
+			// case where a single node was previously partitioned away and
+			// it's local term is now of date. If we include the local term
+			// (recall that for pre-votes we don't update the local term), the
+			// (pre-)campaigning node on the other end will proceed to ignore
+			// the message (it ignores all out of date messages).
+			// The term in the original message and current local term are the
+			// same in the case of regular votes, but different for pre-votes.
+			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
 			if m.Type == pb.MsgVote {
 				// Only record real votes.
 				r.electionElapsed = 0
@@ -771,7 +861,7 @@ func (r *raft) Step(m pb.Message) error {
 		} else {
 			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
 				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type), Reject: true})
+			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
 		}
 
 	default:
@@ -836,10 +926,7 @@ func stepLeader(r *raft, m pb.Message) {
 				r.readOnly.addRequest(r.raftLog.committed, m)
 				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
 			case ReadOnlyLeaseBased:
-				var ri uint64
-				if r.checkQuorum {
-					ri = r.raftLog.committed
-				}
+				ri := r.raftLog.committed
 				if m.From == None || m.From == r.id { // from local member
 					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
 				} else {
@@ -854,8 +941,8 @@ func stepLeader(r *raft, m pb.Message) {
 	}
 
 	// All other message types require a progress for m.From (pr).
-	pr, prOk := r.prs[m.From]
-	if !prOk {
+	pr := r.getProgress(m.From)
+	if pr == nil {
 		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
 		return
 	}
@@ -954,6 +1041,10 @@ func stepLeader(r *raft, m pb.Message) {
 		}
 		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
 	case pb.MsgTransferLeader:
+		if pr.IsLearner {
+			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
+			return
+		}
 		leadTransferee := m.From
 		lastLeadTransferee := r.leadTransferee
 		if lastLeadTransferee != None {
@@ -1033,6 +1124,9 @@ func stepFollower(r *raft, m pb.Message) {
 		if r.lead == None {
 			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
 			return
+		} else if r.disableProposalForwarding {
+			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
+			return
 		}
 		m.To = r.lead
 		r.send(m)
@@ -1127,20 +1221,37 @@ func (r *raft) restore(s pb.Snapshot) bool {
 		return false
 	}
 
+	// A normal peer can't become a learner.
+	if !r.isLearner {
+		for _, id := range s.Metadata.ConfState.Learners {
+			if id == r.id {
+				r.logger.Errorf("%x can't become a learner when restoring snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
+				return false
+			}
+		}
+	}
+
 	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
 		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
 
 	r.raftLog.restore(s)
 	r.prs = make(map[uint64]*Progress)
-	for _, n := range s.Metadata.ConfState.Nodes {
+	r.learnerPrs = make(map[uint64]*Progress)
+	r.restoreNode(s.Metadata.ConfState.Nodes, false)
+	r.restoreNode(s.Metadata.ConfState.Learners, true)
+	return true
+}
+
+func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
+	for _, n := range nodes {
 		match, next := uint64(0), r.raftLog.lastIndex()+1
 		if n == r.id {
 			match = next - 1
+			r.isLearner = isLearner
 		}
-		r.setProgress(n, match, next)
-		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n])
+		r.setProgress(n, match, next, isLearner)
+		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
 	}
-	return true
 }
 
 // promotable indicates whether state machine can be promoted to leader,
@@ -1151,18 +1262,46 @@ func (r *raft) promotable() bool {
 }
 
 func (r *raft) addNode(id uint64) {
+	r.addNodeOrLearnerNode(id, false)
+}
+
+func (r *raft) addLearner(id uint64) {
+	r.addNodeOrLearnerNode(id, true)
+}
+
+func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
 	r.pendingConf = false
-	if _, ok := r.prs[id]; ok {
-		// Ignore any redundant addNode calls (which can happen because the
-		// initial bootstrapping entries are applied twice).
-		return
+	pr := r.getProgress(id)
+	if pr == nil {
+		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
+	} else {
+		if isLearner && !pr.IsLearner {
+			// can only change Learner to Voter
+			r.logger.Infof("%x ignored addLearner: changing %x from raft peer to learner is not supported.", r.id, id)
+			return
+		}
+
+		if isLearner == pr.IsLearner {
+			// Ignore any redundant addNode calls (which can happen because the
+			// initial bootstrapping entries are applied twice).
+			return
+		}
+
+		// change Learner to Voter, use origin Learner progress
+		delete(r.learnerPrs, id)
+		pr.IsLearner = false
+		r.prs[id] = pr
+	}
+
+	if r.id == id {
+		r.isLearner = isLearner
 	}
 
-	r.setProgress(id, 0, r.raftLog.lastIndex()+1)
 	// When a node is first added, we should mark it as recently active.
 	// Otherwise, CheckQuorum may cause us to step down if it is invoked
 	// before the added node has a chance to communicate with us.
-	r.prs[id].RecentActive = true
+	pr = r.getProgress(id)
+	pr.RecentActive = true
 }
 
 func (r *raft) removeNode(id uint64) {
@@ -1170,7 +1309,7 @@ func (r *raft) removeNode(id uint64) {
 	r.pendingConf = false
 
 	// do not try to commit or abort transferring if there is no nodes in the cluster.
-	if len(r.prs) == 0 {
+	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
 		return
 	}
 
@@ -1187,12 +1326,22 @@ func (r *raft) removeNode(id uint64) {
 
 func (r *raft) resetPendingConf() { r.pendingConf = false }
 
-func (r *raft) setProgress(id, match, next uint64) {
-	r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
+	if !isLearner {
+		delete(r.learnerPrs, id)
+		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+		return
+	}
+
+	if _, ok := r.prs[id]; ok {
+		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
+	}
+	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
 }
 
 func (r *raft) delProgress(id uint64) {
 	delete(r.prs, id)
+	delete(r.learnerPrs, id)
 }
 
 func (r *raft) loadState(state pb.HardState) {
@@ -1222,18 +1371,18 @@ func (r *raft) resetRandomizedElectionTimeout() {
 func (r *raft) checkQuorumActive() bool {
 	var act int
 
-	for id := range r.prs {
+	r.forEachProgress(func(id uint64, pr *Progress) {
 		if id == r.id { // self is always active
 			act++
-			continue
+			return
 		}
 
-		if r.prs[id].RecentActive {
+		if pr.RecentActive && !pr.IsLearner {
 			act++
 		}
 
-		r.prs[id].RecentActive = false
-	}
+		pr.RecentActive = false
+	})
 
 	return act >= r.quorum()
 }

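One behavioral point from these hunks worth making explicit: learners live in a separate progress map and never count toward quorum. A toy stand-in for `checkQuorumActive` (not the vendored code) showing the arithmetic:

```go
package raftexample

// progress is a cut-down stand-in for raft's *Progress.
type progress struct {
	recentActive bool
	isLearner    bool
}

// activeQuorum mirrors checkQuorumActive above: the local node always
// counts, learner peers never do, and recentActive is cleared as a
// side effect so the next round starts fresh. voters is the number of
// voting members, matching quorum() = voters/2 + 1 in the real code.
func activeQuorum(self uint64, prs map[uint64]*progress, voters int) bool {
	act := 0
	for id, pr := range prs {
		if id == self { // self is always active
			act++
			continue
		}
		if pr.recentActive && !pr.isLearner {
			act++
		}
		pr.recentActive = false
	}
	return act >= voters/2+1 // quorum over voting members only
}
```
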
+ 193 - 89
vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go

@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: raft.proto
-// DO NOT EDIT!
 
 /*
 	Package raftpb is a generated protocol buffer package.
@@ -26,6 +25,8 @@ import (
 
 	math "math"
 
+	_ "github.com/gogo/protobuf/gogoproto"
+
 	io "io"
 )
 
@@ -162,20 +163,23 @@ func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft,
 type ConfChangeType int32
 
 const (
-	ConfChangeAddNode    ConfChangeType = 0
-	ConfChangeRemoveNode ConfChangeType = 1
-	ConfChangeUpdateNode ConfChangeType = 2
+	ConfChangeAddNode        ConfChangeType = 0
+	ConfChangeRemoveNode     ConfChangeType = 1
+	ConfChangeUpdateNode     ConfChangeType = 2
+	ConfChangeAddLearnerNode ConfChangeType = 3
 )
 
 var ConfChangeType_name = map[int32]string{
 	0: "ConfChangeAddNode",
 	1: "ConfChangeRemoveNode",
 	2: "ConfChangeUpdateNode",
+	3: "ConfChangeAddLearnerNode",
 }
 var ConfChangeType_value = map[string]int32{
-	"ConfChangeAddNode":    0,
-	"ConfChangeRemoveNode": 1,
-	"ConfChangeUpdateNode": 2,
+	"ConfChangeAddNode":        0,
+	"ConfChangeRemoveNode":     1,
+	"ConfChangeUpdateNode":     2,
+	"ConfChangeAddLearnerNode": 3,
 }
 
 func (x ConfChangeType) Enum() *ConfChangeType {
@@ -267,6 +271,7 @@ func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []in
 
 type ConfState struct {
 	Nodes            []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
+	Learners         []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
 	XXX_unrecognized []byte   `json:"-"`
 }
 
@@ -537,6 +542,13 @@ func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
 			i = encodeVarintRaft(dAtA, i, uint64(num))
 		}
 	}
+	if len(m.Learners) > 0 {
+		for _, num := range m.Learners {
+			dAtA[i] = 0x10
+			i++
+			i = encodeVarintRaft(dAtA, i, uint64(num))
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
@@ -579,24 +591,6 @@ func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
-func encodeFixed64Raft(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Raft(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -700,6 +694,11 @@ func (m *ConfState) Size() (n int) {
 			n += 1 + sovRaft(uint64(e))
 		}
 	}
+	if len(m.Learners) > 0 {
+		for _, e := range m.Learners {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -1558,25 +1557,129 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
 		}
 		switch fieldNum {
 		case 1:
-			if wireType != 0 {
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Nodes = append(m.Nodes, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Nodes = append(m.Nodes, v)
+				}
+			} else {
 				return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
 			}
-			var v uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
+		case 2:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
 				}
-				if iNdEx >= l {
+				m.Learners = append(m.Learners, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex > l {
 					return io.ErrUnexpectedEOF
 				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Learners = append(m.Learners, v)
 				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
 			}
-			m.Nodes = append(m.Nodes, v)
 		default:
 			iNdEx = preIndex
 			skippy, err := skipRaft(dAtA[iNdEx:])
@@ -1846,55 +1949,56 @@ var (
 func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }
 
 var fileDescriptorRaft = []byte{
-	// 790 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46,
-	0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e,
-	0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc,
-	0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79,
-	0x80, 0x3c, 0x40, 0x2e, 0x79, 0x1f, 0x1f, 0x0d, 0xe4, 0x1e, 0xc4, 0xce, 0x8b, 0x04, 0xbb, 0x5c,
-	0x4a, 0x94, 0x74, 0xdb, 0xf9, 0xbe, 0xe1, 0xcc, 0x37, 0xdf, 0xce, 0x12, 0x40, 0xd0, 0xa9, 0x3c,
-	0x8e, 0x04, 0x97, 0x1c, 0x17, 0xd5, 0x39, 0xba, 0xde, 0x6f, 0xf8, 0xdc, 0xe7, 0x1a, 0xfa, 0x4d,
-	0x9d, 0x12, 0xb6, 0xfd, 0x0e, 0x0a, 0x7f, 0x87, 0x52, 0xdc, 0xe3, 0x5f, 0xc1, 0x19, 0xdf, 0x47,
-	0x8c, 0x58, 0x2d, 0xab, 0x53, 0xeb, 0xd6, 0x8f, 0x93, 0xaf, 0x8e, 0x35, 0xa9, 0x88, 0x53, 0xe7,
-	0xe1, 0xcb, 0x4f, 0xb9, 0x91, 0x4e, 0xc2, 0x04, 0x9c, 0x31, 0x13, 0x01, 0xb1, 0x5b, 0x56, 0xc7,
-	0x59, 0x32, 0x4c, 0x04, 0x78, 0x1f, 0x0a, 0x83, 0xd0, 0x63, 0x77, 0x24, 0x9f, 0xa1, 0x12, 0x08,
-	0x63, 0x70, 0xfa, 0x54, 0x52, 0xe2, 0xb4, 0xac, 0x4e, 0x75, 0xa4, 0xcf, 0xed, 0xf7, 0x16, 0xa0,
-	0xcb, 0x90, 0x46, 0xf1, 0x8c, 0xcb, 0x21, 0x93, 0xd4, 0xa3, 0x92, 0xe2, 0x3f, 0x01, 0x26, 0x3c,
-	0x9c, 0xbe, 0x8a, 0x25, 0x95, 0x89, 0x22, 0x77, 0xa5, 0xa8, 0xc7, 0xc3, 0xe9, 0xa5, 0x22, 0x4c,
-	0xf1, 0xca, 0x24, 0x05, 0x54, 0xf3, 0xb9, 0x6e, 0x9e, 0xd5, 0x95, 0x40, 0x4a, 0xb2, 0x54, 0x92,
-	0xb3, 0xba, 0x34, 0xd2, 0xfe, 0x1f, 0xca, 0xa9, 0x02, 0x25, 0x51, 0x29, 0xd0, 0x3d, 0xab, 0x23,
-	0x7d, 0xc6, 0x7f, 0x41, 0x39, 0x30, 0xca, 0x74, 0x61, 0xb7, 0x4b, 0x52, 0x2d, 0x9b, 0xca, 0x4d,
-	0xdd, 0x65, 0x7e, 0xfb, 0x53, 0x1e, 0x4a, 0x43, 0x16, 0xc7, 0xd4, 0x67, 0xf8, 0x08, 0x1c, 0xb9,
-	0x72, 0x78, 0x2f, 0xad, 0x61, 0xe8, 0xac, 0xc7, 0x2a, 0x0d, 0x37, 0xc0, 0x96, 0x7c, 0x6d, 0x12,
-	0x5b, 0x72, 0x35, 0xc6, 0x54, 0xf0, 0x8d, 0x31, 0x14, 0xb2, 0x1c, 0xd0, 0xd9, 0x1c, 0x10, 0x37,
-	0xa1, 0x74, 0xc3, 0x7d, 0x7d, 0x61, 0x85, 0x0c, 0x99, 0x82, 0x2b, 0xdb, 0x8a, 0xdb, 0xb6, 0x1d,
-	0x41, 0x89, 0x85, 0x52, 0xcc, 0x59, 0x4c, 0x4a, 0xad, 0x7c, 0xc7, 0xed, 0xee, 0xac, 0x6d, 0x46,
-	0x5a, 0xca, 0xe4, 0xe0, 0x03, 0x28, 0x4e, 0x78, 0x10, 0xcc, 0x25, 0x29, 0x67, 0x6a, 0x19, 0x0c,
-	0x77, 0xa1, 0x1c, 0x1b, 0xc7, 0x48, 0x45, 0x3b, 0x89, 0x36, 0x9d, 0x4c, 0x1d, 0x4c, 0xf3, 0x54,
-	0x45, 0xc1, 0x5e, 0xb3, 0x89, 0x24, 0xd0, 0xb2, 0x3a, 0xe5, 0xb4, 0x62, 0x82, 0xe1, 0x5f, 0x00,
-	0x92, 0xd3, 0xd9, 0x3c, 0x94, 0xc4, 0xcd, 0xf4, 0xcc, 0xe0, 0x98, 0x40, 0x69, 0xc2, 0x43, 0xc9,
-	0xee, 0x24, 0xa9, 0xea, 0x8b, 0x4d, 0xc3, 0xf6, 0x4b, 0xa8, 0x9c, 0x51, 0xe1, 0x25, 0xeb, 0x93,
-	0x3a, 0x68, 0x6d, 0x39, 0x48, 0xc0, 0xb9, 0xe5, 0x92, 0xad, 0xef, 0xbb, 0x42, 0x32, 0x03, 0xe7,
-	0xb7, 0x07, 0x6e, 0xff, 0x0c, 0x95, 0xe5, 0xba, 0xe2, 0x06, 0x14, 0x42, 0xee, 0xb1, 0x98, 0x58,
-	0xad, 0x7c, 0xc7, 0x19, 0x25, 0x41, 0xfb, 0x83, 0x05, 0xa0, 0x72, 0x7a, 0x33, 0x1a, 0xfa, 0xfa,
-	0xd6, 0x07, 0xfd, 0x35, 0x05, 0xf6, 0xa0, 0x8f, 0x7f, 0x37, 0x8f, 0xd3, 0xd6, 0xab, 0xf3, 0x63,
-	0xf6, 0x29, 0x24, 0xdf, 0x6d, 0xbd, 0xd0, 0x03, 0x28, 0x9e, 0x73, 0x8f, 0x0d, 0xfa, 0xeb, 0xba,
-	0x12, 0x4c, 0x19, 0xd2, 0x33, 0x86, 0x24, 0x8f, 0x31, 0x0d, 0x0f, 0xff, 0x80, 0xca, 0xf2, 0xc9,
-	0xe3, 0x5d, 0x70, 0x75, 0x70, 0xce, 0x45, 0x40, 0x6f, 0x50, 0x0e, 0xef, 0xc1, 0xae, 0x06, 0x56,
-	0x8d, 0x91, 0x75, 0xf8, 0xd9, 0x06, 0x37, 0xb3, 0xc4, 0x18, 0xa0, 0x38, 0x8c, 0xfd, 0xb3, 0x45,
-	0x84, 0x72, 0xd8, 0x85, 0xd2, 0x30, 0xf6, 0x4f, 0x19, 0x95, 0xc8, 0x32, 0xc1, 0x85, 0xe0, 0x11,
-	0xb2, 0x4d, 0xd6, 0x49, 0x14, 0xa1, 0x3c, 0xae, 0x01, 0x24, 0xe7, 0x11, 0x8b, 0x23, 0xe4, 0x98,
-	0xc4, 0xff, 0xb8, 0x64, 0xa8, 0xa0, 0x44, 0x98, 0x40, 0xb3, 0x45, 0xc3, 0xaa, 0x85, 0x41, 0x25,
-	0x8c, 0xa0, 0xaa, 0x9a, 0x31, 0x2a, 0xe4, 0xb5, 0xea, 0x52, 0xc6, 0x0d, 0x40, 0x59, 0x44, 0x7f,
-	0x54, 0xc1, 0x18, 0x6a, 0xc3, 0xd8, 0xbf, 0x0a, 0x05, 0xa3, 0x93, 0x19, 0xbd, 0xbe, 0x61, 0x08,
-	0x70, 0x1d, 0x76, 0x4c, 0x21, 0x75, 0x41, 0x8b, 0x18, 0xb9, 0x26, 0xad, 0x37, 0x63, 0x93, 0x37,
-	0xff, 0x2e, 0xb8, 0x58, 0x04, 0xa8, 0x8a, 0x7f, 0x80, 0xfa, 0x30, 0xf6, 0xc7, 0x82, 0x86, 0xf1,
-	0x94, 0x89, 0x7f, 0x18, 0xf5, 0x98, 0x40, 0x3b, 0xe6, 0xeb, 0xf1, 0x3c, 0x60, 0x7c, 0x21, 0xcf,
-	0xf9, 0x5b, 0x54, 0x33, 0x62, 0x46, 0x8c, 0x7a, 0xfa, 0x87, 0x87, 0x76, 0x8d, 0x98, 0x25, 0xa2,
-	0xc5, 0x20, 0x33, 0xef, 0x85, 0x60, 0x7a, 0xc4, 0xba, 0xe9, 0x6a, 0x62, 0x9d, 0x83, 0x0f, 0x5f,
-	0x40, 0x6d, 0xfd, 0x7a, 0x95, 0x8e, 0x15, 0x72, 0xe2, 0x79, 0xea, 0x2e, 0x51, 0x0e, 0x13, 0x68,
-	0xac, 0xe0, 0x11, 0x0b, 0xf8, 0x2d, 0xd3, 0x8c, 0xb5, 0xce, 0x5c, 0x45, 0x1e, 0x95, 0x09, 0x63,
-	0x9f, 0x92, 0x87, 0xa7, 0x66, 0xee, 0xf1, 0xa9, 0x99, 0x7b, 0x78, 0x6e, 0x5a, 0x8f, 0xcf, 0x4d,
-	0xeb, 0xeb, 0x73, 0xd3, 0xfa, 0xf8, 0xad, 0x99, 0xfb, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x30,
-	0x01, 0x41, 0x3a, 0x06, 0x00, 0x00,
+	// 815 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45,
+	0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38,
+	0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b,
+	0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
+	0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3,
+	0x63, 0xcf, 0x24, 0xb7, 0xae, 0xef, 0xab, 0xae, 0xfa, 0xea, 0xeb, 0x9a, 0x01, 0x50, 0x62, 0xa9,
+	0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f,
+	0x9b, 0x53, 0xc1, 0x4e, 0x7e, 0x83, 0xd6, 0xb7, 0xa9, 0x56, 0x77, 0xf8, 0x19, 0x04, 0x57, 0x77,
+	0x19, 0x71, 0x6f, 0xec, 0x4d, 0x07, 0xc7, 0x7b, 0x47, 0xc5, 0xad, 0x23, 0x4b, 0x1a, 0xe2, 0x24,
+	0xb8, 0xff, 0xe7, 0x93, 0xc6, 0xdc, 0x26, 0x21, 0x87, 0xe0, 0x8a, 0x54, 0xc2, 0xfd, 0xb1, 0x37,
+	0x0d, 0x36, 0x0c, 0xa9, 0x04, 0xf7, 0xa1, 0x75, 0x91, 0x46, 0xf4, 0x8e, 0x37, 0x2b, 0x54, 0x01,
+	0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd, 0x03,
+	0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0, 0x42,
+	0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xa1, 0x28, 0xdc, 0x2a, 0x3a, 0x95, 0xe9, 0xf2, 0xb5, 0x21,
+	0x5c, 0xf1, 0xde, 0xa2, 0x04, 0x4c, 0xf3, 0x95, 0x6d, 0x5e, 0xd5, 0x55, 0x40, 0x46, 0xb2, 0x36,
+	0x92, 0xab, 0xba, 0x2c, 0x32, 0xf9, 0x01, 0xba, 0xa5, 0x02, 0x23, 0xd1, 0x28, 0xb0, 0x3d, 0xfb,
+	0x73, 0x7b, 0xc6, 0xaf, 0xa1, 0x9b, 0x38, 0x65, 0xb6, 0x70, 0x78, 0xcc, 0x4b, 0x2d, 0x8f, 0x95,
+	0xbb, 0xba, 0x9b, 0xfc, 0xc9, 0x5f, 0x4d, 0xe8, 0xcc, 0x28, 0xcf, 0x45, 0x4c, 0xf8, 0x1c, 0x02,
+	0xbd, 0x75, 0xf8, 0x59, 0x59, 0xc3, 0xd1, 0x55, 0x8f, 0x4d, 0x1a, 0x0e, 0xc1, 0xd7, 0xb2, 0x36,
+	0x89, 0xaf, 0xa5, 0x19, 0x63, 0xa9, 0xe4, 0xa3, 0x31, 0x0c, 0xb2, 0x19, 0x30, 0x78, 0x3c, 0x20,
+	0x8e, 0xa0, 0x73, 0x2b, 0x63, 0xfb, 0x60, 0xad, 0x0a, 0x59, 0x82, 0x5b, 0xdb, 0xda, 0x4f, 0x6d,
+	0x7b, 0x0e, 0x1d, 0x4a, 0xb5, 0x5a, 0x51, 0xce, 0x3b, 0xe3, 0xe6, 0x34, 0x3c, 0xde, 0xa9, 0x6d,
+	0x46, 0x59, 0xca, 0xe5, 0xe0, 0x01, 0xb4, 0x17, 0x32, 0x49, 0x56, 0x9a, 0x77, 0x2b, 0xb5, 0x1c,
+	0x86, 0xc7, 0xd0, 0xcd, 0x9d, 0x63, 0xbc, 0x67, 0x9d, 0x64, 0x8f, 0x9d, 0x2c, 0x1d, 0x2c, 0xf3,
+	0x4c, 0x45, 0x45, 0x3f, 0xd3, 0x42, 0x73, 0x18, 0x7b, 0xd3, 0x6e, 0x59, 0xb1, 0xc0, 0xf0, 0x53,
+	0x80, 0xe2, 0x74, 0xbe, 0x4a, 0x35, 0x0f, 0x2b, 0x3d, 0x2b, 0x38, 0x72, 0xe8, 0x2c, 0x64, 0xaa,
+	0xe9, 0x9d, 0xe6, 0x7d, 0xfb, 0xb0, 0x65, 0x38, 0xf9, 0x11, 0x7a, 0xe7, 0x42, 0x45, 0xc5, 0xfa,
+	0x94, 0x0e, 0x7a, 0x4f, 0x1c, 0xe4, 0x10, 0xbc, 0x95, 0x9a, 0xea, 0xfb, 0x6e, 0x90, 0xca, 0xc0,
+	0xcd, 0xa7, 0x03, 0x4f, 0xbe, 0x81, 0xde, 0x66, 0x5d, 0x71, 0x08, 0xad, 0x54, 0x46, 0x94, 0x73,
+	0x6f, 0xdc, 0x9c, 0x06, 0xf3, 0x22, 0xc0, 0x7d, 0xe8, 0xde, 0x92, 0x50, 0x29, 0xa9, 0x9c, 0xfb,
+	0x96, 0xd8, 0xc4, 0x93, 0x3f, 0x3c, 0x00, 0x73, 0xff, 0xf4, 0x46, 0xa4, 0xb1, 0xdd, 0x88, 0x8b,
+	0xb3, 0x9a, 0x3a, 0xff, 0xe2, 0x0c, 0xbf, 0x70, 0x1f, 0xae, 0x6f, 0xd7, 0xea, 0xe3, 0xea, 0x67,
+	0x52, 0xdc, 0x7b, 0xf2, 0xf5, 0x1e, 0x40, 0xfb, 0x52, 0x46, 0x74, 0x71, 0x56, 0xd7, 0x5c, 0x60,
+	0xc6, 0xac, 0x53, 0x67, 0x56, 0xf1, 0xa1, 0x96, 0xe1, 0xe1, 0x97, 0xd0, 0xdb, 0xfc, 0x0e, 0x70,
+	0x17, 0x42, 0x1b, 0x5c, 0x4a, 0x95, 0x88, 0x5b, 0xd6, 0xc0, 0x67, 0xb0, 0x6b, 0x81, 0x6d, 0x63,
+	0xe6, 0x1d, 0xfe, 0xed, 0x43, 0x58, 0x59, 0x70, 0x04, 0x68, 0xcf, 0xf2, 0xf8, 0x7c, 0x9d, 0xb1,
+	0x06, 0x86, 0xd0, 0x99, 0xe5, 0xf1, 0x09, 0x09, 0xcd, 0x3c, 0x17, 0xbc, 0x52, 0x32, 0x63, 0xbe,
+	0xcb, 0x7a, 0x91, 0x65, 0xac, 0x89, 0x03, 0x80, 0xe2, 0x3c, 0xa7, 0x3c, 0x63, 0x81, 0x4b, 0xfc,
+	0x5e, 0x6a, 0x62, 0x2d, 0x23, 0xc2, 0x05, 0x96, 0x6d, 0x3b, 0xd6, 0x2c, 0x13, 0xeb, 0x20, 0x83,
+	0xbe, 0x69, 0x46, 0x42, 0xe9, 0x6b, 0xd3, 0xa5, 0x8b, 0x43, 0x60, 0x55, 0xc4, 0x5e, 0xea, 0x21,
+	0xc2, 0x60, 0x96, 0xc7, 0x6f, 0x52, 0x45, 0x62, 0x71, 0x23, 0xae, 0x6f, 0x89, 0x01, 0xee, 0xc1,
+	0x8e, 0x2b, 0x64, 0x1e, 0x6f, 0x9d, 0xb3, 0xd0, 0xa5, 0x9d, 0xde, 0xd0, 0xe2, 0x97, 0xef, 0xd6,
+	0x52, 0xad, 0x13, 0xd6, 0xc7, 0x8f, 0x60, 0x6f, 0x96, 0xc7, 0x57, 0x4a, 0xa4, 0xf9, 0x92, 0xd4,
+	0x4b, 0x12, 0x11, 0x29, 0xb6, 0xe3, 0x6e, 0x5f, 0xad, 0x12, 0x92, 0x6b, 0x7d, 0x29, 0x7f, 0x65,
+	0x03, 0x27, 0x66, 0x4e, 0x22, 0xb2, 0x3f, 0x43, 0xb6, 0xeb, 0xc4, 0x6c, 0x10, 0x2b, 0x86, 0xb9,
+	0x79, 0x5f, 0x29, 0xb2, 0x23, 0xee, 0xb9, 0xae, 0x2e, 0xb6, 0x39, 0x78, 0x78, 0x07, 0x83, 0xfa,
+	0xf3, 0x1a, 0x1d, 0x5b, 0xe4, 0x45, 0x14, 0x99, 0xb7, 0x64, 0x0d, 0xe4, 0x30, 0xdc, 0xc2, 0x73,
+	0x4a, 0xe4, 0x5b, 0xb2, 0x8c, 0x57, 0x67, 0xde, 0x64, 0x91, 0xd0, 0x05, 0xe3, 0xe3, 0x01, 0xf0,
+	0x5a, 0xa9, 0x97, 0xc5, 0x36, 0x5a, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f, 0x8c,
+	0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf, 0xa8,
+	0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x86, 0x52, 0x5b, 0xe0, 0x74, 0x06, 0x00, 0x00,
 }
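
The hunk above teaches the generated ConfState unmarshaler to accept its repeated uint64 fields in both protobuf encodings: wire type 0 (one varint per element) and wire type 2 (a length-delimited packed run of varints). Serializers commonly emit the packed form, so a parser handling only wire type 0 would reject otherwise valid payloads. As a minimal, self-contained sketch of the same base-128 varint loop (readUvarint is an illustrative helper, not the generated code):

```go
package main

import (
	"errors"
	"fmt"
)

// readUvarint decodes one base-128 varint from b starting at offset i,
// mirroring the 7-bits-per-byte shift loop in the generated Unmarshal code.
func readUvarint(b []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(b) {
			return 0, i, errors.New("unexpected EOF")
		}
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // high bit clear: last byte of this varint
			return v, i, nil
		}
	}
	return 0, i, errors.New("varint overflows 64 bits")
}

func main() {
	// Body of a packed "repeated uint64" field: the values 1, 300, 7
	// back to back (300 encodes as 0xAC 0x02).
	packed := []byte{0x01, 0xAC, 0x02, 0x07}
	var nodes []uint64
	for i := 0; i < len(packed); {
		v, next, err := readUvarint(packed, i)
		if err != nil {
			panic(err)
		}
		nodes = append(nodes, v)
		i = next
	}
	fmt.Println(nodes) // [1 300 7]
}
```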

+ 6 - 4
vendor/github.com/coreos/etcd/raft/raftpb/raft.proto

@@ -76,13 +76,15 @@ message HardState {
 }
 
 message ConfState {
-	repeated uint64 nodes = 1;
+	repeated uint64 nodes    = 1;
+	repeated uint64 learners = 2;
 }
 
 enum ConfChangeType {
-	ConfChangeAddNode    = 0;
-	ConfChangeRemoveNode = 1;
-	ConfChangeUpdateNode = 2;
+	ConfChangeAddNode        = 0;
+	ConfChangeRemoveNode     = 1;
+	ConfChangeUpdateNode     = 2;
+	ConfChangeAddLearnerNode = 3;
 }
 
 message ConfChange {
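
The new ConfChangeAddLearnerNode value is how callers propose a non-voting member: a learner receives the log but does not count towards quorum until promoted with ConfChangeAddNode. A hedged sketch of building such a proposal (the IDs are illustrative, and the cluster wiring that would hand this to Node.ProposeConfChange is omitted):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft/raftpb"
)

func main() {
	// Propose adding node 4 as a learner.
	cc := raftpb.ConfChange{
		ID:     1,
		Type:   raftpb.ConfChangeAddLearnerNode,
		NodeID: 4,
	}
	data, err := cc.Marshal()
	if err != nil {
		panic(err)
	}
	// In a running cluster this payload travels through the log and is
	// eventually applied via (Raw)Node.ApplyConfChange, as shown in the
	// rawnode.go hunk below.
	fmt.Printf("ConfChange payload: %d bytes\n", len(data))
}
```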

+ 3 - 1
vendor/github.com/coreos/etcd/raft/rawnode.go

@@ -175,6 +175,8 @@ func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
 	switch cc.Type {
 	case pb.ConfChangeAddNode:
 		rn.raft.addNode(cc.NodeID)
+	case pb.ConfChangeAddLearnerNode:
+		rn.raft.addLearner(cc.NodeID)
 	case pb.ConfChangeRemoveNode:
 		rn.raft.removeNode(cc.NodeID)
 	case pb.ConfChangeUpdateNode:
@@ -191,7 +193,7 @@ func (rn *RawNode) Step(m pb.Message) error {
 	if IsLocalMsg(m.Type) {
 		return ErrStepLocalMsg
 	}
-	if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m.Type) {
+	if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
 		return rn.raft.Step(m)
 	}
 	return ErrStepPeerNotFound
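
The revised Step guard goes through getProgress, a helper that lives in raft.go (changed elsewhere in this diff), so responses from learners are no longer dropped as coming from unknown peers. A standalone sketch of the lookup it is assumed to perform, with stand-in types:

```go
package main

import "fmt"

// Progress stands in for raft.Progress; prs and learnerPrs mirror the two
// progress maps that getProgress is assumed to consult in turn.
type Progress struct{ Match, Next uint64 }

type raft struct {
	prs        map[uint64]*Progress // voters
	learnerPrs map[uint64]*Progress // learners
}

func (r *raft) getProgress(id uint64) *Progress {
	if pr, ok := r.prs[id]; ok {
		return pr
	}
	return r.learnerPrs[id] // nil if id is unknown
}

func main() {
	r := &raft{
		prs:        map[uint64]*Progress{1: {Match: 10, Next: 11}},
		learnerPrs: map[uint64]*Progress{4: {Match: 8, Next: 9}},
	}
	// Voter 1 and learner 4 are both found; unknown peer 9 is not.
	fmt.Println(r.getProgress(1) != nil, r.getProgress(4) != nil, r.getProgress(9) != nil)
}
```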

+ 1 - 1
vendor/github.com/coreos/etcd/raft/read_only.go

@@ -18,7 +18,7 @@ import pb "github.com/coreos/etcd/raft/raftpb"
 
 // ReadState provides state for read only query.
 // It's caller's responsibility to call ReadIndex first before getting
-// this state from ready, It's also caller's duty to differentiate if this
+// this state from ready, it's also caller's duty to differentiate if this
 // state is what it requests through RequestCtx, eg. given a unique id as
 // RequestCtx
 type ReadState struct {

+ 17 - 5
vendor/github.com/coreos/etcd/raft/status.go

@@ -28,11 +28,17 @@ type Status struct {
 
 	Applied  uint64
 	Progress map[uint64]Progress
+
+	LeadTransferee uint64
 }
 
 // getStatus gets a copy of the current raft status.
 func getStatus(r *raft) Status {
-	s := Status{ID: r.id}
+	s := Status{
+		ID:             r.id,
+		LeadTransferee: r.leadTransferee,
+	}
+
 	s.HardState = r.hardState()
 	s.SoftState = *r.softState()
 
@@ -43,6 +49,10 @@ func getStatus(r *raft) Status {
 		for id, p := range r.prs {
 			s.Progress[id] = *p
 		}
+
+		for id, p := range r.learnerPrs {
+			s.Progress[id] = *p
+		}
 	}
 
 	return s
@@ -51,19 +61,21 @@ func getStatus(r *raft) Status {
 // MarshalJSON translates the raft status into JSON.
 // TODO: try to simplify this by introducing ID type into raft
 func (s Status) MarshalJSON() ([]byte, error) {
-	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`,
-		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState)
+	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
+		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
 
 	if len(s.Progress) == 0 {
-		j += "}}"
+		j += "},"
 	} else {
 		for k, v := range s.Progress {
 			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
 			j += subj
 		}
 		// remove the trailing ","
-		j = j[:len(j)-1] + "}}"
+		j = j[:len(j)-1] + "},"
 	}
+
+	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
 	return []byte(j), nil
 }
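
With Applied and LeadTransferee included, the hand-rolled JSON keeps its trailing-comma bookkeeping but now closes with the "leadtransferee" field instead of the progress map. A self-contained sketch that reproduces the output shape with illustrative values:

```go
package main

import "fmt"

func main() {
	// Mirrors the string assembly in the revised Status.MarshalJSON.
	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
		uint64(1), 2, uint64(1), 5, uint64(1), "StateLeader", 5)
	j += fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`,
		uint64(2), 5, 6, "ProgressStateReplicate")
	j = j[:len(j)-1] + "}," // drop the trailing comma, close "progress"
	j += fmt.Sprintf(`"leadtransferee":"%x"}`, uint64(0))
	fmt.Println(j)
}
```

Running this prints one line of valid JSON ending in `"leadtransferee":"0"}`; keeping that well-formed is exactly why the `len(s.Progress) == 0` branch now emits `"},"` instead of `"}}"`.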
 

+ 3 - 20
vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go

@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: snap.proto
-// DO NOT EDIT!
 
 /*
 	Package snappb is a generated protocol buffer package.
@@ -20,6 +19,8 @@ import (
 
 	math "math"
 
+	_ "github.com/gogo/protobuf/gogoproto"
+
 	io "io"
 )
 
@@ -78,24 +79,6 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
-func encodeFixed64Snap(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Snap(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintSnap(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)

+ 1 - 1
vendor/github.com/coreos/etcd/version/version.go

@@ -26,7 +26,7 @@ import (
 var (
 	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
 	MinClusterVersion = "3.0.0"
-	Version           = "3.2.1"
+	Version           = "3.3.9"
 	APIVersion        = "unknown"
 
 	// Git SHA Value will be set during build

+ 6 - 3
vendor/github.com/coreos/etcd/wal/decoder.go

@@ -29,6 +29,9 @@ import (
 
 const minSectorSize = 512
 
+// frameSizeBytes is frame size in bytes, including record size and padding size.
+const frameSizeBytes = 8
+
 type decoder struct {
 	mu  sync.Mutex
 	brs []*bufio.Reader
@@ -104,7 +107,7 @@ func (d *decoder) decodeRecord(rec *walpb.Record) error {
 		}
 	}
 	// record decoded as valid; point last valid offset to end of record
-	d.lastValidOff += recBytes + padBytes + 8
+	d.lastValidOff += frameSizeBytes + recBytes + padBytes
 	return nil
 }
 
@@ -116,7 +119,7 @@ func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
 		// padding is stored in lower 3 bits of length MSB
 		padBytes = int64((uint64(lenField) >> 56) & 0x7)
 	}
-	return
+	return recBytes, padBytes
 }
 
 // isTornEntry determines whether the last entry of the WAL was partially written
@@ -126,7 +129,7 @@ func (d *decoder) isTornEntry(data []byte) bool {
 		return false
 	}
 
-	fileOff := d.lastValidOff + 8
+	fileOff := d.lastValidOff + frameSizeBytes
 	curOff := 0
 	chunks := [][]byte{}
 	// split data on sector boundaries

+ 1 - 1
vendor/github.com/coreos/etcd/wal/encoder.go

@@ -103,7 +103,7 @@ func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
 	if padBytes != 0 {
 		lenField |= uint64(0x80|padBytes) << 56
 	}
-	return
+	return lenField, padBytes
 }
 
 func (e *encoder) flush() error {
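
Both framing helpers now use explicit named returns, and the decoder derives offsets from the shared frameSizeBytes constant instead of a bare 8. The frame layout itself is unchanged: the 8-byte length field keeps the record size in its low 56 bits and, when the record needs padding to stay 8-byte aligned, sets the most significant bit and stores the pad width (0 to 7) in the next three bits. A self-contained round trip of the two helpers (logic copied from the hunks above; main is illustrative):

```go
package main

import "fmt"

func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
	lenField = uint64(dataBytes)
	// force 8-byte alignment so a length field is never torn across sectors
	padBytes = (8 - (dataBytes % 8)) % 8
	if padBytes != 0 {
		lenField |= uint64(0x80|padBytes) << 56
	}
	return lenField, padBytes
}

func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
	// the record size is stored in the lower 56 bits of the 64-bit length
	recBytes = int64(uint64(lenField) &^ (uint64(0xff) << 56))
	// MSB set means padding; its width sits in the lower 3 bits of the top byte
	if lenField < 0 {
		padBytes = int64((uint64(lenField) >> 56) & 0x7)
	}
	return recBytes, padBytes
}

func main() {
	lenField, pad := encodeFrameSize(13) // a 13-byte record needs 3 pad bytes
	rec, pad2 := decodeFrameSize(int64(lenField))
	fmt.Println(rec, pad, pad2) // 13 3 3
}
```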

+ 1 - 1
vendor/github.com/coreos/etcd/wal/file_pipeline.go

@@ -55,7 +55,7 @@ func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
 	case f = <-fp.filec:
 	case err = <-fp.errc:
 	}
-	return
+	return f, err
 }
 
 func (fp *filePipeline) Close() error {

+ 48 - 1
vendor/github.com/coreos/etcd/wal/wal.go

@@ -157,6 +157,48 @@ func Create(dirpath string, metadata []byte) (*WAL, error) {
 	return w, nil
 }
 
+func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
+	if err := os.RemoveAll(w.dir); err != nil {
+		return nil, err
+	}
+	// On non-Windows platforms, hold the lock while renaming. Releasing
+	// the lock and trying to reacquire it quickly can be flaky because
+	// it's possible the process will fork to spawn a process while this is
+	// happening. The fds are set up as close-on-exec by the Go runtime,
+	// but there is a window between the fork and the exec where another
+	// process holds the lock.
+	if err := os.Rename(tmpdirpath, w.dir); err != nil {
+		if _, ok := err.(*os.LinkError); ok {
+			return w.renameWalUnlock(tmpdirpath)
+		}
+		return nil, err
+	}
+	w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
+	df, err := fileutil.OpenDir(w.dir)
+	w.dirFile = df
+	return w, err
+}
+
+func (w *WAL) renameWalUnlock(tmpdirpath string) (*WAL, error) {
+	// rename of directory with locked files doesn't work on windows/cifs;
+	// close the WAL to release the locks so the directory can be renamed.
+	plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir)
+	w.Close()
+	if err := os.Rename(tmpdirpath, w.dir); err != nil {
+		return nil, err
+	}
+	// reopen and relock
+	newWAL, oerr := Open(w.dir, walpb.Snapshot{})
+	if oerr != nil {
+		return nil, oerr
+	}
+	if _, _, _, err := newWAL.ReadAll(); err != nil {
+		newWAL.Close()
+		return nil, err
+	}
+	return newWAL, nil
+}
+
 // Open opens the WAL at the given snap.
 // The snap SHOULD have been previously saved to the WAL, or the following
 // ReadAll will fail.
@@ -413,6 +455,7 @@ func (w *WAL) cut() error {
 		return err
 	}
 
+	// reopen newTail with its new path so calls to Name() match the wal filename format
 	newTail.Close()
 
 	if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
@@ -460,6 +503,10 @@ func (w *WAL) ReleaseLockTo(index uint64) error {
 	w.mu.Lock()
 	defer w.mu.Unlock()
 
+	if len(w.locks) == 0 {
+		return nil
+	}
+
 	var smaller int
 	found := false
 
@@ -477,7 +524,7 @@ func (w *WAL) ReleaseLockTo(index uint64) error {
 
 	// if no lock index is greater than the release index, we can
 	// release lock up to the last one(excluding).
-	if !found && len(w.locks) != 0 {
+	if !found {
 		smaller = len(w.locks) - 1
 	}
 

+ 0 - 44
vendor/github.com/coreos/etcd/wal/wal_unix.go

@@ -1,44 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package wal
-
-import (
-	"os"
-
-	"github.com/coreos/etcd/pkg/fileutil"
-)
-
-func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
-	// On non-Windows platforms, hold the lock while renaming. Releasing
-	// the lock and trying to reacquire it quickly can be flaky because
-	// it's possible the process will fork to spawn a process while this is
-	// happening. The fds are set up as close-on-exec by the Go runtime,
-	// but there is a window between the fork and the exec where another
-	// process holds the lock.
-
-	if err := os.RemoveAll(w.dir); err != nil {
-		return nil, err
-	}
-	if err := os.Rename(tmpdirpath, w.dir); err != nil {
-		return nil, err
-	}
-
-	w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
-	df, err := fileutil.OpenDir(w.dir)
-	w.dirFile = df
-	return w, err
-}

+ 0 - 41
vendor/github.com/coreos/etcd/wal/wal_windows.go

@@ -1,41 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
-	"os"
-
-	"github.com/coreos/etcd/wal/walpb"
-)
-
-func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
-	// rename of directory with locked files doesn't work on
-	// windows; close the WAL to release the locks so the directory
-	// can be renamed
-	w.Close()
-	if err := os.Rename(tmpdirpath, w.dir); err != nil {
-		return nil, err
-	}
-	// reopen and relock
-	newWAL, oerr := Open(w.dir, walpb.Snapshot{})
-	if oerr != nil {
-		return nil, oerr
-	}
-	if _, _, _, err := newWAL.ReadAll(); err != nil {
-		newWAL.Close()
-		return nil, err
-	}
-	return newWAL, nil
-}

+ 3 - 20
vendor/github.com/coreos/etcd/wal/walpb/record.pb.go

@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: record.proto
-// DO NOT EDIT!
 
 /*
 	Package walpb is a generated protocol buffer package.
@@ -21,6 +20,8 @@ import (
 
 	math "math"
 
+	_ "github.com/gogo/protobuf/gogoproto"
+
 	io "io"
 )
 
@@ -122,24 +123,6 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
-func encodeFixed64Record(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Record(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintRecord(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)

+ 0 - 5
vendor/github.com/prometheus/client_golang/NOTICE

@@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
 
 The following components are included in this product:
 
-goautoneg
-http://bitbucket.org/ww/goautoneg
-Copyright 2011, Open Knowledge Foundation Ltd.
-See README.txt for license details.
-
 perks - a fork of https://github.com/bmizerany/perks
 https://github.com/beorn7/perks
 Copyright 2013-2015 Blake Mizerany, Björn Rabenstein

+ 1 - 53
vendor/github.com/prometheus/client_golang/prometheus/README.md

@@ -1,53 +1 @@
-# Overview
-This is the [Prometheus](http://www.prometheus.io) telemetric
-instrumentation client [Go](http://golang.org) client library.  It
-enable authors to define process-space metrics for their servers and
-expose them through a web service interface for extraction,
-aggregation, and a whole slew of other post processing techniques.
-
-# Installing
-    $ go get github.com/prometheus/client_golang/prometheus
-
-# Example
-```go
-package main
-
-import (
-	"net/http"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-var (
-	indexed = prometheus.NewCounter(prometheus.CounterOpts{
-		Namespace: "my_company",
-		Subsystem: "indexer",
-		Name:      "documents_indexed",
-		Help:      "The number of documents indexed.",
-	})
-	size = prometheus.NewGauge(prometheus.GaugeOpts{
-		Namespace: "my_company",
-		Subsystem: "storage",
-		Name:      "documents_total_size_bytes",
-		Help:      "The total size of all documents in the storage.",
-	})
-)
-
-func main() {
-	http.Handle("/metrics", prometheus.Handler())
-
-	indexed.Inc()
-	size.Set(5)
-
-	http.ListenAndServe(":8080", nil)
-}
-
-func init() {
-	prometheus.MustRegister(indexed)
-	prometheus.MustRegister(size)
-}
-```
-
-# Documentation
-
-[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).

+ 26 - 26
vendor/github.com/prometheus/client_golang/prometheus/collector.go

@@ -15,15 +15,15 @@ package prometheus
 
 // Collector is the interface implemented by anything that can be used by
 // Prometheus to collect metrics. A Collector has to be registered for
-// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+// collection. See Registerer.Register.
 //
-// The stock metrics provided by this package (like Gauge, Counter, Summary) are
-// also Collectors (which only ever collect one metric, namely itself). An
-// implementer of Collector may, however, collect multiple metrics in a
-// coordinated fashion and/or create metrics on the fly. Examples for collectors
-// already implemented in this library are the metric vectors (i.e. collection
-// of multiple instances of the same Metric but with different label values)
-// like GaugeVec or SummaryVec, and the ExpvarCollector.
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
 type Collector interface {
 	// Describe sends the super-set of all possible descriptors of metrics
 	// collected by this Collector to the provided channel and returns once
@@ -37,39 +37,39 @@ type Collector interface {
 	// executing this method, it must send an invalid descriptor (created
 	// with NewInvalidDesc) to signal the error to the registry.
 	Describe(chan<- *Desc)
-	// Collect is called by Prometheus when collecting metrics. The
-	// implementation sends each collected metric via the provided channel
-	// and returns once the last metric has been sent. The descriptor of
-	// each sent metric is one of those returned by Describe. Returned
-	// metrics that share the same descriptor must differ in their variable
-	// label values. This method may be called concurrently and must
-	// therefore be implemented in a concurrency safe way. Blocking occurs
-	// at the expense of total performance of rendering all registered
-	// metrics. Ideally, Collector implementations support concurrent
-	// readers.
+	// Collect is called by the Prometheus registry when collecting
+	// metrics. The implementation sends each collected metric via the
+	// provided channel and returns once the last metric has been sent. The
+	// descriptor of each sent metric is one of those returned by
+	// Describe. Returned metrics that share the same descriptor must differ
+	// in their variable label values. This method may be called
+	// concurrently and must therefore be implemented in a concurrency safe
+	// way. Blocking occurs at the expense of total performance of rendering
+	// all registered metrics. Ideally, Collector implementations support
+	// concurrent readers.
 	Collect(chan<- Metric)
 }
 
-// SelfCollector implements Collector for a single Metric so that that the
-// Metric collects itself. Add it as an anonymous field to a struct that
-// implements Metric, and call Init with the Metric itself as an argument.
-type SelfCollector struct {
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
 	self Metric
 }
 
-// Init provides the SelfCollector with a reference to the metric it is supposed
+// init provides the selfCollector with a reference to the metric it is supposed
 // to collect. It is usually called within the factory function to create a
 // metric. See example.
-func (c *SelfCollector) Init(self Metric) {
+func (c *selfCollector) init(self Metric) {
 	c.self = self
 }
 
 // Describe implements Collector.
-func (c *SelfCollector) Describe(ch chan<- *Desc) {
+func (c *selfCollector) Describe(ch chan<- *Desc) {
 	ch <- c.self.Desc()
 }
 
 // Collect implements Collector.
-func (c *SelfCollector) Collect(ch chan<- Metric) {
+func (c *selfCollector) Collect(ch chan<- Metric) {
 	ch <- c.self
 }

+ 14 - 15
vendor/github.com/prometheus/client_golang/prometheus/counter.go

@@ -35,6 +35,9 @@ type Counter interface {
 	// Prometheus metric. Do not use it for regular handling of a
 	// Prometheus counter (as it can be used to break the contract of
 	// monotonically increasing values).
+	//
+	// Deprecated: Use NewConstMetric to create a counter for an external
+	// value. A Counter should never be set.
 	Set(float64)
 	// Inc increments the counter by 1.
 	Inc()
@@ -55,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter {
 		opts.ConstLabels,
 	)
 	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
-	result.Init(result) // Init self-collection.
+	result.init(result) // Init self-collection.
 	return result
 }
 
@@ -79,7 +82,7 @@ func (c *counter) Add(v float64) {
 // CounterVec embeds MetricVec. See there for a full list of methods with
 // detailed documentation.
 type CounterVec struct {
-	MetricVec
+	*MetricVec
 }
 
 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@@ -93,19 +96,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 		opts.ConstLabels,
 	)
 	return &CounterVec{
-		MetricVec: MetricVec{
-			children: map[uint64]Metric{},
-			desc:     desc,
-			newMetric: func(lvs ...string) Metric {
-				result := &counter{value: value{
-					desc:       desc,
-					valType:    CounterValue,
-					labelPairs: makeLabelPairs(desc, lvs),
-				}}
-				result.Init(result) // Init self-collection.
-				return result
-			},
-		},
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			result := &counter{value: value{
+				desc:       desc,
+				valType:    CounterValue,
+				labelPairs: makeLabelPairs(desc, lvs),
+			}}
+			result.init(result) // Init self-collection.
+			return result
+		}),
 	}
 }
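
The newMetricVec constructor centralizes the children map and descriptor setup that NewCounterVec (and, below, NewGaugeVec and NewHistogramVec) previously spelled out inline; MetricVec becoming a pointer field is what makes the shared constructor workable. None of this is visible at the call site. For reference, a minimal usage sketch (metric name and label are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests served.",
		},
		[]string{"method"},
	)
	prometheus.MustRegister(requests)
	requests.WithLabelValues("GET").Inc() // one "GET" request observed
	fmt.Println("counter vector registered and incremented")
}
```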
 

+ 13 - 0
vendor/github.com/prometheus/client_golang/prometheus/desc.go

@@ -1,3 +1,16 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package prometheus
 
 import (

+ 131 - 61
vendor/github.com/prometheus/client_golang/prometheus/doc.go

@@ -11,18 +11,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package prometheus provides embeddable metric primitives for servers and
-// standardized exposition of telemetry through a web services interface.
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow to
+// expose the registered metrics via HTTP (package promhttp) or push them to a
+// Pushgateway (package push).
 //
 // All exported functions and methods are safe to be used concurrently unless
-// specified otherwise.
+//specified otherwise.
 //
-// To expose metrics registered with the Prometheus registry, an HTTP server
-// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+// A Basic Example
 //
-//     http.Handle("/metrics", prometheus.Handler())
-//
-// As a starting point a very basic usage example:
+// As a starting point, a very basic usage example:
 //
 //    package main
 //
@@ -30,6 +29,7 @@
 //    	"net/http"
 //
 //    	"github.com/prometheus/client_golang/prometheus"
+//    	"github.com/prometheus/client_golang/prometheus/promhttp"
 //    )
 //
 //    var (
@@ -37,75 +37,145 @@
 //    		Name: "cpu_temperature_celsius",
 //    		Help: "Current temperature of the CPU.",
 //    	})
-//    	hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
-//    		Name: "hd_errors_total",
-//    		Help: "Number of hard-disk errors.",
-//    	})
+//    	hdFailures = prometheus.NewCounterVec(
+//    		prometheus.CounterOpts{
+//    			Name: "hd_errors_total",
+//    			Help: "Number of hard-disk errors.",
+//    		},
+//    		[]string{"device"},
+//    	)
 //    )
 //
 //    func init() {
+//    	// Metrics have to be registered to be exposed:
 //    	prometheus.MustRegister(cpuTemp)
 //    	prometheus.MustRegister(hdFailures)
 //    }
 //
 //    func main() {
 //    	cpuTemp.Set(65.3)
-//    	hdFailures.Inc()
+//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
 //
-//    	http.Handle("/metrics", prometheus.Handler())
+//    	// The Handler function provides a default handler to expose metrics
+//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
+//    	http.Handle("/metrics", promhttp.Handler())
 //    	http.ListenAndServe(":8080", nil)
 //    }
 //
 //
-// This is a complete program that exports two metrics, a Gauge and a Counter.
-// It also exports some stats about the HTTP usage of the /metrics
-// endpoint. (See the Handler function for more detail.)
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
 //
-// Two more advanced metric types are the Summary and Histogram. A more
-// thorough description of metric types can be found in the prometheus docs:
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
 // https://prometheus.io/docs/concepts/metric_types/
 //
-// In addition to the fundamental metric types Gauge, Counter, Summary, and
-// Histogram, a very important part of the Prometheus data model is the
-// partitioning of samples along dimensions called labels, which results in
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
 // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec.
-//
-// Those are all the parts needed for basic usage. Detailed documentation and
-// examples are provided below.
-//
-// Everything else this package offers is essentially for "power users" only. A
-// few pointers to "power user features":
-//
-// All the various ...Opts structs have a ConstLabels field for labels that
-// never change their value (which is only useful under special circumstances,
-// see documentation of the Opts type).
-//
-// The Untyped metric behaves like a Gauge, but signals the Prometheus server
-// not to assume anything about its type.
-//
-// Functions to fine-tune how the metric registry works: EnableCollectChecks,
-// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
-//
-// For custom metric collection, there are two entry points: Custom Metric
-// implementations and custom Collector implementations. A Metric is the
-// fundamental unit in the Prometheus data model: a sample at a point in time
-// together with its meta-data (like its fully-qualified name and any number of
-// pairs of label name and label value) that knows how to marshal itself into a
-// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
-// gets registered with the Prometheus registry and manages the collection of
-// one or more Metrics. Many parts of this package are building blocks for
-// Metrics and Collectors. Desc is the metric descriptor, actually used by all
-// metrics under the hood, and by Collectors to describe the Metrics to be
-// collected, but only to be dealt with by users if they implement their own
-// Metrics or Collectors. To create a Desc, the BuildFQName function will come
-// in handy. Other useful components for Metric and Collector implementation
-// include: LabelPairSorter to sort the DTO version of label pairs,
-// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
-// collection time, MetricVec to bundle custom Metrics into a metric vector
-// Collector, SelfCollector to make a custom Metric collect itself.
-//
-// A good example for a custom Collector is the ExpVarCollector included in this
-// package, which exports variables exported via the "expvar" package as
-// Prometheus metrics.
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
+// HistogramOpts, or UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. An own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might
+// cause. As suggested by the name, MustRegister panics if an error occurs. With
+// the Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data
+// model. Inconsistencies are ideally detected at registration time, not at
+// collect time. The former will usually be detected at start-up time of a
+// program, while the latter will only happen at scrape time, possibly not even
+// on the first scrape if the inconsistency only becomes relevant later. That is
+// the main reason why a Collector and a Metric have to describe themselves to
+// the registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in
+// the same way on a custom registry as the global functions Register and
+// Unregister on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries
+// with special properties, see NewPedanticRegistry. You can avoid global state,
+// as it is imposed by the DefaultRegistry. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp
+// sub-package. (The top-level functions in the prometheus package are
+// deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Function for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added. Sending metrics to
+// Graphite would be an example that will soon be implemented.
 package prometheus

+ 16 - 16
vendor/github.com/prometheus/client_golang/prometheus/expvar.go → vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go

@@ -18,21 +18,21 @@ import (
 	"expvar"
 )
 
-// ExpvarCollector collects metrics from the expvar interface. It provides a
-// quick way to expose numeric values that are already exported via expvar as
-// Prometheus metrics. Note that the data models of expvar and Prometheus are
-// fundamentally different, and that the ExpvarCollector is inherently
-// slow. Thus, the ExpvarCollector is probably great for experiments and
-// prototying, but you should seriously consider a more direct implementation of
-// Prometheus metrics for monitoring production systems.
-//
-// Use NewExpvarCollector to create new instances.
-type ExpvarCollector struct {
+type expvarCollector struct {
 	exports map[string]*Desc
 }
 
-// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
-// to be registered with the Prometheus registry.
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
 //
 // The exports map has the following meaning:
 //
@@ -59,21 +59,21 @@ type ExpvarCollector struct {
 // sample values.
 //
 // Anything that does not fit into the scheme above is silently ignored.
-func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
-	return &ExpvarCollector{
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
 		exports: exports,
 	}
 }
 
 // Describe implements Collector.
-func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
 	for _, desc := range e.exports {
 		ch <- desc
 	}
 }
 
 // Collect implements Collector.
-func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+func (e *expvarCollector) Collect(ch chan<- Metric) {
 	for name, desc := range e.exports {
 		var m Metric
 		expVar := expvar.Get(name)

+ 4 - 8
vendor/github.com/prometheus/client_golang/prometheus/gauge.go

@@ -58,7 +58,7 @@ func NewGauge(opts GaugeOpts) Gauge {
 // (e.g. number of operations queued, partitioned by user and operation
 // type). Create instances with NewGaugeVec.
 type GaugeVec struct {
-	MetricVec
+	*MetricVec
 }
 
 // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@@ -72,13 +72,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
 		opts.ConstLabels,
 	)
 	return &GaugeVec{
-		MetricVec: MetricVec{
-			children: map[uint64]Metric{},
-			desc:     desc,
-			newMetric: func(lvs ...string) Metric {
-				return newValue(desc, GaugeValue, 0, lvs...)
-			},
-		},
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newValue(desc, GaugeValue, 0, lvs...)
+		}),
 	}
 }
 

+ 1 - 1
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go

@@ -17,7 +17,7 @@ type goCollector struct {
 
 // NewGoCollector returns a collector which exports metrics about the current
 // go process.
-func NewGoCollector() *goCollector {
+func NewGoCollector() Collector {
 	return &goCollector{
 		goroutines: NewGauge(GaugeOpts{
 			Namespace: "go",

+ 10 - 14
vendor/github.com/prometheus/client_golang/prometheus/histogram.go

@@ -51,11 +51,11 @@ type Histogram interface {
 // bucket of a histogram ("le" -> "less or equal").
 const bucketLabel = "le"
 
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
 var (
-	// DefBuckets are the default Histogram buckets. The default buckets are
-	// tailored to broadly measure the response time (in seconds) of a
-	// network service. Most likely, however, you will be required to define
-	// buckets customized to your use case.
 	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
 
 	errBucketLabelNotAllowed = fmt.Errorf(
@@ -210,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	// Finally we know the final length of h.upperBounds and can make counts.
 	h.counts = make([]uint64, len(h.upperBounds))
 
-	h.Init(h) // Init self-collection.
+	h.init(h) // Init self-collection.
 	return h
 }
 
@@ -222,7 +222,7 @@ type histogram struct {
 	sumBits uint64
 	count   uint64
 
-	SelfCollector
+	selfCollector
 	// Note that there is no mutex required.
 
 	desc *Desc
@@ -287,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
 // (e.g. HTTP request latencies, partitioned by status code and method). Create
 // instances with NewHistogramVec.
 type HistogramVec struct {
-	MetricVec
+	*MetricVec
 }
 
 // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@@ -301,13 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
 		opts.ConstLabels,
 	)
 	return &HistogramVec{
-		MetricVec: MetricVec{
-			children: map[uint64]Metric{},
-			desc:     desc,
-			newMetric: func(lvs ...string) Metric {
-				return newHistogram(desc, opts, lvs...)
-			},
-		},
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newHistogram(desc, opts, lvs...)
+		}),
 	}
 }
 

+ 112 - 3
vendor/github.com/prometheus/client_golang/prometheus/http.go

@@ -15,14 +15,114 @@ package prometheus
 
 import (
 	"bufio"
+	"bytes"
+	"compress/gzip"
+	"fmt"
 	"io"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
+
+	"github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
 )
 
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+	buf := bufPool.Get()
+	if buf == nil {
+		return &bytes.Buffer{}
+	}
+	return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+	buf.Reset()
+	bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is non instrumented).
+func Handler() http.Handler {
+	return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		mfs, err := DefaultGatherer.Gather()
+		if err != nil {
+			http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		buf := getBuf()
+		defer giveBuf(buf)
+		writer, encoding := decorateWriter(req, buf)
+		enc := expfmt.NewEncoder(writer, contentType)
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+		if closer, ok := writer.(io.Closer); ok {
+			closer.Close()
+		}
+		if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		header := w.Header()
+		header.Set(contentTypeHeader, string(contentType))
+		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+		if encoding != "" {
+			header.Set(contentEncodingHeader, encoding)
+		}
+		w.Write(buf.Bytes())
+	})
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested.  It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+	header := request.Header.Get(acceptEncodingHeader)
+	parts := strings.Split(header, ",")
+	for _, part := range parts {
+		part := strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return gzip.NewWriter(writer), "gzip"
+		}
+	}
+	return writer, ""
+}
+
 var instLabels = []string{"method", "code"}
 
 type nower interface {
@@ -58,7 +158,7 @@ func nowSeries(t ...time.Time) nower {
 // value. http_requests_total is a metric vector partitioned by HTTP method
 // (label name "method") and HTTP status code (label name "code").
 //
-// Note that InstrumentHandler has several issues:
+// Deprecated: InstrumentHandler has several issues:
 //
 // - It uses Summaries rather than Histograms. Summaries are not useful if
 // aggregation across multiple instances is required.
@@ -73,8 +173,8 @@ func nowSeries(t ...time.Time) nower {
 // performing such writes.
 //
 // Upcoming versions of this package will provide ways of instrumenting HTTP
-// handlers that are more flexible and have fewer issues. Consider this function
-// DEPRECATED and prefer direct instrumentation in the meantime.
+// handlers that are more flexible and have fewer issues. Please prefer direct
+// instrumentation in the meantime.
 func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
 	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
 }
@@ -82,6 +182,9 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
 // InstrumentHandlerFunc wraps the given function for instrumentation. It
 // otherwise works in the same way as InstrumentHandler (and shares the same
 // issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is.
 func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
 	return InstrumentHandlerFuncWithOpts(
 		SummaryOpts{
@@ -117,6 +220,9 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
 // cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
 // and all its fields are set to the equally named fields in the provided
 // SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is.
 func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
 	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
 }
@@ -125,6 +231,9 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
 // the same issues) but provides more flexibility (at the cost of a more complex
 // call syntax). See InstrumentHandlerWithOpts for details how the provided
 // SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is.
 func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
 	reqCnt := NewCounterVec(
 		CounterOpts{
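
Everything HTTP-related in this file is now explicitly deprecated in favor of the promhttp sub-package, with this partial mirror kept only to avoid breakage. A minimal exposition endpoint following that recommendation might look like this (the port is illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promhttp.Handler serves the default registry without the
	// Summary-based instrumentation that makes InstrumentHandler
	// problematic to aggregate across instances.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```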

+ 17 - 17
vendor/github.com/prometheus/client_golang/prometheus/metric.go

@@ -22,10 +22,8 @@ import (
 const separatorByte byte = 255
 
 // A Metric models a single sample value with its meta data being exported to
-// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
-// Untyped, and Summary. Users can implement their own Metric types, but that
-// should be rarely needed. See the example for SelfCollector, which is also an
-// example for a user-implemented Metric.
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
 type Metric interface {
 	// Desc returns the descriptor for the Metric. This method idempotently
 	// returns the same descriptor throughout the lifetime of the
@@ -36,21 +34,23 @@ type Metric interface {
 	// Write encodes the Metric into a "Metric" Protocol Buffer data
 	// transmission object.
 	//
-	// Implementers of custom Metric types must observe concurrency safety
-	// as reads of this metric may occur at any time, and any blocking
-	// occurs at the expense of total performance of rendering all
-	// registered metrics. Ideally Metric implementations should support
-	// concurrent readers.
+	// Metric implementations must observe concurrency safety as reads of
+	// this metric may occur at any time, and any blocking occurs at the
+	// expense of total performance of rendering all registered
+	// metrics. Ideally, Metric implementations should support concurrent
+	// readers.
 	//
-	// The Prometheus client library attempts to minimize memory allocations
-	// and will provide a pre-existing reset dto.Metric pointer. Prometheus
-	// may recycle the dto.Metric proto message, so Metric implementations
-	// should just populate the provided dto.Metric and then should not keep
-	// any reference to it.
-	//
-	// While populating dto.Metric, labels must be sorted lexicographically.
-	// (Implementers may find LabelPairSorter useful for that.)
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. (Implementers may find
+	// LabelPairSorter useful for that.) Callers of Write should still make
+	// sure of sorting if they depend on it.
 	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
 }
 
 // Opts bundles the options for creating most Metric types. Each metric
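The narrowed interface comment still admits user-defined Metrics. A minimal sketch of a custom implementation satisfying the two-method contract (package and type names are hypothetical; dto is github.com/prometheus/client_model/go):

```go
package example

import (
	"github.com/golang/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// constMetric is a hypothetical write-once metric: Desc is idempotent, and
// Write only fills the provided dto.Metric, keeping no reference to it.
type constMetric struct {
	desc *prometheus.Desc
	val  float64
}

func (m constMetric) Desc() *prometheus.Desc { return m.desc }

func (m constMetric) Write(out *dto.Metric) error {
	out.Untyped = &dto.Untyped{Value: proto.Float64(m.val)}
	return nil
}

var _ prometheus.Metric = constMetric{} // compile-time interface check
```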

+ 2 - 2
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go

@@ -28,7 +28,7 @@ type processCollector struct {
 // NewProcessCollector returns a collector which exports the current state of
 // process metrics including cpu, memory and file descriptor usage as well as
 // the process start time for the given process id under the given namespace.
-func NewProcessCollector(pid int, namespace string) *processCollector {
+func NewProcessCollector(pid int, namespace string) Collector {
 	return NewProcessCollectorPIDFn(
 		func() (int, error) { return pid, nil },
 		namespace,
@@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
 func NewProcessCollectorPIDFn(
 	pidFn func() (int, error),
 	namespace string,
-) *processCollector {
+) Collector {
 	c := processCollector{
 		pidFn:     pidFn,
 		collectFn: func(chan<- Metric) {},
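Returning the Collector interface instead of the unexported concrete type does not change call sites, which only ever register the result. A sketch:

```go
package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Callers never needed the concrete *processCollector, only Collector.
	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))
}
```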

+ 0 - 65
vendor/github.com/prometheus/client_golang/prometheus/push.go

@@ -1,65 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-package prometheus
-
-// Push triggers a metric collection by the default registry and pushes all
-// collected metrics to the Pushgateway specified by url. See the Pushgateway
-// documentation for detailed implications of the job and instance
-// parameter. instance can be left empty. You can use just host:port or ip:port
-// as url, in which case 'http://' is added automatically. You can also include
-// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
-//
-// Note that all previously pushed metrics with the same job and instance will
-// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
-// to push to the Pushgateway.)
-func Push(job, instance, url string) error {
-	return defRegistry.Push(job, instance, url, "PUT")
-}
-
-// PushAdd works like Push, but only previously pushed metrics with the same
-// name (and the same job and instance) will be replaced. (It uses HTTP method
-// 'POST' to push to the Pushgateway.)
-func PushAdd(job, instance, url string) error {
-	return defRegistry.Push(job, instance, url, "POST")
-}
-
-// PushCollectors works like Push, but it does not collect from the default
-// registry. Instead, it collects from the provided collectors. It is a
-// convenient way to push only a few metrics.
-func PushCollectors(job, instance, url string, collectors ...Collector) error {
-	return pushCollectors(job, instance, url, "PUT", collectors...)
-}
-
-// PushAddCollectors works like PushAdd, but it does not collect from the
-// default registry. Instead, it collects from the provided collectors. It is a
-// convenient way to push only a few metrics.
-func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
-	return pushCollectors(job, instance, url, "POST", collectors...)
-}
-
-func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
-	r := newRegistry()
-	for _, collector := range collectors {
-		if _, err := r.Register(collector); err != nil {
-			return err
-		}
-	}
-	return r.Push(job, instance, url, method)
-}
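Pushgateway support is not gone, only removed from the core package. Assuming the standalone github.com/prometheus/client_golang/prometheus/push package that replaced these helpers (Pusher API as in later client_golang releases), the equivalent of the removed Push/PushCollectors looks roughly like this:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func main() {
	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "db_backup_last_completion_timestamp_seconds",
		Help: "The timestamp of the last successful DB backup.",
	})
	completionTime.SetToCurrentTime()
	// Like the removed PushCollectors, this uses HTTP PUT and replaces all
	// metrics previously pushed for this job.
	if err := push.New("http://pushgateway:9091", "db_backup").
		Collector(completionTime).
		Push(); err != nil {
		log.Fatal(err)
	}
}
```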

File diff suppressed because it is too large
+ 517 - 313
vendor/github.com/prometheus/client_golang/prometheus/registry.go


+ 12 - 16
vendor/github.com/prometheus/client_golang/prometheus/summary.go

@@ -53,8 +53,8 @@ type Summary interface {
 	Observe(float64)
 }
 
+// DefObjectives are the default Summary quantile values.
 var (
-	// DefObjectives are the default Summary quantile values.
 	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
 
 	errQuantileLabelNotAllowed = fmt.Errorf(
@@ -139,11 +139,11 @@ type SummaryOpts struct {
 	BufCap uint32
 }
 
-// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
-// method of perk/quantile is actually not working as advertised - and it might
-// be unfixable, as the underlying algorithm is apparently not capable of
-// merging summaries in the first place. To avoid using Merge, we are currently
-// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// perk/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
 // essentially multiplied by the number of age buckets. When rotating age
 // buckets, we empty the previous head stream. On scrape time, we simply take
 // the quantiles from the head stream (no merging required). Result: More effort
@@ -227,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 	}
 	sort.Float64s(s.sortedObjectives)
 
-	s.Init(s) // Init self-collection.
+	s.init(s) // Init self-collection.
 	return s
 }
 
 type summary struct {
-	SelfCollector
+	selfCollector
 
 	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
 	mtx    sync.Mutex // Protects every other moving part.
@@ -390,7 +390,7 @@ func (s quantSort) Less(i, j int) bool {
 // (e.g. HTTP request latencies, partitioned by status code and method). Create
 // instances with NewSummaryVec.
 type SummaryVec struct {
-	MetricVec
+	*MetricVec
 }
 
 // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@@ -404,13 +404,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
 		opts.ConstLabels,
 	)
 	return &SummaryVec{
-		MetricVec: MetricVec{
-			children: map[uint64]Metric{},
-			desc:     desc,
-			newMetric: func(lvs ...string) Metric {
-				return newSummary(desc, opts, lvs...)
-			},
-		},
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts, lvs...)
+		}),
 	}
 }
 

+ 4 - 8
vendor/github.com/prometheus/client_golang/prometheus/untyped.go

@@ -56,7 +56,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
 // labels. This is used if you want to count the same thing partitioned by
 // various dimensions. Create instances with NewUntypedVec.
 type UntypedVec struct {
-	MetricVec
+	*MetricVec
 }
 
 // NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
@@ -70,13 +70,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
 		opts.ConstLabels,
 	)
 	return &UntypedVec{
-		MetricVec: MetricVec{
-			children: map[uint64]Metric{},
-			desc:     desc,
-			newMetric: func(lvs ...string) Metric {
-				return newValue(desc, UntypedValue, 0, lvs...)
-			},
-		},
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newValue(desc, UntypedValue, 0, lvs...)
+		}),
 	}
 }
 

+ 4 - 4
vendor/github.com/prometheus/client_golang/prometheus/value.go

@@ -48,7 +48,7 @@ type value struct {
 	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	valBits uint64
 
-	SelfCollector
+	selfCollector
 
 	desc       *Desc
 	valType    ValueType
@@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
 		valBits:    math.Float64bits(val),
 		labelPairs: makeLabelPairs(desc, labelValues),
 	}
-	result.Init(result)
+	result.init(result)
 	return result
 }
 
@@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error {
 // library to back the implementations of CounterFunc, GaugeFunc, and
 // UntypedFunc.
 type valueFunc struct {
-	SelfCollector
+	selfCollector
 
 	desc       *Desc
 	valType    ValueType
@@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
 		function:   function,
 		labelPairs: makeLabelPairs(desc, nil),
 	}
-	result.Init(result)
+	result.init(result)
 	return result
 }
 

+ 199 - 44
vendor/github.com/prometheus/client_golang/prometheus/vec.go

@@ -16,6 +16,8 @@ package prometheus
 import (
 	"fmt"
 	"sync"
+
+	"github.com/prometheus/common/model"
 )
 
 // MetricVec is a Collector to bundle metrics of the same name that
@@ -25,10 +27,31 @@ import (
 // provided in this package.
 type MetricVec struct {
 	mtx      sync.RWMutex // Protects the children.
-	children map[uint64]Metric
+	children map[uint64][]metricWithLabelValues
 	desc     *Desc
 
-	newMetric func(labelValues ...string) Metric
+	newMetric   func(labelValues ...string) Metric
+	hashAdd     func(h uint64, s string) uint64 // replace hash function for testing collision handling
+	hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized MetricVec. The concrete value is
+// returned for embedding into another struct.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+	return &MetricVec{
+		children:    map[uint64][]metricWithLabelValues{},
+		desc:        desc,
+		newMetric:   newMetric,
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+	values []string
+	metric Metric
 }
 
 // Describe implements Collector. The length of the returned slice
@@ -42,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
 	m.mtx.RLock()
 	defer m.mtx.RUnlock()
 
-	for _, metric := range m.children {
-		ch <- metric
+	for _, metrics := range m.children {
+		for _, metric := range metrics {
+			ch <- metric.metric
+		}
 	}
 }
 
@@ -77,16 +102,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
 		return nil, err
 	}
 
-	m.mtx.RLock()
-	metric, ok := m.children[h]
-	m.mtx.RUnlock()
-	if ok {
-		return metric, nil
-	}
-
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	return m.getOrCreateMetric(h, lvs...), nil
+	return m.getOrCreateMetricWithLabelValues(h, lvs), nil
 }
 
 // GetMetricWith returns the Metric for the given Labels map (the label names
@@ -107,20 +123,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
 		return nil, err
 	}
 
-	m.mtx.RLock()
-	metric, ok := m.children[h]
-	m.mtx.RUnlock()
-	if ok {
-		return metric, nil
-	}
-
-	lvs := make([]string, len(labels))
-	for i, label := range m.desc.variableLabels {
-		lvs[i] = labels[label]
-	}
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	return m.getOrCreateMetric(h, lvs...), nil
+	return m.getOrCreateMetricWithLabels(h, labels), nil
 }
 
 // WithLabelValues works as GetMetricWithLabelValues, but panics if an error
@@ -168,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
 	if err != nil {
 		return false
 	}
-	if _, ok := m.children[h]; !ok {
-		return false
-	}
-	delete(m.children, h)
-	return true
+	return m.deleteByHashWithLabelValues(h, lvs)
 }
 
 // Delete deletes the metric where the variable labels are the same as those
@@ -193,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
 	if err != nil {
 		return false
 	}
-	if _, ok := m.children[h]; !ok {
+
+	return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+	metrics, ok := m.children[h]
+	if !ok {
 		return false
 	}
-	delete(m.children, h)
+
+	i := m.findMetricWithLabelValues(metrics, lvs)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.children, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+	metrics, ok := m.children[h]
+	if !ok {
+		return false
+	}
+	i := m.findMetricWithLabels(metrics, labels)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.children, h)
+	}
 	return true
 }
 
@@ -216,7 +255,8 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
 	}
 	h := hashNew()
 	for _, val := range vals {
-		h = hashAdd(h, val)
+		h = m.hashAdd(h, val)
+		h = m.hashAddByte(h, model.SeparatorByte)
 	}
 	return h, nil
 }
@@ -231,19 +271,134 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
 		if !ok {
 			return 0, fmt.Errorf("label name %q missing in label map", label)
 		}
-		h = hashAdd(h, val)
+		h = m.hashAdd(h, val)
+		h = m.hashAddByte(h, model.SeparatorByte)
 	}
 	return h, nil
 }
 
-func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
-	metric, ok := m.children[hash]
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabelValues(hash, lvs)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithLabelValues(hash, lvs)
+	if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+		copiedLVs := make([]string, len(lvs))
+		copy(copiedLVs, lvs)
+		metric = m.newMetric(copiedLVs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabels(hash, labels)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithLabels(hash, labels)
 	if !ok {
-		// Copy labelValues. Otherwise, they would be allocated even if we don't go
-		// down this code path.
-		copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
-		metric = m.newMetric(copiedLabelValues...)
-		m.children[hash] = metric
+		lvs := m.extractLabelValues(labels)
+		metric = m.newMetric(lvs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
 	}
 	return metric
 }
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+	for i, metric := range metrics {
+		if m.matchLabelValues(metric.values, lvs) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+	for i, metric := range metrics {
+		if m.matchLabels(metric.values, labels) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+	if len(values) != len(lvs) {
+		return false
+	}
+	for i, v := range values {
+		if v != lvs[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+	if len(labels) != len(values) {
+		return false
+	}
+	for i, k := range m.desc.variableLabels {
+		if values[i] != labels[k] {
+			return false
+		}
+	}
+	return true
+}
+
+func (m *MetricVec) extractLabelValues(labels Labels) []string {
+	labelValues := make([]string, len(labels))
+	for i, k := range m.desc.variableLabels {
+		labelValues[i] = labels[k]
+	}
+	return labelValues
+}
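Collision handling is transparent to callers: the public accessors keep their signatures, with GetMetricWithLabelValues returning errors and WithLabelValues panicking on bad input. A usage sketch:

```go
package example

import "github.com/prometheus/client_golang/prometheus"

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests.",
	},
	[]string{"code", "method"},
)

func count(code, method string) error {
	// Error-returning variant, suitable when label values come from input.
	c, err := httpReqs.GetMetricWithLabelValues(code, method)
	if err != nil {
		return err
	}
	c.Inc()
	// Panicking shorthand for hardcoded label values.
	httpReqs.WithLabelValues("404", "GET").Inc()
	return nil
}
```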

+ 2 - 2
vendor/github.com/prometheus/common/README.md

@@ -6,7 +6,7 @@ components and libraries.
 
 * **config**: Common configuration structures
 * **expfmt**: Decoding and encoding for the exposition format
-* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus)
+* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus)
 * **model**: Shared data structures
 * **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`
-* **version**: Version informations and metric
+* **version**: Version information and metrics

+ 32 - 15
vendor/github.com/prometheus/common/expfmt/decode.go

@@ -31,6 +31,7 @@ type Decoder interface {
 	Decode(*dto.MetricFamily) error
 }
 
+// DecodeOptions contains options used by the Decoder and in sample extraction.
 type DecodeOptions struct {
 	// Timestamp is added to each value from the stream that has no explicit timestamp set.
 	Timestamp model.Time
@@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
 	return nil
 }
 
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
 type SampleDecoder struct {
 	Dec  Decoder
 	Opts *DecodeOptions
@@ -149,37 +152,51 @@ type SampleDecoder struct {
 	f dto.MetricFamily
 }
 
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
 func (sd *SampleDecoder) Decode(s *model.Vector) error {
-	if err := sd.Dec.Decode(&sd.f); err != nil {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
 		return err
 	}
-	*s = extractSamples(&sd.f, sd.Opts)
-	return nil
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
 }
 
-// Extract samples builds a slice of samples from the provided metric families.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
-	var all model.Vector
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
 	for _, f := range fams {
-		all = append(all, extractSamples(f, o)...)
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
 	}
-	return all
+	return all, lastErr
 }
 
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
 	switch f.GetType() {
 	case dto.MetricType_COUNTER:
-		return extractCounter(o, f)
+		return extractCounter(o, f), nil
 	case dto.MetricType_GAUGE:
-		return extractGauge(o, f)
+		return extractGauge(o, f), nil
 	case dto.MetricType_SUMMARY:
-		return extractSummary(o, f)
+		return extractSummary(o, f), nil
 	case dto.MetricType_UNTYPED:
-		return extractUntyped(o, f)
+		return extractUntyped(o, f), nil
 	case dto.MetricType_HISTOGRAM:
-		return extractHistogram(o, f)
+		return extractHistogram(o, f), nil
 	}
-	panic("expfmt.extractSamples: unknown metric family type")
+	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
 }
 
 func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
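Callers of ExtractSamples must now handle the error return; extraction continues past failing families and reports the last error. A sketch of the updated call:

```go
package example

import (
	"log"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func samplesFrom(fams []*dto.MetricFamily) model.Vector {
	opts := &expfmt.DecodeOptions{Timestamp: model.Now()}
	// Families of unknown type are now skipped with an error instead of
	// panicking the whole extraction.
	vec, err := expfmt.ExtractSamples(opts, fams...)
	if err != nil {
		log.Printf("some samples could not be extracted: %v", err)
	}
	return vec
}
```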

+ 5 - 7
vendor/github.com/prometheus/common/expfmt/expfmt.go

@@ -11,27 +11,25 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// A package for reading and writing Prometheus metrics.
+// Package expfmt contains tools for reading and writing Prometheus metrics.
 package expfmt
 
+// Format specifies the HTTP content type of the different wire protocols.
 type Format string
 
+// Constants to assemble the Content-Type values for the different wire protocols.
 const (
-	TextVersion = "0.0.4"
-
+	TextVersion   = "0.0.4"
 	ProtoType     = `application/vnd.google.protobuf`
 	ProtoProtocol = `io.prometheus.client.MetricFamily`
 	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
 
 	// The Content-Type values for the different wire protocols.
 	FmtUnknown      Format = `<unknown>`
-	FmtText         Format = `text/plain; version=` + TextVersion
+	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
 	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
 	FmtProtoText    Format = ProtoFmt + ` encoding=text`
 	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-
-	// fmtJSON2 is hidden as it is deprecated.
-	fmtJSON2 Format = `application/json; version=0.0.2`
 )
 
 const (

+ 5 - 2
vendor/github.com/prometheus/common/expfmt/text_create.go

@@ -25,9 +25,12 @@ import (
 
 // MetricFamilyToText converts a MetricFamily proto message into text format and
 // writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered.  This function does not perform checks on the
-// content of the metric and label names, i.e. invalid metric or label names
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
 // will result in invalid text format output.
+//
 // This method fulfills the type 'prometheus.encoder'.
 func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
 	var written int

+ 7 - 3
vendor/github.com/prometheus/common/expfmt/text_parse.go

@@ -47,7 +47,7 @@ func (e ParseError) Error() string {
 }
 
 // TextParser is used to parse the simple and flat text-based exchange format. Its
-// nil value is ready to use.
+// zero value is ready to use.
 type TextParser struct {
 	metricFamiliesByName map[string]*dto.MetricFamily
 	buf                  *bufio.Reader // Where the parsed input is read through.
@@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
 	if p.readTokenAsLabelValue(); p.err != nil {
 		return nil
 	}
+	if !model.LabelValue(p.currentToken.String()).IsValid() {
+		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+		return nil
+	}
 	p.currentLabelPair.Value = proto.String(p.currentToken.String())
 	// Special treatment of summaries:
 	// - Quantile labels are special, will result in dto.Quantile later.
@@ -552,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() {
 // byte considered is the byte already read (now in p.currentByte).  The first
 // newline byte encountered is still copied into p.currentByte, but not into
 // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
 func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
 	p.currentToken.Reset()
 	escaped := false
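With the added check, label values that are not valid UTF-8 now surface as a ParseError rather than producing an invalid protobuf. Typical parser usage for reference:

```go
package example

import (
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func parse(text string) (map[string]*dto.MetricFamily, error) {
	var p expfmt.TextParser // zero value is ready to use
	return p.TextToMetricFamilies(strings.NewReader(text))
}
```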

+ 8 - 4
vendor/github.com/prometheus/common/model/labels.go

@@ -80,14 +80,18 @@ const (
 	QuantileLabel = "quantile"
 )
 
-// LabelNameRE is a regular expression matching valid label names.
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
 var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
 
 // A LabelName is a key for a LabelSet or Metric.  It has a value associated
 // therewith.
 type LabelName string
 
-// IsValid is true iff the label name matches the pattern of LabelNameRE.
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
 func (ln LabelName) IsValid() bool {
 	if len(ln) == 0 {
 		return false
@@ -106,7 +110,7 @@ func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err := unmarshal(&s); err != nil {
 		return err
 	}
-	if !LabelNameRE.MatchString(s) {
+	if !LabelName(s).IsValid() {
 		return fmt.Errorf("%q is not a valid label name", s)
 	}
 	*ln = LabelName(s)
@@ -119,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
 	if err := json.Unmarshal(b, &s); err != nil {
 		return err
 	}
-	if !LabelNameRE.MatchString(s) {
+	if !LabelName(s).IsValid() {
 		return fmt.Errorf("%q is not a valid label name", s)
 	}
 	*ln = LabelName(s)

+ 1 - 1
vendor/github.com/prometheus/common/model/labelset.go

@@ -160,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
 	// LabelName as a string and does not call its UnmarshalJSON method.
 	// Thus, we have to replicate the behavior here.
 	for ln := range m {
-		if !LabelNameRE.MatchString(string(ln)) {
+		if !ln.IsValid() {
 			return fmt.Errorf("%q is not a valid label name", ln)
 		}
 	}

+ 8 - 3
vendor/github.com/prometheus/common/model/metric.go

@@ -21,8 +21,11 @@ import (
 )
 
 var (
-	separator    = []byte{0}
-	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+	separator = []byte{0}
+	// MetricNameRE is a regular expression matching valid metric
+	// names. Note that the IsValidMetricName function performs the same
+	// check but faster than a match with this regular expression.
+	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
 )
 
 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
@@ -41,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
 
 // Clone returns a copy of the Metric.
 func (m Metric) Clone() Metric {
-	clone := Metric{}
+	clone := make(Metric, len(m))
 	for k, v := range m {
 		clone[k] = v
 	}
@@ -85,6 +88,8 @@ func (m Metric) FastFingerprint() Fingerprint {
 }
 
 // IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
 func IsValidMetricName(n LabelValue) bool {
 	if len(n) == 0 {
 		return false

+ 2 - 2
vendor/github.com/prometheus/common/model/silence.go

@@ -59,8 +59,8 @@ func (m *Matcher) Validate() error {
 	return nil
 }
 
-// Silence defines the representation of a silence definiton
-// in the Prometheus eco-system.
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
 type Silence struct {
 	ID uint64 `json:"id,omitempty"`
 

+ 16 - 1
vendor/github.com/prometheus/common/model/time.go

@@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 // This type should not propagate beyond the scope of input/output processing.
 type Duration time.Duration
 
+// Set implements pflag/flag.Value
+func (d *Duration) Set(s string) error {
+	var err error
+	*d, err = ParseDuration(s)
+	return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+	return "duration"
+}
+
 var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
 
-// StringToDuration parses a string into a time.Duration, assuming that a year
+// ParseDuration parses a string into a time.Duration, assuming that a year
 // always has 365d, a week always has 7d, and a day always has 24h.
 func ParseDuration(durationStr string) (Duration, error) {
 	matches := durationRE.FindStringSubmatch(durationStr)
@@ -202,6 +214,9 @@ func (d Duration) String() string {
 		ms   = int64(time.Duration(d) / time.Millisecond)
 		unit = "ms"
 	)
+	if ms == 0 {
+		return "0s"
+	}
 	factors := map[string]int64{
 		"y":  1000 * 60 * 60 * 24 * 365,
 		"w":  1000 * 60 * 60 * 24 * 7,

+ 19 - 6
vendor/github.com/prometheus/common/model/value.go

@@ -22,6 +22,22 @@ import (
 	"strings"
 )
 
+var (
+	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which is possible to appear in a real SamplePair and thus not
+	// suitable to signal a non-existing SamplePair.
+	ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+	// ZeroSample is the pseudo zero-value of Sample used to signal a
+	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+	// and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which is possible to appear in a real Sample and thus not suitable
+	// to signal a non-existing Sample.
+	ZeroSample = Sample{Timestamp: Earliest}
+)
+
 // A SampleValue is a representation of a value for a given sample at a given
 // time.
 type SampleValue float64
@@ -84,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
 }
 
 // Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The sematics of Value equality is defined by SampleValue.Equal.
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
 func (s *SamplePair) Equal(o *SamplePair) bool {
 	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
 }
@@ -101,7 +117,7 @@ type Sample struct {
 }
 
 // Equal compares first the metrics, then the timestamp, then the value. The
-// sematics of value equality is defined by SampleValue.Equal.
+// semantics of value equality is defined by SampleValue.Equal.
 func (s *Sample) Equal(o *Sample) bool {
 	if s == o {
 		return true
@@ -113,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
 	if !s.Timestamp.Equal(o.Timestamp) {
 		return false
 	}
-	if s.Value.Equal(o.Value) {
-		return false
-	}
 
-	return true
+	return s.Value.Equal(o.Value)
 }
 
 func (s Sample) String() string {

+ 1 - 0
vendor/github.com/prometheus/procfs/README.md

@@ -8,3 +8,4 @@ backwards-incompatible ways without warnings. Use it at your own risk.
 
 [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
 [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)

+ 95 - 0
vendor/github.com/prometheus/procfs/buddyinfo.go

@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data is comprised of an array of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+	Node  string
+	Zone  string
+	Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.Path("buddyinfo"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+	var (
+		buddyInfo   = []BuddyInfo{}
+		scanner     = bufio.NewScanner(r)
+		bucketCount = -1
+	)
+
+	for scanner.Scan() {
+		var err error
+		line := scanner.Text()
+		parts := strings.Fields(line)
+
+		if len(parts) < 4 {
+			return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+		}
+
+		node := strings.TrimRight(parts[1], ",")
+		zone := strings.TrimRight(parts[3], ",")
+		arraySize := len(parts[4:])
+
+		if bucketCount == -1 {
+			bucketCount = arraySize
+		} else {
+			if bucketCount != arraySize {
+				return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+			}
+		}
+
+		sizes := make([]float64, arraySize)
+		for i := 0; i < arraySize; i++ {
+			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+			if err != nil {
+				return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+			}
+		}
+
+		buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+	}
+
+	return buddyInfo, scanner.Err()
+}
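Usage of the new parser, reading from the default /proc mount point:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	infos, err := procfs.NewBuddyInfo() // reads /proc/buddyinfo
	if err != nil {
		log.Fatal(err)
	}
	for _, bi := range infos {
		// Sizes[n] counts free fragments of size 2^n * PAGE_SIZE.
		fmt.Printf("node %s zone %s: %v\n", bi.Node, bi.Zone, bi.Sizes)
	}
}
```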

+ 49 - 0
vendor/github.com/prometheus/procfs/fs.go

@@ -1,9 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
 	"fmt"
 	"os"
 	"path"
+
+	"github.com/prometheus/procfs/nfs"
+	"github.com/prometheus/procfs/xfs"
 )
 
 // FS represents the pseudo-filesystem proc, which provides an interface to
@@ -31,3 +47,36 @@ func NewFS(mountPoint string) (FS, error) {
 func (fs FS) Path(p ...string) string {
 	return path.Join(append([]string{string(fs)}, p...)...)
 }
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+	f, err := os.Open(fs.Path("fs/xfs/stat"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return xfs.ParseStats(f)
+}
+
+// NFSClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfs"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfsd"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseServerRPCStats(f)
+}
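The new accessors hang off an FS value, so callers can point them at a non-standard proc mount in tests. A sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // usually "/proc"
	if err != nil {
		log.Fatal(err)
	}
	// Fails with a path error if the kernel exposes no XFS statistics.
	stats, err := fs.XFSStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed XFS stats: %+v\n", stats)
}
```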

+ 46 - 0
vendor/github.com/prometheus/procfs/internal/util/parse.go

@@ -0,0 +1,46 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import "strconv"
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}

+ 52 - 17
vendor/github.com/prometheus/procfs/ipvs.go

@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
@@ -31,14 +44,16 @@ type IPVSStats struct {
 type IPVSBackendStatus struct {
 	// The local (virtual) IP address.
 	LocalAddress net.IP
-	// The local (virtual) port.
-	LocalPort uint16
-	// The transport protocol (TCP, UDP).
-	Proto string
 	// The remote (real) IP address.
 	RemoteAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
 	// The remote (real) port.
 	RemotePort uint16
+	// The local firewall mark
+	LocalMark string
+	// The transport protocol (TCP, UDP).
+	Proto string
 	// The current number of active connections for this virtual/real address pair.
 	ActiveConn uint64
 	// The current number of inactive connections for this virtual/real address pair.
@@ -142,13 +157,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
 		status       []IPVSBackendStatus
 		scanner      = bufio.NewScanner(file)
 		proto        string
+		localMark    string
 		localAddress net.IP
 		localPort    uint16
 		err          error
 	)
 
 	for scanner.Scan() {
-		fields := strings.Fields(string(scanner.Text()))
+		fields := strings.Fields(scanner.Text())
 		if len(fields) == 0 {
 			continue
 		}
@@ -160,10 +176,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
 				continue
 			}
 			proto = fields[0]
+			localMark = ""
 			localAddress, localPort, err = parseIPPort(fields[1])
 			if err != nil {
 				return nil, err
 			}
+		case fields[0] == "FWM":
+			if len(fields) < 2 {
+				continue
+			}
+			proto = fields[0]
+			localMark = fields[1]
+			localAddress = nil
+			localPort = 0
 		case fields[0] == "->":
 			if len(fields) < 6 {
 				continue
@@ -187,6 +212,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
 			status = append(status, IPVSBackendStatus{
 				LocalAddress:  localAddress,
 				LocalPort:     localPort,
+				LocalMark:     localMark,
 				RemoteAddress: remoteAddress,
 				RemotePort:    remotePort,
 				Proto:         proto,
@@ -200,22 +226,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
 }
 
 func parseIPPort(s string) (net.IP, uint16, error) {
-	tmp := strings.SplitN(s, ":", 2)
-
-	if len(tmp) != 2 {
-		return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
-	}
+	var (
+		ip  net.IP
+		err error
+	)
 
-	if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
-		return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+	switch len(s) {
+	case 13:
+		ip, err = hex.DecodeString(s[0:8])
+		if err != nil {
+			return nil, 0, err
+		}
+	case 46:
+		ip = net.ParseIP(s[1:40])
+		if ip == nil {
+			return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+		}
+	default:
+		return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
 	}
 
-	ip, err := hex.DecodeString(tmp[0])
-	if err != nil {
-		return nil, 0, err
+	portString := s[len(s)-4:]
+	if len(portString) != 4 {
+		return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
 	}
-
-	port, err := strconv.ParseUint(tmp[1], 16, 16)
+	port, err := strconv.ParseUint(portString, 16, 16)
 	if err != nil {
 		return nil, 0, err
 	}
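The rewritten parseIPPort dispatches on total string length: 13 characters means 8 hex digits of IPv4, a colon, and 4 hex digits of port; 46 characters means a bracketed full-form IPv6 address plus port. A worked example of the IPv4 branch (literal value invented):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	s := "C0A80001:0050" // 13 chars: hex IPv4, ':', hex port
	ipBytes, _ := hex.DecodeString(s[0:8])             // {0xC0, 0xA8, 0x00, 0x01}
	port, _ := strconv.ParseUint(s[len(s)-4:], 16, 16) // 0x0050 == 80
	fmt.Println(net.IP(ipBytes), port)                 // 192.168.0.1 80
}
```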

+ 13 - 0
vendor/github.com/prometheus/procfs/mdstat.go

@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (

+ 569 - 0
vendor/github.com/prometheus/procfs/mountstats.go

@@ -0,0 +1,569 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+//   https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Constants shared between multiple functions.
+const (
+	deviceEntryLen = 8
+
+	fieldBytesLen  = 8
+	fieldEventsLen = 27
+
+	statVersion10 = "1.0"
+	statVersion11 = "1.1"
+
+	fieldTransport10Len = 10
+	fieldTransport11Len = 13
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+	// Name of the device.
+	Device string
+	// The mount point of the device.
+	Mount string
+	// The filesystem type used by the device.
+	Type string
+	// If available additional statistics related to this Mount.
+	// Use a type assertion to determine if additional statistics are available.
+	Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+	mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+	// The version of statistics provided.
+	StatVersion string
+	// The age of the NFS mount.
+	Age time.Duration
+	// Statistics related to byte counters for various operations.
+	Bytes NFSBytesStats
+	// Statistics related to various NFS event occurrences.
+	Events NFSEventsStats
+	// Statistics broken down by filesystem operation.
+	Operations []NFSOperationStats
+	// Statistics about the NFS RPC transport.
+	Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+	// Number of bytes read using the read() syscall.
+	Read uint64
+	// Number of bytes written using the write() syscall.
+	Write uint64
+	// Number of bytes read using the read() syscall in O_DIRECT mode.
+	DirectRead uint64
+	// Number of bytes written using the write() syscall in O_DIRECT mode.
+	DirectWrite uint64
+	// Number of bytes read from the NFS server, in total.
+	ReadTotal uint64
+	// Number of bytes written to the NFS server, in total.
+	WriteTotal uint64
+	// Number of pages read directly via mmap()'d files.
+	ReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+	// Number of times cached inode attributes are re-validated from the server.
+	InodeRevalidate uint64
+	// Number of times cached dentry nodes are re-validated from the server.
+	DnodeRevalidate uint64
+	// Number of times an inode cache is cleared.
+	DataInvalidate uint64
+	// Number of times cached inode attributes are invalidated.
+	AttributeInvalidate uint64
+	// Number of times files or directories have been open()'d.
+	VFSOpen uint64
+	// Number of times a directory lookup has occurred.
+	VFSLookup uint64
+	// Number of times permissions have been checked.
+	VFSAccess uint64
+	// Number of updates (and potential writes) to pages.
+	VFSUpdatePage uint64
+	// Number of pages read directly via mmap()'d files.
+	VFSReadPage uint64
+	// Number of times a group of pages have been read.
+	VFSReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	VFSWritePage uint64
+	// Number of times a group of pages have been written.
+	VFSWritePages uint64
+	// Number of times directory entries have been read with getdents().
+	VFSGetdents uint64
+	// Number of times attributes have been set on inodes.
+	VFSSetattr uint64
+	// Number of pending writes that have been forcefully flushed to the server.
+	VFSFlush uint64
+	// Number of times fsync() has been called on directories and files.
+	VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+	VFSLock uint64
+	// Number of times files have been closed and released.
+	VFSFileRelease uint64
+	// Unknown.  Possibly unused.
+	CongestionWait uint64
+	// Number of times files have been truncated.
+	Truncation uint64
+	// Number of times a file has been grown due to writes beyond its existing end.
+	WriteExtension uint64
+	// Number of times a file was removed while still open by another process.
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueTime time.Duration
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseTime time.Duration
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTime time.Duration
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+	const (
+		device            = "device"
+		statVersionPrefix = "statvers="
+
+		nfs3Type = "nfs"
+		nfs4Type = "nfs4"
+	)
+
+	var mounts []*Mount
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Only look for device entries in this function
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 || ss[0] != device {
+			continue
+		}
+
+		m, err := parseMount(ss)
+		if err != nil {
+			return nil, err
+		}
+
+		// Does this mount also possess statistics information?
+		if len(ss) > deviceEntryLen {
+			// Only NFSv3 and v4 are supported for parsing statistics
+			if m.Type != nfs3Type && m.Type != nfs4Type {
+				return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+			}
+
+			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+			stats, err := parseMountStatsNFS(s, statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			m.Stats = stats
+		}
+
+		mounts = append(mounts, m)
+	}
+
+	return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+//   device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+	if len(ss) < deviceEntryLen {
+		return nil, fmt.Errorf("invalid device entry: %v", ss)
+	}
+
+	// Check for specific words appearing at specific indices to ensure
+	// the format is consistent with what we expect
+	format := []struct {
+		i int
+		s string
+	}{
+		{i: 0, s: "device"},
+		{i: 2, s: "mounted"},
+		{i: 3, s: "on"},
+		{i: 5, s: "with"},
+		{i: 6, s: "fstype"},
+	}
+
+	for _, f := range format {
+		if ss[f.i] != f.s {
+			return nil, fmt.Errorf("invalid device entry: %v", ss)
+		}
+	}
+
+	return &Mount{
+		Device: ss[1],
+		Mount:  ss[4],
+		Type:   ss[7],
+	}, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+	// Field indicators for parsing specific types of data
+	const (
+		fieldAge        = "age:"
+		fieldBytes      = "bytes:"
+		fieldEvents     = "events:"
+		fieldPerOpStats = "per-op"
+		fieldTransport  = "xprt:"
+	)
+
+	stats := &MountStatsNFS{
+		StatVersion: statVersion,
+	}
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			break
+		}
+		if len(ss) < 2 {
+			return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+		}
+
+		switch ss[0] {
+		case fieldAge:
+			// Age integer is in seconds
+			d, err := time.ParseDuration(ss[1] + "s")
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Age = d
+		case fieldBytes:
+			bstats, err := parseNFSBytesStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Bytes = *bstats
+		case fieldEvents:
+			estats, err := parseNFSEventsStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Events = *estats
+		case fieldTransport:
+			if len(ss) < 3 {
+				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+			}
+
+			tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Transport = *tstats
+		}
+
+		// When encountering "per-operation statistics", we must break this
+		// loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; hence why this 'if' statement
+		// is not just another switch case
+		if ss[0] == fieldPerOpStats {
+			break
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	// NFS per-operation stats appear last before the next device entry
+	perOpStats, err := parseNFSOperationStats(s)
+	if err != nil {
+		return nil, err
+	}
+
+	stats.Operations = perOpStats
+
+	return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+	if len(ss) != fieldBytesLen {
+		return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+	}
+
+	ns := make([]uint64, 0, fieldBytesLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSBytesStats{
+		Read:        ns[0],
+		Write:       ns[1],
+		DirectRead:  ns[2],
+		DirectWrite: ns[3],
+		ReadTotal:   ns[4],
+		WriteTotal:  ns[5],
+		ReadPages:   ns[6],
+		WritePages:  ns[7],
+	}, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+	if len(ss) != fieldEventsLen {
+		return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+	}
+
+	ns := make([]uint64, 0, fieldEventsLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSEventsStats{
+		InodeRevalidate:     ns[0],
+		DnodeRevalidate:     ns[1],
+		DataInvalidate:      ns[2],
+		AttributeInvalidate: ns[3],
+		VFSOpen:             ns[4],
+		VFSLookup:           ns[5],
+		VFSAccess:           ns[6],
+		VFSUpdatePage:       ns[7],
+		VFSReadPage:         ns[8],
+		VFSReadPages:        ns[9],
+		VFSWritePage:        ns[10],
+		VFSWritePages:       ns[11],
+		VFSGetdents:         ns[12],
+		VFSSetattr:          ns[13],
+		VFSFlush:            ns[14],
+		VFSFsync:            ns[15],
+		VFSLock:             ns[16],
+		VFSFileRelease:      ns[17],
+		CongestionWait:      ns[18],
+		Truncation:          ns[19],
+		WriteExtension:      ns[20],
+		SillyRename:         ns[21],
+		ShortRead:           ns[22],
+		ShortWrite:          ns[23],
+		JukeboxDelay:        ns[24],
+		PNFSRead:            ns[25],
+		PNFSWrite:           ns[26],
+	}, nil
+}
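+
+// Illustrative standalone sketch, not part of the vendored file: the
+// bytes and events parsers share one shape, differing only in expected
+// field count and final struct mapping. parseUints below is a
+// hypothetical helper that makes the shared step explicit.
+package main
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// parseUints enforces an exact field count, then converts every field
+// to uint64, as both parsers above do inline.
+func parseUints(ss []string, want int) ([]uint64, error) {
+	if len(ss) != want {
+		return nil, fmt.Errorf("expected %d fields, got %d: %v", want, len(ss), ss)
+	}
+	ns := make([]uint64, 0, want)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		ns = append(ns, n)
+	}
+	return ns, nil
+}
+
+func main() {
+	ns, err := parseUints([]string{"1", "2", "3"}, 3)
+	fmt.Println(ns, err) // [1 2 3] <nil>
+	_, err = parseUints([]string{"1", "2"}, 3) // wrong count is rejected up front
+	fmt.Println(err)
+}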
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+	const (
+		// Number of expected fields in each per-operation statistics set
+		numFields = 9
+	)
+
+	var ops []NFSOperationStats
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			// Must break when reading a blank line after per-operation stats,
+			// so the top-level function can parse the next device entry
+			break
+		}
+
+		if len(ss) != numFields {
+			return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+		}
+
+		// Skip the string operation name; the remaining fields are integers
+		ns := make([]uint64, 0, numFields-1)
+		for _, st := range ss[1:] {
+			n, err := strconv.ParseUint(st, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+
+			ns = append(ns, n)
+		}
+
+		ops = append(ops, NFSOperationStats{
+			Operation:                   strings.TrimSuffix(ss[0], ":"),
+			Requests:                    ns[0],
+			Transmissions:               ns[1],
+			MajorTimeouts:               ns[2],
+			BytesSent:                   ns[3],
+			BytesReceived:               ns[4],
+			CumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,
+			CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+			CumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,
+		})
+	}
+
+	return ops, s.Err()
+}
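+
+// Illustrative standalone sketch, not part of the vendored file:
+// parsing one hypothetical per-operation line. The trailing ":" is
+// trimmed from the operation name, and the three cumulative time
+// fields are raw milliseconds, hence the time.Duration conversions.
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func main() {
+	ss := strings.Fields("READ: 19 20 0 7584 9216 0 10 12") // hypothetical counters
+	op := strings.TrimSuffix(ss[0], ":")
+	ns := make([]uint64, 0, len(ss)-1)
+	for _, s := range ss[1:] {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			panic(err) // sketch only; the real parser returns the error
+		}
+		ns = append(ns, n)
+	}
+	fmt.Println(op, "requests:", ns[0],
+		"total response time:", time.Duration(ns[6])*time.Millisecond)
+}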
+
+// parseNFSTransportStats parses an NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	switch statVersion {
+	case statVersion10:
+		if len(ss) != fieldTransport10Len {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+		}
+	case statVersion11:
+		if len(ss) != fieldTransport11Len {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+		}
+	default:
+		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+	}
+
+	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+	// in a v1.0 response.
+	//
+	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
+	// only v1.0 stats are present.
+	// See: https://github.com/prometheus/node_exporter/issues/571.
+	ns := make([]uint64, fieldTransport11Len)
+	for i, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns[i] = n
+	}
+
+	return &NFSTransportStats{
+		Port:                     ns[0],
+		Bind:                     ns[1],
+		Connect:                  ns[2],
+		ConnectIdleTime:          ns[3],
+		IdleTime:                 time.Duration(ns[4]) * time.Second,
+		Sends:                    ns[5],
+		Receives:                 ns[6],
+		BadTransactionIDs:        ns[7],
+		CumulativeActiveRequests: ns[8],
+		CumulativeBacklog:        ns[9],
+		MaximumRPCSlotsUsed:      ns[10],
+		CumulativeSendingQueue:   ns[11],
+		CumulativePendingQueue:   ns[12],
+	}, nil
+}
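+
+// Illustrative standalone sketch, not part of the vendored file: why
+// the transport slice is allocated with the v1.1 *length* rather than
+// grown with append. The counts 10 and 13 stand in for
+// fieldTransport10Len and fieldTransport11Len, consistent with the 13
+// fields mapped above; with a shorter v1.0 line, the v1.1-only tail
+// (MaximumRPCSlotsUsed and the two queue counters) simply stays zero
+// instead of indexing past the end of the slice.
+package main
+
+import "fmt"
+
+func main() {
+	const v10Len, v11Len = 10, 13
+
+	// A v1.0 line: fewer fields than v1.1 defines.
+	input := make([]uint64, v10Len)
+	for i := range input {
+		input[i] = uint64(i + 1)
+	}
+
+	// Length (not just capacity) is v11Len, so the tail is zeroed.
+	ns := make([]uint64, v11Len)
+	for i, n := range input {
+		ns[i] = n
+	}
+	fmt.Println("v1.1-only fields default to zero:", ns[10], ns[11], ns[12])
+}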

Some files were not shown because too many files have changed