
Vendor swarmkit, containerd, and related dependencies

Update swarmkit to 037b491.

As swarmkit switched to a newer gRPC version, this also involves
updating Docker's vendored gRPC, which in turn requires updating
containerd to a new version that has protobufs generated against this
gRPC version.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Lehmann authored 8 years ago
Commit 3e987e1732
100 changed files with 7685 additions and 6778 deletions
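
Most of the hand-edited files below apply one mechanical change: helpers from github.com/docker/swarmkit/protobuf/ptypes are replaced by their equivalents in github.com/gogo/protobuf/types. The following standalone Go sketch shows the new conversion calls as they appear in the diffs; the sketch itself is illustrative and not part of the commit.

package main

import (
	"fmt"
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

func main() {
	// time.Duration -> *types.Duration (previously ptypes.DurationProto)
	d := gogotypes.DurationProto(30 * time.Second)

	// *types.Duration -> time.Duration (previously ptypes.Duration)
	dur, err := gogotypes.DurationFromProto(d)
	if err != nil {
		panic(err)
	}

	// time.Time -> *types.Timestamp (previously ptypes.TimestampProto)
	ts, err := gogotypes.TimestampProto(time.Now())
	if err != nil {
		panic(err)
	}

	// *types.Timestamp -> time.Time (previously ptypes.Timestamp)
	t, err := gogotypes.TimestampFromProto(ts)
	if err != nil {
		panic(err)
	}

	fmt.Println(dur, t.UTC())
}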
  1. daemon/cluster/cluster.go (+2 -2)
  2. daemon/cluster/convert/container.go (+7 -7)
  3. daemon/cluster/convert/network.go (+3 -3)
  4. daemon/cluster/convert/node.go (+3 -3)
  5. daemon/cluster/convert/secret.go (+3 -3)
  6. daemon/cluster/convert/service.go (+13 -13)
  7. daemon/cluster/convert/swarm.go (+7 -7)
  8. daemon/cluster/convert/task.go (+4 -4)
  9. daemon/cluster/executor/container/adapter.go (+2 -2)
  10. daemon/cluster/executor/container/container.go (+3 -3)
  11. daemon/cluster/executor/container/controller.go (+2 -2)
  12. hack/dockerfile/binaries-commits (+1 -1)
  13. vendor.conf (+9 -9)
  14. vendor/github.com/coreos/etcd/client/client.go (+102 -41)
  15. vendor/github.com/coreos/etcd/client/keys.generated.go (+373 -286)
  16. vendor/github.com/coreos/etcd/client/keys.go (+23 -9)
  17. vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go (+6 -14)
  18. vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go (+46 -0)
  19. vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go (+1 -1)
  20. vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go (+106 -0)
  21. vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go (+1 -1)
  22. vendor/github.com/coreos/etcd/raft/doc.go (+6 -0)
  23. vendor/github.com/coreos/etcd/raft/node.go (+12 -18)
  24. vendor/github.com/coreos/etcd/raft/progress.go (+46 -11)
  25. vendor/github.com/coreos/etcd/raft/raft.go (+294 -115)
  26. vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go (+204 -196)
  27. vendor/github.com/coreos/etcd/raft/raftpb/raft.proto (+2 -0)
  28. vendor/github.com/coreos/etcd/raft/rawnode.go (+26 -0)
  29. vendor/github.com/coreos/etcd/raft/read_only.go (+118 -0)
  30. vendor/github.com/coreos/etcd/raft/storage.go (+2 -0)
  31. vendor/github.com/coreos/etcd/raft/util.go (+13 -1)
  32. vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go (+50 -46)
  33. vendor/github.com/coreos/etcd/wal/doc.go (+1 -1)
  34. vendor/github.com/coreos/etcd/wal/encoder.go (+19 -4)
  35. vendor/github.com/coreos/etcd/wal/wal.go (+59 -15)
  36. vendor/github.com/coreos/etcd/wal/wal_unix.go (+8 -2)
  37. vendor/github.com/coreos/etcd/wal/walpb/record.pb.go (+70 -66)
  38. vendor/github.com/docker/containerd/api/grpc/types/api.pb.go (+803 -5)
  39. vendor/github.com/docker/swarmkit/agent/session.go (+1 -7)
  40. vendor/github.com/docker/swarmkit/api/ca.pb.go (+197 -292)
  41. vendor/github.com/docker/swarmkit/api/control.pb.go (+649 -1115)
  42. vendor/github.com/docker/swarmkit/api/deepcopy/copy.go (+44 -0)
  43. vendor/github.com/docker/swarmkit/api/dispatcher.pb.go (+255 -396)
  44. vendor/github.com/docker/swarmkit/api/dispatcher.proto (+2 -2)
  45. vendor/github.com/docker/swarmkit/api/duration/gen.go (+0 -3)
  46. vendor/github.com/docker/swarmkit/api/gen.go (+1 -1)
  47. vendor/github.com/docker/swarmkit/api/health.pb.go (+74 -119)
  48. vendor/github.com/docker/swarmkit/api/logbroker.pb.go (+256 -358)
  49. vendor/github.com/docker/swarmkit/api/logbroker.proto (+5 -3)
  50. vendor/github.com/docker/swarmkit/api/objects.pb.go (+316 -465)
  51. vendor/github.com/docker/swarmkit/api/objects.proto (+4 -3)
  52. vendor/github.com/docker/swarmkit/api/raft.pb.go (+281 -442)
  53. vendor/github.com/docker/swarmkit/api/resource.pb.go (+119 -177)
  54. vendor/github.com/docker/swarmkit/api/snapshot.pb.go (+264 -269)
  55. vendor/github.com/docker/swarmkit/api/specs.pb.go (+275 -484)
  56. vendor/github.com/docker/swarmkit/api/specs.proto (+3 -2)
  57. vendor/github.com/docker/swarmkit/api/timestamp/gen.go (+0 -3)
  58. vendor/github.com/docker/swarmkit/api/types.pb.go (+602 -1164)
  59. vendor/github.com/docker/swarmkit/api/types.proto (+25 -14)
  60. vendor/github.com/docker/swarmkit/ca/certificates.go (+8 -15)
  61. vendor/github.com/docker/swarmkit/ca/server.go (+3 -3)
  62. vendor/github.com/docker/swarmkit/manager/constraint/constraint.go (+21 -0)
  63. vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go (+4 -4)
  64. vendor/github.com/docker/swarmkit/manager/controlapi/node.go (+2 -2)
  65. vendor/github.com/docker/swarmkit/manager/controlapi/service.go (+5 -10)
  66. vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go (+4 -4)
  67. vendor/github.com/docker/swarmkit/manager/manager.go (+159 -92)
  68. vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/tasks.go (+3 -3)
  69. vendor/github.com/docker/swarmkit/manager/orchestrator/restart/restart.go (+3 -3)
  70. vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go (+4 -8)
  71. vendor/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go (+1 -0)
  72. vendor/github.com/docker/swarmkit/manager/state/raft/raft.go (+67 -1)
  73. vendor/github.com/docker/swarmkit/manager/state/store/memory.go (+2 -2)
  74. vendor/github.com/docker/swarmkit/node/node.go (+27 -7)
  75. vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go (+49 -97)
  76. vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go (+2 -8)
  77. vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go (+3 -121)
  78. vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go (+88 -68)
  79. vendor/github.com/gogo/protobuf/gogoproto/gogo.proto (+3 -0)
  80. vendor/github.com/gogo/protobuf/gogoproto/helper.go (+35 -0)
  81. vendor/github.com/gogo/protobuf/proto/decode.go (+104 -8)
  82. vendor/github.com/gogo/protobuf/proto/duration.go (+17 -19)
  83. vendor/github.com/gogo/protobuf/proto/duration_gogo.go (+202 -0)
  84. vendor/github.com/gogo/protobuf/proto/encode.go (+3 -11)
  85. vendor/github.com/gogo/protobuf/proto/equal.go (+6 -2)
  86. vendor/github.com/gogo/protobuf/proto/extensions.go (+3 -0)
  87. vendor/github.com/gogo/protobuf/proto/lib.go (+1 -1)
  88. vendor/github.com/gogo/protobuf/proto/properties.go (+34 -6)
  89. vendor/github.com/gogo/protobuf/proto/properties_gogo.go (+45 -0)
  90. vendor/github.com/gogo/protobuf/proto/text.go (+145 -32)
  91. vendor/github.com/gogo/protobuf/proto/text_gogo.go (+3 -3)
  92. vendor/github.com/gogo/protobuf/proto/text_parser.go (+175 -20)
  93. vendor/github.com/gogo/protobuf/proto/timestamp.go (+113 -0)
  94. vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go (+227 -0)
  95. vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto (+140 -0)
  96. vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto (+38 -4)
  97. vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto (+3 -5)
  98. vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto (+13 -8)
  99. vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto (+1 -1)
  100. vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto (+96 -0)

+ 2 - 2
daemon/cluster/cluster.go

@@ -71,7 +71,7 @@ import (
 	swarmapi "github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/manager/encryption"
 	swarmnode "github.com/docker/swarmkit/node"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
@@ -1138,7 +1138,7 @@ func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend
 			data := []byte{}
 
 			if config.Timestamps {
-				ts, err := ptypes.Timestamp(msg.Timestamp)
+				ts, err := gogotypes.TimestampFromProto(msg.Timestamp)
 				if err != nil {
 					return err
 				}

+ 7 - 7
daemon/cluster/convert/container.go

@@ -9,7 +9,7 @@ import (
 	mounttypes "github.com/docker/docker/api/types/mount"
 	types "github.com/docker/docker/api/types/swarm"
 	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
@@ -75,7 +75,7 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
 	}
 
 	if c.StopGracePeriod != nil {
-		grace, _ := ptypes.Duration(c.StopGracePeriod)
+		grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod)
 		containerSpec.StopGracePeriod = &grace
 	}
 
@@ -159,7 +159,7 @@ func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
 	}
 
 	if c.StopGracePeriod != nil {
-		containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod)
+		containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod)
 	}
 
 	// Mounts
@@ -215,8 +215,8 @@ func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
 }
 
 func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig {
-	interval, _ := ptypes.Duration(h.Interval)
-	timeout, _ := ptypes.Duration(h.Timeout)
+	interval, _ := gogotypes.DurationFromProto(h.Interval)
+	timeout, _ := gogotypes.DurationFromProto(h.Timeout)
 	return &container.HealthConfig{
 		Test:     h.Test,
 		Interval: interval,
@@ -228,8 +228,8 @@ func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig {
 func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig {
 	return &swarmapi.HealthConfig{
 		Test:     h.Test,
-		Interval: ptypes.DurationProto(h.Interval),
-		Timeout:  ptypes.DurationProto(h.Timeout),
+		Interval: gogotypes.DurationProto(h.Interval),
+		Timeout:  gogotypes.DurationProto(h.Timeout),
 		Retries:  int32(h.Retries),
 	}
 }

+ 3 - 3
daemon/cluster/convert/network.go

@@ -7,7 +7,7 @@ import (
 	networktypes "github.com/docker/docker/api/types/network"
 	types "github.com/docker/docker/api/types/swarm"
 	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 func networkAttachementFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
@@ -35,8 +35,8 @@ func networkFromGRPC(n *swarmapi.Network) types.Network {
 
 		// Meta
 		network.Version.Index = n.Meta.Version.Index
-		network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
-		network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
+		network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
+		network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
 
 		//Annotations
 		network.Spec.Name = n.Spec.Annotations.Name

+ 3 - 3
daemon/cluster/convert/node.go

@@ -6,7 +6,7 @@ import (
 
 	types "github.com/docker/docker/api/types/swarm"
 	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 // NodeFromGRPC converts a grpc Node to a Node.
@@ -26,8 +26,8 @@ func NodeFromGRPC(n swarmapi.Node) types.Node {
 
 	// Meta
 	node.Version.Index = n.Meta.Version.Index
-	node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
-	node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
+	node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
+	node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
 
 	//Annotations
 	node.Spec.Name = n.Spec.Annotations.Name

+ 3 - 3
daemon/cluster/convert/secret.go

@@ -3,7 +3,7 @@ package convert
 import (
 	swarmtypes "github.com/docker/docker/api/types/swarm"
 	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 // SecretFromGRPC converts a grpc Secret to a Secret.
@@ -21,8 +21,8 @@ func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret {
 
 	secret.Version.Index = s.Meta.Version.Index
 	// Meta
-	secret.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
-	secret.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)
+	secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt)
+	secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt)
 
 	return secret
 }

+ 13 - 13
daemon/cluster/convert/service.go

@@ -7,7 +7,7 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/pkg/namesgenerator"
 	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 // ServiceFromGRPC converts a grpc Service to a Service.
@@ -22,8 +22,8 @@ func ServiceFromGRPC(s swarmapi.Service) types.Service {
 
 	// Meta
 	service.Version.Index = s.Meta.Version.Index
-	service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
-	service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)
+	service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt)
+	service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt)
 
 	// UpdateStatus
 	if s.UpdateStatus != nil {
@@ -37,12 +37,12 @@ func ServiceFromGRPC(s swarmapi.Service) types.Service {
 			service.UpdateStatus.State = types.UpdateStateCompleted
 		}
 
-		startedAt, _ := ptypes.Timestamp(s.UpdateStatus.StartedAt)
+		startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt)
 		if !startedAt.IsZero() {
 			service.UpdateStatus.StartedAt = &startedAt
 		}
 
-		completedAt, _ := ptypes.Timestamp(s.UpdateStatus.CompletedAt)
+		completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt)
 		if !completedAt.IsZero() {
 			service.UpdateStatus.CompletedAt = &completedAt
 		}
@@ -96,9 +96,9 @@ func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec {
 			MaxFailureRatio: spec.Update.MaxFailureRatio,
 		}
 
-		convertedSpec.UpdateConfig.Delay, _ = ptypes.Duration(&spec.Update.Delay)
+		convertedSpec.UpdateConfig.Delay = spec.Update.Delay
 		if spec.Update.Monitor != nil {
-			convertedSpec.UpdateConfig.Monitor, _ = ptypes.Duration(spec.Update.Monitor)
+			convertedSpec.UpdateConfig.Monitor, _ = gogotypes.DurationFromProto(spec.Update.Monitor)
 		}
 
 		switch spec.Update.FailureAction {
@@ -183,12 +183,12 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
 		}
 		spec.Update = &swarmapi.UpdateConfig{
 			Parallelism:     s.UpdateConfig.Parallelism,
-			Delay:           *ptypes.DurationProto(s.UpdateConfig.Delay),
+			Delay:           s.UpdateConfig.Delay,
 			FailureAction:   failureAction,
 			MaxFailureRatio: s.UpdateConfig.MaxFailureRatio,
 		}
 		if s.UpdateConfig.Monitor != 0 {
-			spec.Update.Monitor = ptypes.DurationProto(s.UpdateConfig.Monitor)
+			spec.Update.Monitor = gogotypes.DurationProto(s.UpdateConfig.Monitor)
 		}
 	}
 
@@ -295,11 +295,11 @@ func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy {
 		}
 
 		if p.Delay != nil {
-			delay, _ := ptypes.Duration(p.Delay)
+			delay, _ := gogotypes.DurationFromProto(p.Delay)
 			rp.Delay = &delay
 		}
 		if p.Window != nil {
-			window, _ := ptypes.Duration(p.Window)
+			window, _ := gogotypes.DurationFromProto(p.Window)
 			rp.Window = &window
 		}
 
@@ -328,10 +328,10 @@ func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error
 		}
 
 		if p.Delay != nil {
-			rp.Delay = ptypes.DurationProto(*p.Delay)
+			rp.Delay = gogotypes.DurationProto(*p.Delay)
 		}
 		if p.Window != nil {
-			rp.Window = ptypes.DurationProto(*p.Window)
+			rp.Window = gogotypes.DurationProto(*p.Window)
 		}
 		if p.MaxAttempts != nil {
 			rp.MaxAttempts = *p.MaxAttempts

+ 7 - 7
daemon/cluster/convert/swarm.go

@@ -7,7 +7,7 @@ import (
 
 	types "github.com/docker/docker/api/types/swarm"
 	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 // SwarmFromGRPC converts a grpc Cluster to a Swarm.
@@ -37,10 +37,10 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
 		},
 	}
 
-	heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod)
+	heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod)
 	swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod
 
-	swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)
+	swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry)
 
 	for _, ca := range c.Spec.CAConfig.ExternalCAs {
 		swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{
@@ -52,8 +52,8 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
 
 	// Meta
 	swarm.Version.Index = c.Meta.Version.Index
-	swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
-	swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt)
+	swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt)
+	swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt)
 
 	// Annotations
 	swarm.Spec.Name = c.Spec.Annotations.Name
@@ -98,10 +98,10 @@ func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.Clu
 		spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick)
 	}
 	if s.Dispatcher.HeartbeatPeriod != 0 {
-		spec.Dispatcher.HeartbeatPeriod = ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod))
+		spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod))
 	}
 	if s.CAConfig.NodeCertExpiry != 0 {
-		spec.CAConfig.NodeCertExpiry = ptypes.DurationProto(s.CAConfig.NodeCertExpiry)
+		spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry)
 	}
 
 	for _, ca := range s.CAConfig.ExternalCAs {

+ 4 - 4
daemon/cluster/convert/task.go

@@ -5,7 +5,7 @@ import (
 
 	types "github.com/docker/docker/api/types/swarm"
 	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 // TaskFromGRPC converts a grpc Task to a Task.
@@ -47,10 +47,10 @@ func TaskFromGRPC(t swarmapi.Task) types.Task {
 
 	// Meta
 	task.Version.Index = t.Meta.Version.Index
-	task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt)
-	task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt)
+	task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt)
+	task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt)
 
-	task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp)
+	task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp)
 
 	if containerStatus != nil {
 		task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID

+ 2 - 2
daemon/cluster/executor/container/adapter.go

@@ -22,7 +22,7 @@ import (
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/opencontainers/go-digest"
 	"golang.org/x/net/context"
 	"golang.org/x/time/rate"
@@ -391,7 +391,7 @@ func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscription
 	}
 
 	if options.Since != nil {
-		since, err := ptypes.Timestamp(options.Since)
+		since, err := gogotypes.TimestampFromProto(options.Since)
 		if err != nil {
 			return nil, err
 		}

+ 3 - 3
daemon/cluster/executor/container/container.go

@@ -22,8 +22,8 @@ import (
 	"github.com/docker/go-connections/nat"
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/protobuf/ptypes"
 	"github.com/docker/swarmkit/template"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 
 const (
@@ -323,8 +323,8 @@ func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
 	if hcSpec == nil {
 		return nil
 	}
-	interval, _ := ptypes.Duration(hcSpec.Interval)
-	timeout, _ := ptypes.Duration(hcSpec.Timeout)
+	interval, _ := gogotypes.DurationFromProto(hcSpec.Interval)
+	timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout)
 	return &enginecontainer.HealthConfig{
 		Test:     hcSpec.Test,
 		Interval: interval,

+ 2 - 2
daemon/cluster/executor/container/controller.go

@@ -19,7 +19,7 @@ import (
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/time/rate"
@@ -502,7 +502,7 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti
 			return errors.Wrap(err, "failed to parse timestamp")
 		}
 
-		tsp, err := ptypes.TimestampProto(ts)
+		tsp, err := gogotypes.TimestampProto(ts)
 		if err != nil {
 			return errors.Wrap(err, "failed to convert timestamp")
 		}

+ 1 - 1
hack/dockerfile/binaries-commits

@@ -2,7 +2,7 @@
 
 TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
 RUNC_COMMIT=51371867a01c467f08af739783b8beafc154c4d7
-CONTAINERD_COMMIT=03e5862ec0d8d3b3f750e19fca3ee367e13c090e
+CONTAINERD_COMMIT=d7975b89804b207b68f8b446cf1e2af72589bfcf
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
LIBNETWORK_COMMIT=0f534354b813003a754606689722fe253101bc4e
VNDR_COMMIT=f56bd4504b4fad07a357913687fb652ee54bb3b0

+ 9 - 9
vendor.conf

@@ -36,7 +36,7 @@ github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457
 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707
+github.com/coreos/etcd 824277cb3a577a0e8c829ca9ec557b973fe06d20
 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
 github.com/hashicorp/consul v0.5.2
 github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
@@ -54,7 +54,7 @@ github.com/pborman/uuid v1.0
 # get desired notary commit, might also need to be updated in Dockerfile
 github.com/docker/notary v0.4.2
 
-google.golang.org/grpc v1.0.2
+google.golang.org/grpc v1.0.4
 github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
 github.com/docker/go v1.5.1-1-1-gbaf439e
 github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
@@ -66,7 +66,7 @@ github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 github.com/coreos/go-systemd v4
 github.com/godbus/dbus v4.0.0
 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a
+github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
 
 # gelf logging driver deps
 github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883
@@ -92,25 +92,25 @@ golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
 google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
 cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
 github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
-google.golang.org/genproto 9359a8d303c45e3212571b77610f1cefb0c6f3eb
+google.golang.org/genproto b3e7c2fb04031add52c4817f53f43757ccbf9c18
 
 # native credentials
 github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd
 
 # containerd
-github.com/docker/containerd 03e5862ec0d8d3b3f750e19fca3ee367e13c090e
+github.com/docker/containerd d7975b89804b207b68f8b446cf1e2af72589bfcf
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 
 # cluster
-github.com/docker/swarmkit 98620dd1ddfcc03d8f4b0d2910ecf6b52918a731
+github.com/docker/swarmkit 037b4913929019d44bc927870bf2d92ce9ca261f
 github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
-github.com/gogo/protobuf v0.3
+github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
 github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
 golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
 golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
 github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47
-github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87
+github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
 github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
 github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
 github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
@@ -121,7 +121,7 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
 github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
 github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
 bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675
-github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
+github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
 
 # cli

+ 102 - 41
vendor/github.com/coreos/etcd/client/client.go

@@ -22,7 +22,6 @@ import (
 	"net"
 	"net/http"
 	"net/url"
-	"reflect"
 	"sort"
 	"strconv"
 	"sync"
@@ -261,53 +260,67 @@ type httpClusterClient struct {
 	selectionMode EndpointSelectionMode
 }
 
-func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
-	mAPI := NewMembersAPI(c)
-	leader, err := mAPI.Leader(context.Background())
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+	ceps := make([]url.URL, len(eps))
+	copy(ceps, eps)
+
+	// To perform a lookup on the new endpoint list without using the current
+	// client, we'll copy it
+	clientCopy := &httpClusterClient{
+		clientFactory: c.clientFactory,
+		credentials:   c.credentials,
+		rand:          c.rand,
+
+		pinned:    0,
+		endpoints: ceps,
+	}
+
+	mAPI := NewMembersAPI(clientCopy)
+	leader, err := mAPI.Leader(ctx)
 	if err != nil {
 		return "", err
 	}
+	if len(leader.ClientURLs) == 0 {
+		return "", ErrNoLeaderEndpoint
+	}
 
 	return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
 }
 
-func (c *httpClusterClient) SetEndpoints(eps []string) error {
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
 	if len(eps) == 0 {
-		return ErrNoEndpoints
+		return []url.URL{}, ErrNoEndpoints
 	}
 
 	neps := make([]url.URL, len(eps))
 	for i, ep := range eps {
 		u, err := url.Parse(ep)
 		if err != nil {
-			return err
+			return []url.URL{}, err
 		}
 		neps[i] = *u
 	}
+	return neps, nil
+}
 
-	switch c.selectionMode {
-	case EndpointSelectionRandom:
-		c.endpoints = shuffleEndpoints(c.rand, neps)
-		c.pinned = 0
-	case EndpointSelectionPrioritizeLeader:
-		c.endpoints = neps
-		lep, err := c.getLeaderEndpoint()
-		if err != nil {
-			return ErrNoLeaderEndpoint
-		}
-
-		for i := range c.endpoints {
-			if c.endpoints[i].String() == lep {
-				c.pinned = i
-				break
-			}
-		}
-		// If endpoints doesn't have the lu, just keep c.pinned = 0.
-		// Forwarding between follower and leader would be required but it works.
-	default:
-		return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
 	}
 
+	c.Lock()
+	defer c.Unlock()
+
+	c.endpoints = shuffleEndpoints(c.rand, neps)
+	// We're not doing anything for PrioritizeLeader here. This is
+	// due to not having a context meaning we can't call getLeaderEndpoint
+	// However, if you're using PrioritizeLeader, you've already been told
+	// to regularly call sync, where we do have a ctx, and can figure the
+	// leader. PrioritizeLeader is also quite a loose guarantee, so deal
+	// with it
+	c.pinned = 0
+
 	return nil
 }
 
@@ -401,27 +414,51 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
 		return err
 	}
 
-	c.Lock()
-	defer c.Unlock()
-
 	var eps []string
 	for _, m := range ms {
 		eps = append(eps, m.ClientURLs...)
 	}
-	sort.Sort(sort.StringSlice(eps))
 
-	ceps := make([]string, len(c.endpoints))
-	for i, cep := range c.endpoints {
-		ceps[i] = cep.String()
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
 	}
-	sort.Sort(sort.StringSlice(ceps))
-	// fast path if no change happens
-	// this helps client to pin the endpoint when no cluster change
-	if reflect.DeepEqual(eps, ceps) {
-		return nil
+
+	npin := 0
+
+	switch c.selectionMode {
+	case EndpointSelectionRandom:
+		c.RLock()
+		eq := endpointsEqual(c.endpoints, neps)
+		c.RUnlock()
+
+		if eq {
+			return nil
+		}
+		// When items in the endpoint list changes, we choose a new pin
+		neps = shuffleEndpoints(c.rand, neps)
+	case EndpointSelectionPrioritizeLeader:
+		nle, err := c.getLeaderEndpoint(ctx, neps)
+		if err != nil {
+			return ErrNoLeaderEndpoint
+		}
+
+		for i, n := range neps {
+			if n.String() == nle {
+				npin = i
+				break
+			}
+		}
+	default:
+		return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
 	}
 
-	return c.SetEndpoints(eps)
+	c.Lock()
+	defer c.Unlock()
+	c.endpoints = neps
+	c.pinned = npin
+
+	return nil
 }
 
 func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
@@ -607,3 +644,27 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
 	}
 	return neps
 }
+
+func endpointsEqual(left, right []url.URL) bool {
+	if len(left) != len(right) {
+		return false
+	}
+
+	sLeft := make([]string, len(left))
+	sRight := make([]string, len(right))
+	for i, l := range left {
+		sLeft[i] = l.String()
+	}
+	for i, r := range right {
+		sRight[i] = r.String()
+	}
+
+	sort.Strings(sLeft)
+	sort.Strings(sRight)
+	for i := range sLeft {
+		if sLeft[i] != sRight[i] {
+			return false
+		}
+	}
+	return true
+}

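For context, the rewritten Sync above re-evaluates the pinned endpoint and only consults the leader when the client was configured to prioritize it, while SetEndpoints no longer performs a leader lookup at all. A rough usage sketch follows, assuming the etcd v2 client exposes this through a Config.SelectionMode field and the AutoSync method referenced in this file; it is not taken from the commit.

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	cfg := client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		// Pin requests to the leader; per the comment in SetEndpoints above,
		// the pin is only re-evaluated when Sync/AutoSync runs.
		SelectionMode: client.EndpointSelectionPrioritizeLeader,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Periodically refresh the endpoint list (and, with it, the leader pin).
	go func() {
		if err := c.AutoSync(context.Background(), 10*time.Second); err != nil {
			log.Println("auto sync stopped:", err)
		}
	}()
}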
+ 373 - 286
vendor/github.com/coreos/etcd/client/keys.generated.go

File diff suppressed because it is too large

+ 23 - 9
vendor/github.com/coreos/etcd/client/keys.go

@@ -191,6 +191,10 @@ type SetOptions struct {
 
 	// Dir specifies whether or not this Node should be created as a directory.
 	Dir bool
+
+	// NoValueOnSuccess specifies whether the response contains the current value of the Node.
+	// If set, the response will only contain the current value when the request fails.
+	NoValueOnSuccess bool
 }
 
 type GetOptions struct {
@@ -268,6 +272,10 @@ type Response struct {
 	// Index holds the cluster-level index at the time the Response was generated.
 	// This index is not tied to the Node(s) contained in this Response.
 	Index uint64 `json:"-"`
+
+	// ClusterID holds the cluster-level ID reported by the server.  This
+	// should be different for different etcd clusters.
+	ClusterID string `json:"-"`
 }
 
 type Node struct {
@@ -335,6 +343,7 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
 		act.TTL = opts.TTL
 		act.Refresh = opts.Refresh
 		act.Dir = opts.Dir
+		act.NoValueOnSuccess = opts.NoValueOnSuccess
 	}
 
 	doCtx := ctx
@@ -523,15 +532,16 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
 }
 
 type setAction struct {
-	Prefix    string
-	Key       string
-	Value     string
-	PrevValue string
-	PrevIndex uint64
-	PrevExist PrevExistType
-	TTL       time.Duration
-	Refresh   bool
-	Dir       bool
+	Prefix           string
+	Key              string
+	Value            string
+	PrevValue        string
+	PrevIndex        uint64
+	PrevExist        PrevExistType
+	TTL              time.Duration
+	Refresh          bool
+	Dir              bool
+	NoValueOnSuccess bool
 }
 
 func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
@@ -565,6 +575,9 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
 	if a.Refresh {
 		form.Add("refresh", "true")
 	}
+	if a.NoValueOnSuccess {
+		params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
+	}
 
 	u.RawQuery = params.Encode()
 	body := strings.NewReader(form.Encode())
@@ -656,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
 			return nil, err
 		}
 	}
+	res.ClusterID = header.Get("X-Etcd-Cluster-ID")
 	return &res, nil
 }
 

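A hedged sketch (not part of the commit) of how the two additions above, SetOptions.NoValueOnSuccess and Response.ClusterID, surface through the keys API; endpoint and key names are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// Ask the server not to echo the value back on a successful set; the
	// response still carries metadata such as the cluster ID added above.
	resp, err := kapi.Set(context.Background(), "/foo", "bar", &client.SetOptions{
		NoValueOnSuccess: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cluster:", resp.ClusterID, "index:", resp.Index)
}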
+ 6 - 14
vendor/google.golang.org/genproto/googleapis/api/serviceconfig/annotations.proto → vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2015, Google Inc.
+// Copyright 2016 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,19 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-syntax = "proto3";
+// +build !windows
 
-package google.api;
+package fileutil
 
-import "google.golang.org/genproto/googleapis/api/serviceconfig/http.proto"; // from google/api/http.proto
-import "google.golang.org/genproto/protobuf/descriptor.proto"; // from google/protobuf/descriptor.proto
+import "os"
 
-option java_multiple_files = true;
-option java_outer_classname = "AnnotationsProto";
-option java_package = "com.google.api";
-option objc_class_prefix = "GAPI";
-
-extend google.protobuf.MethodOptions {
-  // See `HttpRule`.
-  HttpRule http = 72295728;
-}
+// OpenDir opens a directory for syncing.
+func OpenDir(path string) (*os.File, error) { return os.Open(path) }

+ 46 - 0
vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go

@@ -0,0 +1,46 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// OpenDir opens a directory in windows with write access for syncing.
+func OpenDir(path string) (*os.File, error) {
+	fd, err := openDir(path)
+	if err != nil {
+		return nil, err
+	}
+	return os.NewFile(uintptr(fd), path), nil
+}
+
+func openDir(path string) (fd syscall.Handle, err error) {
+	if len(path) == 0 {
+		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return syscall.InvalidHandle, err
+	}
+	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
+	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+	createmode := uint32(syscall.OPEN_EXISTING)
+	fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+	return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
+}

+ 1 - 1
vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go

@@ -33,7 +33,7 @@ const (
 )
 
 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
 )
 
 // IsDirWriteable checks if dir is writable by writing and removing a file

+ 106 - 0
vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go

@@ -0,0 +1,106 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+	"io"
+)
+
+var defaultBufferBytes = 128 * 1024
+
+// PageWriter implements the io.Writer interface so that writes will
+// either be in page chunks or from flushing.
+type PageWriter struct {
+	w io.Writer
+	// pageOffset tracks the page offset of the base of the buffer
+	pageOffset int
+	// pageBytes is the number of bytes per page
+	pageBytes int
+	// bufferedBytes counts the number of bytes pending for write in the buffer
+	bufferedBytes int
+	// buf holds the write buffer
+	buf []byte
+	// bufWatermarkBytes is the number of bytes the buffer can hold before it needs
+	// to be flushed. It is less than len(buf) so there is space for slack writes
+	// to bring the writer to page alignment.
+	bufWatermarkBytes int
+}
+
+// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
+// to write per page. pageOffset is the starting offset of io.Writer.
+func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
+	return &PageWriter{
+		w:                 w,
+		pageOffset:        pageOffset,
+		pageBytes:         pageBytes,
+		buf:               make([]byte, defaultBufferBytes+pageBytes),
+		bufWatermarkBytes: defaultBufferBytes,
+	}
+}
+
+func (pw *PageWriter) Write(p []byte) (n int, err error) {
+	if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes {
+		// no overflow
+		copy(pw.buf[pw.bufferedBytes:], p)
+		pw.bufferedBytes += len(p)
+		return len(p), nil
+	}
+	// complete the slack page in the buffer if unaligned
+	slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes)
+	if slack != pw.pageBytes {
+		partial := slack > len(p)
+		if partial {
+			// not enough data to complete the slack page
+			slack = len(p)
+		}
+		// special case: writing to slack page in buffer
+		copy(pw.buf[pw.bufferedBytes:], p[:slack])
+		pw.bufferedBytes += slack
+		n = slack
+		p = p[slack:]
+		if partial {
+			// avoid forcing an unaligned flush
+			return n, nil
+		}
+	}
+	// buffer contents are now page-aligned; clear out
+	if err = pw.Flush(); err != nil {
+		return n, err
+	}
+	// directly write all complete pages without copying
+	if len(p) > pw.pageBytes {
+		pages := len(p) / pw.pageBytes
+		c, werr := pw.w.Write(p[:pages*pw.pageBytes])
+		n += c
+		if werr != nil {
+			return n, werr
+		}
+		p = p[pages*pw.pageBytes:]
+	}
+	// write remaining tail to buffer
+	c, werr := pw.Write(p)
+	n += c
+	return n, werr
+}
+
+func (pw *PageWriter) Flush() error {
+	if pw.bufferedBytes == 0 {
+		return nil
+	}
+	_, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
+	pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
+	pw.bufferedBytes = 0
+	return err
+}

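The new PageWriter above buffers writes so that the wrapped io.Writer only sees page-aligned chunks until an explicit Flush. A small, hypothetical usage sketch; the file name and page size are illustrative.

package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/pkg/ioutil"
)

func main() {
	f, err := os.Create("wal.tmp")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// 4096-byte pages, starting at offset 0 in the file.
	pw := ioutil.NewPageWriter(f, 4096, 0)

	// Writes are buffered; once the internal watermark is exceeded, only
	// whole pages are pushed to the underlying file.
	if _, err := pw.Write(make([]byte, 10000)); err != nil {
		log.Fatal(err)
	}
	// Force out whatever is still buffered, aligned or not.
	if err := pw.Flush(); err != nil {
		log.Fatal(err)
	}
}
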
+ 1 - 1
vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go

@@ -18,7 +18,7 @@ package pbutil
 import "github.com/coreos/pkg/capnslog"
 
 var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "flags")
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil")
 )
 
 type Marshaler interface {

+ 6 - 0
vendor/github.com/coreos/etcd/raft/doc.go

@@ -257,6 +257,12 @@ stale log entries:
 	If candidate receives majority of votes of denials, it reverts back to
 	follower.
 
+	'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+	protocol. When Config.PreVote is true, a pre-election is carried out first
+	(using the same rules as a regular election), and no node increases its term
+	number unless the pre-election indicates that the campaigining node would win.
+	This minimizes disruption when a partitioned node rejoins the cluster.
+
 	'MsgSnap' requests to install a snapshot message. When a node has just
 	become a leader or the leader receives 'MsgProp' message, it calls
 	'bcastAppend' method, which then calls 'sendAppend' method to each

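The documentation added above describes the optional PreVote phase, which is switched on through the raft Config. A minimal sketch of enabling it when starting a single-member node; all other settings here are illustrative defaults, not taken from the commit.

package main

import (
	"github.com/coreos/etcd/raft"
)

func main() {
	storage := raft.NewMemoryStorage()
	cfg := &raft.Config{
		ID:              0x01,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
		// Run the two-phase election described above: campaign with
		// MsgPreVote first, and only bump the term if that pre-election
		// would be won.
		PreVote: true,
	}
	n := raft.StartNode(cfg, []raft.Peer{{ID: 0x01}})
	defer n.Stop()
}
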
+ 12 - 18
vendor/github.com/coreos/etcd/raft/node.go

@@ -60,11 +60,11 @@ type Ready struct {
 	// HardState will be equal to empty state if there is no update.
 	pb.HardState
 
-	// ReadState can be used for node to serve linearizable read requests locally
+	// ReadStates can be used for node to serve linearizable read requests locally
 	// when its applied index is greater than the index in ReadState.
 	// Note that the readState will be returned when raft receives msgReadIndex.
 	// The returned is only valid for the request that requested to read.
-	ReadState
+	ReadStates []ReadState
 
 	// Entries specifies entries to be saved to stable storage BEFORE
 	// Messages are sent.
@@ -102,7 +102,7 @@ func IsEmptySnap(sp pb.Snapshot) bool {
 func (rd Ready) containsUpdates() bool {
 	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
 		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
-		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || rd.Index != None
+		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
 }
 
 // Node represents a node in a raft cluster.
@@ -151,11 +151,6 @@ type Node interface {
 	// Read state has a read index. Once the application advances further than the read
 	// index, any linearizable read requests issued before the read request can be
 	// processed safely. The read state will have the same rctx attached.
-	//
-	// Note: the current implementation depends on the leader lease. If the clock drift is unbounded,
-	// leader might keep the lease longer than it should (clock can move backward/pause without any bound).
-	// ReadIndex is not safe in that case.
-	// TODO: add clock drift bound into raft configuration.
 	ReadIndex(ctx context.Context, rctx []byte) error
 
 	// Status returns the current status of the raft state machine.
@@ -370,8 +365,7 @@ func (n *node) run(r *raft) {
 			}
 
 			r.msgs = nil
-			r.readState.Index = None
-			r.readState.RequestCtx = nil
+			r.readStates = nil
 			advancec = n.advancec
 		case <-advancec:
 			if prevHardSt.Commit != 0 {
@@ -468,8 +462,12 @@ func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
 
 func (n *node) Status() Status {
 	c := make(chan Status)
-	n.status <- c
-	return <-c
+	select {
+	case n.status <- c:
+		return <-c
+	case <-n.done:
+		return Status{}
+	}
 }
 
 func (n *node) ReportUnreachable(id uint64) {
@@ -516,12 +514,8 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
 	if r.raftLog.unstable.snapshot != nil {
 		rd.Snapshot = *r.raftLog.unstable.snapshot
 	}
-	if r.readState.Index != None {
-		c := make([]byte, len(r.readState.RequestCtx))
-		copy(c, r.readState.RequestCtx)
-
-		rd.Index = r.readState.Index
-		rd.RequestCtx = c
+	if len(r.readStates) != 0 {
+		rd.ReadStates = r.readStates
 	}
 	return rd
 }

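With the change above, a Ready can carry several ReadStates instead of one inlined ReadState. A hedged sketch of how an application might consume them; the helper name and its parameters are hypothetical, only the ReadStates, Index, and RequestCtx fields come from the diff.

package main

import (
	"github.com/coreos/etcd/raft"
)

// handleReadStates is a sketch (not from the commit) of consuming the new
// Ready.ReadStates slice: once the applied index has passed rs.Index, the
// read identified by rs.RequestCtx can be served locally and linearizably.
func handleReadStates(rd raft.Ready, appliedIndex uint64, serve func(requestCtx []byte)) {
	for _, rs := range rd.ReadStates {
		if appliedIndex >= rs.Index {
			serve(rs.RequestCtx)
		}
	}
}

func main() {}
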
+ 46 - 11
vendor/github.com/coreos/etcd/raft/progress.go

@@ -64,12 +64,17 @@ type Progress struct {
 	RecentActive bool
 	RecentActive bool
 
 
 	// inflights is a sliding window for the inflight messages.
 	// inflights is a sliding window for the inflight messages.
+	// Each inflight message contains one or more log entries.
+	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
+	// Thus inflight effectively limits both the number of inflight messages
+	// and the bandwidth each Progress can use.
 	// When inflights is full, no more message should be sent.
 	// When inflights is full, no more message should be sent.
 	// When a leader sends out a message, the index of the last
 	// When a leader sends out a message, the index of the last
 	// entry should be added to inflights. The index MUST be added
 	// entry should be added to inflights. The index MUST be added
 	// into inflights in order.
 	// into inflights in order.
 	// When a leader receives a reply, the previous inflights should
 	// When a leader receives a reply, the previous inflights should
-	// be freed by calling inflights.freeTo.
+	// be freed by calling inflights.freeTo with the index of the last
+	// received entry.
 	ins *inflights
 	ins *inflights
 }
 }
 
 
@@ -150,8 +155,11 @@ func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
 func (pr *Progress) pause()  { pr.Paused = true }
 func (pr *Progress) pause()  { pr.Paused = true }
 func (pr *Progress) resume() { pr.Paused = false }
 func (pr *Progress) resume() { pr.Paused = false }
 
 
-// isPaused returns whether progress stops sending message.
-func (pr *Progress) isPaused() bool {
+// IsPaused returns whether sending log entries to this node has been
+// paused. A node may be paused because it has rejected recent
+// MsgApps, is currently waiting for a snapshot, or has reached the
+// MaxInflightMsgs limit.
+func (pr *Progress) IsPaused() bool {
 	switch pr.State {
 	switch pr.State {
 	case ProgressStateProbe:
 	case ProgressStateProbe:
 		return pr.Paused
 		return pr.Paused
@@ -173,7 +181,7 @@ func (pr *Progress) needSnapshotAbort() bool {
 }
 }
 
 
 func (pr *Progress) String() string {
 func (pr *Progress) String() string {
-	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.isPaused(), pr.PendingSnapshot)
+	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
 }
 }
 
 
 type inflights struct {
 type inflights struct {
@@ -183,14 +191,16 @@ type inflights struct {
 	count int
 	count int
 
 
 	// the size of the buffer
 	// the size of the buffer
-	size   int
+	size int
+
+	// buffer contains the index of the last entry
+	// inside one message.
 	buffer []uint64
 	buffer []uint64
 }
 }
 
 
 func newInflights(size int) *inflights {
 func newInflights(size int) *inflights {
 	return &inflights{
 	return &inflights{
-		size:   size,
-		buffer: make([]uint64, size),
+		size: size,
 	}
 	}
 }
 }
 
 
@@ -200,13 +210,32 @@ func (in *inflights) add(inflight uint64) {
 		panic("cannot add into a full inflights")
 	}
 	next := in.start + in.count
-	if next >= in.size {
-		next -= in.size
+	size := in.size
+	if next >= size {
+		next -= size
+	}
+	if next >= len(in.buffer) {
+		in.growBuf()
 	}
 	in.buffer[next] = inflight
 	in.count++
 }
 
+// grow the inflight buffer by doubling up to inflights.size. We grow on demand
+// instead of preallocating to inflights.size to handle systems which have
+// thousands of Raft groups per process.
+func (in *inflights) growBuf() {
+	newSize := len(in.buffer) * 2
+	if newSize == 0 {
+		newSize = 1
+	} else if newSize > in.size {
+		newSize = in.size
+	}
+	newBuffer := make([]uint64, newSize)
+	copy(newBuffer, in.buffer)
+	in.buffer = newBuffer
+}
+
 // freeTo frees the inflights smaller or equal to the given `to` flight.
 func (in *inflights) freeTo(to uint64) {
 	if in.count == 0 || to < in.buffer[in.start] {
@@ -221,13 +250,19 @@ func (in *inflights) freeTo(to uint64) {
 		}
 
 		// increase index and maybe rotate
-		if idx++; idx >= in.size {
-			idx -= in.size
+		size := in.size
+		if idx++; idx >= size {
+			idx -= size
 		}
 	}
 	// free i inflights and set new start index
 	in.count -= i
 	in.start = idx
+	if in.count == 0 {
+		// inflights is empty, reset the start index so that we don't grow the
+		// buffer unnecessarily.
+		in.start = 0
+	}
 }
 
 func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
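
To make the sliding-window comments above concrete, here is a small, self-contained toy with the same add/freeTo shape. It is not the vendored type: it preallocates its ring instead of growing on demand as growBuf does, but the pause/resume behaviour it demonstrates is the same.

	package main

	import "fmt"

	// window holds the index of the last entry in each in-flight message,
	// in send order. add fills it; freeTo drains it once a reply
	// acknowledges entries up to a given index.
	type window struct {
		start, count, size int
		buf                []uint64
	}

	func newWindow(size int) *window {
		return &window{size: size, buf: make([]uint64, size)}
	}

	func (w *window) full() bool { return w.count == w.size }

	func (w *window) add(last uint64) {
		if w.full() {
			panic("cannot add into a full window")
		}
		w.buf[(w.start+w.count)%w.size] = last
		w.count++
	}

	func (w *window) freeTo(to uint64) {
		for w.count > 0 && w.buf[w.start] <= to {
			w.start = (w.start + 1) % w.size
			w.count--
		}
		if w.count == 0 {
			w.start = 0 // empty: reuse the buffer from the front
		}
	}

	func main() {
		w := newWindow(4)
		for _, last := range []uint64{10, 20, 30, 40} {
			w.add(last) // one in-flight MsgApp ending at each index
		}
		fmt.Println(w.full()) // true: the leader must stop sending to this peer
		w.freeTo(20)          // the peer acknowledged entries up to index 20
		fmt.Println(w.full()) // false: two slots are free again
	}
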

+ 294 - 115
vendor/github.com/coreos/etcd/raft/raft.go

@@ -22,6 +22,8 @@ import (
 	"math/rand"
 	"math/rand"
 	"sort"
 	"sort"
 	"strings"
 	"strings"
+	"sync"
+	"time"
 
 
 	pb "github.com/coreos/etcd/raft/raftpb"
 	pb "github.com/coreos/etcd/raft/raftpb"
 )
 )
@@ -35,16 +37,55 @@ const (
 	StateFollower StateType = iota
 	StateFollower StateType = iota
 	StateCandidate
 	StateCandidate
 	StateLeader
 	StateLeader
+	StatePreCandidate
+	numStates
+)
+
+type ReadOnlyOption int
+
+const (
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	ReadOnlySafe ReadOnlyOption = iota
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	ReadOnlyLeaseBased
 )
 )
 
 
 // Possible values for CampaignType
 // Possible values for CampaignType
 const (
 const (
-	// campaignElection represents the type of normal election
+	// campaignPreElection represents the first phase of a normal election when
+	// Config.PreVote is true.
+	campaignPreElection CampaignType = "CampaignPreElection"
+	// campaignElection represents a normal (time-based) election (the second phase
+	// of the election when Config.PreVote is true).
 	campaignElection CampaignType = "CampaignElection"
 	campaignElection CampaignType = "CampaignElection"
 	// campaignTransfer represents the type of leader transfer
 	// campaignTransfer represents the type of leader transfer
 	campaignTransfer CampaignType = "CampaignTransfer"
 	campaignTransfer CampaignType = "CampaignTransfer"
 )
 )
 
 
+// lockedRand is a small wrapper around rand.Rand to provide
+// synchronization. Only the methods needed by the code are exposed
+// (e.g. Intn).
+type lockedRand struct {
+	mu   sync.Mutex
+	rand *rand.Rand
+}
+
+func (r *lockedRand) Intn(n int) int {
+	r.mu.Lock()
+	v := r.rand.Intn(n)
+	r.mu.Unlock()
+	return v
+}
+
+var globalRand = &lockedRand{
+	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+}
+
 // CampaignType represents the type of campaigning
 // CampaignType represents the type of campaigning
 // the reason we use the type of string instead of uint64
 // the reason we use the type of string instead of uint64
 // is because it's simpler to compare and fill in raft entries
 // is because it's simpler to compare and fill in raft entries
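
The globalRand wrapper above is the usual mutex-guarded rand pattern, introduced because one source is now shared by every raft instance in the process. A small self-contained sketch of the same idea (not the vendored type):

	package main

	import (
		"fmt"
		"math/rand"
		"sync"
		"time"
	)

	// safeRand guards a math/rand source so that many raft instances can
	// draw election jitter from it concurrently.
	type safeRand struct {
		mu sync.Mutex
		r  *rand.Rand
	}

	func (s *safeRand) Intn(n int) int {
		s.mu.Lock()
		defer s.mu.Unlock()
		return s.r.Intn(n)
	}

	func main() {
		src := &safeRand{r: rand.New(rand.NewSource(time.Now().UnixNano()))}
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// electionTimeout plus jitter, as resetRandomizedElectionTimeout does
				fmt.Println(10 + src.Intn(10))
			}()
		}
		wg.Wait()
	}
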
@@ -57,6 +98,7 @@ var stmap = [...]string{
 	"StateFollower",
 	"StateFollower",
 	"StateCandidate",
 	"StateCandidate",
 	"StateLeader",
 	"StateLeader",
+	"StatePreCandidate",
 }
 }
 
 
 func (st StateType) String() string {
 func (st StateType) String() string {
@@ -114,6 +156,23 @@ type Config struct {
 	// steps down when quorum is not active for an electionTimeout.
 	// steps down when quorum is not active for an electionTimeout.
 	CheckQuorum bool
 	CheckQuorum bool
 
 
+	// PreVote enables the Pre-Vote algorithm described in raft thesis section
+	// 9.6. This prevents disruption when a node that has been partitioned away
+	// rejoins the cluster.
+	PreVote bool
+
+	// ReadOnlyOption specifies how the read only request is processed.
+	//
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	//
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	ReadOnlyOption ReadOnlyOption
+
 	// Logger is the logger used for raft log. For multinode which can host
 	// Logger is the logger used for raft log. For multinode which can host
 	// multiple raft group, each raft group can have its own logger
 	// multiple raft group, each raft group can have its own logger
 	Logger Logger
 	Logger Logger
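
For reference, the new knobs are driven from the public Config. A minimal sketch of starting a single-voter node with PreVote and ReadOnlySafe enabled; IDs and tick values here are illustrative, not prescribed:

	package main

	import "github.com/coreos/etcd/raft"

	func main() {
		storage := raft.NewMemoryStorage()
		c := &raft.Config{
			ID:              0x01,
			ElectionTick:    10, // ticks of Node.Tick before an election may start
			HeartbeatTick:   1,
			Storage:         storage,
			MaxSizePerMsg:   1024 * 1024,
			MaxInflightMsgs: 256,
			CheckQuorum:     true,
			PreVote:         true,              // two-phase elections, per the comment above
			ReadOnlyOption:  raft.ReadOnlySafe, // quorum-confirmed read index
		}
		n := raft.StartNode(c, []raft.Peer{{ID: 0x01}})
		defer n.Stop()
		// The application then drives n.Tick(), n.Ready(), and n.Advance()
		// in its own loop, as described in raft/doc.go.
	}
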
@@ -147,23 +206,13 @@ func (c *Config) validate() error {
 	return nil
 	return nil
 }
 }
 
 
-// ReadState provides state for read only query.
-// It's caller's responsibility to send MsgReadIndex first before getting
-// this state from ready, It's also caller's duty to differentiate if this
-// state is what it requests through RequestCtx, eg. given a unique id as
-// RequestCtx
-type ReadState struct {
-	Index      uint64
-	RequestCtx []byte
-}
-
 type raft struct {
 type raft struct {
 	id uint64
 	id uint64
 
 
 	Term uint64
 	Term uint64
 	Vote uint64
 	Vote uint64
 
 
-	readState ReadState
+	readStates []ReadState
 
 
 	// the log
 	// the log
 	raftLog *raftLog
 	raftLog *raftLog
@@ -186,6 +235,8 @@ type raft struct {
 	// New configuration is ignored if there exists unapplied configuration.
 	// New configuration is ignored if there exists unapplied configuration.
 	pendingConf bool
 	pendingConf bool
 
 
+	readOnly *readOnly
+
 	// number of ticks since it reached last electionTimeout when it is leader
 	// number of ticks since it reached last electionTimeout when it is leader
 	// or candidate.
 	// or candidate.
 	// number of ticks since it reached last electionTimeout or received a
 	// number of ticks since it reached last electionTimeout or received a
@@ -197,6 +248,7 @@ type raft struct {
 	heartbeatElapsed int
 	heartbeatElapsed int
 
 
 	checkQuorum bool
 	checkQuorum bool
+	preVote     bool
 
 
 	heartbeatTimeout int
 	heartbeatTimeout int
 	electionTimeout  int
 	electionTimeout  int
@@ -205,7 +257,6 @@ type raft struct {
 	// when raft changes its state to follower or candidate.
 	// when raft changes its state to follower or candidate.
 	randomizedElectionTimeout int
 	randomizedElectionTimeout int
 
 
-	rand *rand.Rand
 	tick func()
 	tick func()
 	step stepFunc
 	step stepFunc
 
 
@@ -234,7 +285,6 @@ func newRaft(c *Config) *raft {
 	r := &raft{
 	r := &raft{
 		id:               c.ID,
 		id:               c.ID,
 		lead:             None,
 		lead:             None,
-		readState:        ReadState{Index: None, RequestCtx: nil},
 		raftLog:          raftlog,
 		raftLog:          raftlog,
 		maxMsgSize:       c.MaxSizePerMsg,
 		maxMsgSize:       c.MaxSizePerMsg,
 		maxInflight:      c.MaxInflightMsgs,
 		maxInflight:      c.MaxInflightMsgs,
@@ -243,8 +293,9 @@ func newRaft(c *Config) *raft {
 		heartbeatTimeout: c.HeartbeatTick,
 		heartbeatTimeout: c.HeartbeatTick,
 		logger:           c.Logger,
 		logger:           c.Logger,
 		checkQuorum:      c.CheckQuorum,
 		checkQuorum:      c.CheckQuorum,
+		preVote:          c.PreVote,
+		readOnly:         newReadOnly(c.ReadOnlyOption),
 	}
 	}
-	r.rand = rand.New(rand.NewSource(int64(c.ID)))
 	for _, p := range peers {
 	for _, p := range peers {
 		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
 		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
 	}
 	}
@@ -292,11 +343,23 @@ func (r *raft) nodes() []uint64 {
 // send persists state to stable storage and then sends to its mailbox.
 // send persists state to stable storage and then sends to its mailbox.
 func (r *raft) send(m pb.Message) {
 func (r *raft) send(m pb.Message) {
 	m.From = r.id
 	m.From = r.id
-	// do not attach term to MsgProp
-	// proposals are a way to forward to the leader and
-	// should be treated as local message.
-	if m.Type != pb.MsgProp {
-		m.Term = r.Term
+	if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+		if m.Term == 0 {
+			// PreVote RPCs are sent at a term other than our actual term, so the code
+			// that sends these messages is responsible for setting the term.
+			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
+		}
+	} else {
+		if m.Term != 0 {
+			panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
+		}
+		// do not attach term to MsgProp, MsgReadIndex
+		// proposals are a way to forward to the leader and
+		// should be treated as local message.
+		// MsgReadIndex is also forwarded to leader.
+		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
+			m.Term = r.Term
+		}
 	}
 	}
 	r.msgs = append(r.msgs, m)
 	r.msgs = append(r.msgs, m)
 }
 }
@@ -304,7 +367,7 @@ func (r *raft) send(m pb.Message) {
 // sendAppend sends RPC, with entries to the given peer.
 // sendAppend sends RPC, with entries to the given peer.
 func (r *raft) sendAppend(to uint64) {
 func (r *raft) sendAppend(to uint64) {
 	pr := r.prs[to]
 	pr := r.prs[to]
-	if pr.isPaused() {
+	if pr.IsPaused() {
 		return
 		return
 	}
 	}
 	m := pb.Message{}
 	m := pb.Message{}
@@ -361,7 +424,7 @@ func (r *raft) sendAppend(to uint64) {
 }
 }
 
 
 // sendHeartbeat sends an empty MsgApp
 // sendHeartbeat sends an empty MsgApp
-func (r *raft) sendHeartbeat(to uint64) {
+func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
 	// Attach the commit as min(to.matched, r.committed).
 	// Attach the commit as min(to.matched, r.committed).
 	// When the leader sends out heartbeat message,
 	// When the leader sends out heartbeat message,
 	// the receiver(follower) might not be matched with the leader
 	// the receiver(follower) might not be matched with the leader
@@ -370,10 +433,12 @@ func (r *raft) sendHeartbeat(to uint64) {
 	// an unmatched index.
 	// an unmatched index.
 	commit := min(r.prs[to].Match, r.raftLog.committed)
 	commit := min(r.prs[to].Match, r.raftLog.committed)
 	m := pb.Message{
 	m := pb.Message{
-		To:     to,
-		Type:   pb.MsgHeartbeat,
-		Commit: commit,
+		To:      to,
+		Type:    pb.MsgHeartbeat,
+		Commit:  commit,
+		Context: ctx,
 	}
 	}
+
 	r.send(m)
 	r.send(m)
 }
 }
 
 
@@ -390,12 +455,20 @@ func (r *raft) bcastAppend() {
 
 
 // bcastHeartbeat sends RPC, without entries to all the peers.
 // bcastHeartbeat sends RPC, without entries to all the peers.
 func (r *raft) bcastHeartbeat() {
 func (r *raft) bcastHeartbeat() {
+	lastCtx := r.readOnly.lastPendingRequestCtx()
+	if len(lastCtx) == 0 {
+		r.bcastHeartbeatWithCtx(nil)
+	} else {
+		r.bcastHeartbeatWithCtx([]byte(lastCtx))
+	}
+}
+
+func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
 	for id := range r.prs {
 	for id := range r.prs {
 		if id == r.id {
 		if id == r.id {
 			continue
 			continue
 		}
 		}
-		r.sendHeartbeat(id)
-		r.prs[id].resume()
+		r.sendHeartbeat(id, ctx)
 	}
 	}
 }
 }
 
 
@@ -434,6 +507,7 @@ func (r *raft) reset(term uint64) {
 		}
 		}
 	}
 	}
 	r.pendingConf = false
 	r.pendingConf = false
+	r.readOnly = newReadOnly(r.readOnly.option)
 }
 }
 
 
 func (r *raft) appendEntry(es ...pb.Entry) {
 func (r *raft) appendEntry(es ...pb.Entry) {
@@ -506,6 +580,20 @@ func (r *raft) becomeCandidate() {
 	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
 	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
 }
 }
 
 
+func (r *raft) becomePreCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> pre-candidate]")
+	}
+	// Becoming a pre-candidate changes our step functions and state,
+	// but doesn't change anything else. In particular it does not increase
+	// r.Term or change r.Vote.
+	r.step = stepCandidate
+	r.tick = r.tickElection
+	r.state = StatePreCandidate
+	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
+}
+
 func (r *raft) becomeLeader() {
 func (r *raft) becomeLeader() {
 	// TODO(xiangli) remove the panic when the raft implementation is stable
 	// TODO(xiangli) remove the panic when the raft implementation is stable
 	if r.state == StateFollower {
 	if r.state == StateFollower {
@@ -534,31 +622,48 @@ func (r *raft) becomeLeader() {
 }
 }
 
 
 func (r *raft) campaign(t CampaignType) {
 func (r *raft) campaign(t CampaignType) {
-	r.becomeCandidate()
-	if r.quorum() == r.poll(r.id, true) {
-		r.becomeLeader()
+	var term uint64
+	var voteMsg pb.MessageType
+	if t == campaignPreElection {
+		r.becomePreCandidate()
+		voteMsg = pb.MsgPreVote
+		// PreVote RPCs are sent for the next term before we've incremented r.Term.
+		term = r.Term + 1
+	} else {
+		r.becomeCandidate()
+		voteMsg = pb.MsgVote
+		term = r.Term
+	}
+	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
+		// We won the election after voting for ourselves (which must mean that
+		// this is a single-node cluster). Advance to the next state.
+		if t == campaignPreElection {
+			r.campaign(campaignElection)
+		} else {
+			r.becomeLeader()
+		}
 		return
 		return
 	}
 	}
 	for id := range r.prs {
 	for id := range r.prs {
 		if id == r.id {
 		if id == r.id {
 			continue
 			continue
 		}
 		}
-		r.logger.Infof("%x [logterm: %d, index: %d] sent vote request to %x at term %d",
-			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), id, r.Term)
+		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
+			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
 
 
 		var ctx []byte
 		var ctx []byte
 		if t == campaignTransfer {
 		if t == campaignTransfer {
 			ctx = []byte(t)
 			ctx = []byte(t)
 		}
 		}
-		r.send(pb.Message{To: id, Type: pb.MsgVote, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
+		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
 	}
 	}
 }
 }
 
 
-func (r *raft) poll(id uint64, v bool) (granted int) {
+func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
 	if v {
 	if v {
-		r.logger.Infof("%x received vote from %x at term %d", r.id, id, r.Term)
+		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
 	} else {
 	} else {
-		r.logger.Infof("%x received vote rejection from %x at term %d", r.id, id, r.Term)
+		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
 	}
 	}
 	if _, ok := r.votes[id]; !ok {
 	if _, ok := r.votes[id]; !ok {
 		r.votes[id] = v
 		r.votes[id] = v
@@ -572,56 +677,54 @@ func (r *raft) poll(id uint64, v bool) (granted int) {
 }
 }
 
 
 func (r *raft) Step(m pb.Message) error {
 func (r *raft) Step(m pb.Message) error {
-	if m.Type == pb.MsgHup {
-		if r.state != StateLeader {
-			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
-			if err != nil {
-				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
-			}
-			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
-				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
-				return nil
-			}
-
-			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
-			r.campaign(campaignElection)
-		} else {
-			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
-		}
-		return nil
-	}
-
+	// Handle the message term, which may result in our stepping down to a follower.
 	switch {
 	switch {
 	case m.Term == 0:
 	case m.Term == 0:
 		// local message
 		// local message
 	case m.Term > r.Term:
 	case m.Term > r.Term:
 		lead := m.From
 		lead := m.From
-		if m.Type == pb.MsgVote {
+		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
 			force := bytes.Equal(m.Context, []byte(campaignTransfer))
 			force := bytes.Equal(m.Context, []byte(campaignTransfer))
-			inLease := r.checkQuorum && r.state != StateCandidate && r.electionElapsed < r.electionTimeout
+			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
 			if !force && inLease {
 			if !force && inLease {
 				// If a server receives a RequestVote request within the minimum election timeout
 				// If a server receives a RequestVote request within the minimum election timeout
 				// of hearing from a current leader, it does not update its term or grant its vote
 				// of hearing from a current leader, it does not update its term or grant its vote
-				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored vote from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
-					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
+				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
+					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
 				return nil
 				return nil
 			}
 			}
 			lead = None
 			lead = None
 		}
 		}
-		r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
-			r.id, r.Term, m.Type, m.From, m.Term)
-		r.becomeFollower(m.Term, lead)
+		switch {
+		case m.Type == pb.MsgPreVote:
+			// Never change our term in response to a PreVote
+		case m.Type == pb.MsgPreVoteResp && !m.Reject:
+			// We send pre-vote requests with a term in our future. If the
+			// pre-vote is granted, we will increment our term when we get a
+			// quorum. If it is not, the term comes from the node that
+			// rejected our vote so we should become a follower at the new
+			// term.
+		default:
+			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+			r.becomeFollower(m.Term, lead)
+		}
+
 	case m.Term < r.Term:
 	case m.Term < r.Term:
 		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
 		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
-			// We have received messages from a leader at a lower term. It is possible that these messages were
-			// simply delayed in the network, but this could also mean that this node has advanced its term number
-			// during a network partition, and it is now unable to either win an election or to rejoin the majority
-			// on the old term. If checkQuorum is false, this will be handled by incrementing term numbers in response
-			// to MsgVote with a higher term, but if checkQuorum is true we may not advance the term on MsgVote and
-			// must generate other messages to advance the term. The net result of these two features is to minimize
-			// the disruption caused by nodes that have been removed from the cluster's configuration: a removed node
-			// will send MsgVotes which will be ignored, but it will not receive MsgApp or MsgHeartbeat, so it will not
-			// create disruptive term increases
+			// We have received messages from a leader at a lower term. It is possible
+			// that these messages were simply delayed in the network, but this could
+			// also mean that this node has advanced its term number during a network
+			// partition, and it is now unable to either win an election or to rejoin
+			// the majority on the old term. If checkQuorum is false, this will be
+			// handled by incrementing term numbers in response to MsgVote with a
+			// higher term, but if checkQuorum is true we may not advance the term on
+			// MsgVote and must generate other messages to advance the term. The net
+			// result of these two features is to minimize the disruption caused by
+			// nodes that have been removed from the cluster's configuration: a
+			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
+			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
+			// disruptive term increases
 			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
 			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
 		} else {
 		} else {
 			// ignore other cases
 			// ignore other cases
@@ -630,7 +733,50 @@ func (r *raft) Step(m pb.Message) error {
 		}
 		}
 		return nil
 		return nil
 	}
 	}
-	r.step(r, m)
+
+	switch m.Type {
+	case pb.MsgHup:
+		if r.state != StateLeader {
+			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
+			if err != nil {
+				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
+			}
+			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
+				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
+				return nil
+			}
+
+			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+			if r.preVote {
+				r.campaign(campaignPreElection)
+			} else {
+				r.campaign(campaignElection)
+			}
+		} else {
+			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+		}
+
+	case pb.MsgVote, pb.MsgPreVote:
+		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
+		// always equal r.Term.
+		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type)})
+			if m.Type == pb.MsgVote {
+				// Only record real votes.
+				r.electionElapsed = 0
+				r.Vote = m.From
+			}
+		} else {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type), Reject: true})
+		}
+
+	default:
+		r.step(r, m)
+	}
 	return nil
 	return nil
 }
 }
 
 
@@ -666,6 +812,7 @@ func stepLeader(r *raft, m pb.Message) {
 		for i, e := range m.Entries {
 		for i, e := range m.Entries {
 			if e.Type == pb.EntryConfChange {
 			if e.Type == pb.EntryConfChange {
 				if r.pendingConf {
 				if r.pendingConf {
+					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
 					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
 					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
 				}
 				}
 				r.pendingConf = true
 				r.pendingConf = true
@@ -674,22 +821,30 @@ func stepLeader(r *raft, m pb.Message) {
 		r.appendEntry(m.Entries...)
 		r.appendEntry(m.Entries...)
 		r.bcastAppend()
 		r.bcastAppend()
 		return
 		return
-	case pb.MsgVote:
-		r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
-			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
-		r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
-		return
 	case pb.MsgReadIndex:
 	case pb.MsgReadIndex:
-		ri := None
-		if r.checkQuorum {
-			ri = r.raftLog.committed
-		}
-		if m.From == None || m.From == r.id { // from local member
-			r.readState.Index = ri
-			r.readState.RequestCtx = m.Entries[0].Data
+		if r.quorum() > 1 {
+			// thinking: use an internally defined context instead of the user-given context.
+			// We can express this in terms of the term and index instead of a user-supplied value.
+			// This would allow multiple reads to piggyback on the same message.
+			switch r.readOnly.option {
+			case ReadOnlySafe:
+				r.readOnly.addRequest(r.raftLog.committed, m)
+				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+			case ReadOnlyLeaseBased:
+				var ri uint64
+				if r.checkQuorum {
+					ri = r.raftLog.committed
+				}
+				if m.From == None || m.From == r.id { // from local member
+					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+				} else {
+					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
+				}
+			}
 		} else {
 		} else {
-			r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
+			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
 		}
 		}
+
 		return
 		return
 	}
 	}
 
 
@@ -714,7 +869,7 @@ func stepLeader(r *raft, m pb.Message) {
 				r.sendAppend(m.From)
 				r.sendAppend(m.From)
 			}
 			}
 		} else {
 		} else {
-			oldPaused := pr.isPaused()
+			oldPaused := pr.IsPaused()
 			if pr.maybeUpdate(m.Index) {
 			if pr.maybeUpdate(m.Index) {
 				switch {
 				switch {
 				case pr.State == ProgressStateProbe:
 				case pr.State == ProgressStateProbe:
@@ -742,6 +897,7 @@ func stepLeader(r *raft, m pb.Message) {
 		}
 		}
 	case pb.MsgHeartbeatResp:
 	case pb.MsgHeartbeatResp:
 		pr.RecentActive = true
 		pr.RecentActive = true
+		pr.resume()
 
 
 		// free one slot for the full inflights window to allow progress.
 		// free one slot for the full inflights window to allow progress.
 		if pr.State == ProgressStateReplicate && pr.ins.full() {
 		if pr.State == ProgressStateReplicate && pr.ins.full() {
@@ -750,6 +906,25 @@ func stepLeader(r *raft, m pb.Message) {
 		if pr.Match < r.raftLog.lastIndex() {
 		if pr.Match < r.raftLog.lastIndex() {
 			r.sendAppend(m.From)
 			r.sendAppend(m.From)
 		}
 		}
+
+		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+			return
+		}
+
+		ackCount := r.readOnly.recvAck(m)
+		if ackCount < r.quorum() {
+			return
+		}
+
+		rss := r.readOnly.advance(m)
+		for _, rs := range rss {
+			req := rs.req
+			if req.From == None || req.From == r.id { // from local member
+				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
+			} else {
+				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
+			}
+		}
 	case pb.MsgSnapStatus:
 	case pb.MsgSnapStatus:
 		if pr.State != ProgressStateSnapshot {
 		if pr.State != ProgressStateSnapshot {
 			return
 			return
@@ -803,7 +978,18 @@ func stepLeader(r *raft, m pb.Message) {
 	}
 	}
 }
 }
 
 
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
 func stepCandidate(r *raft, m pb.Message) {
 func stepCandidate(r *raft, m pb.Message) {
+	// Only handle vote responses corresponding to our candidacy (while in
+	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+	// our pre-candidate state).
+	var myVoteRespType pb.MessageType
+	if r.state == StatePreCandidate {
+		myVoteRespType = pb.MsgPreVoteResp
+	} else {
+		myVoteRespType = pb.MsgVoteResp
+	}
 	switch m.Type {
 	switch m.Type {
 	case pb.MsgProp:
 	case pb.MsgProp:
 		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
 		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
@@ -817,17 +1003,17 @@ func stepCandidate(r *raft, m pb.Message) {
 	case pb.MsgSnap:
 	case pb.MsgSnap:
 		r.becomeFollower(m.Term, m.From)
 		r.becomeFollower(m.Term, m.From)
 		r.handleSnapshot(m)
 		r.handleSnapshot(m)
-	case pb.MsgVote:
-		r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
-			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
-		r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
-	case pb.MsgVoteResp:
-		gr := r.poll(m.From, !m.Reject)
-		r.logger.Infof("%x [quorum:%d] has received %d votes and %d vote rejections", r.id, r.quorum(), gr, len(r.votes)-gr)
+	case myVoteRespType:
+		gr := r.poll(m.From, m.Type, !m.Reject)
+		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
 		switch r.quorum() {
 		switch r.quorum() {
 		case gr:
 		case gr:
-			r.becomeLeader()
-			r.bcastAppend()
+			if r.state == StatePreCandidate {
+				r.campaign(campaignElection)
+			} else {
+				r.becomeLeader()
+				r.bcastAppend()
+			}
 		case len(r.votes) - gr:
 		case len(r.votes) - gr:
 			r.becomeFollower(r.Term, None)
 			r.becomeFollower(r.Term, None)
 		}
 		}
@@ -857,18 +1043,6 @@ func stepFollower(r *raft, m pb.Message) {
 		r.electionElapsed = 0
 		r.electionElapsed = 0
 		r.lead = m.From
 		r.lead = m.From
 		r.handleSnapshot(m)
 		r.handleSnapshot(m)
-	case pb.MsgVote:
-		if (r.Vote == None || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
-			r.electionElapsed = 0
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] voted for %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
-			r.Vote = m.From
-			r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp})
-		} else {
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
-			r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
-		}
 	case pb.MsgTransferLeader:
 	case pb.MsgTransferLeader:
 		if r.lead == None {
 		if r.lead == None {
 			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
 			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
@@ -877,8 +1051,15 @@ func stepFollower(r *raft, m pb.Message) {
 		m.To = r.lead
 		m.To = r.lead
 		r.send(m)
 		r.send(m)
 	case pb.MsgTimeoutNow:
 	case pb.MsgTimeoutNow:
-		r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
-		r.campaign(campaignTransfer)
+		if r.promotable() {
+			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
+			// Leadership transfers never use pre-vote even if r.preVote is true; we
+			// know we are not recovering from a partition so there is no need for the
+			// extra round trip.
+			r.campaign(campaignTransfer)
+		} else {
+			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
+		}
 	case pb.MsgReadIndex:
 	case pb.MsgReadIndex:
 		if r.lead == None {
 		if r.lead == None {
 			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
 			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
@@ -891,9 +1072,7 @@ func stepFollower(r *raft, m pb.Message) {
 			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
 			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
 			return
 			return
 		}
 		}
-
-		r.readState.Index = m.Index
-		r.readState.RequestCtx = m.Entries[0].Data
+		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
 	}
 	}
 }
 }
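
On the application side, the readStates plumbing above surfaces through Node.ReadIndex and Ready.ReadStates. A hedged sketch of that flow; waitApplied and serveRead are hypothetical application hooks, not part of the raft package:

	package readexample

	import (
		"bytes"
		"context"

		"github.com/coreos/etcd/raft"
	)

	// requestRead asks the node for a read index. rctx must uniquely
	// identify the request and is echoed back in ReadState.RequestCtx.
	func requestRead(ctx context.Context, n raft.Node, rctx []byte) error {
		return n.ReadIndex(ctx, rctx)
	}

	// handleReadStates runs inside the application's Ready loop: once the
	// local applied index reaches rs.Index, a read served from local state
	// is linearizable.
	func handleReadStates(rd raft.Ready, rctx []byte, waitApplied func(idx uint64), serveRead func()) {
		for _, rs := range rd.ReadStates {
			if bytes.Equal(rs.RequestCtx, rctx) {
				waitApplied(rs.Index)
				serveRead()
			}
		}
	}
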
 
 
@@ -914,7 +1093,7 @@ func (r *raft) handleAppendEntries(m pb.Message) {
 
 
 func (r *raft) handleHeartbeat(m pb.Message) {
 func (r *raft) handleHeartbeat(m pb.Message) {
 	r.raftLog.commitTo(m.Commit)
 	r.raftLog.commitTo(m.Commit)
-	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp})
+	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
 }
 }
 
 
 func (r *raft) handleSnapshot(m pb.Message) {
 func (r *raft) handleSnapshot(m pb.Message) {
@@ -967,6 +1146,7 @@ func (r *raft) promotable() bool {
 }
 }
 
 
 func (r *raft) addNode(id uint64) {
 func (r *raft) addNode(id uint64) {
+	r.pendingConf = false
 	if _, ok := r.prs[id]; ok {
 	if _, ok := r.prs[id]; ok {
 		// Ignore any redundant addNode calls (which can happen because the
 		// Ignore any redundant addNode calls (which can happen because the
 		// initial bootstrapping entries are applied twice).
 		// initial bootstrapping entries are applied twice).
@@ -974,7 +1154,6 @@ func (r *raft) addNode(id uint64) {
 	}
 	}
 
 
 	r.setProgress(id, 0, r.raftLog.lastIndex()+1)
 	r.setProgress(id, 0, r.raftLog.lastIndex()+1)
-	r.pendingConf = false
 }
 }
 
 
 func (r *raft) removeNode(id uint64) {
 func (r *raft) removeNode(id uint64) {
@@ -1024,7 +1203,7 @@ func (r *raft) pastElectionTimeout() bool {
 }
 }
 
 
 func (r *raft) resetRandomizedElectionTimeout() {
 func (r *raft) resetRandomizedElectionTimeout() {
-	r.randomizedElectionTimeout = r.electionTimeout + r.rand.Intn(r.electionTimeout)
+	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
 }
 }
 
 
 // checkQuorumActive returns true if the quorum is active from
 // checkQuorumActive returns true if the quorum is active from

File diff suppressed because it is too large
+ 204 - 196
vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go


+ 2 - 0
vendor/github.com/coreos/etcd/raft/raftpb/raft.proto

@@ -50,6 +50,8 @@ enum MessageType {
 	MsgTimeoutNow      = 14;
 	MsgReadIndex       = 15;
 	MsgReadIndexResp   = 16;
+	MsgPreVote         = 17;
+	MsgPreVoteResp     = 18;
 }
 
 message Message {

+ 26 - 0
vendor/github.com/coreos/etcd/raft/rawnode.go

@@ -66,6 +66,9 @@ func (rn *RawNode) commitReady(rd Ready) {
 	if !IsEmptySnap(rd.Snapshot) {
 		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
 	}
+	if len(rd.ReadStates) != 0 {
+		rn.raft.readStates = nil
+	}
 }
 
 // NewRawNode returns a new RawNode given configuration and a list of raft peers.
@@ -120,6 +123,18 @@ func (rn *RawNode) Tick() {
 	rn.raft.tick()
 }
 
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+func (rn *RawNode) TickQuiesced() {
+	rn.raft.electionElapsed++
+}
+
 // Campaign causes this RawNode to transition to candidate state.
 func (rn *RawNode) Campaign() error {
 	return rn.raft.Step(pb.Message{
@@ -205,6 +220,9 @@ func (rn *RawNode) HasReady() bool {
 	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
 		return true
 	}
+	if len(r.readStates) != 0 {
+		return true
+	}
 	return false
 }
 
@@ -236,3 +254,11 @@ func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
 func (rn *RawNode) TransferLeader(transferee uint64) {
 	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
 }
+
+// ReadIndex requests a read state. The read state will be set in ready.
+// Read State has a read index. Once the application advances further than the read
+// index, any linearizable read requests issued before the read request can be
+// processed safely. The read state will have the same rctx attached.
+func (rn *RawNode) ReadIndex(rctx []byte) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
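
A sketch of how an embedder of RawNode (for example a multi-raft system) might compose the new quiesce and read-index hooks. Persisting entries, sending messages, and applying committed entries are elided; the calls shown are those defined in the vendored rawnode.go above:

	package rawexample

	import "github.com/coreos/etcd/raft"

	// tickGroup advances one raft group's clock. Quiesced groups only bump
	// the logical clock; active groups run the full Tick, which can trigger
	// heartbeats and elections.
	func tickGroup(rn *raft.RawNode, quiesced bool) {
		if quiesced {
			rn.TickQuiesced()
		} else {
			rn.Tick()
		}
	}

	// step asks for a read index, then drains a Ready if there is one.
	func step(rn *raft.RawNode, rctx []byte) {
		rn.ReadIndex(rctx)
		if !rn.HasReady() {
			return
		}
		rd := rn.Ready()
		for _, rs := range rd.ReadStates {
			_ = rs.Index // wait for apply to reach rs.Index, then serve the read
		}
		rn.Advance(rd)
	}
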

+ 118 - 0
vendor/github.com/coreos/etcd/raft/read_only.go

@@ -0,0 +1,118 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// ReadState provides state for read only query.
+// It's caller's responsibility to call ReadIndex first before getting
+// this state from ready, It's also caller's duty to differentiate if this
+// state is what it requests through RequestCtx, eg. given a unique id as
+// RequestCtx
+type ReadState struct {
+	Index      uint64
+	RequestCtx []byte
+}
+
+type readIndexStatus struct {
+	req   pb.Message
+	index uint64
+	acks  map[uint64]struct{}
+}
+
+type readOnly struct {
+	option           ReadOnlyOption
+	pendingReadIndex map[string]*readIndexStatus
+	readIndexQueue   []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+	return &readOnly{
+		option:           option,
+		pendingReadIndex: make(map[string]*readIndexStatus),
+	}
+}
+
+// addRequest adds a read only request into the readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+	ctx := string(m.Entries[0].Data)
+	if _, ok := ro.pendingReadIndex[ctx]; ok {
+		return
+	}
+	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
+	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat that attached with the read only request
+// context.
+func (ro *readOnly) recvAck(m pb.Message) int {
+	rs, ok := ro.pendingReadIndex[string(m.Context)]
+	if !ok {
+		return 0
+	}
+
+	rs.acks[m.From] = struct{}{}
+	// add one to include an ack from local node
+	return len(rs.acks) + 1
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+	var (
+		i     int
+		found bool
+	)
+
+	ctx := string(m.Context)
+	rss := []*readIndexStatus{}
+
+	for _, okctx := range ro.readIndexQueue {
+		i++
+		rs, ok := ro.pendingReadIndex[okctx]
+		if !ok {
+			panic("cannot find corresponding read state from pending map")
+		}
+		rss = append(rss, rs)
+		if okctx == ctx {
+			found = true
+			break
+		}
+	}
+
+	if found {
+		ro.readIndexQueue = ro.readIndexQueue[i:]
+		for _, rs := range rss {
+			delete(ro.pendingReadIndex, string(rs.req.Context))
+		}
+		return rss
+	}
+
+	return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read only
+// request in readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+	if len(ro.readIndexQueue) == 0 {
+		return ""
+	}
+	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
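
The bookkeeping in read_only.go can be pictured with a simplified, self-contained toy: pending read contexts are kept in FIFO order, heartbeat responses that echo a context count as acks, and reaching the target releases that context together with every older one. Unlike the real recvAck, this sketch counts acks rather than distinct peer IDs.

	package roexample

	type ackTracker struct {
		queue []string
		acks  map[string]int
	}

	func newAckTracker() *ackTracker { return &ackTracker{acks: map[string]int{}} }

	func (t *ackTracker) addRequest(ctx string) {
		if _, ok := t.acks[ctx]; ok {
			return
		}
		t.acks[ctx] = 1 // the leader itself counts as one ack
		t.queue = append(t.queue, ctx)
	}

	func (t *ackTracker) recvAck(ctx string) int {
		if _, ok := t.acks[ctx]; !ok {
			return 0
		}
		t.acks[ctx]++
		return t.acks[ctx]
	}

	// advance releases ctx and everything queued before it, mirroring
	// readOnly.advance.
	func (t *ackTracker) advance(ctx string) []string {
		for i, c := range t.queue {
			if c == ctx {
				done := t.queue[:i+1]
				t.queue = t.queue[i+1:]
				for _, d := range done {
					delete(t.acks, d)
				}
				return done
			}
		}
		return nil
	}
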

+ 2 - 0
vendor/github.com/coreos/etcd/raft/storage.go

@@ -98,6 +98,8 @@ func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
 
 
 // SetHardState saves the current HardState.
 func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+	ms.Lock()
+	defer ms.Unlock()
 	ms.hardState = st
 	return nil
 }
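
With SetHardState now taking the lock, it is safe to call from an application's persistence step while other goroutines read from the same MemoryStorage. A small sketch of that step, using only the public raft API:

	package storageexample

	import "github.com/coreos/etcd/raft"

	// persist mirrors what an application does with each Ready: save the
	// hard state and any new entries before sending messages.
	func persist(ms *raft.MemoryStorage, rd raft.Ready) error {
		if !raft.IsEmptyHardState(rd.HardState) {
			if err := ms.SetHardState(rd.HardState); err != nil {
				return err
			}
		}
		return ms.Append(rd.Entries)
	}
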

+ 13 - 1
vendor/github.com/coreos/etcd/raft/util.go

@@ -52,7 +52,19 @@ func IsLocalMsg(msgt pb.MessageType) bool {
 }
 
 func IsResponseMsg(msgt pb.MessageType) bool {
-	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable
+	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
+}
+
+// voteResponseType maps vote and prevote message types to their corresponding responses.
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
+	switch msgt {
+	case pb.MsgVote:
+		return pb.MsgVoteResp
+	case pb.MsgPreVote:
+		return pb.MsgPreVoteResp
+	default:
+		panic(fmt.Sprintf("not a vote message: %s", msgt))
+	}
 }
 
 // EntryFormatter can be implemented by the application to provide human-readable formatting

+ 50 - 46
vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go

@@ -19,9 +19,9 @@ import (
 	proto "github.com/golang/protobuf/proto"
 	proto "github.com/golang/protobuf/proto"
 
 
 	math "math"
 	math "math"
-)
 
 
-import io "io"
+	io "io"
+)
 
 
 // Reference imports to suppress errors if they are not otherwise used.
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = proto.Marshal
@@ -30,7 +30,9 @@ var _ = math.Inf
 
 
 // This is a compile-time assertion to ensure that this generated file
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 
 type Snapshot struct {
 type Snapshot struct {
 	Crc              uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
 	Crc              uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
@@ -46,61 +48,61 @@ func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int
 func init() {
 func init() {
 	proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
 	proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
 }
 }
-func (m *Snapshot) Marshal() (data []byte, err error) {
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
-	data[i] = 0x8
+	dAtA[i] = 0x8
 	i++
 	i++
-	i = encodeVarintSnap(data, i, uint64(m.Crc))
+	i = encodeVarintSnap(dAtA, i, uint64(m.Crc))
 	if m.Data != nil {
 	if m.Data != nil {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
 		i++
-		i = encodeVarintSnap(data, i, uint64(len(m.Data)))
-		i += copy(data[i:], m.Data)
+		i = encodeVarintSnap(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
 	}
 	}
 	if m.XXX_unrecognized != nil {
 	if m.XXX_unrecognized != nil {
-		i += copy(data[i:], m.XXX_unrecognized)
+		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func encodeFixed64Snap(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Snap(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 	return offset + 8
 }
 }
-func encodeFixed32Snap(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Snap(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 	return offset + 4
 }
 }
-func encodeVarintSnap(data []byte, offset int, v uint64) int {
+func encodeVarintSnap(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		v >>= 7
 		offset++
 		offset++
 	}
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 	return offset + 1
 }
 }
 func (m *Snapshot) Size() (n int) {
 func (m *Snapshot) Size() (n int) {
@@ -130,8 +132,8 @@ func sovSnap(x uint64) (n int) {
 func sozSnap(x uint64) (n int) {
 func sozSnap(x uint64) (n int) {
 	return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 	return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
 }
-func (m *Snapshot) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -143,7 +145,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -171,7 +173,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				m.Crc |= (uint32(b) & 0x7F) << shift
 				m.Crc |= (uint32(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -190,7 +192,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -204,14 +206,14 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
 			if m.Data == nil {
 			if m.Data == nil {
 				m.Data = []byte{}
 				m.Data = []byte{}
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipSnap(data[iNdEx:])
+			skippy, err := skipSnap(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -221,7 +223,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if (iNdEx + skippy) > l {
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 			iNdEx += skippy
 		}
 		}
 	}
 	}
@@ -231,8 +233,8 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func skipSnap(data []byte) (n int, err error) {
-	l := len(data)
+func skipSnap(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		var wire uint64
 		var wire uint64
@@ -243,7 +245,7 @@ func skipSnap(data []byte) (n int, err error) {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 				return 0, io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -261,7 +263,7 @@ func skipSnap(data []byte) (n int, err error) {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
 				iNdEx++
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 					break
 				}
 				}
 			}
 			}
@@ -278,7 +280,7 @@ func skipSnap(data []byte) (n int, err error) {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -301,7 +303,7 @@ func skipSnap(data []byte) (n int, err error) {
 					if iNdEx >= l {
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 						return 0, io.ErrUnexpectedEOF
 					}
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 					if b < 0x80 {
@@ -312,7 +314,7 @@ func skipSnap(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 				if innerWireType == 4 {
 					break
 					break
 				}
 				}
-				next, err := skipSnap(data[start:])
+				next, err := skipSnap(dAtA[start:])
 				if err != nil {
 				if err != nil {
 					return 0, err
 					return 0, err
 				}
 				}
@@ -336,6 +338,8 @@ var (
 	ErrIntOverflowSnap   = fmt.Errorf("proto: integer overflow")
 	ErrIntOverflowSnap   = fmt.Errorf("proto: integer overflow")
 )
 )
 
 
+func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) }
+
 var fileDescriptorSnap = []byte{
 var fileDescriptorSnap = []byte{
 	// 126 bytes of a gzipped FileDescriptorProto
 	// 126 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,

+ 1 - 1
vendor/github.com/coreos/etcd/wal/doc.go

@@ -25,7 +25,7 @@ to it with the Save method:
 	...
 	err := w.Save(s, ents)
 
-After saving an raft snapshot to disk, SaveSnapshot method should be called to
+After saving a raft snapshot to disk, SaveSnapshot method should be called to
 record it. So WAL can match with the saved snapshot when restarting.
 
 	err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
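
Putting the doc comment above into a compilable form, a hedged sketch of the write path; the path and values are illustrative, and Create fails if a WAL already exists at dir:

	package walexample

	import (
		"github.com/coreos/etcd/raft/raftpb"
		"github.com/coreos/etcd/wal"
		"github.com/coreos/etcd/wal/walpb"
	)

	func writeSome(dir string) error {
		w, err := wal.Create(dir, []byte("node-metadata")) // dir must not already hold a WAL
		if err != nil {
			return err
		}
		defer w.Close()

		st := raftpb.HardState{Term: 2, Vote: 1, Commit: 10}
		ents := []raftpb.Entry{{Term: 2, Index: 10, Data: []byte("payload")}}
		if err := w.Save(st, ents); err != nil {
			return err
		}
		// After the snapshot at index 10 is on disk, record it so the WAL
		// can be matched with it on restart.
		return w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
	}
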

+ 19 - 4
vendor/github.com/coreos/etcd/wal/encoder.go

@@ -15,28 +15,34 @@
 package wal
 package wal
 
 
 import (
 import (
-	"bufio"
 	"encoding/binary"
 	"encoding/binary"
 	"hash"
 	"hash"
 	"io"
 	"io"
+	"os"
 	"sync"
 	"sync"
 
 
 	"github.com/coreos/etcd/pkg/crc"
 	"github.com/coreos/etcd/pkg/crc"
+	"github.com/coreos/etcd/pkg/ioutil"
 	"github.com/coreos/etcd/wal/walpb"
 	"github.com/coreos/etcd/wal/walpb"
 )
 )
 
 
+// walPageBytes is the alignment for flushing records to the backing Writer.
+// It should be a multiple of the minimum sector size so that WAL can safely
+// distinguish between torn writes and ordinary data corruption.
+const walPageBytes = 8 * minSectorSize
+
 type encoder struct {
 type encoder struct {
 	mu sync.Mutex
 	mu sync.Mutex
-	bw *bufio.Writer
+	bw *ioutil.PageWriter
 
 
 	crc       hash.Hash32
 	crc       hash.Hash32
 	buf       []byte
 	buf       []byte
 	uint64buf []byte
 	uint64buf []byte
 }
 }
 
 
-func newEncoder(w io.Writer, prevCrc uint32) *encoder {
+func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder {
 	return &encoder{
 	return &encoder{
-		bw:  bufio.NewWriter(w),
+		bw:  ioutil.NewPageWriter(w, walPageBytes, pageOffset),
 		crc: crc.New(prevCrc, crcTable),
 		crc: crc.New(prevCrc, crcTable),
 		// 1MB buffer
 		// 1MB buffer
 		buf:       make([]byte, 1024*1024),
 		buf:       make([]byte, 1024*1024),
@@ -44,6 +50,15 @@ func newEncoder(w io.Writer, prevCrc uint32) *encoder {
 	}
 	}
 }
 }
 
 
+// newFileEncoder creates a new encoder with current file offset for the page writer.
+func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
+	offset, err := f.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return nil, err
+	}
+	return newEncoder(f, prevCrc, int(offset)), nil
+}
+
 func (e *encoder) encode(rec *walpb.Record) error {
 func (e *encoder) encode(rec *walpb.Record) error {
 	e.mu.Lock()
 	e.mu.Lock()
 	defer e.mu.Unlock()
 	defer e.mu.Unlock()
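
The encoder now buffers through pkg/ioutil's PageWriter (vendored above) so that flushes land on sector-aligned boundaries. A minimal sketch of the wrapper in isolation, assuming the Write and Flush methods defined in the vendored pagewriter.go and an arbitrary destination writer:

	package pwexample

	import (
		"bytes"

		"github.com/coreos/etcd/pkg/ioutil"
	)

	func demo() ([]byte, error) {
		var dst bytes.Buffer
		// pageOffset is the current offset in the destination; 0 for a new file.
		pw := ioutil.NewPageWriter(&dst, 4096, 0)
		if _, err := pw.Write(make([]byte, 100)); err != nil { // buffered, not yet in dst
			return nil, err
		}
		if err := pw.Flush(); err != nil { // force the partial page out
			return nil, err
		}
		return dst.Bytes(), nil
	}
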

+ 59 - 15
vendor/github.com/coreos/etcd/wal/wal.go

@@ -69,7 +69,11 @@ var (
 // A just opened WAL is in read mode, and ready for reading records.
 // A just opened WAL is in read mode, and ready for reading records.
 // The WAL will be ready for appending after reading out all the previous records.
 // The WAL will be ready for appending after reading out all the previous records.
 type WAL struct {
 type WAL struct {
-	dir      string           // the living directory of the underlay files
+	dir string // the living directory of the underlay files
+
+	// dirFile is a fd for the wal directory for syncing on Rename
+	dirFile *os.File
+
 	metadata []byte           // metadata recorded at the head of each WAL
 	metadata []byte           // metadata recorded at the head of each WAL
 	state    raftpb.HardState // hardstate recorded at the head of WAL
 	state    raftpb.HardState // hardstate recorded at the head of WAL
 
 
@@ -108,30 +112,49 @@ func Create(dirpath string, metadata []byte) (*WAL, error) {
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	if _, err := f.Seek(0, os.SEEK_END); err != nil {
+	if _, err = f.Seek(0, os.SEEK_END); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	if err := fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+	if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
 	w := &WAL{
 	w := &WAL{
 		dir:      dirpath,
 		dir:      dirpath,
 		metadata: metadata,
 		metadata: metadata,
-		encoder:  newEncoder(f, 0),
+	}
+	w.encoder, err = newFileEncoder(f.File, 0)
+	if err != nil {
+		return nil, err
 	}
 	}
 	w.locks = append(w.locks, f)
 	w.locks = append(w.locks, f)
-	if err := w.saveCrc(0); err != nil {
+	if err = w.saveCrc(0); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	if err := w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	if err := w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+	if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	return w.renameWal(tmpdirpath)
+	if w, err = w.renameWal(tmpdirpath); err != nil {
+		return nil, err
+	}
+
+	// directory was renamed; sync parent dir to persist rename
+	pdir, perr := fileutil.OpenDir(path.Dir(w.dir))
+	if perr != nil {
+		return nil, perr
+	}
+	if perr = fileutil.Fsync(pdir); perr != nil {
+		return nil, perr
+	}
+	if perr = pdir.Close(); perr != nil {
+		return nil, perr
+	}
+
+	return w, nil
 }
 }
 
 
 // Open opens the WAL at the given snap.
 // Open opens the WAL at the given snap.
@@ -141,7 +164,14 @@ func Create(dirpath string, metadata []byte) (*WAL, error) {
 // the given snap. The WAL cannot be appended to before reading out all of its
 // the given snap. The WAL cannot be appended to before reading out all of its
 // previous records.
 // previous records.
 func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
 func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
-	return openAtIndex(dirpath, snap, true)
+	w, err := openAtIndex(dirpath, snap, true)
+	if err != nil {
+		return nil, err
+	}
+	if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
+		return nil, err
+	}
+	return w, nil
 }
 }
 
 
 // OpenForRead only opens the wal files for read.
 // OpenForRead only opens the wal files for read.
@@ -316,7 +346,10 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.
 
 
 	if w.tail() != nil {
 	if w.tail() != nil {
 		// create encoder (chain crc with the decoder), enable appending
 		// create encoder (chain crc with the decoder), enable appending
-		w.encoder = newEncoder(w.tail(), w.decoder.lastCRC())
+		w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
+		if err != nil {
+			return
+		}
 	}
 	}
 	w.decoder = nil
 	w.decoder = nil
 
 
@@ -350,7 +383,10 @@ func (w *WAL) cut() error {
 	// update writer and save the previous crc
 	// update writer and save the previous crc
 	w.locks = append(w.locks, newTail)
 	w.locks = append(w.locks, newTail)
 	prevCrc := w.encoder.crc.Sum32()
 	prevCrc := w.encoder.crc.Sum32()
-	w.encoder = newEncoder(w.tail(), prevCrc)
+	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+	if err != nil {
+		return err
+	}
 	if err = w.saveCrc(prevCrc); err != nil {
 	if err = w.saveCrc(prevCrc); err != nil {
 		return err
 		return err
 	}
 	}
@@ -373,6 +409,10 @@ func (w *WAL) cut() error {
 	if err = os.Rename(newTail.Name(), fpath); err != nil {
 	if err = os.Rename(newTail.Name(), fpath); err != nil {
 		return err
 		return err
 	}
 	}
+	if err = fileutil.Fsync(w.dirFile); err != nil {
+		return err
+	}
+
 	newTail.Close()
 	newTail.Close()
 
 
 	if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
 	if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
@@ -385,7 +425,10 @@ func (w *WAL) cut() error {
 	w.locks[len(w.locks)-1] = newTail
 	w.locks[len(w.locks)-1] = newTail
 
 
 	prevCrc = w.encoder.crc.Sum32()
 	prevCrc = w.encoder.crc.Sum32()
-	w.encoder = newEncoder(w.tail(), prevCrc)
+	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+	if err != nil {
+		return err
+	}
 
 
 	plog.Infof("segmented wal file %v is created", fpath)
 	plog.Infof("segmented wal file %v is created", fpath)
 	return nil
 	return nil
@@ -475,7 +518,8 @@ func (w *WAL) Close() error {
 			plog.Errorf("failed to unlock during closing wal: %s", err)
 			plog.Errorf("failed to unlock during closing wal: %s", err)
 		}
 		}
 	}
 	}
-	return nil
+
+	return w.dirFile.Close()
 }
 }
 
 
 func (w *WAL) saveEntry(e *raftpb.Entry) error {
 func (w *WAL) saveEntry(e *raftpb.Entry) error {
@@ -531,15 +575,15 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	// TODO: add a test for this code path when refactoring the tests
 	return w.cut()
 	return w.cut()
 }
 }
 
 
 func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
 func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+	b := pbutil.MustMarshal(&e)
+
 	w.mu.Lock()
 	w.mu.Lock()
 	defer w.mu.Unlock()
 	defer w.mu.Unlock()
 
 
-	b := pbutil.MustMarshal(&e)
 	rec := &walpb.Record{Type: snapshotType, Data: b}
 	rec := &walpb.Record{Type: snapshotType, Data: b}
 	if err := w.encoder.encode(rec); err != nil {
 	if err := w.encoder.encode(rec); err != nil {
 		return err
 		return err
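
Create (which syncs the parent of the WAL directory after the rename above) and cut (which syncs w.dirFile after renaming a new segment into place) both follow the same durability rule: on POSIX a rename only mutates the parent directory, and that directory has to be fsynced separately or the new name may not survive a crash. A small sketch of the pattern, with a hypothetical renameDurable helper standing in for the fileutil.OpenDir and fileutil.Fsync calls in the diff:

package main

import (
	"log"
	"os"
	"path/filepath"
)

// renameDurable renames oldpath to newpath and then fsyncs the parent
// directory so the rename itself is persisted (POSIX semantics).
func renameDurable(oldpath, newpath string) error {
	if err := os.Rename(oldpath, newpath); err != nil {
		return err
	}
	// The rename only changed the parent directory's entries; sync that
	// directory so the new name survives a crash.
	dir, err := os.Open(filepath.Dir(newpath))
	if err != nil {
		return err
	}
	defer dir.Close()
	return dir.Sync()
}

func main() {
	dir, err := os.MkdirTemp("", "wal-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	tmp := filepath.Join(dir, "wal.tmp")
	if err := os.WriteFile(tmp, []byte("segment"), 0o600); err != nil {
		log.Fatal(err)
	}
	if err := renameDurable(tmp, filepath.Join(dir, "wal")); err != nil {
		log.Fatal(err)
	}
}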

+ 8 - 2
vendor/github.com/coreos/etcd/wal/wal_unix.go

@@ -16,7 +16,11 @@
 
 
 package wal
 package wal
 
 
-import "os"
+import (
+	"os"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
 
 
 func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
 func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
 	// On non-Windows platforms, hold the lock while renaming. Releasing
 	// On non-Windows platforms, hold the lock while renaming. Releasing
@@ -34,5 +38,7 @@ func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
 	}
 	}
 
 
 	w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
 	w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
-	return w, nil
+	df, err := fileutil.OpenDir(w.dir)
+	w.dirFile = df
+	return w, err
 }
 }

+ 70 - 66
vendor/github.com/coreos/etcd/wal/walpb/record.pb.go

@@ -20,9 +20,9 @@ import (
 	proto "github.com/golang/protobuf/proto"
 	proto "github.com/golang/protobuf/proto"
 
 
 	math "math"
 	math "math"
-)
 
 
-import io "io"
+	io "io"
+)
 
 
 // Reference imports to suppress errors if they are not otherwise used.
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = proto.Marshal
@@ -31,7 +31,9 @@ var _ = math.Inf
 
 
 // This is a compile-time assertion to ensure that this generated file
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 
 type Record struct {
 type Record struct {
 	Type             int64  `protobuf:"varint,1,opt,name=type" json:"type"`
 	Type             int64  `protobuf:"varint,1,opt,name=type" json:"type"`
@@ -60,91 +62,91 @@ func init() {
 	proto.RegisterType((*Record)(nil), "walpb.Record")
 	proto.RegisterType((*Record)(nil), "walpb.Record")
 	proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
 	proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
 }
 }
-func (m *Record) Marshal() (data []byte, err error) {
+func (m *Record) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *Record) MarshalTo(data []byte) (int, error) {
+func (m *Record) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
-	data[i] = 0x8
+	dAtA[i] = 0x8
 	i++
 	i++
-	i = encodeVarintRecord(data, i, uint64(m.Type))
-	data[i] = 0x10
+	i = encodeVarintRecord(dAtA, i, uint64(m.Type))
+	dAtA[i] = 0x10
 	i++
 	i++
-	i = encodeVarintRecord(data, i, uint64(m.Crc))
+	i = encodeVarintRecord(dAtA, i, uint64(m.Crc))
 	if m.Data != nil {
 	if m.Data != nil {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
 		i++
-		i = encodeVarintRecord(data, i, uint64(len(m.Data)))
-		i += copy(data[i:], m.Data)
+		i = encodeVarintRecord(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
 	}
 	}
 	if m.XXX_unrecognized != nil {
 	if m.XXX_unrecognized != nil {
-		i += copy(data[i:], m.XXX_unrecognized)
+		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func (m *Snapshot) Marshal() (data []byte, err error) {
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
-	data[i] = 0x8
+	dAtA[i] = 0x8
 	i++
 	i++
-	i = encodeVarintRecord(data, i, uint64(m.Index))
-	data[i] = 0x10
+	i = encodeVarintRecord(dAtA, i, uint64(m.Index))
+	dAtA[i] = 0x10
 	i++
 	i++
-	i = encodeVarintRecord(data, i, uint64(m.Term))
+	i = encodeVarintRecord(dAtA, i, uint64(m.Term))
 	if m.XXX_unrecognized != nil {
 	if m.XXX_unrecognized != nil {
-		i += copy(data[i:], m.XXX_unrecognized)
+		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func encodeFixed64Record(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Record(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 	return offset + 8
 }
 }
-func encodeFixed32Record(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Record(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 	return offset + 4
 }
 }
-func encodeVarintRecord(data []byte, offset int, v uint64) int {
+func encodeVarintRecord(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		v >>= 7
 		offset++
 		offset++
 	}
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 	return offset + 1
 }
 }
 func (m *Record) Size() (n int) {
 func (m *Record) Size() (n int) {
@@ -186,8 +188,8 @@ func sovRecord(x uint64) (n int) {
 func sozRecord(x uint64) (n int) {
 func sozRecord(x uint64) (n int) {
 	return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 	return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
 }
-func (m *Record) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Record) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -199,7 +201,7 @@ func (m *Record) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -227,7 +229,7 @@ func (m *Record) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				m.Type |= (int64(b) & 0x7F) << shift
 				m.Type |= (int64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -246,7 +248,7 @@ func (m *Record) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				m.Crc |= (uint32(b) & 0x7F) << shift
 				m.Crc |= (uint32(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -265,7 +267,7 @@ func (m *Record) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -279,14 +281,14 @@ func (m *Record) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
 			if m.Data == nil {
 			if m.Data == nil {
 				m.Data = []byte{}
 				m.Data = []byte{}
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipRecord(data[iNdEx:])
+			skippy, err := skipRecord(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -296,7 +298,7 @@ func (m *Record) Unmarshal(data []byte) error {
 			if (iNdEx + skippy) > l {
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 			iNdEx += skippy
 		}
 		}
 	}
 	}
@@ -306,8 +308,8 @@ func (m *Record) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func (m *Snapshot) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -319,7 +321,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -347,7 +349,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				m.Index |= (uint64(b) & 0x7F) << shift
 				m.Index |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -366,7 +368,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				m.Term |= (uint64(b) & 0x7F) << shift
 				m.Term |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -375,7 +377,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			}
 			}
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipRecord(data[iNdEx:])
+			skippy, err := skipRecord(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -385,7 +387,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if (iNdEx + skippy) > l {
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 			iNdEx += skippy
 		}
 		}
 	}
 	}
@@ -395,8 +397,8 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func skipRecord(data []byte) (n int, err error) {
-	l := len(data)
+func skipRecord(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		var wire uint64
 		var wire uint64
@@ -407,7 +409,7 @@ func skipRecord(data []byte) (n int, err error) {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 				return 0, io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -425,7 +427,7 @@ func skipRecord(data []byte) (n int, err error) {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
 				iNdEx++
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 					break
 				}
 				}
 			}
 			}
@@ -442,7 +444,7 @@ func skipRecord(data []byte) (n int, err error) {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -465,7 +467,7 @@ func skipRecord(data []byte) (n int, err error) {
 					if iNdEx >= l {
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 						return 0, io.ErrUnexpectedEOF
 					}
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 					if b < 0x80 {
@@ -476,7 +478,7 @@ func skipRecord(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 				if innerWireType == 4 {
 					break
 					break
 				}
 				}
-				next, err := skipRecord(data[start:])
+				next, err := skipRecord(dAtA[start:])
 				if err != nil {
 				if err != nil {
 					return 0, err
 					return 0, err
 				}
 				}
@@ -500,6 +502,8 @@ var (
 	ErrIntOverflowRecord   = fmt.Errorf("proto: integer overflow")
 	ErrIntOverflowRecord   = fmt.Errorf("proto: integer overflow")
 )
 )
 
 
+func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) }
+
 var fileDescriptorRecord = []byte{
 var fileDescriptorRecord = []byte{
 	// 186 bytes of a gzipped FileDescriptorProto
 	// 186 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
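
Beyond the import reshuffle and the ProtoPackageIsVersion2 assertion, the regenerated file mostly renames the marshal buffer from data to dAtA; the wire format is untouched. encodeVarintRecord is a standard protobuf base-128 varint writer, which a quick standalone check (not part of the vendored code) can compare against the standard library's uvarint encoder:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeVarint mirrors the generated encodeVarintRecord helper above.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 300, 1<<32 - 1} {
		buf := make([]byte, binary.MaxVarintLen64)
		n := encodeVarint(buf, 0, v)

		std := make([]byte, binary.MaxVarintLen64)
		m := binary.PutUvarint(std, v)

		fmt.Println(v, bytes.Equal(buf[:n], std[:m])) // true for every v
	}
}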

File diff suppressed because it is too large
+ 803 - 5
vendor/github.com/docker/containerd/api/grpc/types/api.pb.go


+ 1 - 7
vendor/github.com/docker/swarmkit/agent/session.go

@@ -9,7 +9,6 @@ import (
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/connectionbroker"
 	"github.com/docker/swarmkit/connectionbroker"
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/protobuf/ptypes"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
@@ -173,12 +172,7 @@ func (s *session) heartbeat(ctx context.Context) error {
 				return err
 				return err
 			}
 			}
 
 
-			period, err := ptypes.Duration(&resp.Period)
-			if err != nil {
-				return err
-			}
-
-			heartbeat.Reset(period)
+			heartbeat.Reset(resp.Period)
 		case <-s.closed:
 		case <-s.closed:
 			return errSessionClosed
 			return errSessionClosed
 		case <-ctx.Done():
 		case <-ctx.Done():
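
The conversion step above goes away because the regenerated HeartbeatResponse (see the dispatcher.proto change later in this diff) marks its period with (gogoproto.stdduration) = true, so the field arrives as a native time.Duration. A sketch using a hypothetical stand-in type, since the real message lives in github.com/docker/swarmkit/api:

package main

import (
	"fmt"
	"time"
)

// heartbeatResponse stands in for api.HeartbeatResponse: with stdduration the
// Period field is generated as a time.Duration instead of a Duration message.
type heartbeatResponse struct {
	Period time.Duration
}

func main() {
	heartbeat := time.NewTimer(time.Second)
	defer heartbeat.Stop()

	resp := heartbeatResponse{Period: 5 * time.Second}
	heartbeat.Reset(resp.Period) // no ptypes.Duration conversion that could fail
	fmt.Println("next heartbeat in", resp.Period)
}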

File diff suppressed because it is too large
+ 197 - 292
vendor/github.com/docker/swarmkit/api/ca.pb.go


File diff suppressed because it is too large
+ 649 - 1115
vendor/github.com/docker/swarmkit/api/control.pb.go


+ 44 - 0
vendor/github.com/docker/swarmkit/api/deepcopy/copy.go

@@ -0,0 +1,44 @@
+package deepcopy
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/gogo/protobuf/types"
+)
+
+// CopierFrom can be implemented if an object knows how to copy another into itself.
+type CopierFrom interface {
+	// Copy takes the fields from src and copies them into the target object.
+	//
+	// Calling this method with a nil receiver or a nil src may panic.
+	CopyFrom(src interface{})
+}
+
+// Copy copies src into dst. dst and src must have the same type.
+//
+// If the type has a copy function defined, it will be used.
+//
+// Default implementations for builtin types and well known protobuf types may
+// be provided.
+//
+// If the copy cannot be performed, this function will panic. Make sure to test
+// types that use this function.
+func Copy(dst, src interface{}) {
+	switch dst := dst.(type) {
+	case *types.Duration:
+		src := src.(*types.Duration)
+		*dst = *src
+	case *time.Duration:
+		src := src.(*time.Duration)
+		*dst = *src
+	case *types.Timestamp:
+		src := src.(*types.Timestamp)
+		*dst = *src
+	case CopierFrom:
+		dst.CopyFrom(src)
+	default:
+		panic(fmt.Sprintf("Copy for %T not implemented", dst))
+	}
+
+}
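
deepcopy.Copy dispatches on the destination type: the listed gogo well-known types are copied by value, anything implementing CopierFrom copies itself, and everything else panics so that missing cases surface in tests. A short usage sketch with a hypothetical spec type; the generated swarmkit messages (for example the health and resource ones later in this diff) get equivalent CopyFrom methods from the code generator:

package main

import (
	"fmt"
	"time"

	"github.com/docker/swarmkit/api/deepcopy"
)

// spec is a hypothetical type implementing deepcopy.CopierFrom.
type spec struct {
	Name    string
	Timeout time.Duration
	Labels  map[string]string
}

// CopyFrom copies value fields wholesale and duplicates the map element by
// element so the copy does not share state with the source.
func (s *spec) CopyFrom(src interface{}) {
	o := src.(*spec)
	*s = *o
	if o.Labels != nil {
		s.Labels = make(map[string]string, len(o.Labels))
		for k, v := range o.Labels {
			s.Labels[k] = v
		}
	}
}

func main() {
	a := &spec{Name: "web", Timeout: 5 * time.Second, Labels: map[string]string{"tier": "frontend"}}
	b := &spec{}
	deepcopy.Copy(b, a) // dispatches to b.CopyFrom(a) via the CopierFrom case
	b.Labels["tier"] = "backend"
	fmt.Println(a.Labels["tier"], b.Labels["tier"]) // frontend backend

	var d time.Duration
	deepcopy.Copy(&d, &a.Timeout) // *time.Duration is handled by a built-in case
	fmt.Println(d)                // 5s
}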

File diff suppressed because it is too large
+ 255 - 396
vendor/github.com/docker/swarmkit/api/dispatcher.pb.go


+ 2 - 2
vendor/github.com/docker/swarmkit/api/dispatcher.proto

@@ -6,7 +6,7 @@ import "types.proto";
 import "objects.proto";
 import "objects.proto";
 import "gogoproto/gogo.proto";
 import "gogoproto/gogo.proto";
 import "plugin/plugin.proto";
 import "plugin/plugin.proto";
-import "duration/duration.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "google/protobuf/duration.proto";
 
 
 // Dispatcher is the API provided by a manager group for agents to connect to. Agents
 // Dispatcher is the API provided by a manager group for agents to connect to. Agents
 // connect to this service to receive task assignments and report status.
 // connect to this service to receive task assignments and report status.
@@ -136,7 +136,7 @@ message HeartbeatRequest {
 message HeartbeatResponse {
 message HeartbeatResponse {
 	// Period is the duration to wait before sending the next heartbeat.
 	// Period is the duration to wait before sending the next heartbeat.
 	// Well-behaved agents should update this on every heartbeat round trip.
 	// Well-behaved agents should update this on every heartbeat round trip.
-	Duration period = 1 [(gogoproto.nullable) = false];
+	google.protobuf.Duration period = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
 }
 }
 
 
 message UpdateTaskStatusRequest {
 message UpdateTaskStatusRequest {

+ 0 - 3
vendor/github.com/docker/swarmkit/api/duration/gen.go

@@ -1,3 +0,0 @@
-//go:generate protoc -I.:../../vendor:../../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api/duration,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. duration.proto
-
-package duration

+ 1 - 1
vendor/github.com/docker/swarmkit/api/gen.go

@@ -1,3 +1,3 @@
 package api
 package api
 
 
-//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mtimestamp/timestamp.proto=github.com/docker/swarmkit/api/timestamp,Mduration/duration.proto=github.com/docker/swarmkit/api/duration,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto health.proto resource.proto logbroker.proto
+//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin,Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto health.proto resource.proto logbroker.proto

+ 74 - 119
vendor/github.com/docker/swarmkit/api/health.pb.go

@@ -10,12 +10,6 @@ import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/docker/swarmkit/protobuf/plugin"
 import _ "github.com/docker/swarmkit/protobuf/plugin"
 
 
-import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
-import reflect "reflect"
-
 import (
 import (
 	context "golang.org/x/net/context"
 	context "golang.org/x/net/context"
 	grpc "google.golang.org/grpc"
 	grpc "google.golang.org/grpc"
@@ -25,7 +19,10 @@ import raftselector "github.com/docker/swarmkit/manager/raftselector"
 import codes "google.golang.org/grpc/codes"
 import codes "google.golang.org/grpc/codes"
 import metadata "google.golang.org/grpc/metadata"
 import metadata "google.golang.org/grpc/metadata"
 import transport "google.golang.org/grpc/transport"
 import transport "google.golang.org/grpc/transport"
-import time "time"
+import rafttime "time"
+
+import strings "strings"
+import reflect "reflect"
 
 
 import io "io"
 import io "io"
 
 
@@ -106,71 +103,30 @@ func (m *HealthCheckRequest) Copy() *HealthCheckRequest {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
+	o := &HealthCheckRequest{}
+	o.CopyFrom(m)
+	return o
+}
 
 
-	o := &HealthCheckRequest{
-		Service: m.Service,
-	}
+func (m *HealthCheckRequest) CopyFrom(src interface{}) {
 
 
-	return o
+	o := src.(*HealthCheckRequest)
+	*m = *o
 }
 }
 
 
 func (m *HealthCheckResponse) Copy() *HealthCheckResponse {
 func (m *HealthCheckResponse) Copy() *HealthCheckResponse {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
-
-	o := &HealthCheckResponse{
-		Status: m.Status,
-	}
-
+	o := &HealthCheckResponse{}
+	o.CopyFrom(m)
 	return o
 	return o
 }
 }
 
 
-func (this *HealthCheckRequest) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&api.HealthCheckRequest{")
-	s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *HealthCheckResponse) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&api.HealthCheckResponse{")
-	s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func valueToGoStringHealth(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringHealth(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
+func (m *HealthCheckResponse) CopyFrom(src interface{}) {
+
+	o := src.(*HealthCheckResponse)
+	*m = *o
 }
 }
 
 
 // Reference imports to suppress errors if they are not otherwise used.
 // Reference imports to suppress errors if they are not otherwise used.
@@ -179,7 +135,7 @@ var _ grpc.ClientConn
 
 
 // This is a compile-time assertion to ensure that this generated file
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion3
+const _ = grpc.SupportPackageIsVersion4
 
 
 // Client API for Health service
 // Client API for Health service
 
 
@@ -242,81 +198,81 @@ var _Health_serviceDesc = grpc.ServiceDesc{
 		},
 		},
 	},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Streams:  []grpc.StreamDesc{},
-	Metadata: fileDescriptorHealth,
+	Metadata: "health.proto",
 }
 }
 
 
-func (m *HealthCheckRequest) Marshal() (data []byte, err error) {
+func (m *HealthCheckRequest) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *HealthCheckRequest) MarshalTo(data []byte) (int, error) {
+func (m *HealthCheckRequest) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if len(m.Service) > 0 {
 	if len(m.Service) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
 		i++
-		i = encodeVarintHealth(data, i, uint64(len(m.Service)))
-		i += copy(data[i:], m.Service)
+		i = encodeVarintHealth(dAtA, i, uint64(len(m.Service)))
+		i += copy(dAtA[i:], m.Service)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func (m *HealthCheckResponse) Marshal() (data []byte, err error) {
+func (m *HealthCheckResponse) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *HealthCheckResponse) MarshalTo(data []byte) (int, error) {
+func (m *HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if m.Status != 0 {
 	if m.Status != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
 		i++
-		i = encodeVarintHealth(data, i, uint64(m.Status))
+		i = encodeVarintHealth(dAtA, i, uint64(m.Status))
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func encodeFixed64Health(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Health(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 	return offset + 8
 }
 }
-func encodeFixed32Health(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Health(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 	return offset + 4
 }
 }
-func encodeVarintHealth(data []byte, offset int, v uint64) int {
+func encodeVarintHealth(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		v >>= 7
 		offset++
 		offset++
 	}
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 	return offset + 1
 }
 }
 
 
@@ -369,7 +325,7 @@ func (p *raftProxyHealthServer) runCtxMods(ctx context.Context, ctxMods []func(c
 	return ctx, nil
 	return ctx, nil
 }
 }
 func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
 func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
-	ticker := time.NewTicker(500 * time.Millisecond)
+	ticker := rafttime.NewTicker(500 * rafttime.Millisecond)
 	defer ticker.Stop()
 	defer ticker.Stop()
 	for {
 	for {
 		select {
 		select {
@@ -487,8 +443,8 @@ func valueToStringHealth(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 	return fmt.Sprintf("*%v", pv)
 }
 }
-func (m *HealthCheckRequest) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *HealthCheckRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -500,7 +456,7 @@ func (m *HealthCheckRequest) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -528,7 +484,7 @@ func (m *HealthCheckRequest) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -543,11 +499,11 @@ func (m *HealthCheckRequest) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.Service = string(data[iNdEx:postIndex])
+			m.Service = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipHealth(data[iNdEx:])
+			skippy, err := skipHealth(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -566,8 +522,8 @@ func (m *HealthCheckRequest) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func (m *HealthCheckResponse) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *HealthCheckResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -579,7 +535,7 @@ func (m *HealthCheckResponse) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -607,7 +563,7 @@ func (m *HealthCheckResponse) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift
 				m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -616,7 +572,7 @@ func (m *HealthCheckResponse) Unmarshal(data []byte) error {
 			}
 			}
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipHealth(data[iNdEx:])
+			skippy, err := skipHealth(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -635,8 +591,8 @@ func (m *HealthCheckResponse) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func skipHealth(data []byte) (n int, err error) {
-	l := len(data)
+func skipHealth(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		var wire uint64
 		var wire uint64
@@ -647,7 +603,7 @@ func skipHealth(data []byte) (n int, err error) {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 				return 0, io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -665,7 +621,7 @@ func skipHealth(data []byte) (n int, err error) {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
 				iNdEx++
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 					break
 				}
 				}
 			}
 			}
@@ -682,7 +638,7 @@ func skipHealth(data []byte) (n int, err error) {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -705,7 +661,7 @@ func skipHealth(data []byte) (n int, err error) {
 					if iNdEx >= l {
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 						return 0, io.ErrUnexpectedEOF
 					}
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 					if b < 0x80 {
@@ -716,7 +672,7 @@ func skipHealth(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 				if innerWireType == 4 {
 					break
 					break
 				}
 				}
-				next, err := skipHealth(data[start:])
+				next, err := skipHealth(dAtA[start:])
 				if err != nil {
 				if err != nil {
 					return 0, err
 					return 0, err
 				}
 				}
@@ -743,7 +699,7 @@ var (
 func init() { proto.RegisterFile("health.proto", fileDescriptorHealth) }
 func init() { proto.RegisterFile("health.proto", fileDescriptorHealth) }
 
 
 var fileDescriptorHealth = []byte{
 var fileDescriptorHealth = []byte{
-	// 291 bytes of a gzipped FileDescriptorProto
+	// 287 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
 	0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
 	0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
 	0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf,
 	0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf,
@@ -759,8 +715,7 @@ var fileDescriptorHealth = []byte{
 	0xc4, 0xc3, 0x04, 0x98, 0x8c, 0x2a, 0xb9, 0xd8, 0x20, 0x16, 0x09, 0xe5, 0x73, 0xb1, 0x82, 0x2d,
 	0xc4, 0xc3, 0x04, 0x98, 0x8c, 0x2a, 0xb9, 0xd8, 0x20, 0x16, 0x09, 0xe5, 0x73, 0xb1, 0x82, 0x2d,
 	0x13, 0x52, 0x23, 0xe8, 0x1a, 0xb0, 0xbf, 0xa5, 0xd4, 0x89, 0x74, 0xb5, 0x92, 0xe8, 0xa9, 0x75,
 	0x13, 0x52, 0x23, 0xe8, 0x1a, 0xb0, 0xbf, 0xa5, 0xd4, 0x89, 0x74, 0xb5, 0x92, 0xe8, 0xa9, 0x75,
 	0xef, 0x66, 0x30, 0xf1, 0x73, 0xf1, 0x82, 0x15, 0xea, 0xe6, 0x26, 0xe6, 0x25, 0xa6, 0xa7, 0x16,
 	0xef, 0x66, 0x30, 0xf1, 0x73, 0xf1, 0x82, 0x15, 0xea, 0xe6, 0x26, 0xe6, 0x25, 0xa6, 0xa7, 0x16,
-	0x39, 0xc9, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d,
-	0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6,
-	0x24, 0x36, 0x70, 0x90, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x14, 0x7c, 0x23, 0xc1,
-	0x01, 0x00, 0x00,
+	0x39, 0x49, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13,
+	0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0x89, 0x0d, 0x1c, 0xdc,
+	0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x59, 0xcd, 0x52, 0xee, 0xbd, 0x01, 0x00, 0x00,
 }
 }

File diff suppressed because it is too large
+ 256 - 358
vendor/github.com/docker/swarmkit/api/logbroker.pb.go


+ 5 - 3
vendor/github.com/docker/swarmkit/api/logbroker.proto

@@ -3,7 +3,7 @@ syntax = "proto3";
 package docker.swarmkit.v1;
 package docker.swarmkit.v1;
 
 
 import "gogoproto/gogo.proto";
 import "gogoproto/gogo.proto";
-import "timestamp/timestamp.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "google/protobuf/timestamp.proto";
 import "plugin/plugin.proto";
 import "plugin/plugin.proto";
 
 
 // LogStream defines the stream from which the log message came.
 // LogStream defines the stream from which the log message came.
@@ -44,7 +44,8 @@ message LogSubscriptionOptions {
 
 
 	// Since indicates that only log messages produced after this timestamp
 	// Since indicates that only log messages produced after this timestamp
 	// should be sent.
 	// should be sent.
-	Timestamp since = 4;
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp since = 4;
 }
 }
 
 
 // LogSelector will match logs from ANY of the defined parameters.
 // LogSelector will match logs from ANY of the defined parameters.
@@ -71,7 +72,8 @@ message LogMessage {
 	LogContext context = 1 [(gogoproto.nullable) = false];
 	LogContext context = 1 [(gogoproto.nullable) = false];
 
 
 	// Timestamp is the time at which the message was generated.
 	// Timestamp is the time at which the message was generated.
-	Timestamp timestamp = 2;
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp timestamp = 2;
 
 
 	// Stream identifies the stream of the log message, stdout or stderr.
 	// Stream identifies the stream of the log message, stdout or stderr.
 	LogStream stream = 3;
 	LogStream stream = 3;

File diff suppressed because it is too large
+ 316 - 465
vendor/github.com/docker/swarmkit/api/objects.pb.go


+ 4 - 3
vendor/github.com/docker/swarmkit/api/objects.proto

@@ -4,7 +4,7 @@ package docker.swarmkit.v1;
 
 
 import "types.proto";
 import "types.proto";
 import "specs.proto";
 import "specs.proto";
-import "timestamp/timestamp.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "google/protobuf/timestamp.proto";
 import "gogoproto/gogo.proto";
 import "gogoproto/gogo.proto";
 
 
 // This file contains definitions for all first-class objects in the cluster
 // This file contains definitions for all first-class objects in the cluster
@@ -17,8 +17,9 @@ message Meta {
 	Version version = 1 [(gogoproto.nullable) = false];
 	Version version = 1 [(gogoproto.nullable) = false];
 
 
 	// Object timestamps.
 	// Object timestamps.
-	Timestamp created_at = 2;
-	Timestamp updated_at = 3;
+	// Note: can't use stdtime because these fields are nullable.
+	google.protobuf.Timestamp created_at = 2;
+	google.protobuf.Timestamp updated_at = 3;
 }
 }
 
 
 // Node provides the internal node state as seen by the cluster.
 // Node provides the internal node state as seen by the cluster.
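
Because these Meta timestamps stay nullable, they are generated as *types.Timestamp pointers (github.com/gogo/protobuf/types) rather than time.Time, and callers convert explicitly. A small sketch assuming the TimestampProto and TimestampFromProto helpers from the gogo types package, with a hypothetical meta struct in place of the generated message:

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

// meta is a hypothetical stand-in for the generated Meta message.
type meta struct {
	CreatedAt *types.Timestamp
	UpdatedAt *types.Timestamp
}

func main() {
	now, err := types.TimestampProto(time.Now()) // time.Time -> *types.Timestamp
	if err != nil {
		panic(err)
	}
	m := meta{CreatedAt: now}

	// A nil UpdatedAt simply means "never updated", which is why the proto
	// comment says stdtime cannot be used for these fields.
	if m.UpdatedAt == nil {
		fmt.Println("not updated yet")
	}

	created, err := types.TimestampFromProto(m.CreatedAt) // back to time.Time
	if err != nil {
		panic(err)
	}
	fmt.Println("created at", created.Format(time.RFC3339))
}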

File diff suppressed because it is too large
+ 281 - 442
vendor/github.com/docker/swarmkit/api/raft.pb.go


+ 119 - 177
vendor/github.com/docker/swarmkit/api/resource.pb.go

@@ -10,11 +10,7 @@ import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/docker/swarmkit/protobuf/plugin"
 import _ "github.com/docker/swarmkit/protobuf/plugin"
 
 
-import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
-import reflect "reflect"
+import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
 
 
 import (
 import (
 	context "golang.org/x/net/context"
 	context "golang.org/x/net/context"
@@ -25,7 +21,10 @@ import raftselector "github.com/docker/swarmkit/manager/raftselector"
 import codes "google.golang.org/grpc/codes"
 import codes "google.golang.org/grpc/codes"
 import metadata "google.golang.org/grpc/metadata"
 import metadata "google.golang.org/grpc/metadata"
 import transport "google.golang.org/grpc/transport"
 import transport "google.golang.org/grpc/transport"
-import time "time"
+import rafttime "time"
+
+import strings "strings"
+import reflect "reflect"
 
 
 import io "io"
 import io "io"
 
 
@@ -105,117 +104,61 @@ func (m *AttachNetworkRequest) Copy() *AttachNetworkRequest {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
+	o := &AttachNetworkRequest{}
+	o.CopyFrom(m)
+	return o
+}
 
 
-	o := &AttachNetworkRequest{
-		Config:      m.Config.Copy(),
-		ContainerID: m.ContainerID,
-	}
+func (m *AttachNetworkRequest) CopyFrom(src interface{}) {
 
 
-	return o
+	o := src.(*AttachNetworkRequest)
+	*m = *o
+	if o.Config != nil {
+		m.Config = &NetworkAttachmentConfig{}
+		github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config)
+	}
 }
 }
 
 
 func (m *AttachNetworkResponse) Copy() *AttachNetworkResponse {
 func (m *AttachNetworkResponse) Copy() *AttachNetworkResponse {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
+	o := &AttachNetworkResponse{}
+	o.CopyFrom(m)
+	return o
+}
 
 
-	o := &AttachNetworkResponse{
-		AttachmentID: m.AttachmentID,
-	}
+func (m *AttachNetworkResponse) CopyFrom(src interface{}) {
 
 
-	return o
+	o := src.(*AttachNetworkResponse)
+	*m = *o
 }
 }
 
 
 func (m *DetachNetworkRequest) Copy() *DetachNetworkRequest {
 func (m *DetachNetworkRequest) Copy() *DetachNetworkRequest {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
+	o := &DetachNetworkRequest{}
+	o.CopyFrom(m)
+	return o
+}
 
 
-	o := &DetachNetworkRequest{
-		AttachmentID: m.AttachmentID,
-	}
+func (m *DetachNetworkRequest) CopyFrom(src interface{}) {
 
 
-	return o
+	o := src.(*DetachNetworkRequest)
+	*m = *o
 }
 }
 
 
 func (m *DetachNetworkResponse) Copy() *DetachNetworkResponse {
 func (m *DetachNetworkResponse) Copy() *DetachNetworkResponse {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
-
 	o := &DetachNetworkResponse{}
 	o := &DetachNetworkResponse{}
-
+	o.CopyFrom(m)
 	return o
 	return o
 }
 }
 
 
-func (this *AttachNetworkRequest) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&api.AttachNetworkRequest{")
-	if this.Config != nil {
-		s = append(s, "Config: "+fmt.Sprintf("%#v", this.Config)+",\n")
-	}
-	s = append(s, "ContainerID: "+fmt.Sprintf("%#v", this.ContainerID)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *AttachNetworkResponse) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&api.AttachNetworkResponse{")
-	s = append(s, "AttachmentID: "+fmt.Sprintf("%#v", this.AttachmentID)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *DetachNetworkRequest) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&api.DetachNetworkRequest{")
-	s = append(s, "AttachmentID: "+fmt.Sprintf("%#v", this.AttachmentID)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *DetachNetworkResponse) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 4)
-	s = append(s, "&api.DetachNetworkResponse{")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func valueToGoStringResource(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringResource(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
-}
+func (m *DetachNetworkResponse) CopyFrom(src interface{}) {}
 
 
 // Reference imports to suppress errors if they are not otherwise used.
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ context.Context
@@ -223,7 +166,7 @@ var _ grpc.ClientConn
 
 
 // This is a compile-time assertion to ensure that this generated file
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion3
+const _ = grpc.SupportPackageIsVersion4
 
 
 // Client API for ResourceAllocator service
 // Client API for ResourceAllocator service
 
 
@@ -319,102 +262,102 @@ var _ResourceAllocator_serviceDesc = grpc.ServiceDesc{
 		},
 		},
 	},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Streams:  []grpc.StreamDesc{},
-	Metadata: fileDescriptorResource,
+	Metadata: "resource.proto",
 }
 }
 
 
-func (m *AttachNetworkRequest) Marshal() (data []byte, err error) {
+func (m *AttachNetworkRequest) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *AttachNetworkRequest) MarshalTo(data []byte) (int, error) {
+func (m *AttachNetworkRequest) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if m.Config != nil {
 	if m.Config != nil {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
 		i++
-		i = encodeVarintResource(data, i, uint64(m.Config.Size()))
-		n1, err := m.Config.MarshalTo(data[i:])
+		i = encodeVarintResource(dAtA, i, uint64(m.Config.Size()))
+		n1, err := m.Config.MarshalTo(dAtA[i:])
 		if err != nil {
 		if err != nil {
 			return 0, err
 			return 0, err
 		}
 		}
 		i += n1
 		i += n1
 	}
 	}
 	if len(m.ContainerID) > 0 {
 	if len(m.ContainerID) > 0 {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
 		i++
-		i = encodeVarintResource(data, i, uint64(len(m.ContainerID)))
-		i += copy(data[i:], m.ContainerID)
+		i = encodeVarintResource(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func (m *AttachNetworkResponse) Marshal() (data []byte, err error) {
+func (m *AttachNetworkResponse) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *AttachNetworkResponse) MarshalTo(data []byte) (int, error) {
+func (m *AttachNetworkResponse) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if len(m.AttachmentID) > 0 {
 	if len(m.AttachmentID) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
 		i++
-		i = encodeVarintResource(data, i, uint64(len(m.AttachmentID)))
-		i += copy(data[i:], m.AttachmentID)
+		i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID)))
+		i += copy(dAtA[i:], m.AttachmentID)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func (m *DetachNetworkRequest) Marshal() (data []byte, err error) {
+func (m *DetachNetworkRequest) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *DetachNetworkRequest) MarshalTo(data []byte) (int, error) {
+func (m *DetachNetworkRequest) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if len(m.AttachmentID) > 0 {
 	if len(m.AttachmentID) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
 		i++
-		i = encodeVarintResource(data, i, uint64(len(m.AttachmentID)))
-		i += copy(data[i:], m.AttachmentID)
+		i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID)))
+		i += copy(dAtA[i:], m.AttachmentID)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func (m *DetachNetworkResponse) Marshal() (data []byte, err error) {
+func (m *DetachNetworkResponse) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *DetachNetworkResponse) MarshalTo(data []byte) (int, error) {
+func (m *DetachNetworkResponse) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
@@ -422,31 +365,31 @@ func (m *DetachNetworkResponse) MarshalTo(data []byte) (int, error) {
 	return i, nil
 	return i, nil
 }
 }
 
 
-func encodeFixed64Resource(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Resource(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 	return offset + 8
 }
 }
-func encodeFixed32Resource(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Resource(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 	return offset + 4
 }
 }
-func encodeVarintResource(data []byte, offset int, v uint64) int {
+func encodeVarintResource(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		v >>= 7
 		offset++
 		offset++
 	}
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 	return offset + 1
 }
 }
 
 
@@ -499,7 +442,7 @@ func (p *raftProxyResourceAllocatorServer) runCtxMods(ctx context.Context, ctxMo
 	return ctx, nil
 	return ctx, nil
 }
 }
 func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
 func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
-	ticker := time.NewTicker(500 * time.Millisecond)
+	ticker := rafttime.NewTicker(500 * rafttime.Millisecond)
 	defer ticker.Stop()
 	defer ticker.Stop()
 	for {
 	for {
 		select {
 		select {
@@ -693,8 +636,8 @@ func valueToStringResource(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 	return fmt.Sprintf("*%v", pv)
 }
 }
-func (m *AttachNetworkRequest) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *AttachNetworkRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -706,7 +649,7 @@ func (m *AttachNetworkRequest) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -734,7 +677,7 @@ func (m *AttachNetworkRequest) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -751,7 +694,7 @@ func (m *AttachNetworkRequest) Unmarshal(data []byte) error {
 			if m.Config == nil {
 			if m.Config == nil {
 				m.Config = &NetworkAttachmentConfig{}
 				m.Config = &NetworkAttachmentConfig{}
 			}
 			}
-			if err := m.Config.Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
@@ -767,7 +710,7 @@ func (m *AttachNetworkRequest) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -782,11 +725,11 @@ func (m *AttachNetworkRequest) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.ContainerID = string(data[iNdEx:postIndex])
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipResource(data[iNdEx:])
+			skippy, err := skipResource(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -805,8 +748,8 @@ func (m *AttachNetworkRequest) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func (m *AttachNetworkResponse) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *AttachNetworkResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -818,7 +761,7 @@ func (m *AttachNetworkResponse) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -846,7 +789,7 @@ func (m *AttachNetworkResponse) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -861,11 +804,11 @@ func (m *AttachNetworkResponse) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.AttachmentID = string(data[iNdEx:postIndex])
+			m.AttachmentID = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipResource(data[iNdEx:])
+			skippy, err := skipResource(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -884,8 +827,8 @@ func (m *AttachNetworkResponse) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func (m *DetachNetworkRequest) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *DetachNetworkRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -897,7 +840,7 @@ func (m *DetachNetworkRequest) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -925,7 +868,7 @@ func (m *DetachNetworkRequest) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -940,11 +883,11 @@ func (m *DetachNetworkRequest) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.AttachmentID = string(data[iNdEx:postIndex])
+			m.AttachmentID = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipResource(data[iNdEx:])
+			skippy, err := skipResource(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -963,8 +906,8 @@ func (m *DetachNetworkRequest) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func (m *DetachNetworkResponse) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *DetachNetworkResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -976,7 +919,7 @@ func (m *DetachNetworkResponse) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -994,7 +937,7 @@ func (m *DetachNetworkResponse) Unmarshal(data []byte) error {
 		switch fieldNum {
 		switch fieldNum {
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipResource(data[iNdEx:])
+			skippy, err := skipResource(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -1013,8 +956,8 @@ func (m *DetachNetworkResponse) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func skipResource(data []byte) (n int, err error) {
-	l := len(data)
+func skipResource(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		var wire uint64
 		var wire uint64
@@ -1025,7 +968,7 @@ func skipResource(data []byte) (n int, err error) {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 				return 0, io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -1043,7 +986,7 @@ func skipResource(data []byte) (n int, err error) {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
 				iNdEx++
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 					break
 				}
 				}
 			}
 			}
@@ -1060,7 +1003,7 @@ func skipResource(data []byte) (n int, err error) {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -1083,7 +1026,7 @@ func skipResource(data []byte) (n int, err error) {
 					if iNdEx >= l {
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 						return 0, io.ErrUnexpectedEOF
 					}
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 					if b < 0x80 {
@@ -1094,7 +1037,7 @@ func skipResource(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 				if innerWireType == 4 {
 					break
 					break
 				}
 				}
-				next, err := skipResource(data[start:])
+				next, err := skipResource(dAtA[start:])
 				if err != nil {
 				if err != nil {
 					return 0, err
 					return 0, err
 				}
 				}
@@ -1121,7 +1064,7 @@ var (
 func init() { proto.RegisterFile("resource.proto", fileDescriptorResource) }
 func init() { proto.RegisterFile("resource.proto", fileDescriptorResource) }
 
 
 var fileDescriptorResource = []byte{
 var fileDescriptorResource = []byte{
-	// 373 bytes of a gzipped FileDescriptorProto
+	// 368 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x4a, 0x2d, 0xce,
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x4a, 0x2d, 0xce,
 	0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce,
 	0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce,
 	0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0xe2,
 	0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0xe2,
@@ -1142,8 +1085,7 @@ var fileDescriptorResource = []byte{
 	0x93, 0x08, 0x95, 0x10, 0xcb, 0x95, 0x94, 0x4f, 0xad, 0x7b, 0x37, 0x83, 0x49, 0x96, 0x8b, 0x07,
 	0x93, 0x08, 0x95, 0x10, 0xcb, 0x95, 0x94, 0x4f, 0xad, 0x7b, 0x37, 0x83, 0x49, 0x96, 0x8b, 0x07,
 	0xac, 0x54, 0x17, 0x24, 0x97, 0x5a, 0xc4, 0xc5, 0x0b, 0xe1, 0xe5, 0x26, 0xe6, 0x25, 0xa6, 0xa7,
 	0xac, 0x54, 0x17, 0x24, 0x97, 0x5a, 0xc4, 0xc5, 0x0b, 0xe1, 0xe5, 0x26, 0xe6, 0x25, 0xa6, 0xa7,
 	0x42, 0xdc, 0x82, 0xe2, 0x76, 0xec, 0x6e, 0xc1, 0x16, 0x5a, 0xd8, 0xdd, 0x82, 0x35, 0x20, 0x88,
 	0x42, 0xdc, 0x82, 0xe2, 0x76, 0xec, 0x6e, 0xc1, 0x16, 0x5a, 0xd8, 0xdd, 0x82, 0x35, 0x20, 0x88,
-	0x72, 0x8b, 0x93, 0xcc, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7,
-	0xd8, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92,
-	0x63, 0x4c, 0x62, 0x03, 0x27, 0x4e, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xef, 0x94, 0x58,
-	0xde, 0xfa, 0x02, 0x00, 0x00,
+	0x72, 0x8b, 0x93, 0xc4, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63,
+	0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x93, 0xd8, 0xc0,
+	0x09, 0xd3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x48, 0x12, 0x41, 0xf6, 0x02, 0x00, 0x00,
 }
 }

+ 264 - 269
vendor/github.com/docker/swarmkit/api/snapshot.pb.go

@@ -10,10 +10,9 @@ import math "math"
 
 
 // skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 // skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 
 
+import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
+
 import strings "strings"
 import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
 import reflect "reflect"
 import reflect "reflect"
 
 
 import io "io"
 import io "io"
@@ -59,7 +58,7 @@ func (*StoreSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapsh
 // ClusterSnapshot stores cluster membership information in snapshots.
 // ClusterSnapshot stores cluster membership information in snapshots.
 type ClusterSnapshot struct {
 type ClusterSnapshot struct {
 	Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"`
 	Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"`
-	Removed []uint64      `protobuf:"varint,2,rep,name=removed" json:"removed,omitempty"`
+	Removed []uint64      `protobuf:"varint,2,rep,packed,name=removed" json:"removed,omitempty"`
 }
 }
 
 
 func (m *ClusterSnapshot) Reset()                    { *m = ClusterSnapshot{} }
 func (m *ClusterSnapshot) Reset()                    { *m = ClusterSnapshot{} }
@@ -87,189 +86,131 @@ func (m *StoreSnapshot) Copy() *StoreSnapshot {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
-
 	o := &StoreSnapshot{}
 	o := &StoreSnapshot{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *StoreSnapshot) CopyFrom(src interface{}) {
 
 
-	if m.Nodes != nil {
-		o.Nodes = make([]*Node, 0, len(m.Nodes))
-		for _, v := range m.Nodes {
-			o.Nodes = append(o.Nodes, v.Copy())
+	o := src.(*StoreSnapshot)
+	*m = *o
+	if o.Nodes != nil {
+		m.Nodes = make([]*Node, len(o.Nodes))
+		for i := range m.Nodes {
+			m.Nodes[i] = &Node{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i])
 		}
 		}
 	}
 	}
 
 
-	if m.Services != nil {
-		o.Services = make([]*Service, 0, len(m.Services))
-		for _, v := range m.Services {
-			o.Services = append(o.Services, v.Copy())
+	if o.Services != nil {
+		m.Services = make([]*Service, len(o.Services))
+		for i := range m.Services {
+			m.Services[i] = &Service{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Services[i], o.Services[i])
 		}
 		}
 	}
 	}
 
 
-	if m.Networks != nil {
-		o.Networks = make([]*Network, 0, len(m.Networks))
-		for _, v := range m.Networks {
-			o.Networks = append(o.Networks, v.Copy())
+	if o.Networks != nil {
+		m.Networks = make([]*Network, len(o.Networks))
+		for i := range m.Networks {
+			m.Networks[i] = &Network{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i])
 		}
 		}
 	}
 	}
 
 
-	if m.Tasks != nil {
-		o.Tasks = make([]*Task, 0, len(m.Tasks))
-		for _, v := range m.Tasks {
-			o.Tasks = append(o.Tasks, v.Copy())
+	if o.Tasks != nil {
+		m.Tasks = make([]*Task, len(o.Tasks))
+		for i := range m.Tasks {
+			m.Tasks[i] = &Task{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i])
 		}
 		}
 	}
 	}
 
 
-	if m.Clusters != nil {
-		o.Clusters = make([]*Cluster, 0, len(m.Clusters))
-		for _, v := range m.Clusters {
-			o.Clusters = append(o.Clusters, v.Copy())
+	if o.Clusters != nil {
+		m.Clusters = make([]*Cluster, len(o.Clusters))
+		for i := range m.Clusters {
+			m.Clusters[i] = &Cluster{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i])
 		}
 		}
 	}
 	}
 
 
-	if m.Secrets != nil {
-		o.Secrets = make([]*Secret, 0, len(m.Secrets))
-		for _, v := range m.Secrets {
-			o.Secrets = append(o.Secrets, v.Copy())
+	if o.Secrets != nil {
+		m.Secrets = make([]*Secret, len(o.Secrets))
+		for i := range m.Secrets {
+			m.Secrets[i] = &Secret{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i])
 		}
 		}
 	}
 	}
 
 
-	return o
 }
 }
 
 
 func (m *ClusterSnapshot) Copy() *ClusterSnapshot {
 func (m *ClusterSnapshot) Copy() *ClusterSnapshot {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
-
 	o := &ClusterSnapshot{}
 	o := &ClusterSnapshot{}
+	o.CopyFrom(m)
+	return o
+}
 
 
-	if m.Members != nil {
-		o.Members = make([]*RaftMember, 0, len(m.Members))
-		for _, v := range m.Members {
-			o.Members = append(o.Members, v.Copy())
+func (m *ClusterSnapshot) CopyFrom(src interface{}) {
+
+	o := src.(*ClusterSnapshot)
+	*m = *o
+	if o.Members != nil {
+		m.Members = make([]*RaftMember, len(o.Members))
+		for i := range m.Members {
+			m.Members[i] = &RaftMember{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Members[i], o.Members[i])
 		}
 		}
 	}
 	}
 
 
-	if m.Removed != nil {
-		o.Removed = make([]uint64, 0, len(m.Removed))
-		o.Removed = append(o.Removed, m.Removed...)
+	if o.Removed != nil {
+		m.Removed = make([]uint64, len(o.Removed))
+		copy(m.Removed, o.Removed)
 	}
 	}
 
 
-	return o
 }
 }
 
 
 func (m *Snapshot) Copy() *Snapshot {
 func (m *Snapshot) Copy() *Snapshot {
 	if m == nil {
 	if m == nil {
 		return nil
 		return nil
 	}
 	}
-
-	o := &Snapshot{
-		Version:    m.Version,
-		Membership: *m.Membership.Copy(),
-		Store:      *m.Store.Copy(),
-	}
-
+	o := &Snapshot{}
+	o.CopyFrom(m)
 	return o
 	return o
 }
 }
 
 
-func (this *StoreSnapshot) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 10)
-	s = append(s, "&api.StoreSnapshot{")
-	if this.Nodes != nil {
-		s = append(s, "Nodes: "+fmt.Sprintf("%#v", this.Nodes)+",\n")
-	}
-	if this.Services != nil {
-		s = append(s, "Services: "+fmt.Sprintf("%#v", this.Services)+",\n")
-	}
-	if this.Networks != nil {
-		s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n")
-	}
-	if this.Tasks != nil {
-		s = append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n")
-	}
-	if this.Clusters != nil {
-		s = append(s, "Clusters: "+fmt.Sprintf("%#v", this.Clusters)+",\n")
-	}
-	if this.Secrets != nil {
-		s = append(s, "Secrets: "+fmt.Sprintf("%#v", this.Secrets)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *ClusterSnapshot) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&api.ClusterSnapshot{")
-	if this.Members != nil {
-		s = append(s, "Members: "+fmt.Sprintf("%#v", this.Members)+",\n")
-	}
-	s = append(s, "Removed: "+fmt.Sprintf("%#v", this.Removed)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *Snapshot) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&api.Snapshot{")
-	s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n")
-	s = append(s, "Membership: "+strings.Replace(this.Membership.GoString(), `&`, ``, 1)+",\n")
-	s = append(s, "Store: "+strings.Replace(this.Store.GoString(), `&`, ``, 1)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func valueToGoStringSnapshot(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringSnapshot(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
+func (m *Snapshot) CopyFrom(src interface{}) {
+
+	o := src.(*Snapshot)
+	*m = *o
+	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Membership, &o.Membership)
+	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Store, &o.Store)
 }
 }
-func (m *StoreSnapshot) Marshal() (data []byte, err error) {
+
+func (m *StoreSnapshot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
+func (m *StoreSnapshot) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if len(m.Nodes) > 0 {
 	if len(m.Nodes) > 0 {
 		for _, msg := range m.Nodes {
 		for _, msg := range m.Nodes {
-			data[i] = 0xa
+			dAtA[i] = 0xa
 			i++
 			i++
-			i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 			if err != nil {
 				return 0, err
 				return 0, err
 			}
 			}
@@ -278,10 +219,10 @@ func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
 	}
 	}
 	if len(m.Services) > 0 {
 	if len(m.Services) > 0 {
 		for _, msg := range m.Services {
 		for _, msg := range m.Services {
-			data[i] = 0x12
+			dAtA[i] = 0x12
 			i++
 			i++
-			i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 			if err != nil {
 				return 0, err
 				return 0, err
 			}
 			}
@@ -290,10 +231,10 @@ func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
 	}
 	}
 	if len(m.Networks) > 0 {
 	if len(m.Networks) > 0 {
 		for _, msg := range m.Networks {
 		for _, msg := range m.Networks {
-			data[i] = 0x1a
+			dAtA[i] = 0x1a
 			i++
 			i++
-			i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 			if err != nil {
 				return 0, err
 				return 0, err
 			}
 			}
@@ -302,10 +243,10 @@ func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
 	}
 	}
 	if len(m.Tasks) > 0 {
 	if len(m.Tasks) > 0 {
 		for _, msg := range m.Tasks {
 		for _, msg := range m.Tasks {
-			data[i] = 0x22
+			dAtA[i] = 0x22
 			i++
 			i++
-			i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 			if err != nil {
 				return 0, err
 				return 0, err
 			}
 			}
@@ -314,10 +255,10 @@ func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
 	}
 	}
 	if len(m.Clusters) > 0 {
 	if len(m.Clusters) > 0 {
 		for _, msg := range m.Clusters {
 		for _, msg := range m.Clusters {
-			data[i] = 0x2a
+			dAtA[i] = 0x2a
 			i++
 			i++
-			i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 			if err != nil {
 				return 0, err
 				return 0, err
 			}
 			}
@@ -326,10 +267,10 @@ func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
 	}
 	}
 	if len(m.Secrets) > 0 {
 	if len(m.Secrets) > 0 {
 		for _, msg := range m.Secrets {
 		for _, msg := range m.Secrets {
-			data[i] = 0x32
+			dAtA[i] = 0x32
 			i++
 			i++
-			i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 			if err != nil {
 				return 0, err
 				return 0, err
 			}
 			}
@@ -339,27 +280,27 @@ func (m *StoreSnapshot) MarshalTo(data []byte) (int, error) {
 	return i, nil
 	return i, nil
 }
 }
 
 
-func (m *ClusterSnapshot) Marshal() (data []byte, err error) {
+func (m *ClusterSnapshot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *ClusterSnapshot) MarshalTo(data []byte) (int, error) {
+func (m *ClusterSnapshot) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if len(m.Members) > 0 {
 	if len(m.Members) > 0 {
 		for _, msg := range m.Members {
 		for _, msg := range m.Members {
-			data[i] = 0xa
+			dAtA[i] = 0xa
 			i++
 			i++
-			i = encodeVarintSnapshot(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 			if err != nil {
 				return 0, err
 				return 0, err
 			}
 			}
@@ -367,79 +308,89 @@ func (m *ClusterSnapshot) MarshalTo(data []byte) (int, error) {
 		}
 		}
 	}
 	}
 	if len(m.Removed) > 0 {
 	if len(m.Removed) > 0 {
+		dAtA2 := make([]byte, len(m.Removed)*10)
+		var j1 int
 		for _, num := range m.Removed {
 		for _, num := range m.Removed {
-			data[i] = 0x10
-			i++
-			i = encodeVarintSnapshot(data, i, uint64(num))
+			for num >= 1<<7 {
+				dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j1++
+			}
+			dAtA2[j1] = uint8(num)
+			j1++
 		}
 		}
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshot(dAtA, i, uint64(j1))
+		i += copy(dAtA[i:], dAtA2[:j1])
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func (m *Snapshot) Marshal() (data []byte, err error) {
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if m.Version != 0 {
 	if m.Version != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
 		i++
-		i = encodeVarintSnapshot(data, i, uint64(m.Version))
+		i = encodeVarintSnapshot(dAtA, i, uint64(m.Version))
 	}
 	}
-	data[i] = 0x12
+	dAtA[i] = 0x12
 	i++
 	i++
-	i = encodeVarintSnapshot(data, i, uint64(m.Membership.Size()))
-	n1, err := m.Membership.MarshalTo(data[i:])
+	i = encodeVarintSnapshot(dAtA, i, uint64(m.Membership.Size()))
+	n3, err := m.Membership.MarshalTo(dAtA[i:])
 	if err != nil {
 	if err != nil {
 		return 0, err
 		return 0, err
 	}
 	}
-	i += n1
-	data[i] = 0x1a
+	i += n3
+	dAtA[i] = 0x1a
 	i++
 	i++
-	i = encodeVarintSnapshot(data, i, uint64(m.Store.Size()))
-	n2, err := m.Store.MarshalTo(data[i:])
+	i = encodeVarintSnapshot(dAtA, i, uint64(m.Store.Size()))
+	n4, err := m.Store.MarshalTo(dAtA[i:])
 	if err != nil {
 	if err != nil {
 		return 0, err
 		return 0, err
 	}
 	}
-	i += n2
+	i += n4
 	return i, nil
 	return i, nil
 }
 }
 
 
-func encodeFixed64Snapshot(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 	return offset + 8
 }
 }
-func encodeFixed32Snapshot(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 	return offset + 4
 }
 }
-func encodeVarintSnapshot(data []byte, offset int, v uint64) int {
+func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		v >>= 7
 		offset++
 		offset++
 	}
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 	return offset + 1
 }
 }
 
 
@@ -495,9 +446,11 @@ func (m *ClusterSnapshot) Size() (n int) {
 		}
 		}
 	}
 	}
 	if len(m.Removed) > 0 {
 	if len(m.Removed) > 0 {
+		l = 0
 		for _, e := range m.Removed {
 		for _, e := range m.Removed {
-			n += 1 + sovSnapshot(uint64(e))
+			l += sovSnapshot(uint64(e))
 		}
 		}
+		n += 1 + sovSnapshot(uint64(l)) + l
 	}
 	}
 	return n
 	return n
 }
 }
@@ -574,8 +527,8 @@ func valueToStringSnapshot(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 	return fmt.Sprintf("*%v", pv)
 }
 }
-func (m *StoreSnapshot) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *StoreSnapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -587,7 +540,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -615,7 +568,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -630,7 +583,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
 			m.Nodes = append(m.Nodes, &Node{})
 			m.Nodes = append(m.Nodes, &Node{})
-			if err := m.Nodes[len(m.Nodes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
@@ -646,7 +599,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -661,7 +614,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
 			m.Services = append(m.Services, &Service{})
 			m.Services = append(m.Services, &Service{})
-			if err := m.Services[len(m.Services)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
@@ -677,7 +630,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -692,7 +645,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
 			m.Networks = append(m.Networks, &Network{})
 			m.Networks = append(m.Networks, &Network{})
-			if err := m.Networks[len(m.Networks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
@@ -708,7 +661,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -723,7 +676,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
 			m.Tasks = append(m.Tasks, &Task{})
 			m.Tasks = append(m.Tasks, &Task{})
-			if err := m.Tasks[len(m.Tasks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
@@ -739,7 +692,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -754,7 +707,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
 			m.Clusters = append(m.Clusters, &Cluster{})
 			m.Clusters = append(m.Clusters, &Cluster{})
-			if err := m.Clusters[len(m.Clusters)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
@@ -770,7 +723,7 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -785,13 +738,13 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
 			m.Secrets = append(m.Secrets, &Secret{})
 			m.Secrets = append(m.Secrets, &Secret{})
-			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipSnapshot(data[iNdEx:])
+			skippy, err := skipSnapshot(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -810,8 +763,8 @@ func (m *StoreSnapshot) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func (m *ClusterSnapshot) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *ClusterSnapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -823,7 +776,7 @@ func (m *ClusterSnapshot) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -851,7 +804,7 @@ func (m *ClusterSnapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -866,33 +819,75 @@ func (m *ClusterSnapshot) Unmarshal(data []byte) error {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
 			m.Members = append(m.Members, &RaftMember{})
 			m.Members = append(m.Members, &RaftMember{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
 		case 2:
 		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType)
-			}
-			var v uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshot
+			if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshot
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
 				}
 				}
-				if iNdEx >= l {
+				if packedLen < 0 {
+					return ErrInvalidLengthSnapshot
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex > l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
-				iNdEx++
-				v |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshot
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Removed = append(m.Removed, v)
+				}
+			} else if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshot
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
 				}
 				}
+				m.Removed = append(m.Removed, v)
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType)
 			}
 			}
-			m.Removed = append(m.Removed, v)
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipSnapshot(data[iNdEx:])
+			skippy, err := skipSnapshot(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -911,8 +906,8 @@ func (m *ClusterSnapshot) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func (m *Snapshot) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -924,7 +919,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -952,7 +947,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				m.Version |= (Snapshot_Version(b) & 0x7F) << shift
 				m.Version |= (Snapshot_Version(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -971,7 +966,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -985,7 +980,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			if err := m.Membership.Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Membership.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
@@ -1001,7 +996,7 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -1015,13 +1010,13 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			if err := m.Store.Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 				return err
 			}
 			}
 			iNdEx = postIndex
 			iNdEx = postIndex
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipSnapshot(data[iNdEx:])
+			skippy, err := skipSnapshot(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -1040,8 +1035,8 @@ func (m *Snapshot) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func skipSnapshot(data []byte) (n int, err error) {
-	l := len(data)
+func skipSnapshot(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		var wire uint64
 		var wire uint64
@@ -1052,7 +1047,7 @@ func skipSnapshot(data []byte) (n int, err error) {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 				return 0, io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -1070,7 +1065,7 @@ func skipSnapshot(data []byte) (n int, err error) {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
 				iNdEx++
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 					break
 				}
 				}
 			}
 			}
@@ -1087,7 +1082,7 @@ func skipSnapshot(data []byte) (n int, err error) {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -1110,7 +1105,7 @@ func skipSnapshot(data []byte) (n int, err error) {
 					if iNdEx >= l {
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 						return 0, io.ErrUnexpectedEOF
 					}
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 					if b < 0x80 {
@@ -1121,7 +1116,7 @@ func skipSnapshot(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 				if innerWireType == 4 {
 					break
 					break
 				}
 				}
-				next, err := skipSnapshot(data[start:])
+				next, err := skipSnapshot(dAtA[start:])
 				if err != nil {
 				if err != nil {
 					return 0, err
 					return 0, err
 				}
 				}
@@ -1148,31 +1143,31 @@ var (
 func init() { proto.RegisterFile("snapshot.proto", fileDescriptorSnapshot) }
 func init() { proto.RegisterFile("snapshot.proto", fileDescriptorSnapshot) }
 
 
 var fileDescriptorSnapshot = []byte{
 var fileDescriptorSnapshot = []byte{
-	// 409 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xbf, 0x6f, 0xd3, 0x40,
-	0x14, 0xc7, 0x73, 0xce, 0x0f, 0x57, 0xaf, 0x6a, 0x81, 0x13, 0xc3, 0x29, 0xa0, 0x23, 0x04, 0x86,
-	0x4c, 0x06, 0x02, 0x12, 0x2c, 0x30, 0x94, 0x89, 0x81, 0x0e, 0x17, 0x54, 0xb1, 0x3a, 0xf6, 0x6b,
-	0x6b, 0x8c, 0x7d, 0xd1, 0xbd, 0xc3, 0x5d, 0xf9, 0xf3, 0x32, 0x76, 0x64, 0x42, 0xc4, 0x0b, 0x2b,
-	0x7f, 0x02, 0xb2, 0xcf, 0xb6, 0x22, 0xe1, 0xb0, 0x3d, 0x5b, 0x9f, 0xef, 0x8f, 0x7b, 0x7a, 0x70,
-	0x4a, 0x79, 0xb8, 0xa1, 0x6b, 0x6d, 0x83, 0x8d, 0xd1, 0x56, 0x73, 0x1e, 0xeb, 0x28, 0x45, 0x13,
-	0xd0, 0x4d, 0x68, 0xb2, 0x34, 0xb1, 0x41, 0xf1, 0x62, 0x7a, 0xa2, 0xd7, 0x5f, 0x30, 0xb2, 0xe4,
-	0x90, 0x29, 0x98, 0xf0, 0xb2, 0xc1, 0xa7, 0xf7, 0xaf, 0xf4, 0x95, 0xae, 0xc7, 0x67, 0xd5, 0xe4,
-	0xfe, 0xce, 0x6f, 0x3d, 0x38, 0x59, 0x59, 0x6d, 0x70, 0xd5, 0x98, 0xf3, 0x00, 0xc6, 0xb9, 0x8e,
-	0x91, 0x04, 0x9b, 0x0d, 0x17, 0xc7, 0x4b, 0x11, 0xfc, 0x1b, 0x13, 0x9c, 0xeb, 0x18, 0x95, 0xc3,
-	0xf8, 0x6b, 0x38, 0x22, 0x34, 0x45, 0x12, 0x21, 0x09, 0xaf, 0x96, 0x3c, 0xe8, 0x93, 0xac, 0x1c,
-	0xa3, 0x3a, 0xb8, 0x12, 0xe6, 0x68, 0x6f, 0xb4, 0x49, 0x49, 0x0c, 0x0f, 0x0b, 0xcf, 0x1d, 0xa3,
-	0x3a, 0xb8, 0x6a, 0x68, 0x43, 0x4a, 0x49, 0x8c, 0x0e, 0x37, 0xfc, 0x14, 0x52, 0xaa, 0x1c, 0x56,
-	0x05, 0x45, 0x5f, 0xbf, 0x91, 0x45, 0x43, 0x62, 0x7c, 0x38, 0xe8, 0xbd, 0x63, 0x54, 0x07, 0xf3,
-	0x57, 0xe0, 0x13, 0x46, 0x06, 0x2d, 0x89, 0x49, 0xad, 0x9b, 0xf6, 0xbf, 0xac, 0x42, 0x54, 0x8b,
-	0xce, 0x11, 0xee, 0x34, 0x56, 0xdd, 0x4e, 0xdf, 0x80, 0x9f, 0x61, 0xb6, 0xae, 0x0a, 0xb8, 0xad,
-	0xca, 0x3e, 0x23, 0x15, 0x5e, 0xda, 0x8f, 0x35, 0xa6, 0x5a, 0x9c, 0x0b, 0xf0, 0x0d, 0x66, 0xba,
-	0xc0, 0xb8, 0x5e, 0xee, 0x48, 0xb5, 0x9f, 0xf3, 0xdf, 0x0c, 0x8e, 0xba, 0x80, 0x77, 0xe0, 0x17,
-	0x68, 0x28, 0xd1, 0xb9, 0x60, 0x33, 0xb6, 0x38, 0x5d, 0x3e, 0xed, 0x6d, 0xda, 0x1e, 0xd0, 0x85,
-	0x63, 0x55, 0x2b, 0xe2, 0x1f, 0x00, 0x9a, 0xc4, 0xeb, 0x64, 0x23, 0xbc, 0x19, 0x5b, 0x1c, 0x2f,
-	0x9f, 0xfc, 0x67, 0x49, 0xad, 0xd3, 0xd9, 0x68, 0xfb, 0xf3, 0xd1, 0x40, 0xed, 0x89, 0xf9, 0x5b,
-	0x18, 0x53, 0x75, 0x50, 0x62, 0x58, 0xbb, 0x3c, 0xee, 0x2d, 0xb2, 0x7f, 0x71, 0x8d, 0x87, 0x53,
-	0xcd, 0xef, 0x81, 0xdf, 0xb4, 0xe3, 0x13, 0xf0, 0x2e, 0x9e, 0xdf, 0x1d, 0x9c, 0x3d, 0xdc, 0xee,
-	0xe4, 0xe0, 0xc7, 0x4e, 0x0e, 0xfe, 0xec, 0x24, 0xfb, 0x5e, 0x4a, 0xb6, 0x2d, 0x25, 0xbb, 0x2d,
-	0x25, 0xfb, 0x55, 0x4a, 0xf6, 0xd9, 0x5b, 0x4f, 0xea, 0x53, 0x7e, 0xf9, 0x37, 0x00, 0x00, 0xff,
-	0xff, 0x48, 0xfb, 0x27, 0x26, 0x21, 0x03, 0x00, 0x00,
+	// 404 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xbd, 0x6e, 0xd4, 0x40,
+	0x10, 0xc7, 0xbd, 0xbe, 0x0f, 0x47, 0x13, 0x25, 0xc0, 0x8a, 0x62, 0x65, 0x24, 0x73, 0x18, 0x8a,
+	0xab, 0x0c, 0x1c, 0x48, 0xd0, 0x40, 0x11, 0x2a, 0x0a, 0x52, 0xec, 0xa1, 0x88, 0xd6, 0x67, 0x4f,
+	0x12, 0x63, 0xec, 0x3d, 0xed, 0x2c, 0x4e, 0xcb, 0xe3, 0x5d, 0x99, 0x92, 0x0a, 0x11, 0x37, 0xbc,
+	0x06, 0xb2, 0xd7, 0xb6, 0x4e, 0xc2, 0x47, 0x37, 0xb6, 0x7e, 0xff, 0x8f, 0x1d, 0x0d, 0x9c, 0x52,
+	0x19, 0x6f, 0xe9, 0x5a, 0x99, 0x68, 0xab, 0x95, 0x51, 0x9c, 0xa7, 0x2a, 0xc9, 0x51, 0x47, 0x74,
+	0x13, 0xeb, 0x22, 0xcf, 0x4c, 0x54, 0xbd, 0xf4, 0x4f, 0xd4, 0xe6, 0x2b, 0x26, 0x86, 0x2c, 0xe2,
+	0x83, 0x8e, 0x2f, 0x3b, 0xdc, 0x7f, 0x78, 0xa5, 0xae, 0x54, 0x3b, 0x3e, 0x6f, 0x26, 0xfb, 0x37,
+	0xbc, 0x75, 0xe1, 0x64, 0x6d, 0x94, 0xc6, 0x75, 0x67, 0xce, 0x23, 0x98, 0x95, 0x2a, 0x45, 0x12,
+	0x6c, 0x31, 0x59, 0x1e, 0xaf, 0x44, 0xf4, 0x6f, 0x4c, 0x74, 0xae, 0x52, 0x94, 0x16, 0xe3, 0x6f,
+	0xe0, 0x88, 0x50, 0x57, 0x59, 0x82, 0x24, 0xdc, 0x56, 0xf2, 0x68, 0x4c, 0xb2, 0xb6, 0x8c, 0x1c,
+	0xe0, 0x46, 0x58, 0xa2, 0xb9, 0x51, 0x3a, 0x27, 0x31, 0x39, 0x2c, 0x3c, 0xb7, 0x8c, 0x1c, 0xe0,
+	0xa6, 0xa1, 0x89, 0x29, 0x27, 0x31, 0x3d, 0xdc, 0xf0, 0x73, 0x4c, 0xb9, 0xb4, 0x58, 0x13, 0x94,
+	0x7c, 0xfb, 0x4e, 0x06, 0x35, 0x89, 0xd9, 0xe1, 0xa0, 0x0f, 0x96, 0x91, 0x03, 0xcc, 0x5f, 0x83,
+	0x47, 0x98, 0x68, 0x34, 0x24, 0xe6, 0xad, 0xce, 0x1f, 0x7f, 0x59, 0x83, 0xc8, 0x1e, 0x0d, 0x11,
+	0xee, 0x75, 0x56, 0xc3, 0x4e, 0xdf, 0x82, 0x57, 0x60, 0xb1, 0x69, 0x0a, 0xd8, 0xad, 0x06, 0x63,
+	0x46, 0x32, 0xbe, 0x34, 0x9f, 0x5a, 0x4c, 0xf6, 0x38, 0x17, 0xe0, 0x69, 0x2c, 0x54, 0x85, 0x69,
+	0xbb, 0xdc, 0xa9, 0xec, 0x3f, 0xc3, 0x3f, 0x0c, 0x8e, 0x86, 0x80, 0xf7, 0xe0, 0x55, 0xa8, 0x29,
+	0x53, 0xa5, 0x60, 0x0b, 0xb6, 0x3c, 0x5d, 0x3d, 0x1b, 0x6d, 0xda, 0x1f, 0xd0, 0x85, 0x65, 0x65,
+	0x2f, 0xe2, 0x1f, 0x01, 0xba, 0xc4, 0xeb, 0x6c, 0x2b, 0xdc, 0x05, 0x5b, 0x1e, 0xaf, 0x9e, 0xfe,
+	0x67, 0x49, 0xbd, 0xd3, 0xd9, 0x74, 0xf7, 0xeb, 0xb1, 0x23, 0xf7, 0xc4, 0xfc, 0x1d, 0xcc, 0xa8,
+	0x39, 0x28, 0x31, 0x69, 0x5d, 0x9e, 0x8c, 0x16, 0xd9, 0xbf, 0xb8, 0xce, 0xc3, 0xaa, 0xc2, 0x07,
+	0xe0, 0x75, 0xed, 0xf8, 0x1c, 0xdc, 0x8b, 0x17, 0xf7, 0x9d, 0x33, 0xb1, 0xbb, 0x0b, 0x9c, 0x9f,
+	0x77, 0x81, 0xf3, 0xa3, 0x0e, 0xd8, 0xae, 0x0e, 0xd8, 0x6d, 0x1d, 0xb0, 0xdf, 0x75, 0xc0, 0xbe,
+	0xb8, 0x9b, 0x79, 0x7b, 0xc6, 0xaf, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x5b, 0x6d, 0xab,
+	0x1d, 0x03, 0x00, 0x00,
 }
 }
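
Note on the Copy/CopyFrom rewrite in the snapshot.pb.go diff above: the generated messages now implement CopyFrom(src interface{}) and route nested messages through the new api/deepcopy helper instead of open-coding per-field copies. The sketch below illustrates that dispatch pattern only; the interface name, the fallback behaviour, and the stand-in message type are assumptions for illustration, not the vendored github.com/docker/swarmkit/api/deepcopy package.

package main

import "fmt"

// CopierFrom is the shape the generated messages (StoreSnapshot,
// ClusterSnapshot, Snapshot) now satisfy: copy src's fields into the receiver.
type CopierFrom interface {
	CopyFrom(src interface{})
}

// Copy delegates to dst's CopyFrom when available, so generated code can call
// Copy(dst, src) uniformly for any nested message.
func Copy(dst, src interface{}) {
	if c, ok := dst.(CopierFrom); ok {
		c.CopyFrom(src)
		return
	}
	panic(fmt.Sprintf("deepcopy: unsupported type %T", dst))
}

// clusterSnapshot is a stand-in for the generated ClusterSnapshot message.
type clusterSnapshot struct {
	Removed []uint64
}

func (m *clusterSnapshot) CopyFrom(src interface{}) {
	o := src.(*clusterSnapshot)
	*m = *o
	if o.Removed != nil {
		// Deep-copy the slice contents, not just the slice header.
		m.Removed = make([]uint64, len(o.Removed))
		copy(m.Removed, o.Removed)
	}
}

func main() {
	src := &clusterSnapshot{Removed: []uint64{1, 2, 3}}
	dst := &clusterSnapshot{}
	Copy(dst, src)
	src.Removed[0] = 99
	fmt.Println(dst.Removed) // prints [1 2 3]; dst is unaffected by mutating src
}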

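The Removed field of ClusterSnapshot (field number 2) also switches from unpacked to packed encoding in this file: the struct tag gains "packed", MarshalTo now writes one length-delimited record, Size accounts for the length prefix, and Unmarshal accepts both wire type 2 (packed) and wire type 0 (old unpacked data) for compatibility. A minimal, self-contained sketch of the wire-level difference follows; it is illustrative only, not the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
)

// appendUvarint appends the protobuf base-128 varint encoding of v to b.
func appendUvarint(b []byte, v uint64) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], v)
	return append(b, tmp[:n]...)
}

// encodeUnpacked is the old layout: tag 0x10 (field 2, wire type 0) written
// before every element.
func encodeUnpacked(nums []uint64) []byte {
	var out []byte
	for _, n := range nums {
		out = append(out, 0x10)
		out = appendUvarint(out, n)
	}
	return out
}

// encodePacked is the new layout: a single tag 0x12 (field 2, wire type 2),
// the payload length, then the element varints back to back.
func encodePacked(nums []uint64) []byte {
	var payload []byte
	for _, n := range nums {
		payload = appendUvarint(payload, n)
	}
	out := []byte{0x12}
	out = appendUvarint(out, uint64(len(payload)))
	return append(out, payload...)
}

func main() {
	removed := []uint64{1, 300, 70000}
	fmt.Printf("unpacked: % x\n", encodeUnpacked(removed))
	fmt.Printf("packed:   % x\n", encodePacked(removed))
}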
File diff suppressed because it is too large
+ 275 - 484
vendor/github.com/docker/swarmkit/api/specs.pb.go


+ 3 - 2
vendor/github.com/docker/swarmkit/api/specs.proto

@@ -4,7 +4,7 @@ package docker.swarmkit.v1;
 
 
 import "types.proto";
 import "types.proto";
 import "gogoproto/gogo.proto";
 import "gogoproto/gogo.proto";
-import "duration/duration.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "google/protobuf/duration.proto";
 
 
 // Specs are container objects for user provided input. All creations and
 // Specs are container objects for user provided input. All creations and
 // updates are done through spec types. As a convention, user input from a spec
 // updates are done through spec types. As a convention, user input from a spec
@@ -198,7 +198,8 @@ message ContainerSpec {
 
 
 	// StopGracePeriod the grace period for stopping the container before
 	// StopGracePeriod the grace period for stopping the container before
 	// forcefully killing the container.
 	// forcefully killing the container.
-	Duration stop_grace_period = 9;
+	// Note: Can't use stdduration here because this needs to be nullable.
+	google.protobuf.Duration stop_grace_period = 9;
 
 
 	// PullOptions allows one to parameterize an image pull.
 	// PullOptions allows one to parameterize an image pull.
 	message PullOptions {
 	message PullOptions {

+ 0 - 3
vendor/github.com/docker/swarmkit/api/timestamp/gen.go

@@ -1,3 +0,0 @@
-//go:generate protoc -I.:../../vendor:../../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api/timestamp,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. timestamp.proto
-
-package timestamp

File diff suppressed because it is too large
+ 602 - 1164
vendor/github.com/docker/swarmkit/api/types.pb.go


+ 25 - 14
vendor/github.com/docker/swarmkit/api/types.proto

@@ -2,8 +2,8 @@ syntax = "proto3";
 
 
 package docker.swarmkit.v1;
 package docker.swarmkit.v1;
 
 
-import "timestamp/timestamp.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
-import "duration/duration.proto"; // TODO(stevvooe): use our own until we fix gogoproto/deepcopy
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
 import "gogoproto/gogo.proto";
 import "gogoproto/gogo.proto";
 
 
 // This file contains types that are common to objects and spec or that are not
 // This file contains types that are common to objects and spec or that are not
@@ -259,7 +259,8 @@ message RestartPolicy {
 	RestartCondition condition = 1;
 	RestartCondition condition = 1;
 
 
 	// Delay between restart attempts
 	// Delay between restart attempts
-	Duration delay = 2;
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration delay = 2;
 
 
 	// MaxAttempts is the maximum number of restarts to attempt on an
 	// MaxAttempts is the maximum number of restarts to attempt on an
 	// instance before giving up. Ignored if 0.
 	// instance before giving up. Ignored if 0.
@@ -267,7 +268,8 @@ message RestartPolicy {
 
 
 	// Window is the time window used to evaluate the restart policy.
 	// Window is the time window used to evaluate the restart policy.
 	// The time window is unbounded if this is 0.
 	// The time window is unbounded if this is 0.
-	Duration window = 4;
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration window = 4;
 }
 }
 
 
 // UpdateConfig specifies the rate and policy of updates.
 // UpdateConfig specifies the rate and policy of updates.
@@ -278,7 +280,7 @@ message UpdateConfig {
 	uint64 parallelism = 1;
 	uint64 parallelism = 1;
 
 
 	// Amount of time between updates.
 	// Amount of time between updates.
-	Duration delay = 2 [(gogoproto.nullable) = false];
+	google.protobuf.Duration delay = 2 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
 
 
 	enum FailureAction {
 	enum FailureAction {
 		PAUSE = 0;
 		PAUSE = 0;
@@ -301,7 +303,8 @@ message UpdateConfig {
 	// this counts as a failure. If it fails after Monitor, it does not
 	// this counts as a failure. If it fails after Monitor, it does not
 	// count as a failure. If Monitor is unspecified, a default value will
 	// count as a failure. If Monitor is unspecified, a default value will
 	// be used.
 	// be used.
-	Duration monitor = 4;
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration monitor = 4;
 
 
 	// MaxFailureRatio is the fraction of tasks that may fail during
 	// MaxFailureRatio is the fraction of tasks that may fail during
 	// an update before the failure action is invoked. Any task created by
 	// an update before the failure action is invoked. Any task created by
@@ -343,11 +346,13 @@ message UpdateStatus {
 	UpdateState state = 1;
 	UpdateState state = 1;
 
 
 	// StartedAt is the time at which the update was started.
 	// StartedAt is the time at which the update was started.
-	Timestamp started_at = 2;
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp started_at = 2;
 
 
 	// CompletedAt is the time at which the update completed successfully,
 	// CompletedAt is the time at which the update completed successfully,
 	// paused, or finished rolling back.
 	// paused, or finished rolling back.
-	Timestamp completed_at = 3;
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp completed_at = 3;
 
 
 	// TODO(aaronl): Consider adding a timestamp showing when the most
 	// TODO(aaronl): Consider adding a timestamp showing when the most
 	// recent task update took place. Currently, this is nontrivial
 	// recent task update took place. Currently, this is nontrivial
@@ -418,7 +423,8 @@ message PortStatus {
 }
 }
 
 
 message TaskStatus {
 message TaskStatus {
-	Timestamp timestamp = 1;
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp timestamp = 1;
 
 
 	// State expresses the current state of the task.
 	// State expresses the current state of the task.
 	TaskState state = 2;
 	TaskState state = 2;
@@ -644,7 +650,8 @@ message ExternalCA {
 
 
 message CAConfig {
 message CAConfig {
 	// NodeCertExpiry is the duration certificates should be issued for
 	// NodeCertExpiry is the duration certificates should be issued for
-	Duration node_cert_expiry = 1;
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration node_cert_expiry = 1;
 
 
 	// ExternalCAs is a list of CAs to which a manager node will make
 	// ExternalCAs is a list of CAs to which a manager node will make
 	// certificate signing requests for node certificates.
 	// certificate signing requests for node certificates.
@@ -673,7 +680,8 @@ message TaskDefaults {
 message DispatcherConfig {
 message DispatcherConfig {
 	// HeartbeatPeriod defines how often agent should send heartbeats to
 	// HeartbeatPeriod defines how often agent should send heartbeats to
 	// dispatcher.
 	// dispatcher.
-	Duration heartbeat_period = 1;
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration heartbeat_period = 1;
 }
 }
 
 
 // RaftConfig defines raft settings for the cluster.
 // RaftConfig defines raft settings for the cluster.
@@ -829,7 +837,8 @@ message SecretReference {
 message BlacklistedCertificate {
 message BlacklistedCertificate {
 	// Expiry is the latest known expiration time of a certificate that
 	// Expiry is the latest known expiration time of a certificate that
 	// was issued for the given CN.
 	// was issued for the given CN.
-	Timestamp expiry = 1;
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp expiry = 1;
 }
 }
 
 
 // HealthConfig holds configuration settings for the HEALTHCHECK feature.
 // HealthConfig holds configuration settings for the HEALTHCHECK feature.
@@ -844,11 +853,13 @@ message HealthConfig {
 	repeated string test = 1;
 	repeated string test = 1;
 
 
 	// Interval is the time to wait between checks. Zero means inherit.
 	// Interval is the time to wait between checks. Zero means inherit.
-	Duration interval = 2;
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration interval = 2;
 
 
 	// Timeout is the time to wait before considering the check to have hung.
 	// Timeout is the time to wait before considering the check to have hung.
 	// Zero means inherit.
 	// Zero means inherit.
-	Duration timeout = 3;
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration timeout = 3;
 
 
 	// Retries is the number of consecutive failures needed to consider a
 	// Retries is the number of consecutive failures needed to consider a
 	// container as unhealthy. Zero means inherit.
 	// container as unhealthy. Zero means inherit.

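Note: the "can't use stdduration/stdtime" comments above refer to how gogoproto generates these fields. A nullable google.protobuf.Duration becomes a *types.Duration pointer in Go (nil meaning "unset / inherit") and has to be converted explicitly. A minimal sketch of the usual read pattern, with a hypothetical field and default value:

package main

import (
	"fmt"
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

// effectiveInterval reads a nullable duration field: nil means "inherit the
// default", anything else is converted from the proto representation.
func effectiveInterval(interval *gogotypes.Duration, def time.Duration) time.Duration {
	if interval == nil {
		return def
	}
	d, err := gogotypes.DurationFromProto(interval)
	if err != nil {
		return def // fall back on malformed values
	}
	return d
}

func main() {
	fmt.Println(effectiveInterval(nil, 30*time.Second))                                    // 30s
	fmt.Println(effectiveInterval(gogotypes.DurationProto(5*time.Second), 30*time.Second)) // 5s
}
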
+ 8 - 15
vendor/github.com/docker/swarmkit/ca/certificates.go

@@ -461,7 +461,7 @@ func getGRPCConnection(creds credentials.TransportCredentials, connBroker *conne
 	return connBroker.Select(dialOpts...)
 	return connBroker.Select(dialOpts...)
 }
 }
 
 
-// GetRemoteCA returns the remote endpoint's CA certificate
+// GetRemoteCA returns the remote endpoint's CA certificate bundle
 func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbroker.Broker) (RootCA, error) {
 func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbroker.Broker) (RootCA, error) {
 	// This TLS Config is intentionally using InsecureSkipVerify. We use the
 	// This TLS Config is intentionally using InsecureSkipVerify. We use the
 	// digest instead to check the integrity of the CA certificate.
 	// digest instead to check the integrity of the CA certificate.
@@ -482,6 +482,10 @@ func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbro
 		return RootCA{}, err
 		return RootCA{}, err
 	}
 	}
 
 
+	// If a bundle of certificates is provided, the digest covers the entire bundle and not just
+	// one of the certificates in the bundle. Otherwise, a node could be MITMed while joining if
+	// the MITM CA provided a single certificate matching the digest, while providing arbitrary
+	// other non-verified root certs that the manager certificate actually chains up to.
 	if d != "" {
 	if d != "" {
 		verifier := d.Verifier()
 		verifier := d.Verifier()
 		if err != nil {
 		if err != nil {
@@ -492,23 +496,12 @@ func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbro
 
 
 		if !verifier.Verified() {
 		if !verifier.Verified() {
 			return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
 			return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
-
 		}
 		}
 	}
 	}
 
 
-	// Check the validity of the remote Cert
-	_, err = helpers.ParseCertificatePEM(response.Certificate)
-	if err != nil {
-		return RootCA{}, err
-	}
-
-	// Create a Pool with our RootCACertificate
-	pool := x509.NewCertPool()
-	if !pool.AppendCertsFromPEM(response.Certificate) {
-		return RootCA{}, errors.New("failed to append certificate to cert pool")
-	}
-
-	return RootCA{Cert: response.Certificate, Digest: digest.FromBytes(response.Certificate), Pool: pool}, nil
+	// NewRootCA will validate that the certificates are otherwise valid and create a RootCA object.
+	// Since there is no key, the certificate expiry does not matter and will not be used.
+	return NewRootCA(response.Certificate, nil, DefaultNodeCertExpiration)
 }
 }
 
 
 // CreateRootCA creates a Certificate authority for a new Swarm Cluster, potentially
 // CreateRootCA creates a Certificate authority for a new Swarm Cluster, potentially

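Note: the hardening above relies on the digest being computed over the whole returned PEM bundle. A minimal sketch of that check, assuming the go-digest style Verifier API used here (the verifyBundle helper and the import path are illustrative, not part of swarmkit):

import (
	"errors"

	digest "github.com/opencontainers/go-digest"
)

// verifyBundle rejects any response whose entire PEM bundle does not hash to
// the expected digest, so an attacker cannot append unverified root certs to
// a single matching certificate.
func verifyBundle(d digest.Digest, pemBundle []byte) error {
	verifier := d.Verifier()
	if _, err := verifier.Write(pemBundle); err != nil {
		return err
	}
	if !verifier.Verified() {
		return errors.New("remote CA bundle does not match expected digest")
	}
	return nil
}
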
+ 3 - 3
vendor/github.com/docker/swarmkit/ca/server.go

@@ -11,7 +11,7 @@ import (
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
@@ -48,7 +48,7 @@ type Server struct {
 // DefaultCAConfig returns the default CA Config, with a default expiration.
 // DefaultCAConfig returns the default CA Config, with a default expiration.
 func DefaultCAConfig() api.CAConfig {
 func DefaultCAConfig() api.CAConfig {
 	return api.CAConfig{
 	return api.CAConfig{
-		NodeCertExpiry: ptypes.DurationProto(DefaultNodeCertExpiration),
+		NodeCertExpiry: gogotypes.DurationProto(DefaultNodeCertExpiration),
 	}
 	}
 }
 }
 
 
@@ -527,7 +527,7 @@ func (s *Server) updateCluster(ctx context.Context, cluster *api.Cluster) {
 		expiry := DefaultNodeCertExpiration
 		expiry := DefaultNodeCertExpiration
 		if cluster.Spec.CAConfig.NodeCertExpiry != nil {
 		if cluster.Spec.CAConfig.NodeCertExpiry != nil {
 			// NodeCertExpiry exists, let's try to parse the duration out of it
 			// NodeCertExpiry exists, let's try to parse the duration out of it
-			clusterExpiry, err := ptypes.Duration(cluster.Spec.CAConfig.NodeCertExpiry)
+			clusterExpiry, err := gogotypes.DurationFromProto(cluster.Spec.CAConfig.NodeCertExpiry)
 			if err != nil {
 			if err != nil {
 				log.G(ctx).WithFields(logrus.Fields{
 				log.G(ctx).WithFields(logrus.Fields{
 					"cluster.id": cluster.ID,
 					"cluster.id": cluster.ID,

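Note: DurationProto and DurationFromProto come from github.com/gogo/protobuf/types and replace the swarmkit-local ptypes helpers in this file. A round trip looks like this (the 90-day value is only an example, not the actual DefaultNodeCertExpiration constant):

import (
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

// exampleExpiry round-trips a Go duration through the gogo well-known type,
// the same conversion pair used by DefaultCAConfig and updateCluster above.
func exampleExpiry() (time.Duration, error) {
	pb := gogotypes.DurationProto(90 * 24 * time.Hour) // illustrative value
	return gogotypes.DurationFromProto(pb)
}
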
+ 21 - 0
vendor/github.com/docker/swarmkit/manager/constraint/constraint.go

@@ -2,6 +2,7 @@ package constraint
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"net"
 	"regexp"
 	"regexp"
 	"strings"
 	"strings"
 
 
@@ -121,6 +122,26 @@ func NodeMatches(constraints []Constraint, n *api.Node) bool {
 			if !constraint.Match(n.Description.Hostname) {
 			if !constraint.Match(n.Description.Hostname) {
 				return false
 				return false
 			}
 			}
+		case strings.EqualFold(constraint.key, "node.ip"):
+			nodeIP := net.ParseIP(n.Status.Addr)
+			// single IP address, node.ip == 2001:db8::2
+			if ip := net.ParseIP(constraint.exp); ip != nil {
+				ipEq := ip.Equal(nodeIP)
+				if (ipEq && constraint.operator != eq) || (!ipEq && constraint.operator == eq) {
+					return false
+				}
+				continue
+			}
+			// CIDR subnet, node.ip != 210.8.4.0/24
+			if _, subnet, err := net.ParseCIDR(constraint.exp); err == nil {
+				within := subnet.Contains(nodeIP)
+				if (within && constraint.operator != eq) || (!within && constraint.operator == eq) {
+					return false
+				}
+				continue
+			}
+			// reject constraint with malformed address/network
+			return false
 		case strings.EqualFold(constraint.key, "node.role"):
 		case strings.EqualFold(constraint.key, "node.role"):
 			if !constraint.Match(n.Role.String()) {
 			if !constraint.Match(n.Role.String()) {
 				return false
 				return false

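Note: the new node.ip case accepts either a literal IP or a CIDR expression. A condensed sketch of the same matching rules (the helper name and the wantEqual flag are illustrative; the real code dispatches on the constraint's operator):

import "net"

// matchIPConstraint returns whether a node address satisfies a node.ip
// expression: a literal IP compares for equality, a CIDR checks membership,
// and a malformed expression never matches.
func matchIPConstraint(nodeAddr, exp string, wantEqual bool) bool {
	nodeIP := net.ParseIP(nodeAddr)
	if ip := net.ParseIP(exp); ip != nil {
		return ip.Equal(nodeIP) == wantEqual
	}
	if _, subnet, err := net.ParseCIDR(exp); err == nil {
		return subnet.Contains(nodeIP) == wantEqual
	}
	return false
}

For example, matchIPConstraint("10.0.0.5", "10.0.0.0/24", true) is true, while matchIPConstraint("2001:db8::2", "2001:db8::3", true) is false.
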
+ 4 - 4
vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go

@@ -8,7 +8,7 @@ import (
 	"github.com/docker/swarmkit/ca"
 	"github.com/docker/swarmkit/ca"
 	"github.com/docker/swarmkit/manager/encryption"
 	"github.com/docker/swarmkit/manager/encryption"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
@@ -27,7 +27,7 @@ func validateClusterSpec(spec *api.ClusterSpec) error {
 
 
 	// Validate that expiry time being provided is valid, and over our minimum
 	// Validate that expiry time being provided is valid, and over our minimum
 	if spec.CAConfig.NodeCertExpiry != nil {
 	if spec.CAConfig.NodeCertExpiry != nil {
-		expiry, err := ptypes.Duration(spec.CAConfig.NodeCertExpiry)
+		expiry, err := gogotypes.DurationFromProto(spec.CAConfig.NodeCertExpiry)
 		if err != nil {
 		if err != nil {
 			return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
 			return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
 		}
 		}
@@ -48,7 +48,7 @@ func validateClusterSpec(spec *api.ClusterSpec) error {
 
 
 	// Validate that heartbeatPeriod time being provided is valid
 	// Validate that heartbeatPeriod time being provided is valid
 	if spec.Dispatcher.HeartbeatPeriod != nil {
 	if spec.Dispatcher.HeartbeatPeriod != nil {
-		heartbeatPeriod, err := ptypes.Duration(spec.Dispatcher.HeartbeatPeriod)
+		heartbeatPeriod, err := gogotypes.DurationFromProto(spec.Dispatcher.HeartbeatPeriod)
 		if err != nil {
 		if err != nil {
 			return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
 			return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
 		}
 		}
@@ -253,7 +253,7 @@ func expireBlacklistedCerts(cluster *api.Cluster) {
 			continue
 			continue
 		}
 		}
 
 
-		expiry, err := ptypes.Timestamp(blacklistedCert.Expiry)
+		expiry, err := gogotypes.TimestampFromProto(blacklistedCert.Expiry)
 		if err == nil && nowMinusGrace.After(expiry) {
 		if err == nil && nowMinusGrace.After(expiry) {
 			delete(cluster.BlacklistedCertificates, cn)
 			delete(cluster.BlacklistedCertificates, cn)
 		}
 		}

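Note: expireBlacklistedCerts above prunes entries once their expiry plus a grace period has passed. A sketch of that check using the gogo timestamp helpers (the grace parameter is illustrative):

import (
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

// canPrune reports whether a blacklisted certificate's expiry is far enough
// in the past that the entry can be dropped. Unknown or malformed expiries
// are kept, matching the behaviour above.
func canPrune(expiry *gogotypes.Timestamp, grace time.Duration) bool {
	if expiry == nil {
		return false
	}
	t, err := gogotypes.TimestampFromProto(expiry)
	if err != nil {
		return false
	}
	return time.Now().Add(-grace).After(t)
}
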
+ 2 - 2
vendor/github.com/docker/swarmkit/manager/controlapi/node.go

@@ -7,7 +7,7 @@ import (
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/manager/state/raft/membership"
 	"github.com/docker/swarmkit/manager/state/raft/membership"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
@@ -294,7 +294,7 @@ func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest)
 			if certBlock != nil {
 			if certBlock != nil {
 				X509Cert, err := x509.ParseCertificate(certBlock.Bytes)
 				X509Cert, err := x509.ParseCertificate(certBlock.Bytes)
 				if err == nil && !X509Cert.NotAfter.IsZero() {
 				if err == nil && !X509Cert.NotAfter.IsZero() {
-					expiry, err := ptypes.TimestampProto(X509Cert.NotAfter)
+					expiry, err := gogotypes.TimestampProto(X509Cert.NotAfter)
 					if err == nil {
 					if err == nil {
 						blacklistedCert.Expiry = expiry
 						blacklistedCert.Expiry = expiry
 					}
 					}

+ 5 - 10
vendor/github.com/docker/swarmkit/manager/controlapi/service.go

@@ -12,8 +12,8 @@ import (
 	"github.com/docker/swarmkit/identity"
 	"github.com/docker/swarmkit/identity"
 	"github.com/docker/swarmkit/manager/constraint"
 	"github.com/docker/swarmkit/manager/constraint"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
 	"github.com/docker/swarmkit/template"
 	"github.com/docker/swarmkit/template"
+	gogotypes "github.com/gogo/protobuf/types"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
@@ -59,7 +59,7 @@ func validateRestartPolicy(rp *api.RestartPolicy) error {
 	}
 	}
 
 
 	if rp.Delay != nil {
 	if rp.Delay != nil {
-		delay, err := ptypes.Duration(rp.Delay)
+		delay, err := gogotypes.DurationFromProto(rp.Delay)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -69,7 +69,7 @@ func validateRestartPolicy(rp *api.RestartPolicy) error {
 	}
 	}
 
 
 	if rp.Window != nil {
 	if rp.Window != nil {
-		win, err := ptypes.Duration(rp.Window)
+		win, err := gogotypes.DurationFromProto(rp.Window)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -94,12 +94,7 @@ func validateUpdate(uc *api.UpdateConfig) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	delay, err := ptypes.Duration(&uc.Delay)
-	if err != nil {
-		return err
-	}
-
-	if delay < 0 {
+	if uc.Delay < 0 {
 		return grpc.Errorf(codes.InvalidArgument, "TaskSpec: update-delay cannot be negative")
 		return grpc.Errorf(codes.InvalidArgument, "TaskSpec: update-delay cannot be negative")
 	}
 	}
 
 
@@ -238,7 +233,7 @@ func validateSecretRefsSpec(spec *api.ServiceSpec) error {
 			return grpc.Errorf(codes.InvalidArgument, "malformed secret reference")
 			return grpc.Errorf(codes.InvalidArgument, "malformed secret reference")
 		}
 		}
 
 
-		// Every secret referece requires a Target
+		// Every secret reference requires a Target
 		if secretRef.GetTarget() == nil {
 		if secretRef.GetTarget() == nil {
 			return grpc.Errorf(codes.InvalidArgument, "malformed secret reference, no target provided")
 			return grpc.Errorf(codes.InvalidArgument, "malformed secret reference, no target provided")
 		}
 		}

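Note: UpdateConfig.Delay is now generated with stdduration as a plain time.Duration, which is why the conversion-and-error path could be dropped above; only fields that stay nullable keep the pointer form. A simplified stand-in (the struct and error are illustrative, not the real api.UpdateConfig):

import (
	"errors"
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

var errNegativeDelay = errors.New("update-delay cannot be negative")

// updateConfig mimics the shape of the generated type: Delay is a plain
// duration (stdduration, non-nullable), Monitor stays a nullable pointer.
type updateConfig struct {
	Delay   time.Duration
	Monitor *gogotypes.Duration
}

func validateUpdate(uc *updateConfig) error {
	if uc == nil {
		return nil
	}
	if uc.Delay < 0 {
		return errNegativeDelay
	}
	if uc.Monitor != nil {
		if _, err := gogotypes.DurationFromProto(uc.Monitor); err != nil {
			return err
		}
	}
	return nil
}
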
+ 4 - 4
vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go

@@ -19,9 +19,9 @@ import (
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
 	"github.com/docker/swarmkit/remotes"
 	"github.com/docker/swarmkit/remotes"
 	"github.com/docker/swarmkit/watch"
 	"github.com/docker/swarmkit/watch"
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
@@ -191,7 +191,7 @@ func (d *Dispatcher) Run(ctx context.Context) error {
 				return err
 				return err
 			}
 			}
 			if err == nil && len(clusters) == 1 {
 			if err == nil && len(clusters) == 1 {
-				heartbeatPeriod, err := ptypes.Duration(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
+				heartbeatPeriod, err := gogotypes.DurationFromProto(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
 				if err == nil && heartbeatPeriod > 0 {
 				if err == nil && heartbeatPeriod > 0 {
 					d.config.HeartbeatPeriod = heartbeatPeriod
 					d.config.HeartbeatPeriod = heartbeatPeriod
 				}
 				}
@@ -254,7 +254,7 @@ func (d *Dispatcher) Run(ctx context.Context) error {
 			d.mu.Lock()
 			d.mu.Lock()
 			if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
 			if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
 				// ignore error, since Spec has passed validation before
 				// ignore error, since Spec has passed validation before
-				heartbeatPeriod, _ := ptypes.Duration(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
+				heartbeatPeriod, _ := gogotypes.DurationFromProto(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
 				if heartbeatPeriod != d.config.HeartbeatPeriod {
 				if heartbeatPeriod != d.config.HeartbeatPeriod {
 					// only call d.nodes.updatePeriod when heartbeatPeriod changes
 					// only call d.nodes.updatePeriod when heartbeatPeriod changes
 					d.config.HeartbeatPeriod = heartbeatPeriod
 					d.config.HeartbeatPeriod = heartbeatPeriod
@@ -1273,7 +1273,7 @@ func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*a
 	}
 	}
 
 
 	period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID)
 	period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID)
-	return &api.HeartbeatResponse{Period: *ptypes.DurationProto(period)}, err
+	return &api.HeartbeatResponse{Period: period}, err
 }
 }
 
 
 func (d *Dispatcher) getManagers() []*api.WeightedPeer {
 func (d *Dispatcher) getManagers() []*api.WeightedPeer {

+ 159 - 92
vendor/github.com/docker/swarmkit/manager/manager.go

@@ -35,9 +35,9 @@ import (
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state/raft"
 	"github.com/docker/swarmkit/manager/state/raft"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
 	"github.com/docker/swarmkit/remotes"
 	"github.com/docker/swarmkit/remotes"
 	"github.com/docker/swarmkit/xnet"
 	"github.com/docker/swarmkit/xnet"
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
@@ -72,7 +72,7 @@ type Config struct {
 
 
 	// RemoteAPI is a listening address for serving the remote API, and
 	// RemoteAPI is a listening address for serving the remote API, and
 	// an optional advertise address.
 	// an optional advertise address.
-	RemoteAPI RemoteAddrs
+	RemoteAPI *RemoteAddrs
 
 
 	// JoinRaft is an optional address of a node in an existing raft
 	// JoinRaft is an optional address of a node in an existing raft
 	// cluster to join.
 	// cluster to join.
@@ -115,8 +115,7 @@ type Config struct {
 // This is the high-level object holding and initializing all the manager
 // This is the high-level object holding and initializing all the manager
 // subsystems.
 // subsystems.
 type Manager struct {
 type Manager struct {
-	config    *Config
-	listeners []net.Listener
+	config Config
 
 
 	caserver               *ca.Server
 	caserver               *ca.Server
 	dispatcher             *dispatcher.Dispatcher
 	dispatcher             *dispatcher.Dispatcher
@@ -136,9 +135,18 @@ type Manager struct {
 
 
 	cancelFunc context.CancelFunc
 	cancelFunc context.CancelFunc
 
 
-	mu      sync.Mutex
+	// mu is a general mutex used to coordinate starting/stopping and
+	// leadership events.
+	mu sync.Mutex
+	// addrMu is a mutex that protects config.ControlAPI and config.RemoteAPI
+	addrMu sync.Mutex
+
 	started chan struct{}
 	started chan struct{}
 	stopped bool
 	stopped bool
+
+	remoteListener  chan net.Listener
+	controlListener chan net.Listener
+	errServe        chan error
 }
 }
 
 
 type closeOnceListener struct {
 type closeOnceListener struct {
@@ -156,29 +164,6 @@ func (l *closeOnceListener) Close() error {
 
 
 // New creates a Manager which has not started to accept requests yet.
 // New creates a Manager which has not started to accept requests yet.
 func New(config *Config) (*Manager, error) {
 func New(config *Config) (*Manager, error) {
-	dispatcherConfig := dispatcher.DefaultConfig()
-
-	// If an AdvertiseAddr was specified, we use that as our
-	// externally-reachable address.
-	advertiseAddr := config.RemoteAPI.AdvertiseAddr
-
-	var advertiseAddrPort string
-	if advertiseAddr == "" {
-		// Otherwise, we know we are joining an existing swarm. Use a
-		// wildcard address to trigger remote autodetection of our
-		// address.
-		var err error
-		_, advertiseAddrPort, err = net.SplitHostPort(config.RemoteAPI.ListenAddr)
-		if err != nil {
-			return nil, fmt.Errorf("missing or invalid listen address %s", config.RemoteAPI.ListenAddr)
-		}
-
-		// Even with an IPv6 listening address, it's okay to use
-		// 0.0.0.0 here. Any "unspecified" (wildcard) IP will
-		// be substituted with the actual source address.
-		advertiseAddr = net.JoinHostPort("0.0.0.0", advertiseAddrPort)
-	}
-
 	err := os.MkdirAll(config.StateDir, 0700)
 	err := os.MkdirAll(config.StateDir, 0700)
 	if err != nil {
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create state directory")
 		return nil, errors.Wrap(err, "failed to create state directory")
@@ -190,17 +175,89 @@ func New(config *Config) (*Manager, error) {
 		return nil, errors.Wrap(err, "failed to create raft state directory")
 		return nil, errors.Wrap(err, "failed to create raft state directory")
 	}
 	}
 
 
-	var listeners []net.Listener
+	raftCfg := raft.DefaultNodeConfig()
+
+	if config.ElectionTick > 0 {
+		raftCfg.ElectionTick = int(config.ElectionTick)
+	}
+	if config.HeartbeatTick > 0 {
+		raftCfg.HeartbeatTick = int(config.HeartbeatTick)
+	}
+
+	dekRotator, err := NewRaftDEKManager(config.SecurityConfig.KeyWriter())
+	if err != nil {
+		return nil, err
+	}
+
+	newNodeOpts := raft.NodeOptions{
+		ID:              config.SecurityConfig.ClientTLSCreds.NodeID(),
+		JoinAddr:        config.JoinRaft,
+		Config:          raftCfg,
+		StateDir:        raftStateDir,
+		ForceNewCluster: config.ForceNewCluster,
+		TLSCredentials:  config.SecurityConfig.ClientTLSCreds,
+		KeyRotator:      dekRotator,
+	}
+	raftNode := raft.NewNode(newNodeOpts)
+
+	opts := []grpc.ServerOption{
+		grpc.Creds(config.SecurityConfig.ServerTLSCreds)}
+
+	m := &Manager{
+		config:          *config,
+		caserver:        ca.NewServer(raftNode.MemoryStore(), config.SecurityConfig),
+		dispatcher:      dispatcher.New(raftNode, dispatcher.DefaultConfig()),
+		logbroker:       logbroker.New(raftNode.MemoryStore()),
+		server:          grpc.NewServer(opts...),
+		localserver:     grpc.NewServer(opts...),
+		raftNode:        raftNode,
+		started:         make(chan struct{}),
+		dekRotator:      dekRotator,
+		remoteListener:  make(chan net.Listener, 1),
+		controlListener: make(chan net.Listener, 1),
+		errServe:        make(chan error, 2),
+	}
+
+	if config.ControlAPI != "" {
+		m.config.ControlAPI = ""
+		if err := m.BindControl(config.ControlAPI); err != nil {
+			return nil, err
+		}
+	}
+
+	if config.RemoteAPI != nil {
+		m.config.RemoteAPI = nil
+		// The context isn't used in this case (before (*Manager).Run).
+		if err := m.BindRemote(context.Background(), *config.RemoteAPI); err != nil {
+			if config.ControlAPI != "" {
+				l := <-m.controlListener
+				l.Close()
+			}
+			return nil, err
+		}
+	}
+
+	return m, nil
+}
+
+// BindControl binds a local socket for the control API.
+func (m *Manager) BindControl(addr string) error {
+	m.addrMu.Lock()
+	defer m.addrMu.Unlock()
+
+	if m.config.ControlAPI != "" {
+		return errors.New("manager already has a control API address")
+	}
 
 
 	// don't create a socket directory if we're on windows. we use a named pipe
 	// don't create a socket directory if we're on windows. we use a named pipe
 	if runtime.GOOS != "windows" {
 	if runtime.GOOS != "windows" {
-		err := os.MkdirAll(filepath.Dir(config.ControlAPI), 0700)
+		err := os.MkdirAll(filepath.Dir(addr), 0700)
 		if err != nil {
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to create socket directory")
+			return errors.Wrap(err, "failed to create socket directory")
 		}
 		}
 	}
 	}
 
 
-	l, err := xnet.ListenLocal(config.ControlAPI)
+	l, err := xnet.ListenLocal(addr)
 
 
 	// A unix socket may fail to bind if the file already
 	// A unix socket may fail to bind if the file already
 	// exists. Try replacing the file.
 	// exists. Try replacing the file.
@@ -213,69 +270,64 @@ func New(config *Config) (*Manager, error) {
 			unwrappedErr = sys.Err
 			unwrappedErr = sys.Err
 		}
 		}
 		if unwrappedErr == syscall.EADDRINUSE {
 		if unwrappedErr == syscall.EADDRINUSE {
-			os.Remove(config.ControlAPI)
-			l, err = xnet.ListenLocal(config.ControlAPI)
+			os.Remove(addr)
+			l, err = xnet.ListenLocal(addr)
 		}
 		}
 	}
 	}
 	if err != nil {
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to listen on control API address")
+		return errors.Wrap(err, "failed to listen on control API address")
 	}
 	}
 
 
-	listeners = append(listeners, l)
+	m.config.ControlAPI = addr
+	m.controlListener <- l
+	return nil
+}
 
 
-	l, err = net.Listen("tcp", config.RemoteAPI.ListenAddr)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to listen on remote API address")
-	}
-	if advertiseAddrPort == "0" {
-		advertiseAddr = l.Addr().String()
-		config.RemoteAPI.ListenAddr = advertiseAddr
+// BindRemote binds a port for the remote API.
+func (m *Manager) BindRemote(ctx context.Context, addrs RemoteAddrs) error {
+	m.addrMu.Lock()
+	defer m.addrMu.Unlock()
+
+	if m.config.RemoteAPI != nil {
+		return errors.New("manager already has remote API address")
 	}
 	}
-	listeners = append(listeners, l)
 
 
-	raftCfg := raft.DefaultNodeConfig()
+	// If an AdvertiseAddr was specified, we use that as our
+	// externally-reachable address.
+	advertiseAddr := addrs.AdvertiseAddr
 
 
-	if config.ElectionTick > 0 {
-		raftCfg.ElectionTick = int(config.ElectionTick)
-	}
-	if config.HeartbeatTick > 0 {
-		raftCfg.HeartbeatTick = int(config.HeartbeatTick)
+	var advertiseAddrPort string
+	if advertiseAddr == "" {
+		// Otherwise, we know we are joining an existing swarm. Use a
+		// wildcard address to trigger remote autodetection of our
+		// address.
+		var err error
+		_, advertiseAddrPort, err = net.SplitHostPort(addrs.ListenAddr)
+		if err != nil {
+			return fmt.Errorf("missing or invalid listen address %s", addrs.ListenAddr)
+		}
+
+		// Even with an IPv6 listening address, it's okay to use
+		// 0.0.0.0 here. Any "unspecified" (wildcard) IP will
+		// be substituted with the actual source address.
+		advertiseAddr = net.JoinHostPort("0.0.0.0", advertiseAddrPort)
 	}
 	}
 
 
-	dekRotator, err := NewRaftDEKManager(config.SecurityConfig.KeyWriter())
+	l, err := net.Listen("tcp", addrs.ListenAddr)
 	if err != nil {
 	if err != nil {
-		return nil, err
+		return errors.Wrap(err, "failed to listen on remote API address")
 	}
 	}
-
-	newNodeOpts := raft.NodeOptions{
-		ID:              config.SecurityConfig.ClientTLSCreds.NodeID(),
-		Addr:            advertiseAddr,
-		JoinAddr:        config.JoinRaft,
-		Config:          raftCfg,
-		StateDir:        raftStateDir,
-		ForceNewCluster: config.ForceNewCluster,
-		TLSCredentials:  config.SecurityConfig.ClientTLSCreds,
-		KeyRotator:      dekRotator,
+	if advertiseAddrPort == "0" {
+		advertiseAddr = l.Addr().String()
+		addrs.ListenAddr = advertiseAddr
 	}
 	}
-	raftNode := raft.NewNode(newNodeOpts)
 
 
-	opts := []grpc.ServerOption{
-		grpc.Creds(config.SecurityConfig.ServerTLSCreds)}
+	m.config.RemoteAPI = &addrs
 
 
-	m := &Manager{
-		config:      config,
-		listeners:   listeners,
-		caserver:    ca.NewServer(raftNode.MemoryStore(), config.SecurityConfig),
-		dispatcher:  dispatcher.New(raftNode, dispatcherConfig),
-		logbroker:   logbroker.New(raftNode.MemoryStore()),
-		server:      grpc.NewServer(opts...),
-		localserver: grpc.NewServer(opts...),
-		raftNode:    raftNode,
-		started:     make(chan struct{}),
-		dekRotator:  dekRotator,
-	}
+	m.raftNode.SetAddr(ctx, advertiseAddr)
+	m.remoteListener <- l
 
 
-	return m, nil
+	return nil
 }
 }
 
 
 // RemovedFromRaft returns a channel that's closed if the manager is removed
 // RemovedFromRaft returns a channel that's closed if the manager is removed
@@ -286,6 +338,12 @@ func (m *Manager) RemovedFromRaft() <-chan struct{} {
 
 
 // Addr returns tcp address on which remote api listens.
 // Addr returns tcp address on which remote api listens.
 func (m *Manager) Addr() string {
 func (m *Manager) Addr() string {
+	m.addrMu.Lock()
+	defer m.addrMu.Unlock()
+
+	if m.config.RemoteAPI == nil {
+		return ""
+	}
 	return m.config.RemoteAPI.ListenAddr
 	return m.config.RemoteAPI.ListenAddr
 }
 }
 
 
@@ -357,12 +415,17 @@ func (m *Manager) Run(parent context.Context) error {
 	// requests (it has no TLS information to put in the metadata map).
 	// requests (it has no TLS information to put in the metadata map).
 	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
 	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
 	handleRequestLocally := func(ctx context.Context) (context.Context, error) {
 	handleRequestLocally := func(ctx context.Context) (context.Context, error) {
-		var remoteAddr string
-		if m.config.RemoteAPI.AdvertiseAddr != "" {
-			remoteAddr = m.config.RemoteAPI.AdvertiseAddr
-		} else {
-			remoteAddr = m.config.RemoteAPI.ListenAddr
+		remoteAddr := "127.0.0.1:0"
+
+		m.addrMu.Lock()
+		if m.config.RemoteAPI != nil {
+			if m.config.RemoteAPI.AdvertiseAddr != "" {
+				remoteAddr = m.config.RemoteAPI.AdvertiseAddr
+			} else {
+				remoteAddr = m.config.RemoteAPI.ListenAddr
+			}
 		}
 		}
+		m.addrMu.Unlock()
 
 
 		creds := m.config.SecurityConfig.ClientTLSCreds
 		creds := m.config.SecurityConfig.ClientTLSCreds
 
 
@@ -408,10 +471,8 @@ func (m *Manager) Run(parent context.Context) error {
 	healthServer.SetServingStatus("Raft", api.HealthCheckResponse_NOT_SERVING)
 	healthServer.SetServingStatus("Raft", api.HealthCheckResponse_NOT_SERVING)
 	localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_NOT_SERVING)
 	localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_NOT_SERVING)
 
 
-	errServe := make(chan error, len(m.listeners))
-	for _, lis := range m.listeners {
-		go m.serveListener(ctx, errServe, lis)
-	}
+	go m.serveListener(ctx, m.remoteListener)
+	go m.serveListener(ctx, m.controlListener)
 
 
 	defer func() {
 	defer func() {
 		m.server.Stop()
 		m.server.Stop()
@@ -459,7 +520,7 @@ func (m *Manager) Run(parent context.Context) error {
 	}
 	}
 
 
 	// wait for an error in serving.
 	// wait for an error in serving.
-	err = <-errServe
+	err = <-m.errServe
 	m.mu.Lock()
 	m.mu.Lock()
 	if m.stopped {
 	if m.stopped {
 		m.mu.Unlock()
 		m.mu.Unlock()
@@ -759,7 +820,13 @@ func (m *Manager) handleLeadershipEvents(ctx context.Context, leadershipCh chan
 }
 }
 
 
 // serveListener serves a listener for local and non local connections.
 // serveListener serves a listener for local and non local connections.
-func (m *Manager) serveListener(ctx context.Context, errServe chan error, l net.Listener) {
+func (m *Manager) serveListener(ctx context.Context, lCh <-chan net.Listener) {
+	var l net.Listener
+	select {
+	case l = <-lCh:
+	case <-ctx.Done():
+		return
+	}
 	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
 	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
 		logrus.Fields{
 		logrus.Fields{
 			"proto": l.Addr().Network(),
 			"proto": l.Addr().Network(),
@@ -770,10 +837,10 @@ func (m *Manager) serveListener(ctx context.Context, errServe chan error, l net.
 		// we need to disallow double closes because UnixListener.Close
 		// we need to disallow double closes because UnixListener.Close
 		// can delete unix-socket file of newer listener. grpc calls
 		// can delete unix-socket file of newer listener. grpc calls
 		// Close twice indeed: in Serve and in Stop.
 		// Close twice indeed: in Serve and in Stop.
-		errServe <- m.localserver.Serve(&closeOnceListener{Listener: l})
+		m.errServe <- m.localserver.Serve(&closeOnceListener{Listener: l})
 	} else {
 	} else {
 		log.G(ctx).Info("Listening for connections")
 		log.G(ctx).Info("Listening for connections")
-		errServe <- m.server.Serve(l)
+		m.errServe <- m.server.Serve(l)
 	}
 	}
 }
 }
 
 
@@ -966,7 +1033,7 @@ func defaultClusterObject(
 				TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
 				TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
 			},
 			},
 			Dispatcher: api.DispatcherConfig{
 			Dispatcher: api.DispatcherConfig{
-				HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
+				HeartbeatPeriod: gogotypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
 			},
 			},
 			Raft:             raftCfg,
 			Raft:             raftCfg,
 			CAConfig:         initialCAConfig,
 			CAConfig:         initialCAConfig,

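Note: with the refactor above, Config.RemoteAPI is a pointer and listeners can be bound after New. A rough usage sketch based on the signatures in this diff (the addresses and the surrounding function are placeholders):

import (
	"context"

	"github.com/docker/swarmkit/manager"
)

// startManager shows the late-binding flow: create the manager without a
// remote API address, bind the port once it is known, then run.
func startManager(ctx context.Context, cfg *manager.Config) error {
	m, err := manager.New(cfg) // cfg.RemoteAPI may be nil at this point
	if err != nil {
		return err
	}
	if err := m.BindRemote(ctx, manager.RemoteAddrs{
		ListenAddr:    "0.0.0.0:2377", // placeholder
		AdvertiseAddr: "",             // empty: autodetect via the wildcard address
	}); err != nil {
		return err
	}
	return m.Run(ctx)
}
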
+ 3 - 3
vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/tasks.go

@@ -9,7 +9,7 @@ import (
 	"github.com/docker/swarmkit/manager/orchestrator"
 	"github.com/docker/swarmkit/manager/orchestrator"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
@@ -63,14 +63,14 @@ func (r *Orchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error
 			restartDelay := orchestrator.DefaultRestartDelay
 			restartDelay := orchestrator.DefaultRestartDelay
 			if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
 			if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
 				var err error
 				var err error
-				restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay)
+				restartDelay, err = gogotypes.DurationFromProto(t.Spec.Restart.Delay)
 				if err != nil {
 				if err != nil {
 					log.G(ctx).WithError(err).Error("invalid restart delay")
 					log.G(ctx).WithError(err).Error("invalid restart delay")
 					restartDelay = orchestrator.DefaultRestartDelay
 					restartDelay = orchestrator.DefaultRestartDelay
 				}
 				}
 			}
 			}
 			if restartDelay != 0 {
 			if restartDelay != 0 {
-				timestamp, err := ptypes.Timestamp(t.Status.Timestamp)
+				timestamp, err := gogotypes.TimestampFromProto(t.Status.Timestamp)
 				if err == nil {
 				if err == nil {
 					restartTime := timestamp.Add(restartDelay)
 					restartTime := timestamp.Add(restartDelay)
 					calculatedRestartDelay := restartTime.Sub(time.Now())
 					calculatedRestartDelay := restartTime.Sub(time.Now())

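Note: initTasks above derives the remaining restart delay from the task's last status timestamp. A compact sketch of the same arithmetic with the gogo helpers (names are illustrative):

import (
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

// remainingRestartDelay returns how much of the configured delay is still
// left, measured from the task's last status timestamp; a bad timestamp
// falls back to the full delay.
func remainingRestartDelay(status *gogotypes.Timestamp, delay time.Duration) time.Duration {
	ts, err := gogotypes.TimestampFromProto(status)
	if err != nil {
		return delay
	}
	remaining := ts.Add(delay).Sub(time.Now())
	if remaining < 0 {
		return 0
	}
	return remaining
}
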
+ 3 - 3
vendor/github.com/docker/swarmkit/manager/orchestrator/restart/restart.go

@@ -12,7 +12,7 @@ import (
 	"github.com/docker/swarmkit/manager/orchestrator"
 	"github.com/docker/swarmkit/manager/orchestrator"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	gogotypes "github.com/gogo/protobuf/types"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
@@ -156,7 +156,7 @@ func (r *Supervisor) Restart(ctx context.Context, tx store.Tx, cluster *api.Clus
 	if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
 	if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
 		if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
 		if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
 			var err error
 			var err error
-			restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay)
+			restartDelay, err = gogotypes.DurationFromProto(t.Spec.Restart.Delay)
 			if err != nil {
 			if err != nil {
 				log.G(ctx).WithError(err).Error("invalid restart delay; using default")
 				log.G(ctx).WithError(err).Error("invalid restart delay; using default")
 				restartDelay = orchestrator.DefaultRestartDelay
 				restartDelay = orchestrator.DefaultRestartDelay
@@ -226,7 +226,7 @@ func (r *Supervisor) shouldRestart(ctx context.Context, t *api.Task, service *ap
 		return true
 		return true
 	}
 	}
 
 
-	window, err := ptypes.Duration(t.Spec.Restart.Window)
+	window, err := gogotypes.DurationFromProto(t.Spec.Restart.Window)
 	if err != nil {
 	if err != nil {
 		log.G(ctx).WithError(err).Error("invalid restart lookback window")
 		log.G(ctx).WithError(err).Error("invalid restart lookback window")
 		return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts
 		return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts

+ 4 - 8
vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go

@@ -18,6 +18,7 @@ import (
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/protobuf/ptypes"
 	"github.com/docker/swarmkit/protobuf/ptypes"
 	"github.com/docker/swarmkit/watch"
 	"github.com/docker/swarmkit/watch"
+	gogotypes "github.com/gogo/protobuf/types"
 )
 )
 
 
 const defaultMonitor = 30 * time.Second
 const defaultMonitor = 30 * time.Second
@@ -186,7 +187,7 @@ func (u *Updater) Run(ctx context.Context, slots []orchestrator.Slot) {
 
 
 		if service.Spec.Update.Monitor != nil {
 		if service.Spec.Update.Monitor != nil {
 			var err error
 			var err error
-			monitoringPeriod, err = ptypes.Duration(service.Spec.Update.Monitor)
+			monitoringPeriod, err = gogotypes.DurationFromProto(service.Spec.Update.Monitor)
 			if err != nil {
 			if err != nil {
 				monitoringPeriod = defaultMonitor
 				monitoringPeriod = defaultMonitor
 			}
 			}
@@ -344,14 +345,9 @@ func (u *Updater) worker(ctx context.Context, queue <-chan orchestrator.Slot) {
 			}
 			}
 		}
 		}
 
 
-		if u.newService.Spec.Update != nil && (u.newService.Spec.Update.Delay.Seconds != 0 || u.newService.Spec.Update.Delay.Nanos != 0) {
-			delay, err := ptypes.Duration(&u.newService.Spec.Update.Delay)
-			if err != nil {
-				log.G(ctx).WithError(err).Error("invalid update delay")
-				continue
-			}
+		if u.newService.Spec.Update != nil && u.newService.Spec.Update.Delay != 0 {
 			select {
 			select {
-			case <-time.After(delay):
+			case <-time.After(u.newService.Spec.Update.Delay):
 			case <-u.stopChan:
 			case <-u.stopChan:
 				return
 				return
 			}
 			}

+ 1 - 0
vendor/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go

@@ -143,6 +143,7 @@ func (c *Cluster) UpdateMember(id uint64, m *api.RaftMember) error {
 		return nil
 		return nil
 	}
 	}
 	oldMember.RaftMember = m
 	oldMember.RaftMember = m
+	c.broadcastUpdate()
 	return nil
 	return nil
 }
 }
 
 

+ 67 - 1
vendor/github.com/docker/swarmkit/manager/state/raft/raft.go

@@ -126,6 +126,9 @@ type Node struct {
 	stopMu sync.RWMutex
 	stopMu sync.RWMutex
 	// used for membership management checks
 	// used for membership management checks
 	membershipLock sync.Mutex
 	membershipLock sync.Mutex
+	// synchronizes access to n.opts.Addr, and makes sure the address is not
+	// updated concurrently with JoinAndStart.
+	addrLock sync.Mutex
 
 
 	snapshotInProgress chan raftpb.SnapshotMetadata
 	snapshotInProgress chan raftpb.SnapshotMetadata
 	asyncTasks         sync.WaitGroup
 	asyncTasks         sync.WaitGroup
@@ -259,6 +262,59 @@ func (n *Node) ReportUnreachable(id uint64) {
 	n.raftNode.ReportUnreachable(id)
 	n.raftNode.ReportUnreachable(id)
 }
 }
 
 
+// SetAddr provides the raft node's address. This can be used in cases where
+// opts.Addr was not provided to NewNode, for example when a port was not bound
+// until after the raft node was created.
+func (n *Node) SetAddr(ctx context.Context, addr string) error {
+	n.addrLock.Lock()
+	defer n.addrLock.Unlock()
+
+	n.opts.Addr = addr
+
+	if !n.IsMember() {
+		return nil
+	}
+
+	newRaftMember := &api.RaftMember{
+		RaftID: n.Config.ID,
+		NodeID: n.opts.ID,
+		Addr:   addr,
+	}
+	if err := n.cluster.UpdateMember(n.Config.ID, newRaftMember); err != nil {
+		return err
+	}
+
+	// If the raft node is running, submit a configuration change
+	// with the new address.
+
+	// TODO(aaronl): Currently, this node must be the leader to
+	// submit this configuration change. This works for the initial
+	// use cases (single-node cluster late binding ports, or calling
+	// SetAddr before joining a cluster). In the future, we may want
+	// to support having a follower proactively change its remote
+	// address.
+
+	leadershipCh, cancelWatch := n.SubscribeLeadership()
+	defer cancelWatch()
+
+	ctx, cancelCtx := n.WithContext(ctx)
+	defer cancelCtx()
+
+	isLeader := atomic.LoadUint32(&n.signalledLeadership) == 1
+	for !isLeader {
+		select {
+		case leadershipChange := <-leadershipCh:
+			if leadershipChange == IsLeader {
+				isLeader = true
+			}
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+
+	return n.updateNodeBlocking(ctx, n.Config.ID, addr)
+}
+
 // WithContext returns context which is cancelled when parent context cancelled
 // WithContext returns context which is cancelled when parent context cancelled
 // or node is stopped.
 // or node is stopped.
 func (n *Node) WithContext(ctx context.Context) (context.Context, context.CancelFunc) {
 func (n *Node) WithContext(ctx context.Context) (context.Context, context.CancelFunc) {
@@ -316,6 +372,9 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) {
 	n.snapshotMeta = snapshot.Metadata
 	n.snapshotMeta = snapshot.Metadata
 	n.writtenWALIndex, _ = n.raftStore.LastIndex() // lastIndex always returns nil as an error
 	n.writtenWALIndex, _ = n.raftStore.LastIndex() // lastIndex always returns nil as an error
 
 
+	n.addrLock.Lock()
+	defer n.addrLock.Unlock()
+
 	// restore from snapshot
 	// restore from snapshot
 	if loadAndStartErr == nil {
 	if loadAndStartErr == nil {
 		if n.opts.JoinAddr != "" {
 		if n.opts.JoinAddr != "" {
@@ -342,6 +401,9 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) {
 	}
 	}
 
 
 	// join to existing cluster
 	// join to existing cluster
+	if n.opts.Addr == "" {
+		return errors.New("attempted to join raft cluster without knowing own address")
+	}
 
 
 	conn, err := dial(n.opts.JoinAddr, "tcp", n.opts.TLSCredentials, 10*time.Second)
 	conn, err := dial(n.opts.JoinAddr, "tcp", n.opts.TLSCredentials, 10*time.Second)
 	if err != nil {
 	if err != nil {
@@ -1092,6 +1154,11 @@ func (n *Node) reportNewAddress(ctx context.Context, id uint64) error {
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
+	if oldAddr == "" {
+		// Don't know the address of the peer yet, so can't report an
+		// update.
+		return nil
+	}
 	newHost, _, err := net.SplitHostPort(p.Addr.String())
 	newHost, _, err := net.SplitHostPort(p.Addr.String())
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -1655,7 +1722,6 @@ func (n *Node) applyRemoveNode(ctx context.Context, cc raftpb.ConfChange) (err e
 	}
 	}
 
 
 	if cc.NodeID == n.Config.ID {
 	if cc.NodeID == n.Config.ID {
-
 		// wait the commit ack to be sent before closing connection
 		// wait the commit ack to be sent before closing connection
 		n.asyncTasks.Wait()
 		n.asyncTasks.Wait()
 
 

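Note: SetAddr above can only submit the configuration change from the leader, so it blocks on leadership events first. The wait itself follows a generic subscribe-then-select pattern, sketched here independently of the raft types (everything in this snippet is illustrative):

import "context"

// waitForEvent blocks until the events channel delivers the wanted value or
// the context is cancelled; callers check their current state before calling
// so an already-satisfied condition does not block.
func waitForEvent(ctx context.Context, events <-chan interface{}, wanted interface{}) error {
	for {
		select {
		case e := <-events:
			if e == wanted {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
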
+ 2 - 2
vendor/github.com/docker/swarmkit/manager/state/store/memory.go

@@ -13,8 +13,8 @@ import (
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/api"
 	pb "github.com/docker/swarmkit/api"
 	pb "github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state"
-	"github.com/docker/swarmkit/protobuf/ptypes"
 	"github.com/docker/swarmkit/watch"
 	"github.com/docker/swarmkit/watch"
+	gogotypes "github.com/gogo/protobuf/types"
 	memdb "github.com/hashicorp/go-memdb"
 	memdb "github.com/hashicorp/go-memdb"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
@@ -744,7 +744,7 @@ func touchMeta(meta *api.Meta, version *api.Version) error {
 		return nil
 		return nil
 	}
 	}
 
 
-	now, err := ptypes.TimestampProto(time.Now())
+	now, err := gogotypes.TimestampProto(time.Now())
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}

+ 27 - 7
vendor/github.com/docker/swarmkit/node/node.go

@@ -135,11 +135,11 @@ func (n *Node) RemoteAPIAddr() (string, error) {
 	n.RLock()
 	n.RLock()
 	defer n.RUnlock()
 	defer n.RUnlock()
 	if n.manager == nil {
 	if n.manager == nil {
-		return "", errors.Errorf("node is not manager")
+		return "", errors.New("manager is not running")
 	}
 	}
 	addr := n.manager.Addr()
 	addr := n.manager.Addr()
 	if addr == "" {
 	if addr == "" {
-		return "", errors.Errorf("manager addr is not set")
+		return "", errors.New("manager addr is not set")
 	}
 	}
 	return addr, nil
 	return addr, nil
 }
 }
@@ -186,6 +186,21 @@ func New(c *Config) (*Node, error) {
 	return n, nil
 	return n, nil
 }
 }
 
 
+// BindRemote starts a listener that exposes the remote API.
+func (n *Node) BindRemote(ctx context.Context, listenAddr string, advertiseAddr string) error {
+	n.RLock()
+	defer n.RUnlock()
+
+	if n.manager == nil {
+		return errors.New("manager is not running")
+	}
+
+	return n.manager.BindRemote(ctx, manager.RemoteAddrs{
+		ListenAddr:    listenAddr,
+		AdvertiseAddr: advertiseAddr,
+	})
+}
+
 // Start starts a node instance.
 // Start starts a node instance.
 func (n *Node) Start(ctx context.Context) error {
 func (n *Node) Start(ctx context.Context) error {
 	err := errNodeStarted
 	err := errNodeStarted
@@ -670,13 +685,18 @@ func (n *Node) waitRole(ctx context.Context, role string) error {
 }
 }
 
 
 func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}, workerRole <-chan struct{}) error {
 func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}, workerRole <-chan struct{}) error {
-	remoteAddr, _ := n.remotes.Select(n.NodeID())
-	m, err := manager.New(&manager.Config{
-		ForceNewCluster: n.config.ForceNewCluster,
-		RemoteAPI: manager.RemoteAddrs{
+	var remoteAPI *manager.RemoteAddrs
+	if n.config.ListenRemoteAPI != "" {
+		remoteAPI = &manager.RemoteAddrs{
 			ListenAddr:    n.config.ListenRemoteAPI,
 			ListenAddr:    n.config.ListenRemoteAPI,
 			AdvertiseAddr: n.config.AdvertiseRemoteAPI,
 			AdvertiseAddr: n.config.AdvertiseRemoteAPI,
-		},
+		}
+	}
+
+	remoteAddr, _ := n.remotes.Select(n.NodeID())
+	m, err := manager.New(&manager.Config{
+		ForceNewCluster:  n.config.ForceNewCluster,
+		RemoteAPI:        remoteAPI,
 		ControlAPI:       n.config.ListenControlAPI,
 		ControlAPI:       n.config.ListenControlAPI,
 		SecurityConfig:   securityConfig,
 		SecurityConfig:   securityConfig,
 		ExternalCAs:      n.config.ExternalCAs,
 		ExternalCAs:      n.config.ExternalCAs,

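Note: the node-level BindRemote wrapper above lets a caller expose the remote API on an already-running node, mirroring the manager change. A minimal usage sketch (the addresses use documentation ranges and are placeholders):

import (
	"context"

	"github.com/docker/swarmkit/node"
)

// exposeRemoteAPI binds the remote API late, for example once a dynamically
// assigned port is known.
func exposeRemoteAPI(ctx context.Context, n *node.Node) error {
	return n.BindRemote(ctx, "0.0.0.0:2377", "192.0.2.10:2377")
}
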
+ 49 - 97
vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go

@@ -19,9 +19,6 @@ import math "math"
 import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
 import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
 
 
 import strings "strings"
 import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
 import reflect "reflect"
 import reflect "reflect"
 
 
 import io "io"
 import io "io"
@@ -72,121 +69,77 @@ func init() {
 	proto.RegisterExtension(E_Deepcopy)
 	proto.RegisterExtension(E_Deepcopy)
 	proto.RegisterExtension(E_TlsAuthorization)
 	proto.RegisterExtension(E_TlsAuthorization)
 }
 }
-func (this *TLSAuthorization) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&plugin.TLSAuthorization{")
-	if this.Roles != nil {
-		s = append(s, "Roles: "+fmt.Sprintf("%#v", this.Roles)+",\n")
-	}
-	if this.Insecure != nil {
-		s = append(s, "Insecure: "+valueToGoStringPlugin(this.Insecure, "bool")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func valueToGoStringPlugin(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringPlugin(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
-}
-func (m *TLSAuthorization) Marshal() (data []byte, err error) {
+func (m *TLSAuthorization) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 }
 
 
-func (m *TLSAuthorization) MarshalTo(data []byte) (int, error) {
+func (m *TLSAuthorization) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	var i int
 	_ = i
 	_ = i
 	var l int
 	var l int
 	_ = l
 	_ = l
 	if len(m.Roles) > 0 {
 	if len(m.Roles) > 0 {
 		for _, s := range m.Roles {
 		for _, s := range m.Roles {
-			data[i] = 0xa
+			dAtA[i] = 0xa
 			i++
 			i++
 			l = len(s)
 			l = len(s)
 			for l >= 1<<7 {
 			for l >= 1<<7 {
-				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
 				l >>= 7
 				l >>= 7
 				i++
 				i++
 			}
 			}
-			data[i] = uint8(l)
+			dAtA[i] = uint8(l)
 			i++
 			i++
-			i += copy(data[i:], s)
+			i += copy(dAtA[i:], s)
 		}
 		}
 	}
 	}
 	if m.Insecure != nil {
 	if m.Insecure != nil {
-		data[i] = 0x10
+		dAtA[i] = 0x10
 		i++
 		i++
 		if *m.Insecure {
 		if *m.Insecure {
-			data[i] = 1
+			dAtA[i] = 1
 		} else {
 		} else {
-			data[i] = 0
+			dAtA[i] = 0
 		}
 		}
 		i++
 		i++
 	}
 	}
 	if m.XXX_unrecognized != nil {
 	if m.XXX_unrecognized != nil {
-		i += copy(data[i:], m.XXX_unrecognized)
+		i += copy(dAtA[i:], m.XXX_unrecognized)
 	}
 	}
 	return i, nil
 	return i, nil
 }
 }
 
 
-func encodeFixed64Plugin(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 	return offset + 8
 }
 }
-func encodeFixed32Plugin(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 	return offset + 4
 }
 }
-func encodeVarintPlugin(data []byte, offset int, v uint64) int {
+func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		v >>= 7
 		offset++
 		offset++
 	}
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 	return offset + 1
 }
 }
 func (m *TLSAuthorization) Size() (n int) {
 func (m *TLSAuthorization) Size() (n int) {
@@ -240,8 +193,8 @@ func valueToStringPlugin(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 	return fmt.Sprintf("*%v", pv)
 }
 }
-func (m *TLSAuthorization) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *TLSAuthorization) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		preIndex := iNdEx
 		preIndex := iNdEx
@@ -253,7 +206,7 @@ func (m *TLSAuthorization) Unmarshal(data []byte) error {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -281,7 +234,7 @@ func (m *TLSAuthorization) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -296,7 +249,7 @@ func (m *TLSAuthorization) Unmarshal(data []byte) error {
 			if postIndex > l {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 			iNdEx = postIndex
 		case 2:
 		case 2:
 			if wireType != 0 {
 			if wireType != 0 {
@@ -310,7 +263,7 @@ func (m *TLSAuthorization) Unmarshal(data []byte) error {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 					return io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				v |= (int(b) & 0x7F) << shift
 				v |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -321,7 +274,7 @@ func (m *TLSAuthorization) Unmarshal(data []byte) error {
 			m.Insecure = &b
 			m.Insecure = &b
 		default:
 		default:
 			iNdEx = preIndex
 			iNdEx = preIndex
-			skippy, err := skipPlugin(data[iNdEx:])
+			skippy, err := skipPlugin(dAtA[iNdEx:])
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
@@ -331,7 +284,7 @@ func (m *TLSAuthorization) Unmarshal(data []byte) error {
 			if (iNdEx + skippy) > l {
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 				return io.ErrUnexpectedEOF
 			}
 			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 			iNdEx += skippy
 		}
 		}
 	}
 	}
@@ -341,8 +294,8 @@ func (m *TLSAuthorization) Unmarshal(data []byte) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-func skipPlugin(data []byte) (n int, err error) {
-	l := len(data)
+func skipPlugin(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	iNdEx := 0
 	for iNdEx < l {
 	for iNdEx < l {
 		var wire uint64
 		var wire uint64
@@ -353,7 +306,7 @@ func skipPlugin(data []byte) (n int, err error) {
 			if iNdEx >= l {
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 				return 0, io.ErrUnexpectedEOF
 			}
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
 			if b < 0x80 {
@@ -371,7 +324,7 @@ func skipPlugin(data []byte) (n int, err error) {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
 				iNdEx++
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 					break
 				}
 				}
 			}
 			}
@@ -388,7 +341,7 @@ func skipPlugin(data []byte) (n int, err error) {
 				if iNdEx >= l {
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 					return 0, io.ErrUnexpectedEOF
 				}
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 				if b < 0x80 {
@@ -411,7 +364,7 @@ func skipPlugin(data []byte) (n int, err error) {
 					if iNdEx >= l {
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 						return 0, io.ErrUnexpectedEOF
 					}
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 					if b < 0x80 {
@@ -422,7 +375,7 @@ func skipPlugin(data []byte) (n int, err error) {
 				if innerWireType == 4 {
 				if innerWireType == 4 {
 					break
 					break
 				}
 				}
-				next, err := skipPlugin(data[start:])
+				next, err := skipPlugin(dAtA[start:])
 				if err != nil {
 				if err != nil {
 					return 0, err
 					return 0, err
 				}
 				}
@@ -449,7 +402,7 @@ var (
 func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
 func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
 
 
 var fileDescriptorPlugin = []byte{
 var fileDescriptorPlugin = []byte{
-	// 259 bytes of a gzipped FileDescriptorProto
+	// 254 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
 	0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4b, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
 	0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4b, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
 	0x82, 0xf0, 0x92, 0x4a, 0xd3, 0xf4, 0x20, 0xb2, 0x52, 0x0a, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9,
 	0x82, 0xf0, 0x92, 0x4a, 0xd3, 0xf4, 0x20, 0xb2, 0x52, 0x0a, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9,
@@ -463,8 +416,7 @@ var fileDescriptorPlugin = []byte{
 	0x58, 0x4a, 0x8a, 0x4a, 0x53, 0x83, 0xe0, 0x1a, 0xad, 0x2a, 0xb8, 0x04, 0x4b, 0x72, 0x8a, 0xe3,
 	0x58, 0x4a, 0x8a, 0x4a, 0x53, 0x83, 0xe0, 0x1a, 0xad, 0x2a, 0xb8, 0x04, 0x4b, 0x72, 0x8a, 0xe3,
 	0x13, 0x51, 0xdc, 0x22, 0x87, 0xc5, 0xb4, 0x92, 0x8c, 0xfc, 0x14, 0x98, 0x61, 0x2f, 0x9f, 0xf6,
 	0x13, 0x51, 0xdc, 0x22, 0x87, 0xc5, 0xb4, 0x92, 0x8c, 0xfc, 0x14, 0x98, 0x61, 0x2f, 0x9f, 0xf6,
 	0x2a, 0x2b, 0x30, 0x6a, 0x70, 0x1b, 0x69, 0xe8, 0x61, 0x0f, 0x03, 0x3d, 0x74, 0xef, 0x05, 0x09,
 	0x2a, 0x2b, 0x30, 0x6a, 0x70, 0x1b, 0x69, 0xe8, 0x61, 0x0f, 0x03, 0x3d, 0x74, 0xef, 0x05, 0x09,
-	0x94, 0xe4, 0x14, 0xa3, 0x88, 0x38, 0xc9, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3,
-	0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e,
-	0xf1, 0xc1, 0x23, 0x39, 0x46, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x04, 0x4e, 0xf8, 0x38, 0x6b,
-	0x01, 0x00, 0x00,
+	0x94, 0xe4, 0x14, 0xa3, 0x88, 0x38, 0x49, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43,
+	0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e,
+	0x11, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x4c, 0x2c, 0xf3, 0x67, 0x01, 0x00, 0x00,
 }

+ 2 - 8
vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go

@@ -1,9 +1,3 @@
-// Package ptypes is a copy of the golang/protobuf/ptypes that we'll need to
-// use with our regenerated ptypes until google gets their act together and
-// makes their "Well Known Types" actually usable by other parties.
-//
-// It is more likely that this issue will be resolved by gogo.
-//
-// Note that this is not a vendoring of the package. We have to change the
-// types to match the generated types.
+// Package ptypes provides utility functions for use with
+// gogo/protobuf/ptypes.
 package ptypes

+ 3 - 121
vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go

@@ -1,135 +1,17 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
 package ptypes

-// This file implements operations on google.protobuf.Timestamp.
-
 import (
-	"errors"
-	"fmt"
 	"time"

-	tspb "github.com/docker/swarmkit/api/timestamp"
-)
-
-const (
-	// Seconds field of the earliest valid Timestamp.
-	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	minValidSeconds = -62135596800
-	// Seconds field just after the latest valid Timestamp.
-	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	maxValidSeconds = 253402300800
+	gogotypes "github.com/gogo/protobuf/types"
 )

-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
-//
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *tspb.Timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
-
-// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
-	seconds := t.Unix()
-	nanos := int32(t.Sub(time.Unix(seconds, 0)))
-	ts := &tspb.Timestamp{
-		Seconds: seconds,
-		Nanos:   nanos,
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
-
 // MustTimestampProto converts time.Time to a google.protobuf.Timestamp proto.
 // It panics if input timestamp is invalid.
-func MustTimestampProto(t time.Time) *tspb.Timestamp {
-	ts, err := TimestampProto(t)
+func MustTimestampProto(t time.Time) *gogotypes.Timestamp {
+	ts, err := gogotypes.TimestampProto(t)
 	if err != nil {
 		panic(err.Error())
 	}
 	return ts
 }
-
-// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
-// Timestamps, it returns an error message in parentheses.
-func TimestampString(ts *tspb.Timestamp) string {
-	t, err := Timestamp(ts)
-	if err != nil {
-		return fmt.Sprintf("(%v)", err)
-	}
-	return t.Format(time.RFC3339Nano)
-}
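
With the hand-rolled Timestamp helpers removed, swarmkit's ptypes package is only a thin wrapper over github.com/gogo/protobuf/types. A minimal sketch (not part of this diff) of the round trip callers now do directly through gogo's well-known-type helpers:

package main

import (
	"fmt"
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

func main() {
	// time.Time -> google.protobuf.Timestamp (gogo's generated type) ...
	ts, err := gogotypes.TimestampProto(time.Now().UTC())
	if err != nil {
		panic(err)
	}
	// ... and back to time.Time.
	t, err := gogotypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(time.RFC3339Nano))
}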

+ 88 - 68
vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go

@@ -524,6 +524,22 @@ var E_Castvalue = &proto.ExtensionDesc{
 	Tag:           "bytes,65009,opt,name=castvalue",
 }

+var E_Stdtime = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65010,
+	Name:          "gogoproto.stdtime",
+	Tag:           "varint,65010,opt,name=stdtime",
+}
+
+var E_Stdduration = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65011,
+	Name:          "gogoproto.stdduration",
+	Tag:           "varint,65011,opt,name=stdduration",
+}
+
 func init() {
 	proto.RegisterExtension(E_GoprotoEnumPrefix)
 	proto.RegisterExtension(E_GoprotoEnumStringer)
@@ -587,79 +603,83 @@ func init() {
 	proto.RegisterExtension(E_Casttype)
 	proto.RegisterExtension(E_Castkey)
 	proto.RegisterExtension(E_Castvalue)
+	proto.RegisterExtension(E_Stdtime)
+	proto.RegisterExtension(E_Stdduration)
 }

 func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) }

 var fileDescriptorGogo = []byte{
-	// 1098 bytes of a gzipped FileDescriptorProto
+	// 1129 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45,
-	0x14, 0x87, 0x85, 0x70, 0xe4, 0x99, 0xe7, 0x0d, 0x8f, 0x8d, 0x09, 0x11, 0x88, 0xe4, 0xc6, 0xc9,
-	0x39, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0xa3, 0x20, 0x0c, 0x23, 0x13, 0x07, 0x10, 0x87,
-	0x51, 0xcf, 0xb8, 0xdc, 0x19, 0xe8, 0xee, 0x6a, 0xba, 0xba, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21,
+	0x14, 0x87, 0x85, 0x70, 0x64, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, 0xe4,
+	0x9c, 0x22, 0x94, 0xb2, 0x22, 0xcb, 0xb1, 0x9c, 0x51, 0x10, 0x86, 0x91, 0x89, 0x03, 0x88, 0xc3,
+	0xa8, 0x67, 0xa6, 0xdc, 0x69, 0xe8, 0xee, 0x6a, 0xba, 0xaa, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21,
 	0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0,
-	0x02, 0x98, 0x4d, 0xf2, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x1e, 0x8f, 0x54, 0x35, 0xb7,
-	0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xbf, 0x69, 0x00, 0x5f, 0xf9, 0x6a, 0x31, 0x4e,
-	0x54, 0xaa, 0x1a, 0x75, 0xbc, 0xce, 0x2f, 0x8f, 0x1c, 0xf5, 0x95, 0xf2, 0x03, 0x79, 0x3c, 0xff,
-	0xab, 0x93, 0x6d, 0x1f, 0xdf, 0x92, 0xba, 0x9b, 0xf4, 0xe2, 0x54, 0x25, 0xc5, 0x62, 0xf1, 0x20,
-	0xcc, 0xd1, 0xe2, 0xb6, 0x8c, 0xb2, 0xb0, 0x1d, 0x27, 0x72, 0xbb, 0x77, 0xa9, 0x71, 0xd7, 0x62,
-	0x41, 0x2e, 0x32, 0xb9, 0xb8, 0x16, 0x65, 0xe1, 0x43, 0x71, 0xda, 0x53, 0x91, 0x3e, 0x7c, 0xf3,
-	0xb7, 0x5b, 0x8f, 0xde, 0x72, 0x6f, 0x6d, 0x63, 0x96, 0x50, 0xfc, 0x5f, 0x2b, 0x07, 0xc5, 0x06,
-	0xdc, 0x5e, 0xf1, 0xe9, 0x34, 0xe9, 0x45, 0xbe, 0x4c, 0x2c, 0xc6, 0x9f, 0xc8, 0x38, 0x67, 0x18,
-	0x1f, 0x26, 0x54, 0xac, 0xc2, 0xd4, 0x28, 0xae, 0x9f, 0xc9, 0x35, 0x29, 0x4d, 0x49, 0x13, 0x66,
-	0x72, 0x49, 0x37, 0xd3, 0xa9, 0x0a, 0x23, 0x2f, 0x94, 0x16, 0xcd, 0x2f, 0xb9, 0xa6, 0xbe, 0x31,
-	0x8d, 0xd8, 0x6a, 0x49, 0x89, 0xf3, 0x30, 0x8f, 0x9f, 0x5c, 0xf4, 0x82, 0x4c, 0x9a, 0xb6, 0x63,
-	0x43, 0x6d, 0xe7, 0x71, 0x19, 0x2b, 0x7f, 0xbd, 0x32, 0x96, 0x2b, 0xe7, 0x4a, 0x81, 0xe1, 0x35,
-	0x3a, 0xe1, 0xcb, 0x34, 0x95, 0x89, 0x6e, 0x7b, 0x41, 0x30, 0x64, 0x93, 0x67, 0x7a, 0x41, 0x69,
-	0xbc, 0xba, 0x5b, 0xed, 0x44, 0xb3, 0x20, 0x57, 0x82, 0x40, 0x6c, 0xc2, 0x1d, 0x43, 0x3a, 0xeb,
-	0xe0, 0xbc, 0x46, 0xce, 0xf9, 0x03, 0xdd, 0x45, 0x6d, 0x0b, 0xf8, 0xf3, 0xb2, 0x1f, 0x0e, 0xce,
-	0x77, 0xc8, 0xd9, 0x20, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x61, 0xf6, 0xa2, 0x4c, 0x3a, 0x4a, 0xcb,
-	0xb6, 0x7c, 0x2a, 0xf3, 0x02, 0x07, 0xdd, 0x75, 0xd2, 0xcd, 0x10, 0xb8, 0x86, 0x1c, 0xba, 0x4e,
-	0x42, 0x6d, 0xdb, 0xeb, 0x4a, 0x07, 0xc5, 0x0d, 0x52, 0x8c, 0xe3, 0x7a, 0x44, 0x57, 0x60, 0xd2,
-	0x57, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x12, 0x3e, 0xc1, 0x0c, 0x29, 0x62, 0x15, 0x67, 0x81, 0x97,
-	0xba, 0xec, 0xe0, 0x3d, 0x56, 0x30, 0x43, 0x8a, 0x11, 0xca, 0xfa, 0x3e, 0x2b, 0xb4, 0x51, 0xcf,
-	0x65, 0x98, 0x50, 0x51, 0xb0, 0xa3, 0x22, 0x97, 0x4d, 0x7c, 0x40, 0x06, 0x20, 0x04, 0x05, 0x4b,
-	0x50, 0x77, 0x6d, 0xc4, 0x87, 0x84, 0xd7, 0x24, 0x77, 0xa0, 0x09, 0x33, 0x3c, 0x64, 0x7a, 0x2a,
-	0x72, 0x50, 0x7c, 0x44, 0x8a, 0x69, 0x03, 0xa3, 0xdb, 0x48, 0xa5, 0x4e, 0x7d, 0xe9, 0x22, 0xf9,
-	0x98, 0x6f, 0x83, 0x10, 0x2a, 0x65, 0x47, 0x46, 0xdd, 0x0b, 0x6e, 0x86, 0x4f, 0xb8, 0x94, 0xcc,
-	0xa0, 0x62, 0x15, 0xa6, 0x42, 0x2f, 0xd1, 0x17, 0xbc, 0xc0, 0xa9, 0x1d, 0x9f, 0x92, 0x63, 0xb2,
-	0x84, 0xa8, 0x22, 0x59, 0x34, 0x8a, 0xe6, 0x33, 0xae, 0x88, 0x81, 0xd1, 0xd1, 0xd3, 0xa9, 0xd7,
-	0x09, 0x64, 0x7b, 0x14, 0xdb, 0xe7, 0x7c, 0xf4, 0x0a, 0x76, 0xdd, 0x34, 0x2e, 0x41, 0x5d, 0xf7,
-	0x2e, 0x3b, 0x69, 0xbe, 0xe0, 0x4e, 0xe7, 0x00, 0xc2, 0x8f, 0xc1, 0x9d, 0x43, 0x47, 0xbd, 0x83,
-	0xec, 0x4b, 0x92, 0x2d, 0x0c, 0x19, 0xf7, 0x34, 0x12, 0x46, 0x55, 0x7e, 0xc5, 0x23, 0x41, 0x0e,
-	0xb8, 0x5a, 0x30, 0x9f, 0x45, 0xda, 0xdb, 0x1e, 0xad, 0x6a, 0x5f, 0x73, 0xd5, 0x0a, 0xb6, 0x52,
-	0xb5, 0x73, 0xb0, 0x40, 0xc6, 0xd1, 0xfa, 0xfa, 0x0d, 0x0f, 0xd6, 0x82, 0xde, 0xac, 0x76, 0xf7,
-	0x71, 0x38, 0x52, 0x96, 0xf3, 0x52, 0x2a, 0x23, 0x8d, 0x4c, 0x3b, 0xf4, 0x62, 0x07, 0xf3, 0x4d,
-	0x32, 0xf3, 0xc4, 0x5f, 0x2b, 0x05, 0xeb, 0x5e, 0x8c, 0xf2, 0x47, 0xe1, 0x30, 0xcb, 0xb3, 0x28,
-	0x91, 0x5d, 0xe5, 0x47, 0xbd, 0xcb, 0x72, 0xcb, 0x41, 0xfd, 0xed, 0x40, 0xab, 0x36, 0x0d, 0x1c,
-	0xcd, 0x67, 0xe1, 0xb6, 0xf2, 0xf7, 0x46, 0xbb, 0x17, 0xc6, 0x2a, 0x49, 0x2d, 0xc6, 0xef, 0xb8,
-	0x53, 0x25, 0x77, 0x36, 0xc7, 0xc4, 0x1a, 0x4c, 0xe7, 0x7f, 0xba, 0x3e, 0x92, 0xdf, 0x93, 0x68,
-	0xaa, 0x4f, 0xd1, 0xe0, 0xe8, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xf9, 0xf7, 0x03, 0x0f, 0x0e, 0x42,
-	0x8a, 0xa7, 0x6f, 0x66, 0x20, 0x89, 0x1b, 0xf7, 0x1c, 0x90, 0xac, 0x4b, 0xad, 0x3d, 0xbf, 0xf4,
-	0x3c, 0xbd, 0x47, 0x67, 0xb6, 0x1a, 0xc4, 0xe2, 0x01, 0x2c, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x2b,
-	0x7b, 0x65, 0x85, 0x2a, 0x69, 0x29, 0xce, 0xc0, 0x54, 0x25, 0x2a, 0xed, 0xaa, 0x67, 0x48, 0x35,
-	0x69, 0x26, 0xa5, 0x38, 0x01, 0x63, 0x18, 0x7b, 0x76, 0xfc, 0x59, 0xc2, 0xf3, 0xe5, 0xe2, 0x14,
-	0xd4, 0x38, 0xee, 0xec, 0xe8, 0x73, 0x84, 0x96, 0x08, 0xe2, 0x1c, 0x75, 0x76, 0xfc, 0x79, 0xc6,
-	0x19, 0x41, 0xdc, 0xbd, 0x84, 0x3f, 0xbe, 0x38, 0x46, 0xe3, 0x8a, 0x6b, 0xb7, 0x04, 0xe3, 0x94,
-	0x71, 0x76, 0xfa, 0x05, 0xfa, 0x72, 0x26, 0xc4, 0x7d, 0x70, 0xc8, 0xb1, 0xe0, 0x2f, 0x11, 0x5a,
-	0xac, 0x17, 0xab, 0x30, 0x61, 0xe4, 0x9a, 0x1d, 0x7f, 0x99, 0x70, 0x93, 0xc2, 0xad, 0x53, 0xae,
-	0xd9, 0x05, 0xaf, 0xf0, 0xd6, 0x89, 0xc0, 0xb2, 0x71, 0xa4, 0xd9, 0xe9, 0x57, 0xb9, 0xea, 0x8c,
-	0x88, 0x65, 0xa8, 0x97, 0x63, 0xca, 0xce, 0xbf, 0x46, 0x7c, 0x9f, 0xc1, 0x0a, 0x18, 0x63, 0xd2,
-	0xae, 0x78, 0x9d, 0x2b, 0x60, 0x50, 0x78, 0x8c, 0x06, 0xa3, 0xcf, 0x6e, 0x7a, 0x83, 0x8f, 0xd1,
-	0x40, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc9, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0x06,
-	0xb3, 0xc4, 0xee, 0x78, 0x8b, 0xb7, 0x31, 0x10, 0x25, 0xa2, 0x05, 0x8d, 0x83, 0x39, 0x62, 0xf7,
-	0xbd, 0x4d, 0xbe, 0xd9, 0x03, 0x31, 0x22, 0x1e, 0x81, 0x85, 0xe1, 0x19, 0x62, 0xb7, 0x5e, 0xdd,
-	0x1b, 0xf8, 0xd5, 0x6f, 0x46, 0x88, 0x38, 0xd7, 0xff, 0xd5, 0x6f, 0xe6, 0x87, 0x5d, 0x7b, 0x6d,
-	0xaf, 0xfa, 0x62, 0x67, 0xc6, 0x87, 0x58, 0x01, 0xe8, 0x8f, 0x6e, 0xbb, 0xeb, 0x3a, 0xb9, 0x0c,
-	0x08, 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x83, 0x8f, 0x06, 0x11, 0x62, 0x09, 0x6a, 0x51, 0x16,
-	0x04, 0xf8, 0x70, 0x34, 0xee, 0x1e, 0x12, 0x13, 0x32, 0xd8, 0x62, 0xf6, 0xf7, 0x7d, 0x3a, 0x18,
-	0x0c, 0x88, 0x13, 0x70, 0x48, 0x86, 0x1d, 0xb9, 0x65, 0x23, 0xff, 0xd8, 0xe7, 0x81, 0x80, 0xab,
-	0xc5, 0x32, 0x40, 0xf1, 0xd2, 0x98, 0xee, 0xc4, 0xd6, 0x6f, 0xfd, 0x73, 0xbf, 0x78, 0x07, 0x35,
-	0x90, 0xbe, 0x20, 0x7f, 0xeb, 0xb4, 0x08, 0x76, 0xab, 0x82, 0xfc, 0x45, 0xf3, 0x24, 0x8c, 0x3f,
-	0xa1, 0x55, 0x94, 0x7a, 0xbe, 0x8d, 0xfe, 0x8b, 0x68, 0x5e, 0x8f, 0x05, 0x0b, 0x55, 0x22, 0x53,
-	0xcf, 0xd7, 0x36, 0xf6, 0x6f, 0x62, 0x4b, 0x00, 0xe1, 0xae, 0xa7, 0x53, 0x97, 0xfb, 0xfe, 0x87,
-	0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xdc, 0xb1, 0xb1, 0xff, 0xf2, 0xa6, 0x69, 0xbd, 0x38,
-	0x05, 0x75, 0xbc, 0xcc, 0xdf, 0xb7, 0x6d, 0xf0, 0x7f, 0x04, 0xf7, 0x89, 0xd3, 0xc7, 0x60, 0xae,
-	0xab, 0xc2, 0x41, 0xec, 0x34, 0x34, 0x55, 0x53, 0xb5, 0xf2, 0x07, 0xf1, 0xff, 0x00, 0x00, 0x00,
-	0xff, 0xff, 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00,
+	0x02, 0x84, 0xdd, 0x37, 0x5f, 0x50, 0x75, 0xbf, 0xd7, 0x53, 0xdd, 0x1e, 0xa9, 0x6a, 0x6e, 0xe3,
+	0x71, 0x7d, 0xdf, 0x54, 0xbf, 0x37, 0xf5, 0x7e, 0x53, 0x00, 0xbe, 0xf0, 0xc5, 0x52, 0x92, 0x0a,
+	0x25, 0x9a, 0x0d, 0xfd, 0x3a, 0x7f, 0x79, 0xe8, 0xb0, 0x2f, 0x84, 0x1f, 0xf2, 0xa3, 0xf9, 0x5f,
+	0xdd, 0x6c, 0xfb, 0x68, 0x9f, 0xcb, 0x5e, 0x1a, 0x24, 0x4a, 0xa4, 0xc5, 0x62, 0x76, 0x3f, 0xcc,
+	0xe3, 0xe2, 0x0e, 0x8f, 0xb3, 0xa8, 0x93, 0xa4, 0x7c, 0x3b, 0xb8, 0xd0, 0xbc, 0x63, 0xa9, 0x20,
+	0x97, 0x88, 0x5c, 0x5a, 0x8f, 0xb3, 0xe8, 0x81, 0x44, 0x05, 0x22, 0x96, 0x07, 0xaf, 0xff, 0x72,
+	0xf3, 0xe1, 0x9b, 0xee, 0x9e, 0xd8, 0x9c, 0x43, 0x54, 0xff, 0xaf, 0x9d, 0x83, 0x6c, 0x13, 0x6e,
+	0xad, 0xf8, 0xa4, 0x4a, 0x83, 0xd8, 0xe7, 0xa9, 0xc5, 0xf8, 0x03, 0x1a, 0xe7, 0x0d, 0xe3, 0x83,
+	0x88, 0xb2, 0x35, 0x98, 0x1e, 0xc5, 0xf5, 0x23, 0xba, 0xa6, 0xb8, 0x29, 0x69, 0xc1, 0x6c, 0x2e,
+	0xe9, 0x65, 0x52, 0x89, 0x28, 0xf6, 0x22, 0x6e, 0xd1, 0xfc, 0x94, 0x6b, 0x1a, 0x9b, 0x33, 0x1a,
+	0x5b, 0x2b, 0x29, 0x76, 0x16, 0x16, 0xf4, 0x3b, 0xe7, 0xbd, 0x30, 0xe3, 0xa6, 0xed, 0xc8, 0x50,
+	0xdb, 0x59, 0xbd, 0x8c, 0x94, 0x3f, 0x5f, 0x1a, 0xcb, 0x95, 0xf3, 0xa5, 0xc0, 0xf0, 0x1a, 0x9d,
+	0xf0, 0xb9, 0x52, 0x3c, 0x95, 0x1d, 0x2f, 0x0c, 0x87, 0x6c, 0xf2, 0x54, 0x10, 0x96, 0xc6, 0xcb,
+	0x37, 0xaa, 0x9d, 0x68, 0x15, 0xe4, 0x6a, 0x18, 0xb2, 0x2d, 0xb8, 0x6d, 0x48, 0x67, 0x1d, 0x9c,
+	0x57, 0xd0, 0xb9, 0xb0, 0xaf, 0xbb, 0x5a, 0xdb, 0x06, 0x7a, 0xbf, 0xec, 0x87, 0x83, 0xf3, 0x2d,
+	0x74, 0x36, 0x91, 0xa5, 0xb6, 0x68, 0xe3, 0xbd, 0x30, 0x77, 0x9e, 0xa7, 0x5d, 0x21, 0x79, 0x87,
+	0x3f, 0x91, 0x79, 0xa1, 0x83, 0xee, 0x2a, 0xea, 0x66, 0x11, 0x5c, 0xd7, 0x9c, 0x76, 0x1d, 0x87,
+	0x89, 0x6d, 0xaf, 0xc7, 0x1d, 0x14, 0xd7, 0x50, 0x31, 0xae, 0xd7, 0x6b, 0x74, 0x15, 0xa6, 0x7c,
+	0x51, 0x3c, 0x92, 0x03, 0xfe, 0x36, 0xe2, 0x93, 0xc4, 0xa0, 0x22, 0x11, 0x49, 0x16, 0x7a, 0xca,
+	0x65, 0x07, 0xef, 0x90, 0x82, 0x18, 0x54, 0x8c, 0x50, 0xd6, 0x77, 0x49, 0x21, 0x8d, 0x7a, 0xae,
+	0xc0, 0xa4, 0x88, 0xc3, 0x1d, 0x11, 0xbb, 0x6c, 0xe2, 0x3d, 0x34, 0x00, 0x22, 0x5a, 0xb0, 0x0c,
+	0x0d, 0xd7, 0x46, 0xbc, 0x8f, 0xf8, 0x04, 0xa7, 0x0e, 0xb4, 0x60, 0x96, 0x86, 0x4c, 0x20, 0x62,
+	0x07, 0xc5, 0x07, 0xa8, 0x98, 0x31, 0x30, 0x7c, 0x0c, 0xc5, 0xa5, 0xf2, 0xb9, 0x8b, 0xe4, 0x43,
+	0x7a, 0x0c, 0x44, 0xb0, 0x94, 0x5d, 0x1e, 0xf7, 0xce, 0xb9, 0x19, 0x3e, 0xa2, 0x52, 0x12, 0xa3,
+	0x15, 0x6b, 0x30, 0x1d, 0x79, 0xa9, 0x3c, 0xe7, 0x85, 0x4e, 0xed, 0xf8, 0x18, 0x1d, 0x53, 0x25,
+	0x84, 0x15, 0xc9, 0xe2, 0x51, 0x34, 0x9f, 0x50, 0x45, 0x0c, 0x0c, 0x8f, 0x9e, 0x54, 0x5e, 0x37,
+	0xe4, 0x9d, 0x51, 0x6c, 0x9f, 0xd2, 0xd1, 0x2b, 0xd8, 0x0d, 0xd3, 0xb8, 0x0c, 0x0d, 0x19, 0x5c,
+	0x74, 0xd2, 0x7c, 0x46, 0x9d, 0xce, 0x01, 0x0d, 0x3f, 0x02, 0xb7, 0x0f, 0x1d, 0xf5, 0x0e, 0xb2,
+	0xcf, 0x51, 0xb6, 0x38, 0x64, 0xdc, 0xe3, 0x48, 0x18, 0x55, 0xf9, 0x05, 0x8d, 0x04, 0x5e, 0x73,
+	0xb5, 0x61, 0x21, 0x8b, 0xa5, 0xb7, 0x3d, 0x5a, 0xd5, 0xbe, 0xa4, 0xaa, 0x15, 0x6c, 0xa5, 0x6a,
+	0x67, 0x60, 0x11, 0x8d, 0xa3, 0xf5, 0xf5, 0x2b, 0x1a, 0xac, 0x05, 0xbd, 0x55, 0xed, 0xee, 0xa3,
+	0x70, 0xa8, 0x2c, 0xe7, 0x05, 0xc5, 0x63, 0xa9, 0x99, 0x4e, 0xe4, 0x25, 0x0e, 0xe6, 0xeb, 0x68,
+	0xa6, 0x89, 0xbf, 0x5e, 0x0a, 0x36, 0xbc, 0x44, 0xcb, 0x1f, 0x86, 0x83, 0x24, 0xcf, 0xe2, 0x94,
+	0xf7, 0x84, 0x1f, 0x07, 0x17, 0x79, 0xdf, 0x41, 0xfd, 0x75, 0xad, 0x55, 0x5b, 0x06, 0xae, 0xcd,
+	0xa7, 0xe1, 0x96, 0xf2, 0xf7, 0x46, 0x27, 0x88, 0x12, 0x91, 0x2a, 0x8b, 0xf1, 0x1b, 0xea, 0x54,
+	0xc9, 0x9d, 0xce, 0x31, 0xb6, 0x0e, 0x33, 0xf9, 0x9f, 0xae, 0x5f, 0xc9, 0x6f, 0x51, 0x34, 0x3d,
+	0xa0, 0x70, 0x70, 0xf4, 0x44, 0x94, 0x78, 0xa9, 0xcb, 0xfc, 0xfb, 0x8e, 0x06, 0x07, 0x22, 0xc5,
+	0xb7, 0x6f, 0xb6, 0x96, 0xc4, 0xcd, 0xbb, 0xf6, 0x49, 0x36, 0xb8, 0x94, 0x9e, 0x5f, 0x7a, 0x9e,
+	0xdc, 0xc5, 0x33, 0x5b, 0x0d, 0x62, 0x76, 0x9f, 0x2e, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x4b, 0xbb,
+	0x65, 0x85, 0x2a, 0x69, 0xc9, 0x4e, 0xc1, 0x74, 0x25, 0x2a, 0xed, 0xaa, 0xa7, 0x50, 0x35, 0x65,
+	0x26, 0x25, 0x3b, 0x06, 0x63, 0x3a, 0xf6, 0xec, 0xf8, 0xd3, 0x88, 0xe7, 0xcb, 0xd9, 0x09, 0x98,
+	0xa0, 0xb8, 0xb3, 0xa3, 0xcf, 0x20, 0x5a, 0x22, 0x1a, 0xa7, 0xa8, 0xb3, 0xe3, 0xcf, 0x12, 0x4e,
+	0x88, 0xc6, 0xdd, 0x4b, 0xf8, 0xfd, 0xf3, 0x63, 0x38, 0xae, 0xa8, 0x76, 0xcb, 0x30, 0x8e, 0x19,
+	0x67, 0xa7, 0x9f, 0xc3, 0x0f, 0x27, 0x82, 0xdd, 0x03, 0x07, 0x1c, 0x0b, 0xfe, 0x02, 0xa2, 0xc5,
+	0x7a, 0xb6, 0x06, 0x93, 0x46, 0xae, 0xd9, 0xf1, 0x17, 0x11, 0x37, 0x29, 0xbd, 0x75, 0xcc, 0x35,
+	0xbb, 0xe0, 0x25, 0xda, 0x3a, 0x12, 0xba, 0x6c, 0x14, 0x69, 0x76, 0xfa, 0x65, 0xaa, 0x3a, 0x21,
+	0x6c, 0x05, 0x1a, 0xe5, 0x98, 0xb2, 0xf3, 0xaf, 0x20, 0x3f, 0x60, 0x74, 0x05, 0x8c, 0x31, 0x69,
+	0x57, 0xbc, 0x4a, 0x15, 0x30, 0x28, 0x7d, 0x8c, 0xea, 0xd1, 0x67, 0x37, 0xbd, 0x46, 0xc7, 0xa8,
+	0x96, 0x7c, 0xba, 0x9b, 0xf9, 0xb4, 0xb0, 0x2b, 0x5e, 0xa7, 0x6e, 0xe6, 0xeb, 0xf5, 0x36, 0xea,
+	0x59, 0x62, 0x77, 0xbc, 0x41, 0xdb, 0xa8, 0x45, 0x09, 0x6b, 0x43, 0x73, 0x7f, 0x8e, 0xd8, 0x7d,
+	0x6f, 0xa2, 0x6f, 0x6e, 0x5f, 0x8c, 0xb0, 0x87, 0x60, 0x71, 0x78, 0x86, 0xd8, 0xad, 0x97, 0x77,
+	0x6b, 0xbf, 0xfa, 0xcd, 0x08, 0x61, 0x67, 0x06, 0xbf, 0xfa, 0xcd, 0xfc, 0xb0, 0x6b, 0xaf, 0xec,
+	0x56, 0x2f, 0x76, 0x66, 0x7c, 0xb0, 0x55, 0x80, 0xc1, 0xe8, 0xb6, 0xbb, 0xae, 0xa2, 0xcb, 0x80,
+	0xf4, 0xd1, 0xc0, 0xc9, 0x6d, 0xe7, 0xaf, 0xd1, 0xd1, 0x40, 0x82, 0x2d, 0xc3, 0x44, 0x9c, 0x85,
+	0xa1, 0xfe, 0x72, 0x34, 0xef, 0x1c, 0x12, 0x13, 0x3c, 0xec, 0x13, 0xfb, 0xeb, 0x1e, 0x1e, 0x0c,
+	0x02, 0xd8, 0x31, 0x38, 0xc0, 0xa3, 0x2e, 0xef, 0xdb, 0xc8, 0xdf, 0xf6, 0x68, 0x20, 0xe8, 0xd5,
+	0x6c, 0x05, 0xa0, 0xb8, 0x34, 0xaa, 0x9d, 0xc4, 0xfa, 0xa9, 0xbf, 0xef, 0x15, 0x77, 0x50, 0x03,
+	0x19, 0x08, 0xf2, 0x5b, 0xa7, 0x45, 0x70, 0xa3, 0x2a, 0xc8, 0x2f, 0x9a, 0xc7, 0x61, 0xfc, 0x31,
+	0x29, 0x62, 0xe5, 0xf9, 0x36, 0xfa, 0x0f, 0xa4, 0x69, 0xbd, 0x2e, 0x58, 0x24, 0x52, 0xae, 0x3c,
+	0x5f, 0xda, 0xd8, 0x3f, 0x91, 0x2d, 0x01, 0x0d, 0xf7, 0x3c, 0xa9, 0x5c, 0x9e, 0xfb, 0x2f, 0x82,
+	0x09, 0xd0, 0x9b, 0xd6, 0xaf, 0x1f, 0xe7, 0x3b, 0x36, 0xf6, 0x6f, 0xda, 0x34, 0xae, 0x67, 0x27,
+	0xa0, 0xa1, 0x5f, 0xe6, 0xf7, 0x6d, 0x1b, 0xfc, 0x0f, 0xc2, 0x03, 0x42, 0x7f, 0xb2, 0x54, 0x7d,
+	0x15, 0xd8, 0x8b, 0xfd, 0x2f, 0x76, 0x9a, 0xd6, 0xb3, 0x55, 0x98, 0x94, 0xaa, 0xdf, 0xcf, 0x52,
+	0x2f, 0x1f, 0xfe, 0x16, 0xfc, 0xbf, 0xbd, 0xf2, 0x32, 0x57, 0x32, 0x27, 0x8f, 0xc0, 0x7c, 0x4f,
+	0x44, 0x75, 0xf0, 0x24, 0xb4, 0x44, 0x4b, 0xb4, 0xf3, 0x63, 0xf0, 0x7f, 0x00, 0x00, 0x00, 0xff,
+	0xff, 0x3f, 0x9b, 0x2b, 0x54, 0xfc, 0x11, 0x00, 0x00,
 }

+ 3 - 0
vendor/github.com/gogo/protobuf/gogoproto/gogo.proto

@@ -119,4 +119,7 @@ extend google.protobuf.FieldOptions {
 	optional string casttype = 65007;
 	optional string castkey = 65008;
 	optional string castvalue = 65009;
+
+	optional bool stdtime = 65010;
+	optional bool stdduration = 65011;
 }

+ 35 - 0
vendor/github.com/gogo/protobuf/gogoproto/helper.go

@@ -39,6 +39,14 @@ func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
 	return proto.GetBoolExtension(field.Options, E_Nullable, true)
 }

+func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Stdtime, false)
+}
+
+func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Stdduration, false)
+}
+
 func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
 	nullable := IsNullable(field)
 	if field.IsMessage() || IsCustomType(field) {
@@ -83,6 +91,9 @@ func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
 }
 }
 
 
 func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
 func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_Customtype)
 		v, err := proto.GetExtension(field.Options, E_Customtype)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -93,6 +104,9 @@ func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
 }
 }
 
 
 func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
 func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_Casttype)
 		v, err := proto.GetExtension(field.Options, E_Casttype)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -103,6 +117,9 @@ func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
 }
 }
 
 
 func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
 func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_Castkey)
 		v, err := proto.GetExtension(field.Options, E_Castkey)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -113,6 +130,9 @@ func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
 }
 }
 
 
 func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
 func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_Castvalue)
 		v, err := proto.GetExtension(field.Options, E_Castvalue)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -147,6 +167,9 @@ func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool
 }
 }
 
 
 func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
 func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_Customname)
 		v, err := proto.GetExtension(field.Options, E_Customname)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -157,6 +180,9 @@ func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
 }
 }
 
 
 func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
 func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_EnumCustomname)
 		v, err := proto.GetExtension(field.Options, E_EnumCustomname)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -167,6 +193,9 @@ func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
 }
 }
 
 
 func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
 func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
 		v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -177,6 +206,9 @@ func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) str
 }
 }
 
 
 func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
 func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
+	if field == nil {
+		return nil
+	}
 	if field.Options != nil {
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_Jsontag)
 		v, err := proto.GetExtension(field.Options, E_Jsontag)
 		if err == nil && v.(*string) != nil {
 		if err == nil && v.(*string) != nil {
@@ -187,6 +219,9 @@ func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
 }
 }
 
 
 func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
 func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
+	if field == nil {
+		return nil
+	}
 	if field.Options != nil {
 		v, err := proto.GetExtension(field.Options, E_Moretags)
 		if err == nil && v.(*string) != nil {
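
The new IsStdTime/IsStdDuration helpers are what code generators query to decide whether a field should map to time.Time or time.Duration. A hedged sketch of how a plugin might check the option on a field descriptor (the descriptor here is built by hand purely for illustration):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	opts := &descriptor.FieldOptions{}
	// Mark the field with (gogoproto.stdtime) = true, as a .proto file would.
	if err := proto.SetExtension(opts, gogoproto.E_Stdtime, proto.Bool(true)); err != nil {
		panic(err)
	}
	field := &descriptor.FieldDescriptorProto{Options: opts}

	fmt.Println(gogoproto.IsStdTime(field))     // true
	fmt.Println(gogoproto.IsStdDuration(field)) // false
}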

+ 104 - 8
vendor/github.com/gogo/protobuf/proto/decode.go

@@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
 // int32, int64, uint32, uint64, bool, and enum
 // protocol buffer types.
 func DecodeVarint(buf []byte) (x uint64, n int) {
-	// x, n already 0
 	for shift := uint(0); shift < 64; shift += 7 {
 	for shift := uint(0); shift < 64; shift += 7 {
 		if n >= len(buf) {
 		if n >= len(buf) {
 			return 0, 0
 			return 0, 0
@@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
 	return 0, 0
 	return 0, 0
 }
 }
 
 
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	// x, err already 0
-
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
 	i := p.index
 	i := p.index
 	l := len(p.buf)
 	l := len(p.buf)
 
 
@@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
 	return
 	return
 }
 }
 
 
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
 // DecodeFixed64 reads a 64-bit integer from the Buffer.
 // DecodeFixed64 reads a 64-bit integer from the Buffer.
 // This is the format for the
 // This is the format for the
 // fixed64, sfixed64, and double protocol buffer types.
 // fixed64, sfixed64, and double protocol buffer types.
@@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
 // Buffer and places the decoded result in pb.  If the struct
 // underlying pb does not match the data in the buffer, the results can be
 // unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 func (p *Buffer) Unmarshal(pb Message) error {
 	// If the object can unmarshal itself, let it.
 	if u, ok := pb.(Unmarshaler); ok {
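
The bulk of the new code above is an unrolled fast path inside Buffer.DecodeVarint; the exported behaviour is unchanged. A small, illustrative-only sketch exercising it through the public API:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// Encode a few varints, then read them back through the fast-pathed decoder.
	enc := proto.NewBuffer(nil)
	for _, v := range []uint64{1, 300, 1 << 40} {
		if err := enc.EncodeVarint(v); err != nil {
			panic(err)
		}
	}

	dec := proto.NewBuffer(enc.Bytes())
	for i := 0; i < 3; i++ {
		x, err := dec.DecodeVarint()
		if err != nil {
			panic(err)
		}
		fmt.Println(x) // 1, 300, 1099511627776
	}
}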

+ 17 - 19
vendor/github.com/docker/swarmkit/protobuf/ptypes/duration.go → vendor/github.com/gogo/protobuf/proto/duration.go

@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
-package ptypes
+package proto
 
 
 // This file implements conversions between google.protobuf.Duration
 // This file implements conversions between google.protobuf.Duration
 // and time.Duration.
 // and time.Duration.
@@ -38,64 +38,62 @@ import (
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
 	"time"
 	"time"
-
-	durpb "github.com/docker/swarmkit/api/duration"
 )
 )
 
 
 const (
 const (
-	// Range of a durpb.Duration in seconds, as specified in
+	// Range of a Duration in seconds, as specified in
 	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
 	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
 	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
 	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
 	minSeconds = -maxSeconds
 	minSeconds = -maxSeconds
 )
 )
 
 
-// validateDuration determines whether the durpb.Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid durpb.Duration
-// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
 // is about 10,000 years, and the range of time.Duration is about 290).
 // is about 10,000 years, and the range of time.Duration is about 290).
-func validateDuration(d *durpb.Duration) error {
+func validateDuration(d *duration) error {
 	if d == nil {
 	if d == nil {
 		return errors.New("duration: nil Duration")
 		return errors.New("duration: nil Duration")
 	}
 	}
 	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
 	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
-		return fmt.Errorf("duration: %v: seconds out of range", d)
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
 	}
 	}
 	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
 	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
-		return fmt.Errorf("duration: %v: nanos out of range", d)
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
 	}
 	}
 	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
 	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
 	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
 	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
-		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-// Duration converts a durpb.Duration to a time.Duration. Duration
-// returns an error if the durpb.Duration is invalid or is too large to be
+// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
+// returns an error if the Duration is invalid or is too large to be
 // represented in a time.Duration.
 // represented in a time.Duration.
-func Duration(p *durpb.Duration) (time.Duration, error) {
+func durationFromProto(p *duration) (time.Duration, error) {
 	if err := validateDuration(p); err != nil {
 	if err := validateDuration(p); err != nil {
 		return 0, err
 		return 0, err
 	}
 	}
 	d := time.Duration(p.Seconds) * time.Second
 	d := time.Duration(p.Seconds) * time.Second
 	if int64(d/time.Second) != p.Seconds {
 	if int64(d/time.Second) != p.Seconds {
-		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
 	}
 	}
 	if p.Nanos != 0 {
 	if p.Nanos != 0 {
 		d += time.Duration(p.Nanos)
 		d += time.Duration(p.Nanos)
 		if (d < 0) != (p.Nanos < 0) {
 		if (d < 0) != (p.Nanos < 0) {
-			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
 		}
 		}
 	}
 	}
 	return d, nil
 	return d, nil
 }
 }
 
 
-// DurationProto converts a time.Duration to a durpb.Duration.
-func DurationProto(d time.Duration) *durpb.Duration {
+// DurationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
 	nanos := d.Nanoseconds()
 	nanos := d.Nanoseconds()
 	secs := nanos / 1e9
 	secs := nanos / 1e9
 	nanos -= secs * 1e9
 	nanos -= secs * 1e9
-	return &durpb.Duration{
+	return &duration{
 		Seconds: secs,
 		Nanos:   int32(nanos),
 	}

+ 202 - 0
vendor/github.com/gogo/protobuf/proto/duration_gogo.go

@@ -0,0 +1,202 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
+
+type duration struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *duration) Reset()       { *m = duration{} }
+func (*duration) ProtoMessage()  {}
+func (*duration) String() string { return "duration<string>" }
+
+func init() {
+	RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
+}
+
+func (o *Buffer) decDuration() (time.Duration, error) {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return 0, err
+	}
+	dproto := &duration{}
+	if err := Unmarshal(b, dproto); err != nil {
+		return 0, err
+	}
+	return durationFromProto(dproto)
+}
+
+func (o *Buffer) dec_duration(p *Properties, base structPointer) error {
+	d, err := o.decDuration()
+	if err != nil {
+		return err
+	}
+	word64_Set(structPointer_Word64(base, p.field), o, uint64(d))
+	return nil
+}
+
+func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error {
+	d, err := o.decDuration()
+	if err != nil {
+		return err
+	}
+	word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d))
+	return nil
+}
+
+func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error {
+	d, err := o.decDuration()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType)))
+	setPtrCustomType(newBas, 0, &d)
+	return nil
+}
+
+func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error {
+	d, err := o.decDuration()
+	if err != nil {
+		return err
+	}
+	structPointer_Word64Slice(base, p.field).Append(uint64(d))
+	return nil
+}
+
+func size_duration(p *Properties, base structPointer) (n int) {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+	dur := structPointer_Interface(structp, durationType).(*time.Duration)
+	d := durationProto(*dur)
+	size := Size(d)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_duration(p *Properties, base structPointer) error {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+	dur := structPointer_Interface(structp, durationType).(*time.Duration)
+	d := durationProto(*dur)
+	data, err := Marshal(d)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(data)
+	return nil
+}
+
+func size_ref_duration(p *Properties, base structPointer) (n int) {
+	dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
+	d := durationProto(*dur)
+	size := Size(d)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error {
+	dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
+	d := durationProto(*dur)
+	data, err := Marshal(d)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(data)
+	return nil
+}
+
+func size_slice_duration(p *Properties, base structPointer) (n int) {
+	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
+	durs := *pdurs
+	for i := 0; i < len(durs); i++ {
+		if durs[i] == nil {
+			return 0
+		}
+		dproto := durationProto(*durs[i])
+		size := Size(dproto)
+		n += len(p.tagcode) + size + sizeVarint(uint64(size))
+	}
+	return n
+}
+
+func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error {
+	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
+	durs := *pdurs
+	for i := 0; i < len(durs); i++ {
+		if durs[i] == nil {
+			return errRepeatedHasNil
+		}
+		dproto := durationProto(*durs[i])
+		data, err := Marshal(dproto)
+		if err != nil {
+			return err
+		}
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(data)
+	}
+	return nil
+}
+
+func size_slice_ref_duration(p *Properties, base structPointer) (n int) {
+	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
+	durs := *pdurs
+	for i := 0; i < len(durs); i++ {
+		dproto := durationProto(durs[i])
+		size := Size(dproto)
+		n += len(p.tagcode) + size + sizeVarint(uint64(size))
+	}
+	return n
+}
+
+func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error {
+	pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
+	durs := *pdurs
+	for i := 0; i < len(durs); i++ {
+		dproto := durationProto(durs[i])
+		data, err := Marshal(dproto)
+		if err != nil {
+			return err
+		}
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(data)
+	}
+	return nil
+}

+ 3 - 11
vendor/github.com/gogo/protobuf/proto/encode.go

@@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) {
 	}
 	}
 	p := NewBuffer(nil)
 	err := p.Marshal(pb)
-	if err != nil && !state.shouldContinue(err, nil) {
-		return nil, err
-	}
 	if p.buf == nil && err == nil {
 	if p.buf == nil && err == nil {
 		// Return a non-nil slice on success.
 		// Return a non-nil slice on success.
 		return []byte{}, nil
 		return []byte{}, nil
@@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error {
 	// Can the object marshal itself?
 	// Can the object marshal itself?
 	if m, ok := pb.(Marshaler); ok {
 	if m, ok := pb.(Marshaler); ok {
 		data, err := m.Marshal()
 		data, err := m.Marshal()
-		if err != nil {
-			return err
-		}
 		p.buf = append(p.buf, data...)
 		p.buf = append(p.buf, data...)
-		return nil
+		return err
 	}
 	}
 
 
 	t, base, err := getbase(pb)
 	t, base, err := getbase(pb)
@@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error {
 	}
 	}
 
 
 	if collectStats {
 	if collectStats {
-		stats.Encode++
+		(stats).Encode++ // Parens are to work around a goimports bug.
 	}
 	}
 
 
 	if len(p.buf) > maxMarshalSize {
 	if len(p.buf) > maxMarshalSize {
@@ -309,7 +302,7 @@ func Size(pb Message) (n int) {
 	}
 	}
 
 
 	if collectStats {
 	if collectStats {
-		stats.Size++
+		(stats).Size++ // Parens are to work around a goimports bug.
 	}
 	}
 
 
 	return
 	return
@@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
 		if p.isMarshaler {
 		if p.isMarshaler {
 			m := structPointer_Interface(structp, p.stype).(Marshaler)
 			m := structPointer_Interface(structp, p.stype).(Marshaler)
 			data, _ := m.Marshal()
 			data, _ := m.Marshal()
-			n += len(p.tagcode)
 			n += sizeRawBytes(data)
 			n += sizeRawBytes(data)
 			continue
 			continue
 		}
 		}

+ 6 - 2
vendor/github.com/gogo/protobuf/proto/equal.go

@@ -54,13 +54,17 @@ Equality is defined in this way:
     in a proto3 .proto file, fields are not "set"; specifically,
     in a proto3 .proto file, fields are not "set"; specifically,
     zero length proto3 "bytes" fields are equal (nil == {}).
   - Two repeated fields are equal iff their lengths are the same,
-    although represented by []byte, is not a repeated field)
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
   - Two unset fields are equal.
   - Two unknown field sets are equal if their current
     encoded state is equal.
   - Two extension sets are equal iff they have corresponding
     elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
   - Every other combination of things are not equal.
   - Every other combination of things are not equal.

 The return value is undefined if a and b are not protocol buffers.
+ 3 - 0
vendor/github.com/gogo/protobuf/proto/extensions.go

@@ -587,6 +587,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
 	registeredExtensions := RegisteredExtensions(pb)
 	registeredExtensions := RegisteredExtensions(pb)

 	emap, mu := epb.extensionsRead()
+		return nil, nil
+	}
 	mu.Lock()
 	mu.Lock()
 	defer mu.Unlock()
 	extensions := make([]*ExtensionDesc, 0, len(emap))
+ 1 - 1
vendor/github.com/gogo/protobuf/proto/lib.go

@@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
 // temporary Buffer and are fine for most applications.
 // temporary Buffer and are fine for most applications.
 type Buffer struct {
 	buf   []byte // encode/decode byte stream
+	index int    // read point
 
 
 	// pools of basic types to amortize allocation.
 	// pools of basic types to amortize allocation.
 	bools   []bool
+ 34 - 6
vendor/github.com/gogo/protobuf/proto/properties.go

@@ -190,10 +190,11 @@ type Properties struct {
 	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
 	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
 	oneof    bool   // whether this is a oneof field

-	HasDefault bool   // whether an explicit default was provided
-	CustomType string
-	def_uint64 uint64
+	Default     string // default value
+	HasDefault  bool   // whether an explicit default was provided
+	CustomType  string
+	StdTime     bool
+	StdDuration bool
 
 
 	enc           encoder
 	enc           encoder
 	valEnc        valueEncoder // set for bool and numeric types only
 	valEnc        valueEncoder // set for bool and numeric types only
@@ -340,6 +341,10 @@ func (p *Properties) Parse(s string) {
 			p.OrigName = strings.Split(f, "=")[1]
 			p.OrigName = strings.Split(f, "=")[1]
 		case strings.HasPrefix(f, "customtype="):
 			p.CustomType = strings.Split(f, "=")[1]
+			p.StdTime = true
+		case f == "stdduration":
+			p.StdDuration = true
 		}
 		}
 	}
 	}
 }
 }
@@ -355,11 +360,22 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
 	p.enc = nil
 	p.enc = nil
 	p.dec = nil
 	p.dec = nil
 	p.size = nil
 	p.size = nil
-	if len(p.CustomType) > 0 {
+	isMap := typ.Kind() == reflect.Map
+	if len(p.CustomType) > 0 && !isMap {
 		p.setCustomEncAndDec(typ)
 		p.setCustomEncAndDec(typ)
 		p.setTag(lockGetProp)
 		p.setTag(lockGetProp)
 		return
 		return
 	}
 	}
+	if p.StdTime && !isMap {
+		p.setTimeEncAndDec(typ)
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.StdDuration && !isMap {
+		p.setDurationEncAndDec(typ)
+		p.setTag(lockGetProp)
+		return
+	}
 	switch t1 := typ; t1.Kind() {
 	switch t1 := typ; t1.Kind() {
 	default:
 	default:
 		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
 		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
@@ -630,6 +646,10 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
 			// so we need encoders for the pointer to this type.
 			// so we need encoders for the pointer to this type.
 			vtype = reflect.PtrTo(vtype)
 			vtype = reflect.PtrTo(vtype)
 		}
 		}
+
+		p.mvalprop.CustomType = p.CustomType
+		p.mvalprop.StdDuration = p.StdDuration
+		p.mvalprop.StdTime = p.StdTime
 		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
 		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
 	}
 	}
 	p.setTag(lockGetProp)
 	p.setTag(lockGetProp)
@@ -920,7 +940,15 @@ func RegisterType(x Message, name string) {
 }
 }
 
 
 // MessageName returns the fully-qualified proto name for the given message type.
 // MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}

 // MessageType returns the message type (pointer to struct) for a named message.
 func MessageType(name string) reflect.Type { return protoTypes[name] }
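
Together with the new enc_/dec_ hooks in properties_gogo.go, the stdtime and stdduration tag options let native time.Time and time.Duration fields ride on the google.protobuf.Timestamp/Duration wire encoding. A rough sketch with a hand-written message and made-up field names (generated code is the normal consumer of this path, so treat this as an assumption-laden illustration rather than a supported pattern):

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/proto"
)

// Probe is a hand-rolled stand-in for what protoc-gen-gogo would emit when
// (gogoproto.stdtime) / (gogoproto.stdduration) are set on message fields.
type Probe struct {
	StartedAt *time.Time     `protobuf:"bytes,1,opt,name=started_at,stdtime"`
	Timeout   *time.Duration `protobuf:"bytes,2,opt,name=timeout,stdduration"`
}

func (m *Probe) Reset()         { *m = Probe{} }
func (m *Probe) String() string { return proto.CompactTextString(m) }
func (*Probe) ProtoMessage()    {}

func main() {
	now := time.Now().UTC()
	d := 5 * time.Second
	in := &Probe{StartedAt: &now, Timeout: &d}

	data, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}
	out := &Probe{}
	if err := proto.Unmarshal(data, out); err != nil {
		panic(err)
	}
	fmt.Println(out.StartedAt.Equal(now), *out.Timeout == d)
}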

+ 45 - 0
vendor/github.com/gogo/protobuf/proto/properties_gogo.go

@@ -51,6 +51,51 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
 	}
 }

+func (p *Properties) setDurationEncAndDec(typ reflect.Type) {
+	if p.Repeated {
+		if typ.Elem().Kind() == reflect.Ptr {
+			p.enc = (*Buffer).enc_slice_duration
+			p.dec = (*Buffer).dec_slice_duration
+			p.size = size_slice_duration
+		} else {
+			p.enc = (*Buffer).enc_slice_ref_duration
+			p.dec = (*Buffer).dec_slice_ref_duration
+			p.size = size_slice_ref_duration
+		}
+	} else if typ.Kind() == reflect.Ptr {
+		p.enc = (*Buffer).enc_duration
+		p.dec = (*Buffer).dec_duration
+		p.size = size_duration
+	} else {
+		p.enc = (*Buffer).enc_ref_duration
+		p.dec = (*Buffer).dec_ref_duration
+		p.size = size_ref_duration
+	}
+}
+
+func (p *Properties) setTimeEncAndDec(typ reflect.Type) {
+	if p.Repeated {
+		if typ.Elem().Kind() == reflect.Ptr {
+			p.enc = (*Buffer).enc_slice_time
+			p.dec = (*Buffer).dec_slice_time
+			p.size = size_slice_time
+		} else {
+			p.enc = (*Buffer).enc_slice_ref_time
+			p.dec = (*Buffer).dec_slice_ref_time
+			p.size = size_slice_ref_time
+		}
+	} else if typ.Kind() == reflect.Ptr {
+		p.enc = (*Buffer).enc_time
+		p.dec = (*Buffer).dec_time
+		p.size = size_time
+	} else {
+		p.enc = (*Buffer).enc_ref_time
+		p.dec = (*Buffer).dec_ref_time
+		p.size = size_ref_time
+	}
+
+}
+
 func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
 	t2 := typ.Elem()
 	p.sstype = typ

+ 145 - 32
vendor/github.com/gogo/protobuf/proto/text.go

@@ -51,6 +51,7 @@ import (
 	"sort"
 	"strings"
 	"sync"
+	"time"
 )

 var (
@@ -181,7 +182,93 @@ type raw interface {
 	Bytes() []byte
 	Bytes() []byte
 }
 }
 
 
-func writeStruct(w *textWriter, sv reflect.Value) error {
+func requiresQuotes(u string) bool {
+	// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
 	st := sv.Type()
 	sprops := GetProperties(st)
 	for i := 0; i < sv.NumField(); i++ {
@@ -234,10 +321,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 					continue
 					continue
 				}
 				}
 				if len(props.Enum) > 0 {
 				if len(props.Enum) > 0 {
-					if err := writeEnum(w, v, props); err != nil {
+					if err := tm.writeEnum(w, v, props); err != nil {
 						return err
 						return err
 					}
 					}
-				} else if err := writeAny(w, v, props); err != nil {
+				} else if err := tm.writeAny(w, v, props); err != nil {
 					return err
 					return err
 				}
 				}
 				if err := w.WriteByte('\n'); err != nil {
 				if err := w.WriteByte('\n'); err != nil {
@@ -279,7 +366,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 						return err
 					}
 				}
-				if err := writeAny(w, key, props.mkeyprop); err != nil {
+				if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
 					return err
 				}
 				if err := w.WriteByte('\n'); err != nil {
@@ -296,7 +383,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 							return err
 						}
 					}
-					if err := writeAny(w, val, props.mvalprop); err != nil {
+					if err := tm.writeAny(w, val, props.mvalprop); err != nil {
 						return err
 					}
 					if err := w.WriteByte('\n'); err != nil {
@@ -368,10 +455,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 		}
 
 		if len(props.Enum) > 0 {
-			if err := writeEnum(w, fv, props); err != nil {
+			if err := tm.writeEnum(w, fv, props); err != nil {
 				return err
 			}
-		} else if err := writeAny(w, fv, props); err != nil {
+		} else if err := tm.writeAny(w, fv, props); err != nil {
 			return err
 		}
 
@@ -389,7 +476,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 		pv.Elem().Set(sv)
 	}
 	if pv.Type().Implements(extensionRangeType) {
-		if err := writeExtensions(w, pv); err != nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
 			return err
 		}
 	}
@@ -419,20 +506,45 @@ func writeRaw(w *textWriter, b []byte) error {
 }
 
 // writeAny writes an arbitrary field.
-func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
 	v = reflect.Indirect(v)
 
-	if props != nil && len(props.CustomType) > 0 {
-		custom, ok := v.Interface().(Marshaler)
-		if ok {
-			data, err := custom.Marshal()
+	if props != nil {
+		if len(props.CustomType) > 0 {
+			custom, ok := v.Interface().(Marshaler)
+			if ok {
+				data, err := custom.Marshal()
+				if err != nil {
+					return err
+				}
+				if err := writeString(w, string(data)); err != nil {
+					return err
+				}
+				return nil
+			}
+		} else if props.StdTime {
+			t, ok := v.Interface().(time.Time)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+			}
+			tproto, err := timestampProto(t)
 			if err != nil {
 				return err
 			}
-			if err := writeString(w, string(data)); err != nil {
-				return err
+			props.StdTime = false
+			err = tm.writeAny(w, reflect.ValueOf(tproto), props)
+			props.StdTime = true
+			return err
+		} else if props.StdDuration {
+			d, ok := v.Interface().(time.Duration)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
 			}
-			return nil
+			dproto := durationProto(d)
+			props.StdDuration = false
+			err := tm.writeAny(w, reflect.ValueOf(dproto), props)
+			props.StdDuration = true
+			return err
 		}
 	}
 
@@ -482,15 +594,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
 			}
 		}
 		w.indent()
-		if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
-			text, err := tm.MarshalText()
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
 			if err != nil {
 				return err
 			}
 			if _, err = w.Write(text); err != nil {
 				return err
 			}
-		} else if err := writeStruct(w, v); err != nil {
+		} else if err := tm.writeStruct(w, v); err != nil {
 			return err
 		}
 		w.unindent()
@@ -634,7 +746,7 @@ func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
 
 // writeExtensions writes all the extensions in pv.
 // pv is assumed to be a pointer to a protocol message struct that is extendable.
-func writeExtensions(w *textWriter, pv reflect.Value) error {
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
 	emap := extensionMaps[pv.Type().Elem()]
 	e := pv.Interface().(Message)
 
@@ -689,13 +801,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
 
 		// Repeated extensions will appear as a slice.
 		if !desc.repeated() {
-			if err := writeExtension(w, desc.Name, pb); err != nil {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
 				return err
 			}
 		} else {
 			v := reflect.ValueOf(pb)
 			for i := 0; i < v.Len(); i++ {
-				if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
 					return err
 				}
 			}
@@ -704,7 +816,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
 	return nil
 }
 
-func writeExtension(w *textWriter, name string, pb interface{}) error {
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
 	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
 		return err
 	}
@@ -713,7 +825,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error {
 			return err
 		}
 	}
-	if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
 		return err
 	}
 	if err := w.WriteByte('\n'); err != nil {
@@ -740,12 +852,13 @@ func (w *textWriter) writeIndent() {
 
 // TextMarshaler is a configurable text format marshaler.
 type TextMarshaler struct {
-	Compact bool // use compact text format (one line).
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
 }
 
 // Marshal writes a given protocol buffer in text format.
 // The only errors returned are from w.
-func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
 	val := reflect.ValueOf(pb)
 	if pb == nil || val.IsNil() {
 		w.Write([]byte("<nil>"))
@@ -760,11 +873,11 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
 	aw := &textWriter{
 		w:        ww,
 		complete: true,
-		compact:  m.Compact,
+		compact:  tm.Compact,
 	}
 
-	if tm, ok := pb.(encoding.TextMarshaler); ok {
-		text, err := tm.MarshalText()
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
 		if err != nil {
 			return err
 		}
@@ -778,7 +891,7 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
 	}
 	// Dereference the received pointer so we don't have outer < and >.
 	v := reflect.Indirect(val)
-	if err := writeStruct(aw, v); err != nil {
+	if err := tm.writeStruct(aw, v); err != nil {
 		return err
 	}
 	if bw != nil {
@@ -788,9 +901,9 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
 }
 
 // Text is the same as Marshal, but returns the string directly.
-func (m *TextMarshaler) Text(pb Message) string {
+func (tm *TextMarshaler) Text(pb Message) string {
 	var buf bytes.Buffer
-	m.Marshal(&buf, pb)
+	tm.Marshal(&buf, pb)
 	return buf.String()
 }
 

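As an aside for readers of this diff (not part of the vendored change itself): the only new public surface in the text.go changes above is the ExpandAny field on proto.TextMarshaler. A minimal usage sketch, assuming only the gogo/protobuf proto package vendored here; the message passed in is a placeholder:

package main

import (
	"os"

	"github.com/gogo/protobuf/proto"
)

// printBoth renders pb twice: once with the default text form, where an
// embedded google.protobuf.Any is shown as raw type_url/value fields, and
// once with ExpandAny set, where a registered type is unpacked and printed
// as [type.url/full.Name] < ... >.
func printBoth(pb proto.Message) error {
	plain := proto.TextMarshaler{}
	if err := plain.Marshal(os.Stdout, pb); err != nil {
		return err
	}
	expanded := proto.TextMarshaler{ExpandAny: true}
	return expanded.Marshal(os.Stdout, pb)
}

func main() {
	// A real caller would pass a message containing an Any field; per the
	// Marshal code in the diff above, a nil message simply prints "<nil>".
	_ = printBoth(nil)
}
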
+ 3 - 3
vendor/github.com/gogo/protobuf/proto/text_gogo.go

@@ -33,10 +33,10 @@ import (
 	"reflect"
 )
 
-func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
 	m, ok := enumStringMaps[props.Enum]
 	if !ok {
-		if err := writeAny(w, v, props); err != nil {
+		if err := tm.writeAny(w, v, props); err != nil {
 			return err
 		}
 	}
@@ -48,7 +48,7 @@ func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
 	}
 	s, ok := m[key]
 	if !ok {
-		if err := writeAny(w, v, props); err != nil {
+		if err := tm.writeAny(w, v, props); err != nil {
 			return err
 		}
 	}

+ 175 - 20
vendor/github.com/gogo/protobuf/proto/text_parser.go

@@ -46,9 +46,13 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
+	"time"
 	"unicode/utf8"
 )
 
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
 type ParseError struct {
 	Message string
 	Line    int // 1-based line number
@@ -168,7 +172,7 @@ func (p *textParser) advance() {
 	p.cur.offset, p.cur.line = p.offset, p.line
 	p.cur.unquoted = ""
 	switch p.s[0] {
-	case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
 		// Single symbol
 		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
 	case '"', '\'':
@@ -456,7 +460,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 	fieldSet := make(map[string]bool)
 	// A struct is a sequence of "name: value", terminated by one of
 	// '>' or '}', or the end of the input.  A name may also be
-	// "[extension]".
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
 	for {
 		tok := p.next()
 		if tok.err != nil {
@@ -466,33 +473,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 			break
 		}
 		if tok.value == "[" {
-			// Looks like an extension.
+			// Looks like an extension or an Any.
 			//
 			// TODO: Check whether we need to handle
 			// namespace rooted names (e.g. ".something.Foo").
-			tok = p.next()
-			if tok.err != nil {
-				return tok.err
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
 			}
 			}
 			var desc *ExtensionDesc
 			var desc *ExtensionDesc
 			// This could be faster, but it's functional.
 			// TODO: Do something smarter than a linear scan.
 			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
 					desc = d
 					desc = d
 					break
 				}
 			}
 			if desc == nil {
-			}
-			// Check the extension terminator.
-			tok = p.next()
-			if tok.err != nil {
-				return tok.err
-			}
-			if tok.value != "]" {
-				return p.errorf("unrecognized extension terminator %q", tok.value)
+				return p.errorf("unrecognized extension %q", extName)
 			}
 
 			props := &Properties{}
@@ -550,7 +598,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 			props = oop.Prop
 			nv := reflect.New(oop.Type.Elem())
 			dst = nv.Elem().Field(0)
-			sv.Field(oop.Field).Set(nv)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
 		}
 		if !dst.IsValid() {
 			return p.errorf("unknown field name %q in %v", name, st)
@@ -657,6 +709,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 	return reqFieldErr
 }
 
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
 // consumeOptionalSeparator consumes an optional semicolon or comma.
 // It is used in readStruct to provide backward compatibility.
 func (p *textParser) consumeOptionalSeparator() error {
@@ -717,6 +798,80 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
 		}
 		return nil
 	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
 	switch fv := v; fv.Kind() {
 	case reflect.Slice:
 		at := v.Type()
@@ -759,12 +914,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
 		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
 		return p.readAny(fv.Index(fv.Len()-1), props)
 	case reflect.Bool:
-		// Either "true", "false", 1 or 0.
+		// true/1/t/True or false/f/0/False.
 		switch tok.value {
-		case "true", "1":
+		case "true", "1", "t", "True":
 			fv.SetBool(true)
 			return nil
-		case "false", "0":
+		case "false", "0", "f", "False":
 			fv.SetBool(false)
 			return nil
 		}

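For illustration only (not part of the diff): among the parser changes above, the accepted bool literals now include t/True and f/False. A small sketch using a hand-rolled message type in the same style as the internal timestamp/duration helpers added below; demoMsg and its field are hypothetical:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// demoMsg is a hypothetical hand-rolled message with one bool field,
// declared the same way the vendored code declares its internal helper types.
type demoMsg struct {
	Enabled bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"`
}

func (m *demoMsg) Reset()         { *m = demoMsg{} }
func (m *demoMsg) String() string { return proto.CompactTextString(m) }
func (*demoMsg) ProtoMessage()    {}

func main() {
	// The updated readAny accepts t/True in addition to true/1, and
	// f/False in addition to false/0.
	for _, in := range []string{"enabled: true", "enabled: t", "enabled: False"} {
		var m demoMsg
		if err := proto.UnmarshalText(in, &m); err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("%q -> %v\n", in, m.Enabled)
	}
}
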
+ 113 - 0
vendor/github.com/gogo/protobuf/proto/timestamp.go

@@ -0,0 +1,113 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}

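The conversion helpers in this file are unexported, so nothing outside the package calls them directly. The standalone sketch below only restates the arithmetic and the validity window they enforce; the constants are copied from the file above, and toSecondsNanos is a hypothetical name:

package main

import (
	"fmt"
	"time"
)

const (
	minValidSeconds = -62135596800 // 0001-01-01T00:00:00Z
	maxValidSeconds = 253402300800 // 10000-01-01T00:00:00Z, exclusive
)

// toSecondsNanos mirrors timestampProto/validateTimestamp: whole seconds since
// the Unix epoch plus a nanosecond remainder in [0, 1e9), rejected if the
// instant falls outside [0001-01-01, 10000-01-01).
func toSecondsNanos(t time.Time) (int64, int32, error) {
	secs := t.Unix()
	nanos := int32(t.Sub(time.Unix(secs, 0)))
	if secs < minValidSeconds || secs >= maxValidSeconds {
		return 0, 0, fmt.Errorf("time %v is outside the representable range", t)
	}
	if nanos < 0 || nanos >= 1e9 {
		return 0, 0, fmt.Errorf("nanos %d not in [0, 1e9)", nanos)
	}
	return secs, nanos, nil
}

func main() {
	secs, nanos, err := toSecondsNanos(time.Date(2017, 2, 1, 12, 0, 0, 500, time.UTC))
	fmt.Println(secs, nanos, err)
}
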
+ 227 - 0
vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go

@@ -0,0 +1,227 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp<string>" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
+
+func (o *Buffer) decTimestamp() (time.Time, error) {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return time.Time{}, err
+	}
+	tproto := &timestamp{}
+	if err := Unmarshal(b, tproto); err != nil {
+		return time.Time{}, err
+	}
+	return timestampFromProto(tproto)
+}
+
+func (o *Buffer) dec_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setPtrCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
+	setPtrCustomType(newBas, 0, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
+	setCustomType(newBas, 0, &t)
+	return nil
+}
+
+func size_time(p *Properties, base structPointer) (n int) {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_time(p *Properties, base structPointer) error {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(data)
+	return nil
+}
+
+func size_ref_time(p *Properties, base structPointer) (n int) {
+	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
+	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(data)
+	return nil
+}
+
+func size_slice_time(p *Properties, base structPointer) (n int) {
+	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
+	tims := *ptims
+	for i := 0; i < len(tims); i++ {
+		if tims[i] == nil {
+			return 0
+		}
+		tproto, err := timestampProto(*tims[i])
+		if err != nil {
+			return 0
+		}
+		size := Size(tproto)
+		n += len(p.tagcode) + size + sizeVarint(uint64(size))
+	}
+	return n
+}
+
+func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error {
+	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
+	tims := *ptims
+	for i := 0; i < len(tims); i++ {
+		if tims[i] == nil {
+			return errRepeatedHasNil
+		}
+		tproto, err := timestampProto(*tims[i])
+		if err != nil {
+			return err
+		}
+		data, err := Marshal(tproto)
+		if err != nil {
+			return err
+		}
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(data)
+	}
+	return nil
+}
+
+func size_slice_ref_time(p *Properties, base structPointer) (n int) {
+	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
+	tims := *ptims
+	for i := 0; i < len(tims); i++ {
+		tproto, err := timestampProto(tims[i])
+		if err != nil {
+			return 0
+		}
+		size := Size(tproto)
+		n += len(p.tagcode) + size + sizeVarint(uint64(size))
+	}
+	return n
+}
+
+func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error {
+	ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
+	tims := *ptims
+	for i := 0; i < len(tims); i++ {
+		tproto, err := timestampProto(tims[i])
+		if err != nil {
+			return err
+		}
+		data, err := Marshal(tproto)
+		if err != nil {
+			return err
+		}
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(data)
+	}
+	return nil
+}

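The size_* helpers above all compute the same length-delimited layout: the field's tag bytes, a varint-encoded length, then the encoded Timestamp. A standalone restatement of that arithmetic (sizeVarint is reimplemented here rather than imported from the vendored package):

package main

import "fmt"

// sizeVarint counts the bytes needed for the base-128 varint encoding of x,
// which is the length-prefix term the size_* helpers add.
func sizeVarint(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

// lengthDelimitedSize is the wire size of one embedded-message field:
// tag bytes + varint(payload length) + payload.
func lengthDelimitedSize(tagLen, payloadLen int) int {
	return tagLen + sizeVarint(uint64(payloadLen)) + payloadLen
}

func main() {
	// A 1-byte tag and a 12-byte encoded Timestamp take 1 + 1 + 12 = 14 bytes.
	fmt.Println(lengthDelimitedSize(1, 12))
}
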
+ 140 - 0
vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto

@@ -0,0 +1,140 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "types";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name whose content describes the type of the
+  // serialized protocol buffer message.
+  //
+  // For URLs which use the scheme `http`, `https`, or no scheme, the
+  // following restrictions and interpretations apply:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * The last segment of the URL's path must represent the fully
+  //   qualified name of the type (as in `path/google.protobuf.Duration`).
+  //   The name should be in a canonical form (e.g., leading "." is
+  //   not accepted).
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}

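The comment block above spells out the type URL convention that the text marshaler and parser in this diff rely on: only the segment after the last '/' names the message type. A tiny sketch of that rule (the function name is made up for illustration):

package main

import (
	"fmt"
	"strings"
)

// messageNameFromTypeURL returns the fully qualified message name embedded in
// an Any type URL, i.e. everything after the final '/'.
func messageNameFromTypeURL(typeURL string) string {
	if i := strings.LastIndex(typeURL, "/"); i >= 0 {
		return typeURL[i+1:]
	}
	return typeURL
}

func main() {
	fmt.Println(messageNameFromTypeURL("type.googleapis.com/google.protobuf.Duration"))
	// prints: google.protobuf.Duration
}
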
+ 38 - 4
vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto

@@ -45,6 +45,7 @@ option java_package = "com.google.protobuf";
 option java_outer_classname = "DescriptorProtos";
 option csharp_namespace = "Google.Protobuf.Reflection";
 option objc_class_prefix = "GPB";
+option java_generate_equals_and_hash = true;
 
 // descriptor.proto must be optimized for speed because reflection-based
 // algorithms don't work during bootstrapping.
@@ -202,6 +203,7 @@ message FieldDescriptorProto {
 // Describes a oneof.
 message OneofDescriptorProto {
   optional string name = 1;
+  optional OneofOptions options = 2;
 }
 
 // Describes an enum type.
@@ -377,15 +379,13 @@ message FileOptions {
   // Namespace for generated classes; defaults to the package.
   optional string csharp_namespace = 37;
 
-  // Whether the nano proto compiler should generate in the deprecated non-nano
-  // suffixed package.
-  optional bool javanano_use_deprecated_package = 38;
-
   // The parser stores options it doesn't recognize here. See above.
   repeated UninterpretedOption uninterpreted_option = 999;
 
   // Clients can define custom options in extensions of this message. See above.
   extensions 1000 to max;
+
+  //reserved 38;
 }
 
 message MessageOptions {
@@ -540,6 +540,14 @@ message FieldOptions {
   extensions 1000 to max;
 }
 
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
 message EnumOptions {
 
   // Set this option to true to allow mapping different tag names to the same
@@ -777,3 +785,29 @@ message SourceCodeInfo {
     repeated string leading_detached_comments = 6;
   }
 }
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified offset. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}

+ 3 - 5
vendor/github.com/docker/swarmkit/api/duration/duration.proto → vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto

@@ -30,13 +30,10 @@
 
 syntax = "proto3";
 
-package docker.swarmkit.v1;
-
-// TODO(stevvooe): Commenting this out from the maddening behavior of google's
-// Go protobuf implementation.
-//option go_package = "github.com/golang/protobuf/ptypes/duration";
+package google.protobuf;
 
 option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "types";
 option java_package = "com.google.protobuf";
 option java_outer_classname = "DurationProto";
 option java_multiple_files = true;
@@ -84,6 +81,7 @@ option objc_class_prefix = "GPB";
 //       end.nanos -= 1000000000;
 //     }
 //
+//
 message Duration {
 
   // Signed seconds of the span of time. Must be from -315,576,000,000

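As a side note, the Duration message splits a span of time into seconds plus nanos of the same sign. A minimal sketch of that split for a Go time.Duration (splitDuration is a hypothetical helper, not part of the vendored code):

package main

import (
	"fmt"
	"time"
)

// splitDuration breaks d into the seconds/nanos pair a Duration message
// carries; integer division truncates toward zero, so both parts keep the
// sign of d.
func splitDuration(d time.Duration) (seconds int64, nanos int32) {
	seconds = int64(d / time.Second)
	nanos = int32(d % time.Second)
	return seconds, nanos
}

func main() {
	s, n := splitDuration(-1500 * time.Millisecond)
	fmt.Println(s, n) // -1 -500000000
}
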
+ 13 - 8
vendor/google.golang.org/genproto/protobuf/source_context.proto → vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto

@@ -33,16 +33,21 @@ syntax = "proto3";
 package google.protobuf;
 
 option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "types";
 option java_package = "com.google.protobuf";
-option java_outer_classname = "SourceContextProto";
+option java_outer_classname = "EmptyProto";
 option java_multiple_files = true;
 option java_generate_equals_and_hash = true;
 option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
 
-// `SourceContext` represents information about the source of a
-// protobuf element, like the file in which it is defined.
-message SourceContext {
-  // The path-qualified name of the .proto file that contained the associated
-  // protobuf element.  For example: `"google/protobuf/source_context.proto"`.
-  string file_name = 1;
-}
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+message Empty {}

+ 1 - 1
vendor/google.golang.org/genproto/protobuf/field_mask.proto → vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto

@@ -32,7 +32,7 @@ syntax = "proto3";
 
 package google.protobuf;
 
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "types";
 option java_package = "com.google.protobuf";
 option java_outer_classname = "FieldMaskProto";
 option java_multiple_files = true;

+ 96 - 0
vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto

@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "types";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}

Some files were not shown because too many files changed in this diff