
Merge pull request #26538 from LK4D4/update_grpc

Update grpc to v1.0.1-GA
Vincent Demeester, 8 years ago
parent
commit 758a809f54
100 changed files with 4980 additions and 2639 deletions
  1. +1 -1  Dockerfile
  2. +1 -1  Dockerfile.aarch64
  3. +1 -1  Dockerfile.armhf
  4. +1 -1  Dockerfile.ppc64le
  5. +1 -1  Dockerfile.s390x
  6. +1 -1  Dockerfile.simple
  7. +5 -5  hack/vendor.sh
  8. +1 -1  libcontainerd/remote_linux.go
  9. +10 -5  vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go
  10. +0 -34  vendor/src/github.com/docker/engine-api/types/reference/image_reference.go
  11. +68 -7  vendor/src/github.com/docker/swarmkit/agent/agent.go
  12. +1 -1  vendor/src/github.com/docker/swarmkit/agent/config.go
  13. +7 -2  vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
  14. +33 -11  vendor/src/github.com/docker/swarmkit/agent/node.go
  15. +84 -47  vendor/src/github.com/docker/swarmkit/agent/session.go
  16. +72 -22  vendor/src/github.com/docker/swarmkit/agent/worker.go
  17. +114 -60  vendor/src/github.com/docker/swarmkit/api/ca.pb.go
  18. +400 -333  vendor/src/github.com/docker/swarmkit/api/control.pb.go
  19. +730 -71  vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go
  20. +48 -2  vendor/src/github.com/docker/swarmkit/api/dispatcher.proto
  21. +9 -4  vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go
  22. +53 -25  vendor/src/github.com/docker/swarmkit/api/health.pb.go
  23. +157 -98  vendor/src/github.com/docker/swarmkit/api/objects.pb.go
  24. +4 -0  vendor/src/github.com/docker/swarmkit/api/objects.proto
  25. +132 -76  vendor/src/github.com/docker/swarmkit/api/raft.pb.go
  26. +1 -1  vendor/src/github.com/docker/swarmkit/api/raft.proto
  27. +70 -40  vendor/src/github.com/docker/swarmkit/api/resource.pb.go
  28. +6 -3  vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go
  29. +49 -41  vendor/src/github.com/docker/swarmkit/api/specs.pb.go
  30. +9 -4  vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go
  31. +431 -275  vendor/src/github.com/docker/swarmkit/api/types.pb.go
  32. +43 -9  vendor/src/github.com/docker/swarmkit/api/types.proto
  33. +9 -30  vendor/src/github.com/docker/swarmkit/ca/certificates.go
  34. +51 -22  vendor/src/github.com/docker/swarmkit/ca/config.go
  35. +4 -4  vendor/src/github.com/docker/swarmkit/ca/server.go
  36. +19 -27  vendor/src/github.com/docker/swarmkit/ca/transport.go
  37. +8 -0  vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
  38. +12 -1  vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go
  39. +0 -12  vendor/src/github.com/docker/swarmkit/manager/controlapi/hackpicker/cluster.go
  40. +0 -141  vendor/src/github.com/docker/swarmkit/manager/controlapi/hackpicker/raftpicker.go
  41. +7 -6  vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go
  42. +244 -38  vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
  43. +7 -33  vendor/src/github.com/docker/swarmkit/manager/manager.go
  44. +4 -3  vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go
  45. +195 -41  vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go
  46. +0 -12  vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go
  47. +0 -127  vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go
  48. +20 -0  vendor/src/github.com/docker/swarmkit/manager/raftselector/raftselector.go
  49. +0 -153  vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go
  50. +48 -13  vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
  51. +115 -0  vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeset.go
  52. +130 -59  vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
  53. +3 -0  vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go
  54. +114 -56  vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
  55. +1 -1  vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go
  56. +9 -6  vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go
  57. +24 -1  vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go
  58. +9 -4  vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
  59. +3 -3  vendor/src/github.com/gogo/protobuf/LICENSE
  60. +3 -3  vendor/src/github.com/gogo/protobuf/gogoproto/Makefile
  61. +3 -3  vendor/src/github.com/gogo/protobuf/gogoproto/doc.go
  62. +75 -71  vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go
  63. +4 -2  vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto
  64. +3 -1  vendor/src/github.com/gogo/protobuf/gogoproto/helper.go
  65. +10 -4  vendor/src/github.com/gogo/protobuf/proto/clone.go
  66. +22 -12  vendor/src/github.com/gogo/protobuf/proto/decode.go
  67. +4 -2  vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go
  68. +49 -11  vendor/src/github.com/gogo/protobuf/proto/encode.go
  69. +3 -3  vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go
  70. +23 -3  vendor/src/github.com/gogo/protobuf/proto/equal.go
  71. +290 -119  vendor/src/github.com/gogo/protobuf/proto/extensions.go
  72. +73 -15  vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go
  73. +4 -0  vendor/src/github.com/gogo/protobuf/proto/lib.go
  74. +4 -2  vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go
  75. +37 -6  vendor/src/github.com/gogo/protobuf/proto/message_set.go
  76. +6 -1  vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go
  77. +5 -1  vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go
  78. +9 -10  vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
  79. +35 -18  vendor/src/github.com/gogo/protobuf/proto/properties.go
  80. +4 -2  vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go
  81. +4 -2  vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go
  82. +52 -30  vendor/src/github.com/gogo/protobuf/proto/text.go
  83. +4 -2  vendor/src/github.com/gogo/protobuf/proto/text_gogo.go
  84. +42 -33  vendor/src/github.com/gogo/protobuf/proto/text_parser.go
  85. +3 -0  vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
  86. +173 -209  vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
  87. +48 -25  vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
  88. +3 -1  vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
  89. +3 -1  vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go
  90. +1 -1  vendor/src/github.com/golang/protobuf/proto/Makefile
  91. +9 -3  vendor/src/github.com/golang/protobuf/proto/clone.go
  92. +14 -7  vendor/src/github.com/golang/protobuf/proto/decode.go
  93. +49 -11  vendor/src/github.com/golang/protobuf/proto/encode.go
  94. +29 -5  vendor/src/github.com/golang/protobuf/proto/equal.go
  95. +207 -20  vendor/src/github.com/golang/protobuf/proto/extensions.go
  96. +5 -1  vendor/src/github.com/golang/protobuf/proto/lib.go
  97. +37 -6  vendor/src/github.com/golang/protobuf/proto/message_set.go
  98. +6 -1  vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
  99. +5 -1  vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
  100. +41 -15  vendor/src/github.com/golang/protobuf/proto/properties.go

+ 1 - 1
Dockerfile

@@ -243,7 +243,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 1 - 1
Dockerfile.aarch64

@@ -186,7 +186,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 1 - 1
Dockerfile.armhf

@@ -184,7 +184,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 1 - 1
Dockerfile.ppc64le

@@ -204,7 +204,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 1 - 1
Dockerfile.s390x

@@ -196,7 +196,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 1 - 1
Dockerfile.simple

@@ -68,7 +68,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install containerd
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 5 - 5
hack/vendor.sh

@@ -102,7 +102,7 @@ clone git github.com/pborman/uuid v1.0
 # get desired notary commit, might also need to be updated in Dockerfile
 clone git github.com/docker/notary v0.3.0
 
-clone git google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git
+clone git google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go.git
 clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
 clone git github.com/docker/go v1.5.1-1-1-gbaf439e
 clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
@@ -114,7 +114,7 @@ clone git github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb2
 clone git github.com/coreos/go-systemd v4
 clone git github.com/godbus/dbus v4.0.0
 clone git github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-clone git github.com/golang/protobuf 3c84672111d91bb5ac31719e112f9f7126a0e26e
+clone git github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a
 
 # gelf logging driver deps
 clone git github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883
@@ -141,12 +141,12 @@ clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https
 clone git github.com/docker/docker-credential-helpers v0.3.0
 
 # containerd
-clone git github.com/docker/containerd 4c21ad662f71af56c0e6b29c0afef72df441d1ff
+clone git github.com/docker/containerd 2545227b0357eb55e369fa0072baef9ad91cdb69
 
 # cluster
-clone git github.com/docker/swarmkit 27fbaef4ceed648bb575969ccc9083a6e104a719
+clone git github.com/docker/swarmkit 191acc1bbdb13d8ea3b8059dda14a12f8c3903f2
 clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
-clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
+clone git github.com/gogo/protobuf v0.3
 clone git github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
 clone git github.com/google/certificate-transparency 0f6e3d1d1ba4d03fdaab7cd716f36255c2e48341
 clone git golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 https://github.com/golang/crypto.git

+ 1 - 1
libcontainerd/remote_linux.go

@@ -279,7 +279,7 @@ func (r *remote) startEventsMonitor() error {
 	er := &containerd.EventsRequest{
 		Timestamp: tsp,
 	}
-	events, err := r.apiClient.Events(context.Background(), er)
+	events, err := r.apiClient.Events(context.Background(), er, grpc.FailFast(false))
 	if err != nil {
 		return err
 	}
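
A note on the one-line change above: grpc.FailFast(false) makes the call block until a ready connection is available (surviving transient transport failures) instead of returning an error immediately, which is what a long-lived events monitor wants while containerd restarts. A minimal sketch of the pattern under that assumption; the function name monitorEvents and the handle callback are illustrative, not part of the diff:

    package monitor

    import (
        "golang.org/x/net/context"
        "google.golang.org/grpc"

        "github.com/docker/containerd/api/grpc/types"
    )

    // monitorEvents streams containerd events until the stream breaks; the caller
    // decides whether to re-establish the monitor afterwards.
    func monitorEvents(client types.APIClient, handle func(*types.Event)) error {
        // FailFast(false): wait for a ready connection instead of failing the
        // RPC immediately while containerd is down or restarting.
        events, err := client.Events(context.Background(), &types.EventsRequest{}, grpc.FailFast(false))
        if err != nil {
            return err
        }
        for {
            ev, err := events.Recv()
            if err != nil {
                return err
            }
            handle(ev)
        }
    }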

+ 10 - 5
vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go

@@ -75,7 +75,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
 type GetServerVersionRequest struct {
 }
@@ -223,7 +225,7 @@ func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
 type User struct {
 	Uid            uint32   `protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"`
 	Gid            uint32   `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"`
-	AdditionalGids []uint32 `protobuf:"varint,3,rep,name=additionalGids" json:"additionalGids,omitempty"`
+	AdditionalGids []uint32 `protobuf:"varint,3,rep,packed,name=additionalGids" json:"additionalGids,omitempty"`
 }
 
 func (m *User) Reset()                    { *m = User{} }
@@ -385,7 +387,7 @@ type Container struct {
 	Processes  []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"`
 	Status     string     `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
 	Labels     []string   `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"`
-	Pids       []uint32   `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"`
+	Pids       []uint32   `protobuf:"varint,6,rep,packed,name=pids" json:"pids,omitempty"`
 	Runtime    string     `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"`
 }
 
@@ -628,7 +630,7 @@ func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []in
 
 type CpuUsage struct {
 	TotalUsage        uint64   `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"`
-	PercpuUsage       []uint64 `protobuf:"varint,2,rep,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"`
+	PercpuUsage       []uint64 `protobuf:"varint,2,rep,packed,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"`
 	UsageInKernelmode uint64   `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"`
 	UsageInUsermode   uint64   `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"`
 }
@@ -978,7 +980,7 @@ var _ grpc.ClientConn
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion2
+const _ = grpc.SupportPackageIsVersion3
 
 // Client API for API service
 
@@ -1432,8 +1434,11 @@ var _API_serviceDesc = grpc.ServiceDesc{
 			ServerStreams: true,
 		},
 	},
+	Metadata: fileDescriptor0,
 }
 
+func init() { proto.RegisterFile("api.proto", fileDescriptor0) }
+
 var fileDescriptor0 = []byte{
 	// 2604 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x6f, 0x1c, 0x5b,
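
The ",packed" additions to the struct tags above switch repeated numeric fields (AdditionalGids, Pids, PercpuUsage) to packed wire encoding: one length-delimited record instead of one key per element. A worked illustration of the difference, written by hand rather than taken from the diff, for field number 3 holding the values 1, 2 and 3:

    package wireformat

    // Wire bytes for a repeated uint32 field with field number 3 (e.g. AdditionalGids)
    // holding the values 1, 2, 3.
    var (
        // Unpacked: the key byte (3<<3|0 = 0x18, varint wire type) repeats before
        // every element.
        unpacked = []byte{0x18, 0x01, 0x18, 0x02, 0x18, 0x03} // 6 bytes

        // Packed: one key byte (3<<3|2 = 0x1a, length-delimited wire type), a byte
        // length, then the varints back to back.
        packed = []byte{0x1a, 0x03, 0x01, 0x02, 0x03} // 5 bytes
    )

Conformant protobuf decoders accept both encodings for packable repeated fields, so the regenerated code remains wire-compatible with peers built from the older descriptors.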

+ 0 - 34
vendor/src/github.com/docker/engine-api/types/reference/image_reference.go

@@ -1,34 +0,0 @@
-package reference
-
-import (
-	distreference "github.com/docker/distribution/reference"
-)
-
-// Parse parses the given references and returns the repository and
-// tag (if present) from it. If there is an error during parsing, it will
-// return an error.
-func Parse(ref string) (string, string, error) {
-	distributionRef, err := distreference.ParseNamed(ref)
-	if err != nil {
-		return "", "", err
-	}
-
-	tag := GetTagFromNamedRef(distributionRef)
-	return distributionRef.Name(), tag, nil
-}
-
-// GetTagFromNamedRef returns a tag from the specified reference.
-// This function is necessary as long as the docker "server" api makes the distinction between repository
-// and tags.
-func GetTagFromNamedRef(ref distreference.Named) string {
-	var tag string
-	switch x := ref.(type) {
-	case distreference.Digested:
-		tag = x.Digest().String()
-	case distreference.NamedTagged:
-		tag = x.Tag()
-	default:
-		tag = "latest"
-	}
-	return tag
-}

+ 68 - 7
vendor/src/github.com/docker/swarmkit/agent/agent.go

@@ -15,6 +15,7 @@ import (
 const (
 	initialSessionFailureBackoff = 100 * time.Millisecond
 	maxSessionFailureBackoff     = 8 * time.Second
+	nodeUpdatePeriod             = 20 * time.Second
 )
 
 // Agent implements the primary node functionality for a member of a swarm
@@ -134,9 +135,18 @@ func (a *Agent) run(ctx context.Context) {
 	log.G(ctx).Debugf("(*Agent).run")
 	defer log.G(ctx).Debugf("(*Agent).run exited")
 
+	// get the node description
+	nodeDescription, err := a.nodeDescriptionWithHostname(ctx)
+	if err != nil {
+		log.G(ctx).WithError(err).WithField("agent", a.config.Executor).Errorf("agent: node description unavailable")
+	}
+	// nodeUpdateTicker is used to periodically check for updates to node description
+	nodeUpdateTicker := time.NewTicker(nodeUpdatePeriod)
+	defer nodeUpdateTicker.Stop()
+
 	var (
 		backoff    time.Duration
-		session    = newSession(ctx, a, backoff, "") // start the initial session
+		session    = newSession(ctx, a, backoff, "", nodeDescription) // start the initial session
 		registered = session.registered
 		ready      = a.ready // first session ready
 		sessionq   chan sessionOperation
@@ -158,9 +168,16 @@ func (a *Agent) run(ctx context.Context) {
 		select {
 		case operation := <-sessionq:
 			operation.response <- operation.fn(session)
-		case msg := <-session.tasks:
-			if err := a.worker.Assign(ctx, msg.Tasks); err != nil {
-				log.G(ctx).WithError(err).Error("task assignment failed")
+		case msg := <-session.assignments:
+			switch msg.Type {
+			case api.AssignmentsMessage_COMPLETE:
+				if err := a.worker.AssignTasks(ctx, msg.UpdateTasks); err != nil {
+					log.G(ctx).WithError(err).Error("failed to synchronize worker assignments")
+				}
+			case api.AssignmentsMessage_INCREMENTAL:
+				if err := a.worker.UpdateTasks(ctx, msg.UpdateTasks, msg.RemoveTasks); err != nil {
+					log.G(ctx).WithError(err).Error("failed to update worker assignments")
+				}
 			}
 		case msg := <-session.messages:
 			if err := a.handleSessionMessage(ctx, msg); err != nil {
@@ -197,10 +214,42 @@ func (a *Agent) run(ctx context.Context) {
 			log.G(ctx).Debugf("agent: rebuild session")
 
 			// select a session registration delay from backoff range.
-			delay := time.Duration(rand.Int63n(int64(backoff)))
-			session = newSession(ctx, a, delay, session.sessionID)
+			delay := time.Duration(0)
+			if backoff > 0 {
+				delay = time.Duration(rand.Int63n(int64(backoff)))
+			}
+			session = newSession(ctx, a, delay, session.sessionID, nodeDescription)
 			registered = session.registered
 			sessionq = a.sessionq
+		case <-nodeUpdateTicker.C:
+			// skip this case if the registration isn't finished
+			if registered != nil {
+				continue
+			}
+			// get the current node description
+			newNodeDescription, err := a.nodeDescriptionWithHostname(ctx)
+			if err != nil {
+				log.G(ctx).WithError(err).WithField("agent", a.config.Executor).Errorf("agent: updated node description unavailable")
+			}
+
+			// if newNodeDescription is nil, it will cause a panic when
+			// trying to create a session. Typically this can happen
+			// if the engine goes down
+			if newNodeDescription == nil {
+				continue
+			}
+
+			// if the node description has changed, update it to the new one
+			// and close the session. The old session will be stopped and a
+			// new one will be created with the updated description
+			if !reflect.DeepEqual(nodeDescription, newNodeDescription) {
+				nodeDescription = newNodeDescription
+				// close the session
+				log.G(ctx).Info("agent: found node update")
+				if err := session.close(); err != nil {
+					log.G(ctx).WithError(err).Error("agent: closing session for node update failed")
+				}
+			}
 		case <-a.stopped:
 			// TODO(stevvooe): Wait on shutdown and cleanup. May need to pump
 			// this loop a few times.
@@ -315,7 +364,8 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api
 				if err == errTaskUnknown {
 					err = nil // dispatcher no longer cares about this task.
 				} else {
-					log.G(ctx).WithError(err).Error("sending task status update failed")
+					log.G(ctx).WithError(err).Error("closing session after fatal error")
+					session.close()
 				}
 			} else {
 				log.G(ctx).Debug("task status reported")
@@ -337,6 +387,17 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api
 	}
 }
 
+// nodeDescriptionWithHostname retrieves node description, and overrides hostname if available
+func (a *Agent) nodeDescriptionWithHostname(ctx context.Context) (*api.NodeDescription, error) {
+	desc, err := a.config.Executor.Describe(ctx)
+
+	// Override hostname
+	if a.config.Hostname != "" && desc != nil {
+		desc.Hostname = a.config.Hostname
+	}
+	return desc, err
+}
+
 // nodesEqual returns true if the node states are functionaly equal, ignoring status,
 // version and other superfluous fields.
 //

+ 1 - 1
vendor/src/github.com/docker/swarmkit/agent/config.go

@@ -29,7 +29,7 @@ type Config struct {
 	NotifyRoleChange chan<- api.NodeRole
 
 	// Credentials is credentials for grpc connection to manager.
-	Credentials credentials.TransportAuthenticator
+	Credentials credentials.TransportCredentials
 }
 
 func (c *Config) validate() error {

+ 7 - 2
vendor/src/github.com/docker/swarmkit/agent/exec/controller.go

@@ -147,7 +147,7 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
 		if cs, ok := err.(ContainerStatuser); ok {
 			var err error
 			containerStatus, err = cs.ContainerStatus(ctx)
-			if err != nil {
+			if err != nil && !contextDoneError(err) {
 				log.G(ctx).WithError(err).Error("error resolving container status on fatal")
 			}
 		}
@@ -207,7 +207,7 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
 
 			var err error
 			containerStatus, err = cctlr.ContainerStatus(ctx)
-			if err != nil {
+			if err != nil && !contextDoneError(err) {
 				log.G(ctx).WithError(err).Error("container status unavailable")
 			}
 
@@ -297,3 +297,8 @@ func logStateChange(ctx context.Context, desired, previous, next api.TaskState)
 		log.G(ctx).WithFields(fields).Debug("state changed")
 	}
 }
+
+func contextDoneError(err error) bool {
+	cause := errors.Cause(err)
+	return cause == context.Canceled || cause == context.DeadlineExceeded
+}
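
The contextDoneError helper added above compares the root cause of an error against the context sentinels, which matters because controller errors are typically wrapped. A small self-contained sketch of that behavior, assuming the Cause function comes from github.com/pkg/errors as the call in the diff suggests (the example messages are made up):

    package main

    import (
        "context"
        "fmt"

        "github.com/pkg/errors"
    )

    // contextDoneError mirrors the helper in the diff: an error counts as a normal
    // shutdown when its root cause is cancellation or a deadline expiry.
    func contextDoneError(err error) bool {
        cause := errors.Cause(err)
        return cause == context.Canceled || cause == context.DeadlineExceeded
    }

    func main() {
        wrapped := errors.Wrap(context.Canceled, "resolving container status")
        fmt.Println(contextDoneError(wrapped))            // true: skip the error log
        fmt.Println(contextDoneError(errors.New("boom"))) // false: a real failure
    }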

+ 33 - 11
vendor/src/github.com/docker/swarmkit/agent/node.go

@@ -120,7 +120,7 @@ func NewNode(c *NodeConfig) (*Node, error) {
 
 	n := &Node{
 		remotes:              newPersistentRemotes(stateFile, p...),
-		role:                 ca.AgentRole,
+		role:                 ca.WorkerRole,
 		config:               c,
 		started:              make(chan struct{}),
 		stopped:              make(chan struct{}),
@@ -194,7 +194,9 @@ func (n *Node) run(ctx context.Context) (err error) {
 		select {
 		case <-ctx.Done():
 		case resp := <-issueResponseChan:
-			logrus.Debugf("Requesting certificate for NodeID: %v", resp.NodeID)
+			log.G(log.WithModule(ctx, "tls")).WithFields(logrus.Fields{
+				"node.id": resp.NodeID,
+			}).Debugf("requesting certificate")
 			n.Lock()
 			n.nodeID = resp.NodeID
 			n.nodeMembership = resp.NodeMembership
@@ -233,7 +235,7 @@ func (n *Node) run(ctx context.Context) (err error) {
 			case apirole := <-n.roleChangeReq:
 				n.Lock()
 				lastRole := n.role
-				role := ca.AgentRole
+				role := ca.WorkerRole
 				if apirole == api.NodeRoleManager {
 					role = ca.ManagerRole
 				}
@@ -242,7 +244,7 @@ func (n *Node) run(ctx context.Context) (err error) {
 					continue
 				}
 				// switch role to agent immediately to shutdown manager early
-				if role == ca.AgentRole {
+				if role == ca.WorkerRole {
 					n.role = role
 					n.roleCond.Broadcast()
 				}
@@ -343,7 +345,7 @@ func (n *Node) Err(ctx context.Context) error {
 	}
 }
 
-func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.TransportAuthenticator, ready chan<- struct{}) error {
+func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.TransportCredentials, ready chan<- struct{}) error {
 	select {
 	case <-ctx.Done():
 	case <-n.remotes.WaitSelect(ctx):
@@ -588,7 +590,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
 			return err
 		}
 
-		remoteAddr, _ := n.remotes.Select(n.nodeID)
+		remoteAddr, _ := n.remotes.Select(n.NodeID())
 		m, err := manager.New(&manager.Config{
 			ForceNewCluster: n.config.ForceNewCluster,
 			ProtoAddr: map[string]string{
@@ -607,8 +609,9 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
 			return err
 		}
 		done := make(chan struct{})
+		var runErr error
 		go func() {
-			m.Run(context.Background()) // todo: store error
+			runErr = m.Run(context.Background())
 			close(done)
 		}()
 
@@ -624,14 +627,31 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
 			go func(ready chan struct{}) {
 				select {
 				case <-ready:
-					n.remotes.Observe(api.Peer{NodeID: n.nodeID, Addr: n.config.ListenRemoteAPI}, remotes.DefaultObservationWeight)
+					n.remotes.Observe(api.Peer{NodeID: n.NodeID(), Addr: n.config.ListenRemoteAPI}, remotes.DefaultObservationWeight)
 				case <-connCtx.Done():
 				}
 			}(ready)
 			ready = nil
 		}
 
-		err = n.waitRole(ctx, ca.AgentRole)
+		roleChanged := make(chan error)
+		waitCtx, waitCancel := context.WithCancel(ctx)
+		go func() {
+			err := n.waitRole(waitCtx, ca.WorkerRole)
+			roleChanged <- err
+		}()
+
+		select {
+		case <-done:
+			// Fail out if m.Run() returns error, otherwise wait for
+			// role change.
+			if runErr != nil {
+				err = runErr
+			} else {
+				err = <-roleChanged
+			}
+		case err = <-roleChanged:
+		}
 
 		n.Lock()
 		n.manager = nil
@@ -646,6 +666,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
 		}
 		connCancel()
 		n.setControlSocket(nil)
+		waitCancel()
 
 		if err != nil {
 			return err
@@ -672,17 +693,18 @@ func newPersistentRemotes(f string, peers ...api.Peer) *persistentRemotes {
 
 func (s *persistentRemotes) Observe(peer api.Peer, weight int) {
 	s.Lock()
+	defer s.Unlock()
 	s.Remotes.Observe(peer, weight)
 	s.c.Broadcast()
 	if err := s.save(); err != nil {
 		logrus.Errorf("error writing cluster state file: %v", err)
-		s.Unlock()
 		return
 	}
-	s.Unlock()
 	return
 }
 func (s *persistentRemotes) Remove(peers ...api.Peer) {
+	s.Lock()
+	defer s.Unlock()
 	s.Remotes.Remove(peers...)
 	if err := s.save(); err != nil {
 		logrus.Errorf("error writing cluster state file: %v", err)

+ 84 - 47
vendor/src/github.com/docker/swarmkit/agent/session.go

@@ -2,8 +2,10 @@ package agent
 
 import (
 	"errors"
+	"sync"
 	"time"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/protobuf/ptypes"
@@ -31,26 +33,27 @@ type session struct {
 	conn *grpc.ClientConn
 	addr string
 
-	agent     *Agent
-	sessionID string
-	session   api.Dispatcher_SessionClient
-	errs      chan error
-	messages  chan *api.SessionMessage
-	tasks     chan *api.TasksMessage
+	agent       *Agent
+	sessionID   string
+	session     api.Dispatcher_SessionClient
+	errs        chan error
+	messages    chan *api.SessionMessage
+	assignments chan *api.AssignmentsMessage
 
 	registered chan struct{} // closed registration
 	closed     chan struct{}
+	closeOnce  sync.Once
 }
 
-func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionID string) *session {
+func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionID string, description *api.NodeDescription) *session {
 	s := &session{
-		agent:      agent,
-		sessionID:  sessionID,
-		errs:       make(chan error, 1),
-		messages:   make(chan *api.SessionMessage),
-		tasks:      make(chan *api.TasksMessage),
-		registered: make(chan struct{}),
-		closed:     make(chan struct{}),
+		agent:       agent,
+		sessionID:   sessionID,
+		errs:        make(chan error, 1),
+		messages:    make(chan *api.SessionMessage),
+		assignments: make(chan *api.AssignmentsMessage),
+		registered:  make(chan struct{}),
+		closed:      make(chan struct{}),
 	}
 	peer, err := agent.config.Managers.Select()
 	if err != nil {
@@ -68,14 +71,14 @@ func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionI
 	s.addr = peer.Addr
 	s.conn = cc
 
-	go s.run(ctx, delay)
+	go s.run(ctx, delay, description)
 	return s
 }
 
-func (s *session) run(ctx context.Context, delay time.Duration) {
+func (s *session) run(ctx context.Context, delay time.Duration, description *api.NodeDescription) {
 	time.Sleep(delay) // delay before registering.
 
-	if err := s.start(ctx); err != nil {
+	if err := s.start(ctx, description); err != nil {
 		select {
 		case s.errs <- err:
 		case <-s.closed:
@@ -94,24 +97,14 @@ func (s *session) run(ctx context.Context, delay time.Duration) {
 }
 
 // start begins the session and returns the first SessionMessage.
-func (s *session) start(ctx context.Context) error {
+func (s *session) start(ctx context.Context, description *api.NodeDescription) error {
 	log.G(ctx).Debugf("(*session).start")
 
-	description, err := s.agent.config.Executor.Describe(ctx)
-	if err != nil {
-		log.G(ctx).WithError(err).WithField("executor", s.agent.config.Executor).
-			Errorf("node description unavailable")
-		return err
-	}
-	// Override hostname
-	if s.agent.config.Hostname != "" {
-		description.Hostname = s.agent.config.Hostname
-	}
-
 	errChan := make(chan error, 1)
 	var (
 		msg    *api.SessionMessage
 		stream api.Dispatcher_SessionClient
+		err    error
 	)
 	// Note: we don't defer cancellation of this context, because the
 	// streaming RPC is used after this function returned. We only cancel
@@ -215,22 +208,68 @@ func (s *session) handleSessionMessage(ctx context.Context, msg *api.SessionMess
 }
 
 func (s *session) watch(ctx context.Context) error {
-	log.G(ctx).Debugf("(*session).watch")
-	client := api.NewDispatcherClient(s.conn)
-	watch, err := client.Tasks(ctx, &api.TasksRequest{
-		SessionID: s.sessionID})
-	if err != nil {
-		return err
-	}
+	log := log.G(ctx).WithFields(logrus.Fields{"method": "(*session).watch"})
+	log.Debugf("")
+	var (
+		resp            *api.AssignmentsMessage
+		assignmentWatch api.Dispatcher_AssignmentsClient
+		tasksWatch      api.Dispatcher_TasksClient
+		streamReference string
+		tasksFallback   bool
+		err             error
+	)
 
+	client := api.NewDispatcherClient(s.conn)
 	for {
-		resp, err := watch.Recv()
-		if err != nil {
-			return err
+		// If this is the first time we're running the loop, or there was a reference mismatch
+		// attempt to get the assignmentWatch
+		if assignmentWatch == nil && !tasksFallback {
+			assignmentWatch, err = client.Assignments(ctx, &api.AssignmentsRequest{SessionID: s.sessionID})
+			if err != nil {
+				return err
+			}
+		}
+		// We have an assignmentWatch, let's try to receive an AssignmentMessage
+		if assignmentWatch != nil {
+			// If we get a code = 12 desc = unknown method Assignments, try to use tasks
+			resp, err = assignmentWatch.Recv()
+			if err != nil {
+				if grpc.Code(err) != codes.Unimplemented {
+					return err
+				}
+				tasksFallback = true
+				assignmentWatch = nil
+				log.WithError(err).Infof("falling back to Tasks")
+			}
+		}
+
+		// This code is here for backwards compatibility (so that newer clients can use the
+		// older method Tasks)
+		if tasksWatch == nil && tasksFallback {
+			tasksWatch, err = client.Tasks(ctx, &api.TasksRequest{SessionID: s.sessionID})
+			if err != nil {
+				return err
+			}
+		}
+		if tasksWatch != nil {
+			var taskResp *api.TasksMessage
+			taskResp, err = tasksWatch.Recv()
+			if err != nil {
+				return err
+			}
+			resp = &api.AssignmentsMessage{Type: api.AssignmentsMessage_COMPLETE, UpdateTasks: taskResp.Tasks}
+		}
+
+		// If there seems to be a gap in the stream, let's break out of the inner for and
+		// re-sync (by calling Assignments again).
+		if streamReference != "" && streamReference != resp.AppliesTo {
+			assignmentWatch = nil
+		} else {
+			streamReference = resp.ResultsIn
 		}
 
 		select {
-		case s.tasks <- resp:
+		case s.assignments <- resp:
 		case <-s.closed:
 			return errSessionClosed
 		case <-ctx.Done():
@@ -241,7 +280,6 @@ func (s *session) watch(ctx context.Context) error {
 
 // sendTaskStatus uses the current session to send the status of a single task.
 func (s *session) sendTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
-
 	client := api.NewDispatcherClient(s.conn)
 	if _, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{
 		SessionID: s.sessionID,
@@ -302,15 +340,14 @@ func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTa
 }
 
 func (s *session) close() error {
-	select {
-	case <-s.closed:
-		return errSessionClosed
-	default:
+	s.closeOnce.Do(func() {
 		if s.conn != nil {
 			s.agent.config.Managers.ObserveIfExists(api.Peer{Addr: s.addr}, -remotes.DefaultObservationWeight)
 			s.conn.Close()
 		}
+
 		close(s.closed)
-		return nil
-	}
+	})
+
+	return nil
 }

+ 72 - 22
vendor/src/github.com/docker/swarmkit/agent/worker.go

@@ -17,9 +17,13 @@ type Worker interface {
 	// Init prepares the worker for task assignment.
 	Init(ctx context.Context) error
 
-	// Assign the set of tasks to the worker. Tasks outside of this set will be
-	// removed.
-	Assign(ctx context.Context, tasks []*api.Task) error
+	// AssignTasks assigns a complete set of tasks to a worker. Any task not included in
+	// this set will be removed.
+	AssignTasks(ctx context.Context, tasks []*api.Task) error
+
+	// UpdateTasks updates an incremental set of tasks to the worker. Any task not included
+	// either in added or removed will remain untouched.
+	UpdateTasks(ctx context.Context, added []*api.Task, removed []string) error
 
 	// Listen to updates about tasks controlled by the worker. When first
 	// called, the reporter will receive all updates for all tasks controlled
@@ -86,14 +90,37 @@ func (w *worker) Init(ctx context.Context) error {
 	})
 }
 
-// Assign the set of tasks to the worker. Any tasks not previously known will
+// AssignTasks assigns  the set of tasks to the worker. Any tasks not previously known will
 // be started. Any tasks that are in the task set and already running will be
 // updated, if possible. Any tasks currently running on the
 // worker outside the task set will be terminated.
-func (w *worker) Assign(ctx context.Context, tasks []*api.Task) error {
+func (w *worker) AssignTasks(ctx context.Context, tasks []*api.Task) error {
 	w.mu.Lock()
 	defer w.mu.Unlock()
 
+	log.G(ctx).WithFields(logrus.Fields{
+		"len(tasks)": len(tasks),
+	}).Debug("(*worker).AssignTasks")
+
+	return reconcileTaskState(ctx, w, tasks, nil, true)
+}
+
+// UpdateTasks the set of tasks to the worker.
+// Tasks in the added set will be added to the worker, and tasks in the removed set
+// will be removed from the worker
+func (w *worker) UpdateTasks(ctx context.Context, added []*api.Task, removed []string) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	log.G(ctx).WithFields(logrus.Fields{
+		"len(added)":   len(added),
+		"len(removed)": len(removed),
+	}).Debug("(*worker).UpdateTasks")
+
+	return reconcileTaskState(ctx, w, added, removed, false)
+}
+
+func reconcileTaskState(ctx context.Context, w *worker, added []*api.Task, removed []string, fullSnapshot bool) error {
 	tx, err := w.db.Begin(true)
 	if err != nil {
 		log.G(ctx).WithError(err).Error("failed starting transaction against task database")
@@ -101,10 +128,9 @@ func (w *worker) Assign(ctx context.Context, tasks []*api.Task) error {
 	}
 	defer tx.Rollback()
 
-	log.G(ctx).WithField("len(tasks)", len(tasks)).Debug("(*worker).Assign")
 	assigned := map[string]struct{}{}
 
-	for _, task := range tasks {
+	for _, task := range added {
 		log.G(ctx).WithFields(
 			logrus.Fields{
 				"task.id":           task.ID,
@@ -135,35 +161,59 @@ func (w *worker) Assign(ctx context.Context, tasks []*api.Task) error {
 					return err
 				}
 			} else {
-				task.Status = *status // overwrite the stale manager status with ours.
+				task.Status = *status
 			}
-
 			w.startTask(ctx, tx, task)
 		}
 
 		assigned[task.ID] = struct{}{}
 	}
 
-	for id, tm := range w.taskManagers {
-		if _, ok := assigned[id]; ok {
-			continue
+	closeManager := func(tm *taskManager) {
+		// when a task is no longer assigned, we shutdown the task manager for
+		// it and leave cleanup to the sweeper.
+		if err := tm.Close(); err != nil {
+			log.G(ctx).WithError(err).Error("error closing task manager")
 		}
+	}
 
-		ctx := log.WithLogger(ctx, log.G(ctx).WithField("task.id", id))
-		if err := SetTaskAssignment(tx, id, false); err != nil {
+	removeTaskAssignment := func(taskID string) error {
+		ctx := log.WithLogger(ctx, log.G(ctx).WithField("task.id", taskID))
+		if err := SetTaskAssignment(tx, taskID, false); err != nil {
 			log.G(ctx).WithError(err).Error("error setting task assignment in database")
-			continue
 		}
+		return err
+	}
+
+	// If this was a complete set of assignments, we're going to remove all the remaining
+	// tasks.
+	if fullSnapshot {
+		for id, tm := range w.taskManagers {
+			if _, ok := assigned[id]; ok {
+				continue
+			}
 
-		delete(w.taskManagers, id)
+			err := removeTaskAssignment(id)
+			if err == nil {
+				delete(w.taskManagers, id)
+				go closeManager(tm)
+			}
+		}
+	} else {
+		// If this was an incremental set of assignments, we're going to remove only the tasks
+		// in the removed set
+		for _, taskID := range removed {
+			err := removeTaskAssignment(taskID)
+			if err != nil {
+				continue
+			}
 
-		go func(tm *taskManager) {
-			// when a task is no longer assigned, we shutdown the task manager for
-			// it and leave cleanup to the sweeper.
-			if err := tm.Close(); err != nil {
-				log.G(ctx).WithError(err).Error("error closing task manager")
+			tm, ok := w.taskManagers[taskID]
+			if ok {
+				delete(w.taskManagers, taskID)
+				go closeManager(tm)
 			}
-		}(tm)
+		}
 	}
 
 	return tx.Commit()
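
Restating the rule reconcileTaskState implements above: a COMPLETE assignment set replaces everything the worker currently runs, while an INCREMENTAL one only applies the added and removed deltas. A toy in-memory version of that rule, purely illustrative and not swarmkit code:

    package assignments

    // apply returns the next set of assigned task IDs. fullSnapshot corresponds to
    // AssignmentsMessage_COMPLETE; otherwise the message is treated as INCREMENTAL.
    func apply(current map[string]struct{}, added, removed []string, fullSnapshot bool) map[string]struct{} {
        next := current
        if fullSnapshot {
            // A complete snapshot implicitly drops every task not listed in added.
            next = make(map[string]struct{})
        }
        for _, id := range added {
            next[id] = struct{}{}
        }
        for _, id := range removed {
            delete(next, id)
        }
        return next
    }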

+ 114 - 60
vendor/src/github.com/docker/swarmkit/api/ca.pb.go

@@ -21,10 +21,11 @@ import (
 	grpc "google.golang.org/grpc"
 )
 
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
 import codes "google.golang.org/grpc/codes"
 import metadata "google.golang.org/grpc/metadata"
 import transport "google.golang.org/grpc/transport"
+import time "time"
 
 import io "io"
 
@@ -285,11 +286,12 @@ func valueToGoStringCa(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringCa(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringCa(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -299,7 +301,7 @@ func extensionToGoStringCa(e map[int32]github_com_gogo_protobuf_proto.Extension)
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 
@@ -309,7 +311,7 @@ var _ grpc.ClientConn
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion2
+const _ = grpc.SupportPackageIsVersion3
 
 // Client API for CA service
 
@@ -371,7 +373,8 @@ var _CA_serviceDesc = grpc.ServiceDesc{
 			Handler:    _CA_GetRootCACertificate_Handler,
 		},
 	},
-	Streams: []grpc.StreamDesc{},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptorCa,
 }
 
 // Client API for NodeCA service
@@ -467,7 +470,8 @@ var _NodeCA_serviceDesc = grpc.ServiceDesc{
 			Handler:    _NodeCA_NodeCertificateStatus_Handler,
 		},
 	},
-	Streams: []grpc.StreamDesc{},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptorCa,
 }
 
 func (m *NodeCertificateStatusRequest) Marshal() (data []byte, err error) {
@@ -668,12 +672,11 @@ func encodeVarintCa(data []byte, offset int, v uint64) int {
 
 type raftProxyCAServer struct {
 	local        CAServer
-	connSelector raftpicker.Interface
-	cluster      raftpicker.RaftCluster
+	connSelector raftselector.ConnProvider
 	ctxMods      []func(context.Context) (context.Context, error)
 }
 
-func NewRaftProxyCAServer(local CAServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) CAServer {
+func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) CAServer {
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
 		s, ok := transport.StreamFromContext(ctx)
 		if !ok {
@@ -695,7 +698,6 @@ func NewRaftProxyCAServer(local CAServer, connSelector raftpicker.Interface, clu
 
 	return &raftProxyCAServer{
 		local:        local,
-		cluster:      cluster,
 		connSelector: connSelector,
 		ctxMods:      mods,
 	}
@@ -710,44 +712,68 @@ func (p *raftProxyCAServer) runCtxMods(ctx context.Context) (context.Context, er
 	}
 	return ctx, nil
 }
+func (p *raftProxyCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := p.connSelector.LeaderConn(ctx)
+			if err != nil {
+				return nil, err
+			}
 
-func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) {
+			client := NewHealthClient(conn)
 
-	if p.cluster.IsLeader() {
-		return p.local.GetRootCACertificate(ctx, r)
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
+				continue
+			}
+			return conn, nil
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
-	ctx, err := p.runCtxMods(ctx)
+}
+
+func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) {
+
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.GetRootCACertificate(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewCAClient(conn).GetRootCACertificate(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.GetRootCACertificate(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewCAClient(conn).GetRootCACertificate(ctx, r)
+		return NewCAClient(conn).GetRootCACertificate(modCtx, r)
+	}
+	return resp, err
 }
 
 type raftProxyNodeCAServer struct {
 	local        NodeCAServer
-	connSelector raftpicker.Interface
-	cluster      raftpicker.RaftCluster
+	connSelector raftselector.ConnProvider
 	ctxMods      []func(context.Context) (context.Context, error)
 }
 
-func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) NodeCAServer {
+func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) NodeCAServer {
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
 		s, ok := transport.StreamFromContext(ctx)
 		if !ok {
@@ -769,7 +795,6 @@ func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftpicker.Interf
 
 	return &raftProxyNodeCAServer{
 		local:        local,
-		cluster:      cluster,
 		connSelector: connSelector,
 		ctxMods:      mods,
 	}
@@ -784,63 +809,90 @@ func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context) (context.Context
 	}
 	return ctx, nil
 }
+func (p *raftProxyNodeCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := p.connSelector.LeaderConn(ctx)
+			if err != nil {
+				return nil, err
+			}
 
-func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) {
+			client := NewHealthClient(conn)
 
-	if p.cluster.IsLeader() {
-		return p.local.IssueNodeCertificate(ctx, r)
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
+				continue
+			}
+			return conn, nil
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
-	ctx, err := p.runCtxMods(ctx)
+}
+
+func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) {
+
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.IssueNodeCertificate(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.IssueNodeCertificate(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewNodeCAClient(conn).IssueNodeCertificate(ctx, r)
+		return NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r)
+	}
+	return resp, err
 }
 
 func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) {
 
-	if p.cluster.IsLeader() {
-		return p.local.NodeCertificateStatus(ctx, r)
-	}
-	ctx, err := p.runCtxMods(ctx)
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.NodeCertificateStatus(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.NodeCertificateStatus(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewNodeCAClient(conn).NodeCertificateStatus(ctx, r)
+		return NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r)
+	}
+	return resp, err
 }
 
 func (m *NodeCertificateStatusRequest) Size() (n int) {
@@ -1655,6 +1707,8 @@ var (
 	ErrIntOverflowCa   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("ca.proto", fileDescriptorCa) }
+
 var fileDescriptorCa = []byte{
 	// 493 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
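
The regenerated proxy methods above all follow the same shape, so here is that control flow distilled into a hedged sketch. The parameter names stand in for the concrete pieces (e.g. GetRootCACertificate, raftselector.ErrIsLeader, pollNewLeaderConn), and the context-modification step from runCtxMods is omitted:

    package raftproxy

    import (
        "strings"

        "golang.org/x/net/context"
        "google.golang.org/grpc"
    )

    // forward is the shape of every proxied unary RPC: serve locally when this node
    // is the leader, otherwise forward to the leader's connection, and on a
    // connection-level failure poll for the next leader and retry exactly once.
    func forward(
        ctx context.Context,
        leaderConn func(context.Context) (*grpc.ClientConn, error), // connSelector.LeaderConn
        errIsLeader error, // raftselector.ErrIsLeader
        local func(context.Context) (interface{}, error),
        remote func(context.Context, *grpc.ClientConn) (interface{}, error),
        pollNewLeaderConn func(context.Context) (*grpc.ClientConn, error),
    ) (interface{}, error) {
        conn, err := leaderConn(ctx)
        if err != nil {
            if err == errIsLeader {
                return local(ctx)
            }
            return nil, err
        }
        resp, err := remote(ctx, conn)
        if err == nil || !connectionError(err) {
            return resp, err
        }
        conn, err = pollNewLeaderConn(ctx)
        if err != nil {
            if err == errIsLeader {
                return local(ctx)
            }
            return nil, err
        }
        return remote(ctx, conn)
    }

    // connectionError matches the string checks the generated code uses to decide
    // whether the failure was a broken leader connection worth retrying.
    func connectionError(err error) bool {
        msg := err.Error()
        return strings.Contains(msg, "is closing") ||
            strings.Contains(msg, "the connection is unavailable") ||
            strings.Contains(msg, "connection error")
    }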

File diff suppressed because it is too large
+ 400 - 333
vendor/src/github.com/docker/swarmkit/api/control.pb.go


File diff suppressed because it is too large
+ 730 - 71
vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go


+ 48 - 2
vendor/src/github.com/docker/swarmkit/api/dispatcher.proto

@@ -47,13 +47,22 @@ service Dispatcher { // maybe dispatch, al likes this
 	// it should be terminated.
 	rpc Tasks(TasksRequest) returns (stream TasksMessage) {
 		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+		option deprecated = true;
+	};
+
+	// Assignments is a stream of assignments such as tasks and secrets for node.
+	// The first message in the stream contains all of the tasks and secrets
+	// that are relevant to the node. Future messages in the stream are updates to
+	// the set of assignments.
+	rpc Assignments(AssignmentsRequest) returns (stream AssignmentsMessage) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
 	};
 }
 
 // SessionRequest starts a session.
 message SessionRequest {
 	NodeDescription description = 1;
-	// SessionID can be provided to attempt resuming an exising session. If the 
+	// SessionID can be provided to attempt resuming an exising session. If the
 	// SessionID is empty or invalid, a new SessionID will be assigned.
 	//
 	// See SessionMessage.SessionID for details.
@@ -115,7 +124,7 @@ message SessionMessage {
 	repeated WeightedPeer managers = 3;
 
 	// Symmetric encryption key distributed by the lead manager. Used by agents
-	// for securing network bootstrapping and communication. 
+	// for securing network bootstrapping and communication.
 	repeated EncryptionKey network_bootstrap_keys = 4;
 }
 
@@ -157,3 +166,40 @@ message TasksMessage {
 	repeated Task tasks = 1;
 }
 
+message AssignmentsRequest {
+	string session_id = 1 [(gogoproto.customname) = "SessionID"];
+}
+
+message AssignmentsMessage {
+	// AssignmentType specifies whether this assignment message carries
+	// the full state, or is an update to an existing state.
+	enum Type {
+		COMPLETE = 0;
+		INCREMENTAL = 1;
+	}
+
+	Type type = 1;
+
+	// AppliesTo references the previous ResultsIn value, to chain
+	// incremental updates together. For the first update in a stream,
+	// AppliesTo is empty.  If AppliesTo does not match the previously
+	// received ResultsIn, the consumer of the stream should start a new
+	// Assignments stream to re-sync.
+	string applies_to = 2;
+
+	// ResultsIn identifies the result of this assignments message, to
+	// match against the next message's AppliesTo value and protect
+	// against missed messages.
+	string results_in = 3;
+
+	// UpdateTasks is a set of new or updated tasks to run on this node.
+	// In the first assignments message, it contains all of the tasks
+	// to run on this node. Tasks outside of this set running on the node
+	// should be terminated.
+	repeated Task update_tasks = 4;
+
+	// RemoveTasks is a set of previously-assigned task IDs to remove from the
+	// assignment set. It is not used in the first assignments message of
+	// a stream.
+	repeated string remove_tasks = 5;
+}
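
The AppliesTo/ResultsIn comments above define how a consumer detects a gap in the stream. A compact sketch of that rule from the client's point of view; the real implementation is (*session).watch in agent/session.go earlier in this diff, and recvAssignments, applyAssignments and restartStream here are hypothetical stand-ins:

    package dispatcherclient

    import "github.com/docker/swarmkit/api"

    // consume applies assignment messages in order, restarting the stream whenever
    // the chaining check fails (a mismatch means at least one update was missed).
    func consume(
        recvAssignments func() (*api.AssignmentsMessage, error),
        applyAssignments func(*api.AssignmentsMessage),
        restartStream func(), // re-open Assignments; a new stream begins with a COMPLETE set
    ) error {
        var lastResultsIn string
        for {
            msg, err := recvAssignments()
            if err != nil {
                return err
            }
            // The first message of a stream carries an empty AppliesTo; every later
            // message must reference the previous ResultsIn or we re-sync.
            if msg.AppliesTo != lastResultsIn {
                restartStream()
                lastResultsIn = ""
                continue
            }
            lastResultsIn = msg.ResultsIn
            applyAssignments(msg) // COMPLETE replaces the set, INCREMENTAL applies UpdateTasks/RemoveTasks
        }
    }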

+ 9 - 4
vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go

@@ -32,7 +32,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 // A Duration represents a signed, fixed-length span of time represented
 // as a count of seconds and fractions of seconds at nanosecond
@@ -128,11 +130,12 @@ func valueToGoStringDuration(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringDuration(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringDuration(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -142,7 +145,7 @@ func extensionToGoStringDuration(e map[int32]github_com_gogo_protobuf_proto.Exte
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 func (m *Duration) Marshal() (data []byte, err error) {
@@ -438,6 +441,8 @@ var (
 	ErrIntOverflowDuration   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("duration.proto", fileDescriptorDuration) }
+
 var fileDescriptorDuration = []byte{
 	// 201 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a,

+ 53 - 25
vendor/src/github.com/docker/swarmkit/api/health.pb.go

@@ -21,10 +21,11 @@ import (
 	grpc "google.golang.org/grpc"
 )
 
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
 import codes "google.golang.org/grpc/codes"
 import metadata "google.golang.org/grpc/metadata"
 import transport "google.golang.org/grpc/transport"
+import time "time"
 
 import io "io"
 
@@ -153,11 +154,12 @@ func valueToGoStringHealth(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringHealth(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringHealth(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -167,7 +169,7 @@ func extensionToGoStringHealth(e map[int32]github_com_gogo_protobuf_proto.Extens
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 
@@ -177,7 +179,7 @@ var _ grpc.ClientConn
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion2
+const _ = grpc.SupportPackageIsVersion3
 
 // Client API for Health service
 
@@ -239,7 +241,8 @@ var _Health_serviceDesc = grpc.ServiceDesc{
 			Handler:    _Health_Check_Handler,
 		},
 	},
-	Streams: []grpc.StreamDesc{},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptorHealth,
 }
 
 func (m *HealthCheckRequest) Marshal() (data []byte, err error) {
@@ -319,12 +322,11 @@ func encodeVarintHealth(data []byte, offset int, v uint64) int {
 
 type raftProxyHealthServer struct {
 	local        HealthServer
-	connSelector raftpicker.Interface
-	cluster      raftpicker.RaftCluster
+	connSelector raftselector.ConnProvider
 	ctxMods      []func(context.Context) (context.Context, error)
 }
 
-func NewRaftProxyHealthServer(local HealthServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) HealthServer {
+func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) HealthServer {
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
 		s, ok := transport.StreamFromContext(ctx)
 		if !ok {
@@ -346,7 +348,6 @@ func NewRaftProxyHealthServer(local HealthServer, connSelector raftpicker.Interf
 
 	return &raftProxyHealthServer{
 		local:        local,
-		cluster:      cluster,
 		connSelector: connSelector,
 		ctxMods:      mods,
 	}
@@ -361,34 +362,59 @@ func (p *raftProxyHealthServer) runCtxMods(ctx context.Context) (context.Context
 	}
 	return ctx, nil
 }
+func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := p.connSelector.LeaderConn(ctx)
+			if err != nil {
+				return nil, err
+			}
 
-func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
+			client := NewHealthClient(conn)
 
-	if p.cluster.IsLeader() {
-		return p.local.Check(ctx, r)
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
+				continue
+			}
+			return conn, nil
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
-	ctx, err := p.runCtxMods(ctx)
+}
+
+func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
+
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.Check(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewHealthClient(conn).Check(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.Check(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewHealthClient(conn).Check(ctx, r)
+		return NewHealthClient(conn).Check(modCtx, r)
+	}
+	return resp, err
 }
 
 func (m *HealthCheckRequest) Size() (n int) {
@@ -704,6 +730,8 @@ var (
 	ErrIntOverflowHealth   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("health.proto", fileDescriptorHealth) }
+
 var fileDescriptorHealth = []byte{
 	// 291 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
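
The regenerated proxy above no longer needs a raftpicker.Interface plus a RaftCluster: it relies on a single connection provider that either hands back a client connection to the current leader or signals that this node is the leader itself, in which case the proxy serves the request locally. A minimal sketch of the assumed contract of the github.com/docker/swarmkit/manager/raftselector package the new import points at; only the names and the LeaderConn signature are taken from the generated code, the error text is illustrative:

    package raftselector

    import (
    	"errors"

    	"golang.org/x/net/context"
    	"google.golang.org/grpc"
    )

    // ErrIsLeader is returned by LeaderConn when the local node is the raft
    // leader; the generated proxies treat it as "serve the request locally".
    var ErrIsLeader = errors.New("node is leader")

    // ConnProvider supplies a gRPC client connection to the current leader.
    type ConnProvider interface {
    	LeaderConn(ctx context.Context) (*grpc.ClientConn, error)
    }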

+ 157 - 98
vendor/src/github.com/docker/swarmkit/api/objects.pb.go

@@ -66,6 +66,9 @@ type Service struct {
 	ID   string      `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
 	Meta Meta        `protobuf:"bytes,2,opt,name=meta" json:"meta"`
 	Spec ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"`
+	// PreviousSpec is the previous service spec that was in place before
+	// "Spec".
+	PreviousSpec *ServiceSpec `protobuf:"bytes,6,opt,name=previous_spec,json=previousSpec" json:"previous_spec,omitempty"`
 	// Runtime state of service endpoint. This may be different
 	// from the spec version because the user may not have entered
 	// the optional fields like node_port or virtual_ip and it
@@ -284,6 +287,7 @@ func (m *Service) Copy() *Service {
 		ID:           m.ID,
 		Meta:         *m.Meta.Copy(),
 		Spec:         *m.Spec.Copy(),
+		PreviousSpec: m.PreviousSpec.Copy(),
 		Endpoint:     m.Endpoint.Copy(),
 		UpdateStatus: m.UpdateStatus.Copy(),
 	}
@@ -468,11 +472,14 @@ func (this *Service) GoString() string {
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 9)
+	s := make([]string, 0, 10)
 	s = append(s, "&api.Service{")
 	s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
 	s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
 	s = append(s, "Spec: "+strings.Replace(this.Spec.GoString(), `&`, ``, 1)+",\n")
+	if this.PreviousSpec != nil {
+		s = append(s, "PreviousSpec: "+fmt.Sprintf("%#v", this.PreviousSpec)+",\n")
+	}
 	if this.Endpoint != nil {
 		s = append(s, "Endpoint: "+fmt.Sprintf("%#v", this.Endpoint)+",\n")
 	}
@@ -596,11 +603,12 @@ func valueToGoStringObjects(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringObjects(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringObjects(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -610,7 +618,7 @@ func extensionToGoStringObjects(e map[int32]github_com_gogo_protobuf_proto.Exten
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 func (m *Meta) Marshal() (data []byte, err error) {
@@ -802,6 +810,16 @@ func (m *Service) MarshalTo(data []byte) (int, error) {
 		}
 		i += n14
 	}
+	if m.PreviousSpec != nil {
+		data[i] = 0x32
+		i++
+		i = encodeVarintObjects(data, i, uint64(m.PreviousSpec.Size()))
+		n15, err := m.PreviousSpec.MarshalTo(data[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n15
+	}
 	return i, nil
 }
 
@@ -824,11 +842,11 @@ func (m *Endpoint) MarshalTo(data []byte) (int, error) {
 		data[i] = 0xa
 		i++
 		i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
-		n15, err := m.Spec.MarshalTo(data[i:])
+		n16, err := m.Spec.MarshalTo(data[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n15
+		i += n16
 	}
 	if len(m.Ports) > 0 {
 		for _, msg := range m.Ports {
@@ -911,19 +929,19 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
 	data[i] = 0x12
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
-	n16, err := m.Meta.MarshalTo(data[i:])
+	n17, err := m.Meta.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n16
+	i += n17
 	data[i] = 0x1a
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
-	n17, err := m.Spec.MarshalTo(data[i:])
+	n18, err := m.Spec.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n17
+	i += n18
 	if len(m.ServiceID) > 0 {
 		data[i] = 0x22
 		i++
@@ -944,27 +962,27 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
 	data[i] = 0x3a
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Annotations.Size()))
-	n18, err := m.Annotations.MarshalTo(data[i:])
+	n19, err := m.Annotations.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n18
+	i += n19
 	data[i] = 0x42
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.ServiceAnnotations.Size()))
-	n19, err := m.ServiceAnnotations.MarshalTo(data[i:])
+	n20, err := m.ServiceAnnotations.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n19
+	i += n20
 	data[i] = 0x4a
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Status.Size()))
-	n20, err := m.Status.MarshalTo(data[i:])
+	n21, err := m.Status.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n20
+	i += n21
 	if m.DesiredState != 0 {
 		data[i] = 0x50
 		i++
@@ -986,21 +1004,21 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
 		data[i] = 0x62
 		i++
 		i = encodeVarintObjects(data, i, uint64(m.Endpoint.Size()))
-		n21, err := m.Endpoint.MarshalTo(data[i:])
+		n22, err := m.Endpoint.MarshalTo(data[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n21
+		i += n22
 	}
 	if m.LogDriver != nil {
 		data[i] = 0x6a
 		i++
 		i = encodeVarintObjects(data, i, uint64(m.LogDriver.Size()))
-		n22, err := m.LogDriver.MarshalTo(data[i:])
+		n23, err := m.LogDriver.MarshalTo(data[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n22
+		i += n23
 	}
 	return i, nil
 }
@@ -1024,11 +1042,11 @@ func (m *NetworkAttachment) MarshalTo(data []byte) (int, error) {
 		data[i] = 0xa
 		i++
 		i = encodeVarintObjects(data, i, uint64(m.Network.Size()))
-		n23, err := m.Network.MarshalTo(data[i:])
+		n24, err := m.Network.MarshalTo(data[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n23
+		i += n24
 	}
 	if len(m.Addresses) > 0 {
 		for _, s := range m.Addresses {
@@ -1087,38 +1105,38 @@ func (m *Network) MarshalTo(data []byte) (int, error) {
 	data[i] = 0x12
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
-	n24, err := m.Meta.MarshalTo(data[i:])
+	n25, err := m.Meta.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n24
+	i += n25
 	data[i] = 0x1a
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
-	n25, err := m.Spec.MarshalTo(data[i:])
+	n26, err := m.Spec.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n25
+	i += n26
 	if m.DriverState != nil {
 		data[i] = 0x22
 		i++
 		i = encodeVarintObjects(data, i, uint64(m.DriverState.Size()))
-		n26, err := m.DriverState.MarshalTo(data[i:])
+		n27, err := m.DriverState.MarshalTo(data[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n26
+		i += n27
 	}
 	if m.IPAM != nil {
 		data[i] = 0x2a
 		i++
 		i = encodeVarintObjects(data, i, uint64(m.IPAM.Size()))
-		n27, err := m.IPAM.MarshalTo(data[i:])
+		n28, err := m.IPAM.MarshalTo(data[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n27
+		i += n28
 	}
 	return i, nil
 }
@@ -1147,27 +1165,27 @@ func (m *Cluster) MarshalTo(data []byte) (int, error) {
 	data[i] = 0x12
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
-	n28, err := m.Meta.MarshalTo(data[i:])
+	n29, err := m.Meta.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n28
+	i += n29
 	data[i] = 0x1a
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
-	n29, err := m.Spec.MarshalTo(data[i:])
+	n30, err := m.Spec.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n29
+	i += n30
 	data[i] = 0x22
 	i++
 	i = encodeVarintObjects(data, i, uint64(m.RootCA.Size()))
-	n30, err := m.RootCA.MarshalTo(data[i:])
+	n31, err := m.RootCA.MarshalTo(data[i:])
 	if err != nil {
 		return 0, err
 	}
-	i += n30
+	i += n31
 	if len(m.NetworkBootstrapKeys) > 0 {
 		for _, msg := range m.NetworkBootstrapKeys {
 			data[i] = 0x2a
@@ -1281,6 +1299,10 @@ func (m *Service) Size() (n int) {
 		l = m.UpdateStatus.Size()
 		n += 1 + l + sovObjects(uint64(l))
 	}
+	if m.PreviousSpec != nil {
+		l = m.PreviousSpec.Size()
+		n += 1 + l + sovObjects(uint64(l))
+	}
 	return n
 }
 
@@ -1489,6 +1511,7 @@ func (this *Service) String() string {
 		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`,
 		`Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`,
 		`UpdateStatus:` + strings.Replace(fmt.Sprintf("%v", this.UpdateStatus), "UpdateStatus", "UpdateStatus", 1) + `,`,
+		`PreviousSpec:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpec), "ServiceSpec", "ServiceSpec", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2215,6 +2238,39 @@ func (m *Service) Unmarshal(data []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowObjects
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthObjects
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PreviousSpec == nil {
+				m.PreviousSpec = &ServiceSpec{}
+			}
+			if err := m.PreviousSpec.Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipObjects(data[iNdEx:])
@@ -3581,70 +3637,73 @@ var (
 	ErrIntOverflowObjects   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("objects.proto", fileDescriptorObjects) }
+
 var fileDescriptorObjects = []byte{
-	// 1009 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6e, 0x1b, 0x45,
-	0x1c, 0xce, 0xda, 0x1b, 0xdb, 0xfb, 0x73, 0x1c, 0x89, 0xa1, 0xaa, 0xb6, 0x21, 0xd8, 0xc1, 0x15,
-	0xa8, 0x87, 0xca, 0x15, 0xa5, 0x20, 0x2a, 0x5a, 0x21, 0xff, 0x13, 0x58, 0x25, 0x10, 0x4d, 0x4b,
-	0x7a, 0x5c, 0x4d, 0x76, 0xa7, 0x66, 0xb1, 0xbd, 0xb3, 0x9a, 0x19, 0xbb, 0xf2, 0x0d, 0xf1, 0x00,
-	0x48, 0xbc, 0x00, 0xaf, 0xc2, 0x35, 0x07, 0x0e, 0x1c, 0x39, 0x59, 0xc4, 0x37, 0x4e, 0xf0, 0x08,
-	0x68, 0x66, 0x67, 0xed, 0x8d, 0xbc, 0x0e, 0x8d, 0x84, 0x72, 0x9b, 0xd9, 0xf9, 0xbe, 0x6f, 0x7e,
-	0xff, 0x67, 0xa1, 0xc6, 0xce, 0xbe, 0xa7, 0xbe, 0x14, 0xad, 0x98, 0x33, 0xc9, 0x10, 0x0a, 0x98,
+	// 1029 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
+	0x18, 0xce, 0xda, 0x1b, 0xdb, 0xfb, 0x3a, 0x8e, 0xc4, 0x50, 0x55, 0xdb, 0x10, 0xec, 0xe0, 0x0a,
+	0xd4, 0x43, 0xe5, 0x8a, 0x52, 0x10, 0x15, 0xad, 0x90, 0xbf, 0x04, 0x56, 0x09, 0x44, 0xd3, 0x92,
+	0x1e, 0x57, 0x93, 0xdd, 0xa9, 0x59, 0x6c, 0xef, 0xac, 0x66, 0xc6, 0xae, 0x7c, 0x43, 0xfc, 0x00,
+	0x7e, 0x02, 0x7f, 0x85, 0x6b, 0x0e, 0x1c, 0xb8, 0xc1, 0xc9, 0x22, 0xbe, 0x71, 0x82, 0x9f, 0x80,
+	0x66, 0x76, 0xd6, 0xde, 0xc8, 0xeb, 0x90, 0x4a, 0x28, 0xb7, 0x99, 0x9d, 0xe7, 0x79, 0xde, 0xcf,
+	0x79, 0x77, 0xa0, 0xc6, 0xce, 0xbe, 0xa7, 0xbe, 0x14, 0xad, 0x98, 0x33, 0xc9, 0x10, 0x0a, 0x98,
 	0x3f, 0xa2, 0xbc, 0x25, 0x5e, 0x13, 0x3e, 0x19, 0x85, 0xb2, 0x35, 0xfb, 0xf0, 0xa0, 0x2a, 0xe7,
 	0x31, 0x35, 0x80, 0x83, 0xaa, 0x88, 0xa9, 0x9f, 0x6e, 0xee, 0xc8, 0x70, 0x42, 0x85, 0x24, 0x93,
 	0xf8, 0xc1, 0x6a, 0x65, 0x8e, 0x6e, 0x0d, 0xd9, 0x90, 0xe9, 0xe5, 0x03, 0xb5, 0x4a, 0xbe, 0x36,
-	0x7f, 0xb5, 0xc0, 0x3e, 0xa6, 0x92, 0xa0, 0xcf, 0xa0, 0x3c, 0xa3, 0x5c, 0x84, 0x2c, 0x72, 0xad,
-	0x23, 0xeb, 0x5e, 0xf5, 0xe1, 0x3b, 0xad, 0xcd, 0x9b, 0x5b, 0xa7, 0x09, 0xa4, 0x63, 0x9f, 0x2f,
-	0x1a, 0x3b, 0x38, 0x65, 0xa0, 0x27, 0x00, 0x3e, 0xa7, 0x44, 0xd2, 0xc0, 0x23, 0xd2, 0x2d, 0x68,
-	0xfe, 0xbb, 0x79, 0xfc, 0x17, 0xa9, 0x51, 0xd8, 0x31, 0x84, 0xb6, 0x54, 0xec, 0x69, 0x1c, 0xa4,
-	0xec, 0xe2, 0x1b, 0xb1, 0x0d, 0xa1, 0x2d, 0x9b, 0x7f, 0x15, 0xc1, 0xfe, 0x9a, 0x05, 0x14, 0xdd,
-	0x86, 0x42, 0x18, 0x68, 0xe3, 0x9d, 0x4e, 0x69, 0xb9, 0x68, 0x14, 0x06, 0x3d, 0x5c, 0x08, 0x03,
-	0xf4, 0x10, 0xec, 0x09, 0x95, 0xc4, 0x98, 0xe5, 0xe6, 0x09, 0xab, 0x08, 0x18, 0x9f, 0x34, 0x16,
-	0x7d, 0x02, 0xb6, 0x0a, 0xab, 0x31, 0xe6, 0x30, 0x8f, 0xa3, 0xee, 0x7c, 0x1e, 0x53, 0x3f, 0xe5,
-	0x29, 0x3c, 0xea, 0x43, 0x35, 0xa0, 0xc2, 0xe7, 0x61, 0x2c, 0x55, 0x24, 0x6d, 0x4d, 0xbf, 0xbb,
-	0x8d, 0xde, 0x5b, 0x43, 0x71, 0x96, 0x87, 0x9e, 0x40, 0x49, 0x48, 0x22, 0xa7, 0xc2, 0xdd, 0xd5,
-	0x0a, 0xf5, 0xad, 0x06, 0x68, 0x94, 0x31, 0xc1, 0x70, 0xd0, 0x97, 0xb0, 0x3f, 0x21, 0x11, 0x19,
-	0x52, 0xee, 0x19, 0x95, 0x92, 0x56, 0x79, 0x2f, 0xd7, 0xf5, 0x04, 0x99, 0x08, 0xe1, 0xda, 0x24,
-	0xbb, 0x45, 0x7d, 0x00, 0x22, 0x25, 0xf1, 0xbf, 0x9b, 0xd0, 0x48, 0xba, 0x65, 0xad, 0xf2, 0x7e,
-	0xae, 0x2d, 0x54, 0xbe, 0x66, 0x7c, 0xd4, 0x5e, 0x81, 0x71, 0x86, 0x88, 0xbe, 0x80, 0xaa, 0x4f,
-	0xb9, 0x0c, 0x5f, 0x85, 0x3e, 0x91, 0xd4, 0xad, 0x68, 0x9d, 0x46, 0x9e, 0x4e, 0x77, 0x0d, 0x33,
-	0x4e, 0x65, 0x99, 0xcd, 0x9f, 0x0b, 0x50, 0x7e, 0x4e, 0xf9, 0x2c, 0xf4, 0xff, 0xdf, 0x74, 0x3f,
-	0xbe, 0x94, 0xee, 0x5c, 0xcb, 0xcc, 0xb5, 0x1b, 0x19, 0xff, 0x14, 0x2a, 0x34, 0x0a, 0x62, 0x16,
-	0x46, 0xd2, 0xa4, 0x3b, 0xb7, 0x5a, 0xfa, 0x06, 0x83, 0x57, 0x68, 0xd4, 0x87, 0x5a, 0x52, 0xc5,
-	0xde, 0xa5, 0x5c, 0x1f, 0xe5, 0xd1, 0xbf, 0xd5, 0x40, 0x93, 0xa4, 0xbd, 0x69, 0x66, 0xd7, 0xfc,
-	0xa5, 0x00, 0x95, 0x54, 0x1d, 0x3d, 0x32, 0x8e, 0x58, 0xdb, 0xa5, 0x52, 0xac, 0xf2, 0xc4, 0xf8,
-	0xf0, 0x08, 0x76, 0x63, 0xc6, 0xa5, 0x70, 0x0b, 0x47, 0xc5, 0x6d, 0xd5, 0x76, 0xc2, 0xb8, 0xec,
-	0xb2, 0xe8, 0x55, 0x38, 0xc4, 0x09, 0x18, 0xbd, 0x84, 0xea, 0x2c, 0xe4, 0x72, 0x4a, 0xc6, 0x5e,
-	0x18, 0x0b, 0xb7, 0xa8, 0xb9, 0x1f, 0x5c, 0x75, 0x65, 0xeb, 0x34, 0xc1, 0x0f, 0x4e, 0x3a, 0xfb,
-	0xcb, 0x45, 0x03, 0x56, 0x5b, 0x81, 0xc1, 0x48, 0x0d, 0x62, 0x71, 0x70, 0x0c, 0xce, 0xea, 0x04,
-	0xdd, 0x07, 0x88, 0x92, 0xe2, 0xf2, 0x56, 0xe9, 0xae, 0x2d, 0x17, 0x0d, 0xc7, 0x94, 0xdc, 0xa0,
-	0x87, 0x1d, 0x03, 0x18, 0x04, 0x08, 0x81, 0x4d, 0x82, 0x80, 0xeb, 0xe4, 0x3b, 0x58, 0xaf, 0x9b,
-	0xbf, 0xed, 0x82, 0xfd, 0x82, 0x88, 0xd1, 0x4d, 0x0f, 0x08, 0x75, 0xe7, 0x46, 0xb9, 0xdc, 0x07,
-	0x10, 0x49, 0x25, 0x29, 0x77, 0xec, 0xb5, 0x3b, 0xa6, 0xbe, 0x94, 0x3b, 0x06, 0x90, 0xb8, 0x23,
-	0xc6, 0x4c, 0xea, 0xca, 0xb0, 0xb1, 0x5e, 0xa3, 0xbb, 0x50, 0x8e, 0x58, 0xa0, 0xe9, 0x25, 0x4d,
-	0x87, 0xe5, 0xa2, 0x51, 0x52, 0xc3, 0x60, 0xd0, 0xc3, 0x25, 0x75, 0x34, 0x08, 0x54, 0xc7, 0x91,
-	0x28, 0x62, 0x92, 0xa8, 0x71, 0x22, 0x4c, 0xe7, 0xe6, 0xd6, 0x75, 0x7b, 0x0d, 0x4b, 0x3b, 0x2e,
-	0xc3, 0x44, 0xa7, 0xf0, 0x76, 0x6a, 0x6f, 0x56, 0xb0, 0x72, 0x1d, 0x41, 0x64, 0x14, 0x32, 0x27,
-	0x99, 0x09, 0xe7, 0x6c, 0x9f, 0x70, 0x3a, 0x82, 0x79, 0x13, 0xae, 0x03, 0xb5, 0x80, 0x8a, 0x90,
-	0xd3, 0x40, 0xf7, 0x0e, 0x75, 0xe1, 0xc8, 0xba, 0xb7, 0xbf, 0xe5, 0xd1, 0x30, 0x22, 0x14, 0xef,
-	0x19, 0x8e, 0xde, 0xa1, 0x36, 0x54, 0x4c, 0xdd, 0x08, 0xb7, 0xaa, 0x6b, 0xf7, 0x0d, 0x27, 0xdb,
-	0x8a, 0x76, 0xa9, 0xf7, 0xf7, 0xae, 0xd5, 0xfb, 0x8f, 0x01, 0xc6, 0x6c, 0xe8, 0x05, 0x3c, 0x9c,
-	0x51, 0xee, 0xd6, 0x34, 0xf7, 0x20, 0x8f, 0xdb, 0xd3, 0x08, 0xec, 0x8c, 0xd9, 0x30, 0x59, 0x36,
-	0x7f, 0xb4, 0xe0, 0xad, 0x0d, 0xa3, 0xd0, 0xc7, 0x50, 0x36, 0x66, 0x5d, 0xf5, 0x7c, 0x1b, 0x1e,
-	0x4e, 0xb1, 0xe8, 0x10, 0x1c, 0xd5, 0x23, 0x54, 0x08, 0x9a, 0x74, 0xbf, 0x83, 0xd7, 0x1f, 0x90,
-	0x0b, 0x65, 0x32, 0x0e, 0x89, 0x3a, 0x2b, 0xea, 0xb3, 0x74, 0xdb, 0xfc, 0xa9, 0x00, 0x65, 0x23,
-	0x76, 0xd3, 0x83, 0xd8, 0x5c, 0xbb, 0xd1, 0x59, 0x4f, 0x61, 0x2f, 0x09, 0xa7, 0x29, 0x09, 0xfb,
-	0x3f, 0x83, 0x5a, 0x4d, 0xf0, 0x49, 0x39, 0x3c, 0x05, 0x3b, 0x8c, 0xc9, 0xc4, 0x0c, 0xe1, 0xdc,
-	0x9b, 0x07, 0x27, 0xed, 0xe3, 0x6f, 0xe2, 0xa4, 0xb2, 0x2b, 0xcb, 0x45, 0xc3, 0x56, 0x1f, 0xb0,
-	0xa6, 0x35, 0xff, 0x2e, 0x40, 0xb9, 0x3b, 0x9e, 0x0a, 0x49, 0xf9, 0x4d, 0x07, 0xc4, 0x5c, 0xbb,
-	0x11, 0x90, 0x2e, 0x94, 0x39, 0x63, 0xd2, 0xf3, 0xc9, 0x55, 0xb1, 0xc0, 0x8c, 0xc9, 0x6e, 0xbb,
-	0xb3, 0xaf, 0x88, 0x6a, 0x90, 0x24, 0x7b, 0x5c, 0x52, 0xd4, 0x2e, 0x41, 0x2f, 0xe1, 0x76, 0x3a,
-	0x7e, 0xcf, 0x18, 0x93, 0x42, 0x72, 0x12, 0x7b, 0x23, 0x3a, 0x57, 0xaf, 0x55, 0x71, 0xdb, 0x3f,
-	0x45, 0x3f, 0xf2, 0xf9, 0x5c, 0x07, 0xea, 0x19, 0x9d, 0xe3, 0x5b, 0x46, 0xa0, 0x93, 0xf2, 0x9f,
-	0xd1, 0xb9, 0x40, 0x9f, 0xc3, 0x21, 0x5d, 0xc1, 0x94, 0xa2, 0x37, 0x26, 0x13, 0xf5, 0xb0, 0x78,
-	0xfe, 0x98, 0xf9, 0x23, 0x3d, 0xdb, 0x6c, 0x7c, 0x87, 0x66, 0xa5, 0xbe, 0x4a, 0x10, 0x5d, 0x05,
-	0xe8, 0x1c, 0x9e, 0x5f, 0xd4, 0x77, 0xfe, 0xb8, 0xa8, 0xef, 0xfc, 0x73, 0x51, 0xb7, 0x7e, 0x58,
-	0xd6, 0xad, 0xf3, 0x65, 0xdd, 0xfa, 0x7d, 0x59, 0xb7, 0xfe, 0x5c, 0xd6, 0xad, 0xb3, 0x92, 0xfe,
-	0xbd, 0xfd, 0xe8, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x56, 0x49, 0xe6, 0x55, 0x4e, 0x0b, 0x00,
-	0x00,
+	0x7f, 0xb1, 0xc0, 0x3e, 0xa6, 0x92, 0xa0, 0xcf, 0xa0, 0x3c, 0xa3, 0x5c, 0x84, 0x2c, 0x72, 0xad,
+	0x23, 0xeb, 0x5e, 0xf5, 0xe1, 0x3b, 0xad, 0x4d, 0xcb, 0xad, 0xd3, 0x04, 0xd2, 0xb1, 0xcf, 0x17,
+	0x8d, 0x1d, 0x9c, 0x32, 0xd0, 0x13, 0x00, 0x9f, 0x53, 0x22, 0x69, 0xe0, 0x11, 0xe9, 0x16, 0x34,
+	0xff, 0xdd, 0x3c, 0xfe, 0x8b, 0xd4, 0x29, 0xec, 0x18, 0x42, 0x5b, 0x2a, 0xf6, 0x34, 0x0e, 0x52,
+	0x76, 0xf1, 0x5a, 0x6c, 0x43, 0x68, 0xcb, 0xe6, 0x5f, 0x45, 0xb0, 0xbf, 0x66, 0x01, 0x45, 0xb7,
+	0xa1, 0x10, 0x06, 0xda, 0x79, 0xa7, 0x53, 0x5a, 0x2e, 0x1a, 0x85, 0x41, 0x0f, 0x17, 0xc2, 0x00,
+	0x3d, 0x04, 0x7b, 0x42, 0x25, 0x31, 0x6e, 0xb9, 0x79, 0xc2, 0x2a, 0x03, 0x26, 0x26, 0x8d, 0x45,
+	0x9f, 0x80, 0xad, 0xd2, 0x6a, 0x9c, 0x39, 0xcc, 0xe3, 0x28, 0x9b, 0xcf, 0x63, 0xea, 0xa7, 0x3c,
+	0x85, 0x47, 0x7d, 0xa8, 0x06, 0x54, 0xf8, 0x3c, 0x8c, 0xa5, 0xca, 0xa4, 0xad, 0xe9, 0x77, 0xb7,
+	0xd1, 0x7b, 0x6b, 0x28, 0xce, 0xf2, 0xd0, 0x13, 0x28, 0x09, 0x49, 0xe4, 0x54, 0xb8, 0xbb, 0x5a,
+	0xa1, 0xbe, 0xd5, 0x01, 0x8d, 0x32, 0x2e, 0x18, 0x0e, 0xfa, 0x12, 0xf6, 0x27, 0x24, 0x22, 0x43,
+	0xca, 0x3d, 0xa3, 0x52, 0xd2, 0x2a, 0xef, 0xe5, 0x86, 0x9e, 0x20, 0x13, 0x21, 0x5c, 0x9b, 0x64,
+	0xb7, 0xa8, 0x0f, 0x40, 0xa4, 0x24, 0xfe, 0x77, 0x13, 0x1a, 0x49, 0xb7, 0xac, 0x55, 0xde, 0xcf,
+	0xf5, 0x85, 0xca, 0xd7, 0x8c, 0x8f, 0xda, 0x2b, 0x30, 0xce, 0x10, 0xd1, 0x17, 0x50, 0xf5, 0x29,
+	0x97, 0xe1, 0xab, 0xd0, 0x27, 0x92, 0xba, 0x15, 0xad, 0xd3, 0xc8, 0xd3, 0xe9, 0xae, 0x61, 0x26,
+	0xa8, 0x2c, 0xb3, 0xf9, 0x7b, 0x01, 0xca, 0xcf, 0x29, 0x9f, 0x85, 0xfe, 0xff, 0x5b, 0xee, 0xc7,
+	0x97, 0xca, 0x9d, 0xeb, 0x99, 0x31, 0xbb, 0x51, 0xf1, 0x4f, 0xa1, 0x42, 0xa3, 0x20, 0x66, 0x61,
+	0x24, 0x4d, 0xb9, 0x73, 0xbb, 0xa5, 0x6f, 0x30, 0x78, 0x85, 0x46, 0x7d, 0xa8, 0x25, 0x5d, 0xec,
+	0x5d, 0xaa, 0xf5, 0x51, 0x1e, 0xfd, 0x5b, 0x0d, 0x34, 0x45, 0xda, 0x9b, 0x66, 0x76, 0xa8, 0x07,
+	0xb5, 0x98, 0xd3, 0x59, 0xc8, 0xa6, 0xc2, 0xd3, 0x41, 0x94, 0xae, 0x15, 0x04, 0xde, 0x4b, 0x59,
+	0x6a, 0xd7, 0xfc, 0xb9, 0x00, 0x95, 0xd4, 0x47, 0xf4, 0xc8, 0xa4, 0xc3, 0xda, 0xee, 0x50, 0x8a,
+	0xd5, 0x52, 0x49, 0x26, 0x1e, 0xc1, 0x6e, 0xcc, 0xb8, 0x14, 0x6e, 0xe1, 0xa8, 0xb8, 0xad, 0x67,
+	0x4f, 0x18, 0x97, 0x5d, 0x16, 0xbd, 0x0a, 0x87, 0x38, 0x01, 0xa3, 0x97, 0x50, 0x9d, 0x85, 0x5c,
+	0x4e, 0xc9, 0xd8, 0x0b, 0x63, 0xe1, 0x16, 0x35, 0xf7, 0x83, 0xab, 0x4c, 0xb6, 0x4e, 0x13, 0xfc,
+	0xe0, 0xa4, 0xb3, 0xbf, 0x5c, 0x34, 0x60, 0xb5, 0x15, 0x18, 0x8c, 0xd4, 0x20, 0x16, 0x07, 0xc7,
+	0xe0, 0xac, 0x4e, 0xd0, 0x7d, 0x80, 0x28, 0x69, 0x51, 0x6f, 0xd5, 0x34, 0xb5, 0xe5, 0xa2, 0xe1,
+	0x98, 0xc6, 0x1d, 0xf4, 0xb0, 0x63, 0x00, 0x83, 0x00, 0x21, 0xb0, 0x49, 0x10, 0x70, 0xdd, 0x42,
+	0x0e, 0xd6, 0xeb, 0xe6, 0xaf, 0xbb, 0x60, 0xbf, 0x20, 0x62, 0x74, 0xd3, 0x63, 0x46, 0xd9, 0xdc,
+	0x68, 0xba, 0xfb, 0x00, 0x22, 0x29, 0xa5, 0x0a, 0xc7, 0x5e, 0x87, 0x63, 0x0a, 0xac, 0xc2, 0x31,
+	0x80, 0x24, 0x1c, 0x31, 0x66, 0x52, 0xf7, 0x97, 0x8d, 0xf5, 0x1a, 0xdd, 0x85, 0x72, 0xc4, 0x02,
+	0x4d, 0x2f, 0x69, 0x3a, 0x2c, 0x17, 0x8d, 0x92, 0x1a, 0x29, 0x83, 0x1e, 0x2e, 0xa9, 0xa3, 0x41,
+	0xa0, 0xee, 0x2d, 0x89, 0x22, 0x26, 0x89, 0x1a, 0x4a, 0xc2, 0xdc, 0xff, 0xdc, 0xc6, 0x6a, 0xaf,
+	0x61, 0xe9, 0xbd, 0xcd, 0x30, 0xd1, 0x29, 0xbc, 0x9d, 0xfa, 0x9b, 0x15, 0xac, 0xbc, 0x89, 0x20,
+	0x32, 0x0a, 0x99, 0x93, 0xcc, 0x9c, 0x74, 0xb6, 0xcf, 0x49, 0x9d, 0xc1, 0xbc, 0x39, 0xd9, 0x81,
+	0x5a, 0x40, 0x45, 0xc8, 0x69, 0xa0, 0x6f, 0x20, 0x75, 0xe1, 0xc8, 0xba, 0xb7, 0xbf, 0xe5, 0xd7,
+	0x63, 0x44, 0x28, 0xde, 0x33, 0x1c, 0xbd, 0x43, 0x6d, 0xa8, 0x98, 0xbe, 0x11, 0x6e, 0x55, 0xf7,
+	0xee, 0x35, 0xe7, 0xe3, 0x8a, 0x76, 0x69, 0x82, 0xec, 0xbd, 0xd1, 0x04, 0x79, 0x0c, 0x30, 0x66,
+	0x43, 0x2f, 0xe0, 0xe1, 0x8c, 0x72, 0xb7, 0xa6, 0xb9, 0x07, 0x79, 0xdc, 0x9e, 0x46, 0x60, 0x67,
+	0xcc, 0x86, 0xc9, 0xb2, 0xf9, 0xa3, 0x05, 0x6f, 0x6d, 0x38, 0x85, 0x3e, 0x86, 0xb2, 0x71, 0xeb,
+	0xaa, 0x47, 0x80, 0xe1, 0xe1, 0x14, 0x8b, 0x0e, 0xc1, 0x51, 0x77, 0x84, 0x0a, 0x41, 0x93, 0xdb,
+	0xef, 0xe0, 0xf5, 0x07, 0xe4, 0x42, 0x99, 0x8c, 0x43, 0xa2, 0xce, 0x8a, 0xfa, 0x2c, 0xdd, 0x36,
+	0x7f, 0x2a, 0x40, 0xd9, 0x88, 0xdd, 0xf4, 0x38, 0x37, 0x66, 0x37, 0x6e, 0xd6, 0x53, 0xd8, 0x4b,
+	0xd2, 0x69, 0x5a, 0xc2, 0xfe, 0xcf, 0xa4, 0x56, 0x13, 0x7c, 0xd2, 0x0e, 0x4f, 0xc1, 0x0e, 0x63,
+	0x32, 0x31, 0xa3, 0x3c, 0xd7, 0xf2, 0xe0, 0xa4, 0x7d, 0xfc, 0x4d, 0x9c, 0x74, 0x76, 0x65, 0xb9,
+	0x68, 0xd8, 0xea, 0x03, 0xd6, 0xb4, 0xe6, 0xdf, 0x05, 0x28, 0x77, 0xc7, 0x53, 0x21, 0x29, 0xbf,
+	0xe9, 0x84, 0x18, 0xb3, 0x1b, 0x09, 0xe9, 0x42, 0x99, 0x33, 0x26, 0x3d, 0x9f, 0x5c, 0x95, 0x0b,
+	0xcc, 0x98, 0xec, 0xb6, 0x3b, 0xfb, 0x8a, 0xa8, 0x06, 0x49, 0xb2, 0xc7, 0x25, 0x45, 0xed, 0x12,
+	0xf4, 0x12, 0x6e, 0xa7, 0xe3, 0xf7, 0x8c, 0x31, 0x29, 0x24, 0x27, 0xb1, 0x37, 0xa2, 0x73, 0xf5,
+	0xcf, 0x2b, 0x6e, 0x7b, 0x99, 0xf4, 0x23, 0x9f, 0xcf, 0x75, 0xa2, 0x9e, 0xd1, 0x39, 0xbe, 0x65,
+	0x04, 0x3a, 0x29, 0xff, 0x19, 0x9d, 0x0b, 0xf4, 0x39, 0x1c, 0xd2, 0x15, 0x4c, 0x29, 0x7a, 0x63,
+	0x32, 0x51, 0x3f, 0x16, 0xcf, 0x1f, 0x33, 0x7f, 0xa4, 0x67, 0x9b, 0x8d, 0xef, 0xd0, 0xac, 0xd4,
+	0x57, 0x09, 0xa2, 0xab, 0x00, 0x9d, 0xc3, 0xf3, 0x8b, 0xfa, 0xce, 0x1f, 0x17, 0xf5, 0x9d, 0x7f,
+	0x2e, 0xea, 0xd6, 0x0f, 0xcb, 0xba, 0x75, 0xbe, 0xac, 0x5b, 0xbf, 0x2d, 0xeb, 0xd6, 0x9f, 0xcb,
+	0xba, 0x75, 0x56, 0xd2, 0x8f, 0xe4, 0x8f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xb2, 0x97,
+	0xcc, 0x94, 0x0b, 0x00, 0x00,
 }

+ 4 - 0
vendor/src/github.com/docker/swarmkit/api/objects.proto

@@ -57,6 +57,10 @@ message Service {
 
 	ServiceSpec spec = 3 [(gogoproto.nullable) = false];
 
+	// PreviousSpec is the previous service spec that was in place before
+	// "Spec".
+	ServiceSpec previous_spec = 6;
+
 	// Runtime state of service endpoint. This may be different
 	// from the spec version because the user may not have entered
 	// the optional fields like node_port or virtual_ip and it
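
The new PreviousSpec field is what the manual rollback path described in the comment operates on: a client reads the service and resubmits PreviousSpec as the desired spec. A rough sketch under the assumption that the existing control API RPCs and generated field names (GetService, UpdateService, ServiceVersion) are used as elsewhere in this vendored API; this is an illustration, not the orchestrator's automatic ROLLBACK handling:

    import (
    	"fmt"

    	"golang.org/x/net/context"

    	"github.com/docker/swarmkit/api"
    )

    // rollbackService resubmits the previously applied spec, if one is recorded.
    func rollbackService(ctx context.Context, c api.ControlClient, serviceID string) error {
    	resp, err := c.GetService(ctx, &api.GetServiceRequest{ServiceID: serviceID})
    	if err != nil {
    		return err
    	}
    	svc := resp.Service
    	if svc.PreviousSpec == nil {
    		return fmt.Errorf("service %s has no previous spec to roll back to", serviceID)
    	}
    	_, err = c.UpdateService(ctx, &api.UpdateServiceRequest{
    		ServiceID:      svc.ID,
    		ServiceVersion: &svc.Meta.Version,
    		Spec:           svc.PreviousSpec,
    	})
    	return err
    }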

+ 132 - 76
vendor/src/github.com/docker/swarmkit/api/raft.pb.go

@@ -23,10 +23,11 @@ import (
 	grpc "google.golang.org/grpc"
 )
 
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
 import codes "google.golang.org/grpc/codes"
 import metadata "google.golang.org/grpc/metadata"
 import transport "google.golang.org/grpc/transport"
+import time "time"
 
 import io "io"
 
@@ -163,7 +164,7 @@ func (m *InternalRaftRequest) Reset()                    { *m = InternalRaftRequ
 func (*InternalRaftRequest) ProtoMessage()               {}
 func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} }
 
-// StoreAction defines a taret and operation to apply on the storage system.
+// StoreAction defines a target and operation to apply on the storage system.
 type StoreAction struct {
 	Action StoreActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.StoreActionKind" json:"action,omitempty"`
 	// Types that are valid to be assigned to Target:
@@ -797,11 +798,12 @@ func valueToGoStringRaft(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringRaft(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringRaft(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -811,7 +813,7 @@ func extensionToGoStringRaft(e map[int32]github_com_gogo_protobuf_proto.Extensio
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 
@@ -821,7 +823,7 @@ var _ grpc.ClientConn
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion2
+const _ = grpc.SupportPackageIsVersion3
 
 // Client API for Raft service
 
@@ -922,7 +924,8 @@ var _Raft_serviceDesc = grpc.ServiceDesc{
 			Handler:    _Raft_ResolveAddress_Handler,
 		},
 	},
-	Streams: []grpc.StreamDesc{},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptorRaft,
 }
 
 // Client API for RaftMembership service
@@ -1022,7 +1025,8 @@ var _RaftMembership_serviceDesc = grpc.ServiceDesc{
 			Handler:    _RaftMembership_Leave_Handler,
 		},
 	},
-	Streams: []grpc.StreamDesc{},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptorRaft,
 }
 
 func (m *RaftMember) Marshal() (data []byte, err error) {
@@ -1438,12 +1442,11 @@ func encodeVarintRaft(data []byte, offset int, v uint64) int {
 
 type raftProxyRaftServer struct {
 	local        RaftServer
-	connSelector raftpicker.Interface
-	cluster      raftpicker.RaftCluster
+	connSelector raftselector.ConnProvider
 	ctxMods      []func(context.Context) (context.Context, error)
 }
 
-func NewRaftProxyRaftServer(local RaftServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) RaftServer {
+func NewRaftProxyRaftServer(local RaftServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) RaftServer {
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
 		s, ok := transport.StreamFromContext(ctx)
 		if !ok {
@@ -1465,7 +1468,6 @@ func NewRaftProxyRaftServer(local RaftServer, connSelector raftpicker.Interface,
 
 	return &raftProxyRaftServer{
 		local:        local,
-		cluster:      cluster,
 		connSelector: connSelector,
 		ctxMods:      mods,
 	}
@@ -1480,73 +1482,99 @@ func (p *raftProxyRaftServer) runCtxMods(ctx context.Context) (context.Context,
 	}
 	return ctx, nil
 }
+func (p *raftProxyRaftServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := p.connSelector.LeaderConn(ctx)
+			if err != nil {
+				return nil, err
+			}
 
-func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) {
+			client := NewHealthClient(conn)
 
-	if p.cluster.IsLeader() {
-		return p.local.ProcessRaftMessage(ctx, r)
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
+				continue
+			}
+			return conn, nil
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
-	ctx, err := p.runCtxMods(ctx)
+}
+
+func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) {
+
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.ProcessRaftMessage(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewRaftClient(conn).ProcessRaftMessage(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.ProcessRaftMessage(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewRaftClient(conn).ProcessRaftMessage(ctx, r)
+		return NewRaftClient(conn).ProcessRaftMessage(modCtx, r)
+	}
+	return resp, err
 }
 
 func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) {
 
-	if p.cluster.IsLeader() {
-		return p.local.ResolveAddress(ctx, r)
-	}
-	ctx, err := p.runCtxMods(ctx)
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.ResolveAddress(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewRaftClient(conn).ResolveAddress(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.ResolveAddress(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewRaftClient(conn).ResolveAddress(ctx, r)
+		return NewRaftClient(conn).ResolveAddress(modCtx, r)
+	}
+	return resp, err
 }
 
 type raftProxyRaftMembershipServer struct {
 	local        RaftMembershipServer
-	connSelector raftpicker.Interface
-	cluster      raftpicker.RaftCluster
+	connSelector raftselector.ConnProvider
 	ctxMods      []func(context.Context) (context.Context, error)
 }
 
-func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) RaftMembershipServer {
+func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) RaftMembershipServer {
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
 		s, ok := transport.StreamFromContext(ctx)
 		if !ok {
@@ -1568,7 +1596,6 @@ func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector r
 
 	return &raftProxyRaftMembershipServer{
 		local:        local,
-		cluster:      cluster,
 		connSelector: connSelector,
 		ctxMods:      mods,
 	}
@@ -1583,63 +1610,90 @@ func (p *raftProxyRaftMembershipServer) runCtxMods(ctx context.Context) (context
 	}
 	return ctx, nil
 }
+func (p *raftProxyRaftMembershipServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := p.connSelector.LeaderConn(ctx)
+			if err != nil {
+				return nil, err
+			}
 
-func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) {
+			client := NewHealthClient(conn)
 
-	if p.cluster.IsLeader() {
-		return p.local.Join(ctx, r)
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
+				continue
+			}
+			return conn, nil
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
-	ctx, err := p.runCtxMods(ctx)
+}
+
+func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) {
+
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.Join(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewRaftMembershipClient(conn).Join(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.Join(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewRaftMembershipClient(conn).Join(ctx, r)
+		return NewRaftMembershipClient(conn).Join(modCtx, r)
+	}
+	return resp, err
 }
 
 func (p *raftProxyRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) {
 
-	if p.cluster.IsLeader() {
-		return p.local.Leave(ctx, r)
-	}
-	ctx, err := p.runCtxMods(ctx)
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.Leave(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewRaftMembershipClient(conn).Leave(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.Leave(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewRaftMembershipClient(conn).Leave(ctx, r)
+		return NewRaftMembershipClient(conn).Leave(modCtx, r)
+	}
+	return resp, err
 }
 
 func (m *RaftMember) Size() (n int) {
@@ -3205,6 +3259,8 @@ var (
 	ErrIntOverflowRaft   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }
+
 var fileDescriptorRaft = []byte{
 	// 868 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x95, 0x4f, 0x73, 0xdb, 0x44,
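
Every generated raft proxy in this file follows the same shape: ask the selector for the leader connection, fall back to the local server on raftselector.ErrIsLeader, and on transport errors poll for a new leader whose Health service reports SERVING for "Raft". A hedged sketch of how such a proxy might be wired up, using only the constructor and Register functions generated above; the helper name, the pass-through context modifier, and registering both services on one *grpc.Server are assumptions for illustration (a real manager installs an authorization wrapper as the context modifier):

    import (
    	"golang.org/x/net/context"
    	"google.golang.org/grpc"

    	"github.com/docker/swarmkit/api"
    	"github.com/docker/swarmkit/manager/raftselector"
    )

    // registerRaftProxies is a hypothetical helper: requests are served locally
    // on the leader and proxied through the selector everywhere else.
    func registerRaftProxies(s *grpc.Server, node api.RaftServer, membership api.RaftMembershipServer, selector raftselector.ConnProvider) {
    	passthrough := func(ctx context.Context) (context.Context, error) { return ctx, nil }
    	api.RegisterRaftServer(s, api.NewRaftProxyRaftServer(node, selector, passthrough))
    	api.RegisterRaftMembershipServer(s, api.NewRaftProxyRaftMembershipServer(membership, selector, passthrough))
    }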

+ 1 - 1
vendor/src/github.com/docker/swarmkit/api/raft.proto

@@ -115,7 +115,7 @@ enum StoreActionKind {
 	STORE_ACTION_REMOVE = 3 [(gogoproto.enumvalue_customname) = "StoreActionKindRemove"];
 }
 
-// StoreAction defines a taret and operation to apply on the storage system.
+// StoreAction defines a target and operation to apply on the storage system.
 message StoreAction {
 	StoreActionKind action = 1;
 	oneof target {

+ 70 - 40
vendor/src/github.com/docker/swarmkit/api/resource.pb.go

@@ -21,10 +21,11 @@ import (
 	grpc "google.golang.org/grpc"
 )
 
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
 import codes "google.golang.org/grpc/codes"
 import metadata "google.golang.org/grpc/metadata"
 import transport "google.golang.org/grpc/transport"
+import time "time"
 
 import io "io"
 
@@ -197,11 +198,12 @@ func valueToGoStringResource(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringResource(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringResource(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -211,7 +213,7 @@ func extensionToGoStringResource(e map[int32]github_com_gogo_protobuf_proto.Exte
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 
@@ -221,7 +223,7 @@ var _ grpc.ClientConn
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion2
+const _ = grpc.SupportPackageIsVersion3
 
 // Client API for ResourceAllocator service
 
@@ -316,7 +318,8 @@ var _ResourceAllocator_serviceDesc = grpc.ServiceDesc{
 			Handler:    _ResourceAllocator_DetachNetwork_Handler,
 		},
 	},
-	Streams: []grpc.StreamDesc{},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: fileDescriptorResource,
 }
 
 func (m *AttachNetworkRequest) Marshal() (data []byte, err error) {
@@ -449,12 +452,11 @@ func encodeVarintResource(data []byte, offset int, v uint64) int {
 
 type raftProxyResourceAllocatorServer struct {
 	local        ResourceAllocatorServer
-	connSelector raftpicker.Interface
-	cluster      raftpicker.RaftCluster
+	connSelector raftselector.ConnProvider
 	ctxMods      []func(context.Context) (context.Context, error)
 }
 
-func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer {
+func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer {
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
 		s, ok := transport.StreamFromContext(ctx)
 		if !ok {
@@ -476,7 +478,6 @@ func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSele
 
 	return &raftProxyResourceAllocatorServer{
 		local:        local,
-		cluster:      cluster,
 		connSelector: connSelector,
 		ctxMods:      mods,
 	}
@@ -491,63 +492,90 @@ func (p *raftProxyResourceAllocatorServer) runCtxMods(ctx context.Context) (cont
 	}
 	return ctx, nil
 }
+func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			conn, err := p.connSelector.LeaderConn(ctx)
+			if err != nil {
+				return nil, err
+			}
 
-func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) {
+			client := NewHealthClient(conn)
 
-	if p.cluster.IsLeader() {
-		return p.local.AttachNetwork(ctx, r)
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
+				continue
+			}
+			return conn, nil
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
-	ctx, err := p.runCtxMods(ctx)
+}
+
+func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) {
+
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.AttachNetwork(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.AttachNetwork(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewResourceAllocatorClient(conn).AttachNetwork(ctx, r)
+		return NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r)
+	}
+	return resp, err
 }
 
 func (p *raftProxyResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) {
 
-	if p.cluster.IsLeader() {
-		return p.local.DetachNetwork(ctx, r)
-	}
-	ctx, err := p.runCtxMods(ctx)
+	conn, err := p.connSelector.LeaderConn(ctx)
 	if err != nil {
+		if err == raftselector.ErrIsLeader {
+			return p.local.DetachNetwork(ctx, r)
+		}
 		return nil, err
 	}
-	conn, err := p.connSelector.Conn()
+	modCtx, err := p.runCtxMods(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	defer func() {
+	resp, err := NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r)
+	if err != nil {
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
+			return resp, err
+		}
+		conn, err := p.pollNewLeaderConn(ctx)
 		if err != nil {
-			errStr := err.Error()
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
-				strings.Contains(errStr, "connection error") ||
-				grpc.Code(err) == codes.Internal {
-				p.connSelector.Reset()
+			if err == raftselector.ErrIsLeader {
+				return p.local.DetachNetwork(ctx, r)
 			}
+			return nil, err
 		}
-	}()
-
-	return NewResourceAllocatorClient(conn).DetachNetwork(ctx, r)
+		return NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r)
+	}
+	return resp, err
 }
 
 func (m *AttachNetworkRequest) Size() (n int) {
@@ -1076,6 +1104,8 @@ var (
 	ErrIntOverflowResource   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("resource.proto", fileDescriptorResource) }
+
 var fileDescriptorResource = []byte{
 	// 373 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x4a, 0x2d, 0xce,

+ 6 - 3
vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go

@@ -222,11 +222,12 @@ func valueToGoStringSnapshot(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringSnapshot(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringSnapshot(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -236,7 +237,7 @@ func extensionToGoStringSnapshot(e map[int32]github_com_gogo_protobuf_proto.Exte
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 func (m *StoreSnapshot) Marshal() (data []byte, err error) {
@@ -1085,6 +1086,8 @@ var (
 	ErrIntOverflowSnapshot   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("snapshot.proto", fileDescriptorSnapshot) }
+
 var fileDescriptorSnapshot = []byte{
 	// 396 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xbd, 0x6e, 0xdb, 0x30,

+ 49 - 41
vendor/src/github.com/docker/swarmkit/api/specs.pb.go

@@ -1047,11 +1047,12 @@ func valueToGoStringSpecs(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringSpecs(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringSpecs(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -1061,7 +1062,7 @@ func extensionToGoStringSpecs(e map[int32]github_com_gogo_protobuf_proto.Extensi
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 func (m *NodeSpec) Marshal() (data []byte, err error) {
@@ -3252,50 +3253,55 @@ func (m *ContainerSpec) Unmarshal(data []byte) error {
 			}
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
 			iNdEx = postStringIndexmapkey
-			var valuekey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSpecs
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSpecs
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
 				}
-				b := data[iNdEx]
-				iNdEx++
-				valuekey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSpecs
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
 				}
-			}
-			var stringLenmapvalue uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSpecs
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthSpecs
 				}
-				if iNdEx >= l {
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
-				iNdEx++
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapvalue := int(stringLenmapvalue)
-			if intStringLenmapvalue < 0 {
-				return ErrInvalidLengthSpecs
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
 			}
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-			if postStringIndexmapvalue > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
-			iNdEx = postStringIndexmapvalue
-			if m.Labels == nil {
-				m.Labels = make(map[string]string)
-			}
-			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
@@ -4339,6 +4345,8 @@ var (
 	ErrIntOverflowSpecs   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("specs.proto", fileDescriptorSpecs) }
+
 var fileDescriptorSpecs = []byte{
 	// 1397 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x6f, 0xdb, 0xc6,

+ 9 - 4
vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go

@@ -32,7 +32,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 // A Timestamp represents a point in time independent of any time zone
 // or calendar, represented as seconds and fractions of seconds at
@@ -141,11 +143,12 @@ func valueToGoStringTimestamp(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringTimestamp(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringTimestamp(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -155,7 +158,7 @@ func extensionToGoStringTimestamp(e map[int32]github_com_gogo_protobuf_proto.Ext
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 func (m *Timestamp) Marshal() (data []byte, err error) {
@@ -451,6 +454,8 @@ var (
 	ErrIntOverflowTimestamp   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("timestamp.proto", fileDescriptorTimestamp) }
+
 var fileDescriptorTimestamp = []byte{
 	// 205 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d,

File diff suppressed because it is too large
+ 431 - 275
vendor/src/github.com/docker/swarmkit/api/types.pb.go


+ 43 - 9
vendor/src/github.com/docker/swarmkit/api/types.proto

@@ -281,15 +281,46 @@ message UpdateConfig {
 	enum FailureAction {
 		PAUSE = 0;
 		CONTINUE = 1;
-		// TODO(aaronl): Add ROLLBACK as a supported failure mode.
-		// (#486)
+		// NOTE: Automated rollback triggered as a failure action is an
+		// experimental feature that is not yet exposed to the end
+		// user. Currently, rollbacks must be initiated manually
+		// through the API by setting Spec to PreviousSpec. We may
+		// decide to expose automatic rollback in the future based on
+		// user feedback, or remove this feature otherwise.
+		ROLLBACK = 2;
 	}
 
 	// FailureAction is the action to take when an update failures.
-	// Currently, a failure is defined as a single updated task failing to
-	// reach the RUNNING state. In the future, there will be configuration
-	// to define what is treated as a failure (see #486 for a proposal).
 	FailureAction failure_action = 3;
+
+	// Monitor indicates how long to monitor a task for failure after it is
+	// created. If the task fails by ending up in one of the states
+	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+	// this counts as a failure. If it fails after Monitor, it does not
+	// count as a failure. If Monitor is unspecified, a default value will
+	// be used.
+	Duration monitor = 4;
+
+	// AllowedFailureFraction is the fraction of tasks that may fail during
+	// an update before the failure action is invoked. Any task created by
+	// the current update which ends up in one of the states REJECTED,
+	// COMPLETED or FAILED within Monitor from its creation counts as a
+	// failure. The number of failures is divided by the number of tasks
+	// being updated, and if this fraction is greater than
+	// AllowedFailureFraction, the failure action is invoked.
+	//
+	// If the failure action is CONTINUE, there is no effect.
+	// If the failure action is PAUSE, no more tasks will be updated until
+	// another update is started.
+	// If the failure action is ROLLBACK, the orchestrator will attempt to
+	// roll back to the previous service spec. If the AllowedFailureFraction
+	// threshold is hit during the rollback, the rollback will pause.
+	//
+	// TODO(aaronl): Should there be a separate failure threshold for
+	// rollbacks? Should there be a failure action for rollbacks (to allow
+	// them to do something other than pause when the rollback encounters
+	// errors)?
+	float allowed_failure_fraction = 5;
 }
 
 // UpdateStatus is the status of an update in progress.
@@ -299,18 +330,21 @@ message UpdateStatus {
 		UPDATING = 1;
 		PAUSED = 2;
 		COMPLETED = 3;
-		// TODO(aaronl): add ROLLING_BACK, ROLLED_BACK as part of
-		// rollback support.
+		ROLLBACK_STARTED = 4;
+		ROLLBACK_PAUSED = 5; // if a rollback fails
+		ROLLBACK_COMPLETED = 6;
 	}
 
 	// State is the state of this update. It indicates whether the
-	// update is in progress, completed, or is paused.
+	// update is in progress, completed, paused, rolling back, or
+	// finished rolling back.
 	UpdateState state = 1;
 
 	// StartedAt is the time at which the update was started.
 	Timestamp started_at = 2;
 
-	// CompletedAt is the time at which the update completed.
+	// CompletedAt is the time at which the update completed successfully,
+	// paused, or finished rolling back.
 	Timestamp completed_at = 3;
 
 	// TODO(aaronl): Consider adding a timestamp showing when the most
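
The three additions above (ROLLBACK, Monitor, and AllowedFailureFraction) work together: a task that fails within Monitor of being created counts toward the failure fraction, and crossing that fraction triggers the configured failure action. As a rough sketch of how a spec might opt into this from Go, assuming the generated names mirror the proto and that swarmkit's protobuf/ptypes package offers a DurationProto helper (both are assumptions inferred from this diff, not taken from it):

package main

import (
	"fmt"
	"time"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/protobuf/ptypes"
)

// rollbackUpdateConfig is illustrative only: the field names follow the proto
// above, and ptypes.DurationProto is assumed to convert a time.Duration into
// swarmkit's Duration message.
func rollbackUpdateConfig() *api.UpdateConfig {
	return &api.UpdateConfig{
		// Roll back automatically instead of pausing when the update fails.
		FailureAction: api.UpdateConfig_ROLLBACK,
		// Watch each updated task for 30 seconds after it is created.
		Monitor: ptypes.DurationProto(30 * time.Second),
		// Invoke the failure action once more than 10% of the tasks being
		// updated have failed within the monitoring window.
		AllowedFailureFraction: 0.1,
	}
}

func main() {
	fmt.Printf("%+v\n", rollbackUpdateConfig())
}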

+ 9 - 30
vendor/src/github.com/docker/swarmkit/ca/certificates.go

@@ -16,7 +16,6 @@ import (
 	"path/filepath"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
 	cfcsr "github.com/cloudflare/cfssl/csr"
 	"github.com/cloudflare/cfssl/helpers"
 	"github.com/cloudflare/cfssl/initca"
@@ -117,8 +116,7 @@ func (rca *RootCA) CanSign() bool {
 func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org string) (*tls.Certificate, error) {
 	csr, key, err := GenerateAndWriteNewKey(paths)
 	if err != nil {
-		log.Debugf("error when generating new node certs: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error when generating new node certs: %v", err)
 	}
 
 	if !rca.CanSign() {
@@ -128,8 +126,7 @@ func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org stri
 	// Obtain a signed Certificate
 	certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org)
 	if err != nil {
-		log.Debugf("failed to sign node certificate: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("failed to sign node certificate: %v", err)
 	}
 
 	// Ensure directory exists
@@ -149,20 +146,18 @@ func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org stri
 		return nil, err
 	}
 
-	log.Debugf("locally issued new TLS certificate for node ID: %s and role: %s", cn, ou)
 	return &tlsKeyPair, nil
 }
 
 // RequestAndSaveNewCertificates gets new certificates issued, either by signing them locally if a signer is
 // available, or by requesting them from the remote server at remoteAddr.
-func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, token string, remotes remotes.Remotes, transport credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) (*tls.Certificate, error) {
+func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, token string, remotes remotes.Remotes, transport credentials.TransportCredentials, nodeInfo chan<- api.IssueNodeCertificateResponse) (*tls.Certificate, error) {
 	// Create a new key/pair and CSR for the new manager
 	// Write the new CSR and the new key to a temporary location so we can survive crashes on rotation
 	tempPaths := genTempPaths(paths)
 	csr, key, err := GenerateAndWriteNewKey(tempPaths)
 	if err != nil {
-		log.Debugf("error when generating new node certs: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("error when generating new node certs: %v", err)
 	}
 
 	// Get the remote manager to issue a CA signed certificate for this node
@@ -174,7 +169,6 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths Cert
 		if err == nil {
 			break
 		}
-		log.Warningf("error fetching signed node certificate: %v", err)
 	}
 	if err != nil {
 		return nil, err
@@ -206,10 +200,6 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths Cert
 		return nil, err
 	}
 
-	if len(X509Cert.Subject.OrganizationalUnit) != 0 {
-		log.Infof("Downloaded new TLS credentials with role: %s.", X509Cert.Subject.OrganizationalUnit[0])
-	}
-
 	// Ensure directory exists
 	err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
 	if err != nil {
@@ -259,8 +249,7 @@ func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string)
 
 	cert, err := rca.Signer.Sign(signRequest)
 	if err != nil {
-		log.Debugf("failed to sign node certificate: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("failed to sign node certificate: %v", err)
 	}
 
 	return rca.AppendFirstRootPEM(cert)
@@ -342,8 +331,7 @@ func NewRootCA(certBytes, keyBytes []byte, certExpiry time.Duration) (RootCA, er
 	if err != nil {
 		priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrasePrev)
 		if err != nil {
-			log.Debug("Malformed private key %v", err)
-			return RootCA{}, err
+			return RootCA{}, fmt.Errorf("Malformed private key: %v", err)
 		}
 	}
 
@@ -414,12 +402,7 @@ func GetLocalRootCA(baseDir string) (RootCA, error) {
 		key = nil
 	}
 
-	rootCA, err := NewRootCA(cert, key, DefaultNodeCertExpiration)
-	if err == nil {
-		log.Debugf("successfully loaded the Root CA: %s", paths.RootCA.Cert)
-	}
-
-	return rootCA, err
+	return NewRootCA(cert, key, DefaultNodeCertExpiration)
 }
 
 // GetRemoteCA returns the remote endpoint's CA certificate
@@ -552,8 +535,7 @@ func GenerateAndSignNewTLSCert(rootCA RootCA, cn, ou, org string, paths CertPath
 	// Obtain a signed Certificate
 	certChain, err := rootCA.ParseValidateAndSignCSR(csr, cn, ou, org)
 	if err != nil {
-		log.Debugf("failed to sign node certificate: %v", err)
-		return nil, err
+		return nil, fmt.Errorf("failed to sign node certificate: %v", err)
 	}
 
 	// Ensure directory exists
@@ -603,7 +585,7 @@ func GenerateAndWriteNewKey(paths CertPaths) (csr, key []byte, err error) {
 
 // GetRemoteSignedCertificate submits a CSR to a remote CA server address,
 // and that is part of a CA identified by a specific certificate pool.
-func GetRemoteSignedCertificate(ctx context.Context, csr []byte, token string, rootCAPool *x509.CertPool, r remotes.Remotes, creds credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) ([]byte, error) {
+func GetRemoteSignedCertificate(ctx context.Context, csr []byte, token string, rootCAPool *x509.CertPool, r remotes.Remotes, creds credentials.TransportCredentials, nodeInfo chan<- api.IssueNodeCertificateResponse) ([]byte, error) {
 	if rootCAPool == nil {
 		return nil, fmt.Errorf("valid root CA pool required")
 	}
@@ -653,7 +635,6 @@ func GetRemoteSignedCertificate(ctx context.Context, csr []byte, token string, r
 		Max:    30 * time.Second,
 	})
 
-	log.Infof("Waiting for TLS certificate to be issued...")
 	// Exponential backoff with Max of 30 seconds to wait for a new retry
 	for {
 		// Send the Request and retrieve the certificate
@@ -694,7 +675,6 @@ func readCertExpiration(paths CertPaths) (time.Duration, error) {
 	// Read the Cert
 	cert, err := ioutil.ReadFile(paths.Cert)
 	if err != nil {
-		log.Debugf("failed to read certificate file: %s", paths.Cert)
 		return time.Hour, err
 	}
 
@@ -730,7 +710,6 @@ func generateNewCSR() (csr, key []byte, err error) {
 
 	csr, key, err = cfcsr.ParseRequest(req)
 	if err != nil {
-		log.Debugf(`failed to generate CSR`)
 		return
 	}
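
The recurring change in this file is to stop logging at the failure site and instead fold the context into the returned error, leaving it to the caller to log once. A minimal, self-contained sketch of that pattern with illustrative names:

package main

import (
	"errors"
	"fmt"
)

// generateKey stands in for GenerateAndWriteNewKey above.
func generateKey() ([]byte, error) {
	return nil, errors.New("no entropy available")
}

// issueCert annotates the failure with context at the point it happens and
// returns it, instead of logging at debug level and returning the bare error.
func issueCert() ([]byte, error) {
	key, err := generateKey()
	if err != nil {
		return nil, fmt.Errorf("error when generating new node certs: %v", err)
	}
	return key, nil
}

func main() {
	if _, err := issueCert(); err != nil {
		fmt.Println(err) // error when generating new node certs: no entropy available
	}
}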
 

+ 51 - 22
vendor/src/github.com/docker/swarmkit/ca/config.go

@@ -15,11 +15,12 @@ import (
 	"sync"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	cfconfig "github.com/cloudflare/cfssl/config"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/identity"
+	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/remotes"
 
 	"golang.org/x/net/context"
@@ -35,8 +36,8 @@ const (
 	rootCN = "swarm-ca"
 	// ManagerRole represents the Manager node type, and is used for authorization to endpoints
 	ManagerRole = "swarm-manager"
-	// AgentRole represents the Agent node type, and is used for authorization to endpoints
-	AgentRole = "swarm-worker"
+	// WorkerRole represents the Worker node type, and is used for authorization to endpoints
+	WorkerRole = "swarm-worker"
 	// CARole represents the CA node type, and is used for clients attempting to get new certificates issued
 	CARole = "swarm-ca"
 
@@ -184,6 +185,7 @@ func getCAHashFromToken(token string) (digest.Digest, error) {
 // Every node requires at least a set of TLS certificates with which to join the cluster with.
 // In the case of a manager, these certificates will be used both for client and server credentials.
 func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, proposedRole string, remotes remotes.Remotes, nodeInfo chan<- api.IssueNodeCertificateResponse) (*SecurityConfig, error) {
+	ctx = log.WithModule(ctx, "tls")
 	paths := NewConfigPaths(baseCertDir)
 
 	var (
@@ -196,9 +198,9 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
 	rootCA, err = GetLocalRootCA(baseCertDir)
 	switch err {
 	case nil:
-		log.Debugf("loaded local CA certificate: %s.", paths.RootCA.Cert)
+		log.G(ctx).Debug("loaded CA certificate")
 	case ErrNoLocalRootCA:
-		log.Debugf("no valid local CA certificate found: %v", err)
+		log.G(ctx).WithError(err).Debugf("failed to load local CA certificate")
 
 		// Get a digest for the optional CA hash string that we've been provided
 		// If we were provided a non-empty string, and it is an invalid hash, return
@@ -221,7 +223,7 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
 			if err == nil {
 				break
 			}
-			log.Warningf("failed to retrieve remote root CA certificate: %v", err)
+			log.G(ctx).WithError(err).Errorf("failed to retrieve remote root CA certificate")
 		}
 		if err != nil {
 			return nil, err
@@ -232,7 +234,7 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
 			return nil, err
 		}
 
-		log.Debugf("downloaded remote CA certificate.")
+		log.G(ctx).Debugf("retrieved remote CA certificate: %s", paths.RootCA.Cert)
 	default:
 		return nil, err
 	}
@@ -242,7 +244,7 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
 	// load our certificates.
 	clientTLSCreds, serverTLSCreds, err = LoadTLSCreds(rootCA, paths.Node)
 	if err != nil {
-		log.Debugf("no valid local TLS credentials found: %v", err)
+		log.G(ctx).WithError(err).Debugf("no node credentials found in: %s", paths.Node.Cert)
 
 		var (
 			tlsKeyPair *tls.Certificate
@@ -262,17 +264,27 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
 			}
 			tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
 			if err != nil {
+				log.G(ctx).WithFields(logrus.Fields{
+					"node.id":   cn,
+					"node.role": proposedRole,
+				}).WithError(err).Errorf("failed to issue and save new certificate")
 				return nil, err
 			}
+
+			log.G(ctx).WithFields(logrus.Fields{
+				"node.id":   cn,
+				"node.role": proposedRole,
+			}).Debug("issued new TLS certificate")
 		} else {
 			// There was an error loading our Credentials, let's get a new certificate issued
 			// Last argument is nil because at this point we don't have any valid TLS creds
 			tlsKeyPair, err = rootCA.RequestAndSaveNewCertificates(ctx, paths.Node, token, remotes, nil, nodeInfo)
 			if err != nil {
+				log.G(ctx).WithError(err).Error("failed to request and save new certificate")
 				return nil, err
 			}
 		}
-		// Create the Server TLS Credentials for this node. These will not be used by agents.
+		// Create the Server TLS Credentials for this node. These will not be used by workers.
 		serverTLSCreds, err = rootCA.NewServerTLSCredentials(tlsKeyPair)
 		if err != nil {
 			return nil, err
@@ -284,7 +296,10 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
 		if err != nil {
 			return nil, err
 		}
-		log.Debugf("new TLS credentials generated: %s.", paths.Node.Cert)
+		log.G(ctx).WithFields(logrus.Fields{
+			"node.id":   clientTLSCreds.NodeID(),
+			"node.role": clientTLSCreds.Role(),
+		}).Debugf("new node credentials generated: %s", paths.Node.Cert)
 	} else {
 		if nodeInfo != nil {
 			nodeInfo <- api.IssueNodeCertificateResponse{
@@ -292,7 +307,10 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
 				NodeMembership: api.NodeMembershipAccepted,
 			}
 		}
-		log.Debugf("loaded local TLS credentials: %s.", paths.Node.Cert)
+		log.G(ctx).WithFields(logrus.Fields{
+			"node.id":   clientTLSCreds.NodeID(),
+			"node.role": clientTLSCreds.Role(),
+		}).Debug("loaded node credentials")
 	}
 
 	return NewSecurityConfig(&rootCA, clientTLSCreds, serverTLSCreds), nil
@@ -308,6 +326,11 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
 		var retry time.Duration
 		defer close(updates)
 		for {
+			ctx = log.WithModule(ctx, "tls")
+			log := log.G(ctx).WithFields(logrus.Fields{
+				"node.id":   s.ClientTLSCreds.NodeID(),
+				"node.role": s.ClientTLSCreds.Role(),
+			})
 			// Our starting default will be 5 minutes
 			retry = 5 * time.Minute
 
@@ -323,21 +346,27 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
 				// If we have an expired certificate, we let's stick with the starting default in
 				// the hope that this is a temporary clock skew.
 				if expiresIn.Minutes() < 0 {
-					log.Debugf("failed to create a new client TLS config: %v", err)
-					updates <- CertificateUpdate{Err: fmt.Errorf("TLS Certificate is expired")}
+					log.WithError(err).Errorf("failed to create a new client TLS config")
+					updates <- CertificateUpdate{Err: fmt.Errorf("TLS certificate is expired")}
 				} else {
 					// Random retry time between 50% and 80% of the total time to expiration
 					retry = calculateRandomExpiry(expiresIn)
 				}
 			}
 
+			log.WithFields(logrus.Fields{
+				"time": time.Now().Add(retry),
+			}).Debugf("next certificate renewal scheduled")
+
 			select {
 			case <-time.After(retry):
+				log.Infof("renewing certificate")
 			case <-renew:
+				log.Infof("forced certificate renewal")
 			case <-ctx.Done():
+				log.Infof("shuting down certificate renewal routine")
 				return
 			}
-			log.Infof("Renewing TLS Certificate.")
 
 			// Let's request new certs. Renewals don't require a token.
 			rootCA := s.RootCA()
@@ -348,25 +377,25 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
 				s.ClientTLSCreds,
 				nil)
 			if err != nil {
-				log.Debugf("failed to renew the TLS Certificate: %v", err)
+				log.WithError(err).Errorf("failed to renew the certificate")
 				updates <- CertificateUpdate{Err: err}
 				continue
 			}
 
 			clientTLSConfig, err := NewClientTLSConfig(tlsKeyPair, rootCA.Pool, CARole)
 			if err != nil {
-				log.Debugf("failed to create a new client TLS config: %v", err)
+				log.WithError(err).Errorf("failed to create a new client config")
 				updates <- CertificateUpdate{Err: err}
 			}
 			serverTLSConfig, err := NewServerTLSConfig(tlsKeyPair, rootCA.Pool)
 			if err != nil {
-				log.Debugf("failed to create a new server TLS config: %v", err)
+				log.WithError(err).Errorf("failed to create a new server config")
 				updates <- CertificateUpdate{Err: err}
 			}
 
 			err = s.ClientTLSCreds.LoadNewTLSConfig(clientTLSConfig)
 			if err != nil {
-				log.Debugf("failed to update the client TLS credentials: %v", err)
+				log.WithError(err).Errorf("failed to update the client credentials")
 				updates <- CertificateUpdate{Err: err}
 			}
 
@@ -380,7 +409,7 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
 
 			err = s.ServerTLSCreds.LoadNewTLSConfig(serverTLSConfig)
 			if err != nil {
-				log.Debugf("failed to update the server TLS credentials: %v", err)
+				log.WithError(err).Errorf("failed to update the server TLS credentials")
 				updates <- CertificateUpdate{Err: err}
 			}
 
@@ -478,7 +507,7 @@ func LoadTLSCreds(rootCA RootCA, paths CertPaths) (*MutableTLSCreds, *MutableTLS
 	}
 
 	// Load the Certificates also as client credentials.
-	// Both Agents and Managers always connect to remote Managers,
+	// Both workers and managers always connect to remote managers,
 	// so ServerName is always set to ManagerRole here.
 	clientTLSCreds, err := rootCA.NewClientTLSCredentials(&keyPair, ManagerRole)
 	if err != nil {
@@ -561,7 +590,7 @@ func ParseRole(apiRole api.NodeRole) (string, error) {
 	case api.NodeRoleManager:
 		return ManagerRole, nil
 	case api.NodeRoleWorker:
-		return AgentRole, nil
+		return WorkerRole, nil
 	default:
 		return "", fmt.Errorf("failed to parse api role: %v", apiRole)
 	}
@@ -572,7 +601,7 @@ func FormatRole(role string) (api.NodeRole, error) {
 	switch strings.ToLower(role) {
 	case strings.ToLower(ManagerRole):
 		return api.NodeRoleManager, nil
-	case strings.ToLower(AgentRole):
+	case strings.ToLower(WorkerRole):
 		return api.NodeRoleWorker, nil
 	default:
 		return 0, fmt.Errorf("failed to parse role: %s", role)
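
The logging rewrite above attaches a module to the context once and carries node identity as structured fields rather than formatting it into the message. A small sketch of that pattern using the WithModule and G helpers that appear in this diff; the field values are placeholders:

package main

import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/swarmkit/log"
	"golang.org/x/net/context"
)

func main() {
	// Scope every entry logged through this context to the "tls" module.
	ctx := log.WithModule(context.Background(), "tls")

	// Carry node identity as structured fields instead of interpolating it
	// into the message string.
	log.G(ctx).WithFields(logrus.Fields{
		"node.id":   "exampleid",     // placeholder node ID
		"node.role": "swarm-manager", // i.e. ManagerRole
	}).Debug("loaded node credentials")
}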

+ 4 - 4
vendor/src/github.com/docker/swarmkit/ca/server.go

@@ -149,14 +149,14 @@ func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNod
 	}
 	defer s.doneTask()
 
-	// If the remote node is an Agent (either forwarded by a manager, or calling directly),
-	// issue a renew agent certificate entry with the correct ID
-	nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{AgentRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
+	// If the remote node is a worker (either forwarded by a manager, or calling directly),
+	// issue a renew worker certificate entry with the correct ID
+	nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{WorkerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
 	if err == nil {
 		return s.issueRenewCertificate(ctx, nodeID, request.CSR)
 	}
 
-	// If the remote node is a Manager (either forwarded by another manager, or calling directly),
+	// If the remote node is a manager (either forwarded by another manager, or calling directly),
 	// issue a renew certificate entry with the correct ID
 	nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
 	if err == nil {

+ 19 - 27
vendor/src/github.com/docker/swarmkit/ca/transport.go

@@ -8,7 +8,6 @@ import (
 	"net"
 	"strings"
 	"sync"
-	"time"
 
 	"google.golang.org/grpc/credentials"
 
@@ -33,12 +32,12 @@ type MutableTLSCreds struct {
 	// TLS configuration
 	config *tls.Config
 	// TLS Credentials
-	tlsCreds credentials.TransportAuthenticator
+	tlsCreds credentials.TransportCredentials
 	// store the subject for easy access
 	subject pkix.Name
 }
 
-// Info implements the credentials.TransportAuthenticator interface
+// Info implements the credentials.TransportCredentials interface
 func (c *MutableTLSCreds) Info() credentials.ProtocolInfo {
 	return credentials.ProtocolInfo{
 		SecurityProtocol: "tls",
@@ -46,26 +45,19 @@ func (c *MutableTLSCreds) Info() credentials.ProtocolInfo {
 	}
 }
 
-// GetRequestMetadata implements the credentials.TransportAuthenticator interface
+// GetRequestMetadata implements the credentials.TransportCredentials interface
 func (c *MutableTLSCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
 	return nil, nil
 }
 
-// RequireTransportSecurity implements the credentials.TransportAuthenticator interface
+// RequireTransportSecurity implements the credentials.TransportCredentials interface
 func (c *MutableTLSCreds) RequireTransportSecurity() bool {
 	return true
 }
 
-// ClientHandshake implements the credentials.TransportAuthenticator interface
-func (c *MutableTLSCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, credentials.AuthInfo, error) {
+// ClientHandshake implements the credentials.TransportCredentials interface
+func (c *MutableTLSCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
 	// borrow all the code from the original TLS credentials
-	var errChannel chan error
-	if timeout != 0 {
-		errChannel = make(chan error, 2)
-		time.AfterFunc(timeout, func() {
-			errChannel <- timeoutError{}
-		})
-	}
 	c.Lock()
 	if c.config.ServerName == "" {
 		colonPos := strings.LastIndex(addr, ":")
@@ -80,23 +72,23 @@ func (c *MutableTLSCreds) ClientHandshake(addr string, rawConn net.Conn, timeout
 	// would create a deadlock otherwise
 	c.Unlock()
 	var err error
-	if timeout == 0 {
-		err = conn.Handshake()
-	} else {
-		go func() {
-			errChannel <- conn.Handshake()
-		}()
-		err = <-errChannel
+	errChannel := make(chan error, 1)
+	go func() {
+		errChannel <- conn.Handshake()
+	}()
+	select {
+	case err = <-errChannel:
+	case <-ctx.Done():
+		err = ctx.Err()
 	}
 	if err != nil {
 		rawConn.Close()
 		return nil, nil, err
 	}
-
 	return conn, nil, nil
 }
 
-// ServerHandshake implements the credentials.TransportAuthenticator interface
+// ServerHandshake implements the credentials.TransportCredentials interface
 func (c *MutableTLSCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
 	c.Lock()
 	conn := tls.Server(rawConn, c.config)
@@ -132,7 +124,7 @@ func (c *MutableTLSCreds) Config() *tls.Config {
 	return c.config
 }
 
-// Role returns the OU for the certificate encapsulated in this TransportAuthenticator
+// Role returns the OU for the certificate encapsulated in this TransportCredentials
 func (c *MutableTLSCreds) Role() string {
 	c.Lock()
 	defer c.Unlock()
@@ -140,7 +132,7 @@ func (c *MutableTLSCreds) Role() string {
 	return c.subject.OrganizationalUnit[0]
 }
 
-// Organization returns the O for the certificate encapsulated in this TransportAuthenticator
+// Organization returns the O for the certificate encapsulated in this TransportCredentials
 func (c *MutableTLSCreds) Organization() string {
 	c.Lock()
 	defer c.Unlock()
@@ -148,7 +140,7 @@ func (c *MutableTLSCreds) Organization() string {
 	return c.subject.Organization[0]
 }
 
-// NodeID returns the CN for the certificate encapsulated in this TransportAuthenticator
+// NodeID returns the CN for the certificate encapsulated in this TransportCredentials
 func (c *MutableTLSCreds) NodeID() string {
 	c.Lock()
 	defer c.Unlock()
@@ -156,7 +148,7 @@ func (c *MutableTLSCreds) NodeID() string {
 	return c.subject.CommonName
 }
 
-// NewMutableTLS uses c to construct a mutable TransportAuthenticator based on TLS.
+// NewMutableTLS uses c to construct a mutable TransportCredentials based on TLS.
 func NewMutableTLS(c *tls.Config) (*MutableTLSCreds, error) {
 	originalTC := credentials.NewTLS(c)
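
With grpc v1.0.x, ClientHandshake receives a context instead of a timeout, so the handshake is raced against ctx.Done() rather than a hand-rolled timer. A standalone sketch of that pattern, separate from the MutableTLSCreds type above:

package tlsutil

import (
	"crypto/tls"
	"net"

	"golang.org/x/net/context"
)

// handshakeWithContext runs the TLS handshake in a goroutine and aborts when
// the context is cancelled or its deadline passes, mirroring the select in
// ClientHandshake above.
func handshakeWithContext(ctx context.Context, rawConn net.Conn, cfg *tls.Config) (net.Conn, error) {
	conn := tls.Client(rawConn, cfg)
	errCh := make(chan error, 1)
	go func() {
		errCh <- conn.Handshake()
	}()
	select {
	case err := <-errCh:
		if err != nil {
			rawConn.Close()
			return nil, err
		}
		return conn, nil
	case <-ctx.Done():
		rawConn.Close()
		return nil, ctx.Err()
	}
}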
 

+ 8 - 0
vendor/src/github.com/docker/swarmkit/manager/allocator/network.go

@@ -564,7 +564,9 @@ func (a *Allocator) allocateNode(ctx context.Context, nc *networkContext, node *
 
 func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *api.Service) error {
 	if s.Spec.Endpoint != nil {
+		// service has user-defined endpoint
 		if s.Endpoint == nil {
+			// the service currently has no allocated endpoint; one needs to be allocated.
 			s.Endpoint = &api.Endpoint{
 				Spec: s.Spec.Endpoint.Copy(),
 			}
@@ -587,6 +589,12 @@ func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *
 					&api.Endpoint_VirtualIP{NetworkID: nc.ingressNetwork.ID})
 			}
 		}
+	} else if s.Endpoint != nil {
+		// the service has no user-defined endpoint but has already allocated
+		// network resources, which need to be deallocated.
+		if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
+			return err
+		}
 	}
 
 	if err := nc.nwkAllocator.ServiceAllocate(s); err != nil {

+ 12 - 1
vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go

@@ -155,7 +155,18 @@ func (pa *portAllocator) serviceDeallocatePorts(s *api.Service) {
 }
 
 func (pa *portAllocator) isPortsAllocated(s *api.Service) bool {
-	if s.Endpoint == nil {
+	// If the service has neither a user-defined endpoint nor an allocated endpoint,
+	// we assume it is allocated and return true.
+	if s.Endpoint == nil && s.Spec.Endpoint == nil {
+		return true
+	}
+
+	// If the service has an allocated endpoint but no user-defined endpoint,
+	// we assume the allocated endpoints are redundant and need to be deallocated.
+	// If the service has a user-defined endpoint but no allocated endpoint,
+	// we assume it is not allocated.
+	if (s.Endpoint != nil && s.Spec.Endpoint == nil) ||
+		(s.Endpoint == nil && s.Spec.Endpoint != nil) {
 		return false
 	}
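
The new checks in isPortsAllocated reduce to comparing two nullable pieces of state: the user-defined endpoint in the spec and the currently allocated endpoint. A simplified sketch of that decision table, using booleans in place of the real api structs:

package main

import "fmt"

// portsLookConsistent mirrors the nil checks in isPortsAllocated above. It
// only answers the nil-vs-non-nil part of the question; when both sides are
// present the real code goes on to compare the individual ports.
func portsLookConsistent(haveAllocated, haveSpec bool) (allocated, decided bool) {
	switch {
	case !haveAllocated && !haveSpec:
		// Nothing requested and nothing allocated: treat as allocated.
		return true, true
	case haveAllocated != haveSpec:
		// One side is missing: either redundant allocations to release,
		// or an allocation still to perform.
		return false, true
	default:
		// Both present: the per-port comparison decides.
		return false, false
	}
}

func main() {
	fmt.Println(portsLookConsistent(false, false)) // true true
	fmt.Println(portsLookConsistent(true, false))  // false true
	fmt.Println(portsLookConsistent(false, true))  // false true
	fmt.Println(portsLookConsistent(true, true))   // false false
}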
 

+ 0 - 12
vendor/src/github.com/docker/swarmkit/manager/controlapi/hackpicker/cluster.go

@@ -1,12 +0,0 @@
-package hackpicker
-
-// AddrSelector is interface which should track cluster for its leader address.
-type AddrSelector interface {
-	LeaderAddr() (string, error)
-}
-
-// RaftCluster is interface which combines useful methods for clustering.
-type RaftCluster interface {
-	AddrSelector
-	IsLeader() bool
-}

+ 0 - 141
vendor/src/github.com/docker/swarmkit/manager/controlapi/hackpicker/raftpicker.go

@@ -1,141 +0,0 @@
-// Package hackpicker is temporary solution to provide more seamless experience
-// for controlapi. It has drawback of slow reaction to leader change, but it
-// tracks leader automatically without erroring out to client.
-package hackpicker
-
-import (
-	"sync"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/transport"
-)
-
-// picker always picks address of cluster leader.
-type picker struct {
-	mu   sync.Mutex
-	addr string
-	raft AddrSelector
-	conn *grpc.Conn
-	cc   *grpc.ClientConn
-}
-
-// Init does initial processing for the Picker, e.g., initiate some connections.
-func (p *picker) Init(cc *grpc.ClientConn) error {
-	p.cc = cc
-	return nil
-}
-
-func (p *picker) initConn() error {
-	if p.conn == nil {
-		conn, err := grpc.NewConn(p.cc)
-		if err != nil {
-			return err
-		}
-		p.conn = conn
-	}
-	return nil
-}
-
-// Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
-// or some error happens.
-func (p *picker) Pick(ctx context.Context) (transport.ClientTransport, error) {
-	p.mu.Lock()
-	if err := p.initConn(); err != nil {
-		p.mu.Unlock()
-		return nil, err
-	}
-	p.mu.Unlock()
-
-	addr, err := p.raft.LeaderAddr()
-	if err != nil {
-		return nil, err
-	}
-	p.mu.Lock()
-	if p.addr != addr {
-		p.addr = addr
-		p.conn.NotifyReset()
-	}
-	p.mu.Unlock()
-	return p.conn.Wait(ctx)
-}
-
-// PickAddr picks a peer address for connecting. This will be called repeated for
-// connecting/reconnecting.
-func (p *picker) PickAddr() (string, error) {
-	addr, err := p.raft.LeaderAddr()
-	if err != nil {
-		return "", err
-	}
-	p.mu.Lock()
-	p.addr = addr
-	p.mu.Unlock()
-	return addr, nil
-}
-
-// State returns the connectivity state of the underlying connections.
-func (p *picker) State() (grpc.ConnectivityState, error) {
-	return p.conn.State(), nil
-}
-
-// WaitForStateChange blocks until the state changes to something other than
-// the sourceState. It returns the new state or error.
-func (p *picker) WaitForStateChange(ctx context.Context, sourceState grpc.ConnectivityState) (grpc.ConnectivityState, error) {
-	return p.conn.WaitForStateChange(ctx, sourceState)
-}
-
-// Reset the current connection and force a reconnect to another address.
-func (p *picker) Reset() error {
-	p.conn.NotifyReset()
-	return nil
-}
-
-// Close closes all the Conn's owned by this Picker.
-func (p *picker) Close() error {
-	return p.conn.Close()
-}
-
-// ConnSelector is struct for obtaining connection with raftpicker.
-type ConnSelector struct {
-	mu      sync.Mutex
-	cc      *grpc.ClientConn
-	cluster RaftCluster
-	opts    []grpc.DialOption
-}
-
-// NewConnSelector returns new ConnSelector with cluster and grpc.DialOpts which
-// will be used for Dial on first call of Conn.
-func NewConnSelector(cluster RaftCluster, opts ...grpc.DialOption) *ConnSelector {
-	return &ConnSelector{
-		cluster: cluster,
-		opts:    opts,
-	}
-}
-
-// Conn returns *grpc.ClientConn with picker which picks raft cluster leader.
-// Internal connection estabilished lazily on this call.
-// It can return error if cluster wasn't ready at the moment of initial call.
-func (c *ConnSelector) Conn() (*grpc.ClientConn, error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.cc != nil {
-		return c.cc, nil
-	}
-	addr, err := c.cluster.LeaderAddr()
-	if err != nil {
-		return nil, err
-	}
-	picker := &picker{raft: c.cluster, addr: addr}
-	opts := append(c.opts, grpc.WithPicker(picker))
-	cc, err := grpc.Dial(addr, opts...)
-	if err != nil {
-		return nil, err
-	}
-	c.cc = cc
-	return c.cc, nil
-}
-
-// Reset does nothing for hackpicker.
-func (c *ConnSelector) Reset() error {
-	return nil
-}

+ 7 - 6
vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go

@@ -5,7 +5,7 @@ import (
 	"reflect"
 	"strconv"
 
-	"github.com/docker/engine-api/types/reference"
+	"github.com/docker/distribution/reference"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/identity"
 	"github.com/docker/swarmkit/manager/scheduler"
@@ -133,7 +133,7 @@ func validateTask(taskSpec api.TaskSpec) error {
 		return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided")
 	}
 
-	if _, _, err := reference.Parse(container.Image); err != nil {
+	if _, err := reference.ParseNamed(container.Image); err != nil {
 		return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", container.Image)
 	}
 	return nil
@@ -149,13 +149,13 @@ func validateEndpointSpec(epSpec *api.EndpointSpec) error {
 		return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: ports can't be used with dnsrr mode")
 	}
 
-	portSet := make(map[api.PortConfig]struct{})
+	portSet := make(map[uint32]struct{})
 	for _, port := range epSpec.Ports {
-		if _, ok := portSet[*port]; ok {
-			return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate ports provided")
+		if _, ok := portSet[port.PublishedPort]; ok {
+			return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate published ports provided")
 		}
 
-		portSet[*port] = struct{}{}
+		portSet[port.PublishedPort] = struct{}{}
 	}
 
 	return nil
@@ -350,6 +350,7 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
 			return errModeChangeNotAllowed
 		}
 		service.Meta.Version = *request.ServiceVersion
+		service.PreviousSpec = service.Spec.Copy()
 		service.Spec = *request.Spec.Copy()
 
 		// Reset update status
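
Two validation changes stand out above: image references are now parsed with docker/distribution's reference.ParseNamed, and duplicate detection for ports keys on PublishedPort alone instead of the whole PortConfig value. A small sketch of the duplicate check with a stand-in port type; the real code returns a gRPC InvalidArgument error with the same message:

package main

import (
	"errors"
	"fmt"
)

// portConfig is a stand-in for api.PortConfig.
type portConfig struct {
	PublishedPort uint32
	TargetPort    uint32
}

// checkDuplicatePublishedPorts rejects specs that publish the same port more
// than once, even when other fields such as TargetPort differ.
func checkDuplicatePublishedPorts(ports []portConfig) error {
	seen := make(map[uint32]struct{})
	for _, p := range ports {
		if _, ok := seen[p.PublishedPort]; ok {
			return errors.New("EndpointSpec: duplicate published ports provided")
		}
		seen[p.PublishedPort] = struct{}{}
	}
	return nil
}

func main() {
	ports := []portConfig{
		{PublishedPort: 8080, TargetPort: 80},
		{PublishedPort: 8080, TargetPort: 81},
	}
	fmt.Println(checkDuplicatePublishedPorts(ports))
}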

+ 244 - 38
vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go

@@ -3,6 +3,7 @@ package dispatcher
 import (
 	"errors"
 	"fmt"
+	"strconv"
 	"sync"
 	"time"
 
@@ -41,6 +42,9 @@ const (
 	// into a single transaction. A fraction of a second feels about
 	// right.
 	maxBatchInterval = 100 * time.Millisecond
+
+	modificationBatchLimit = 100
+	batchingWaitTime       = 100 * time.Millisecond
 )
 
 var (
@@ -127,8 +131,6 @@ func New(cluster Cluster, c *Config) *Dispatcher {
 		nodes:                 newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod),
 		store:                 cluster.MemoryStore(),
 		cluster:               cluster,
-		mgrQueue:              watch.NewQueue(),
-		keyMgrQueue:           watch.NewQueue(),
 		taskUpdates:           make(map[string]*api.TaskStatus),
 		nodeUpdates:           make(map[string]nodeUpdate),
 		processUpdatesTrigger: make(chan struct{}, 1),
@@ -195,6 +197,9 @@ func (d *Dispatcher) Run(ctx context.Context) error {
 		d.mu.Unlock()
 		return err
 	}
+	// set queues here to guarantee that Close will close them
+	d.mgrQueue = watch.NewQueue()
+	d.keyMgrQueue = watch.NewQueue()
 
 	peerWatcher, peerCancel := d.cluster.SubscribePeers()
 	defer peerCancel()
@@ -351,26 +356,10 @@ func (d *Dispatcher) isRunning() bool {
 	return true
 }
 
-// register is used for registration of node with particular dispatcher.
-func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) {
-	// prevent register until we're ready to accept it
-	if err := d.isRunningLocked(); err != nil {
-		return "", err
-	}
-
-	if err := d.nodes.CheckRateLimit(nodeID); err != nil {
-		return "", err
-	}
-
-	// TODO(stevvooe): Validate node specification.
-	var node *api.Node
-	d.store.View(func(tx store.ReadTx) {
-		node = store.GetNode(tx, nodeID)
-	})
-	if node == nil {
-		return "", ErrNodeNotFound
-	}
-
+// updateNode updates the description of a node and sets its status to READY.
+// It is used during registration, when a new node description is provided,
+// and during node updates, when the node description changes.
+func (d *Dispatcher) updateNode(nodeID string, description *api.NodeDescription) error {
 	d.nodeUpdatesLock.Lock()
 	d.nodeUpdates[nodeID] = nodeUpdate{status: &api.NodeStatus{State: api.NodeStatus_READY}, description: description}
 	numUpdates := len(d.nodeUpdates)
@@ -380,7 +369,7 @@ func (d *Dispatcher) register(ctx context.Context, nodeID string, description *a
 		select {
 		case d.processUpdatesTrigger <- struct{}{}:
 		case <-d.ctx.Done():
-			return "", d.ctx.Err()
+			return d.ctx.Err()
 		}
 
 	}
@@ -389,12 +378,39 @@ func (d *Dispatcher) register(ctx context.Context, nodeID string, description *a
 	d.processUpdatesLock.Lock()
 	select {
 	case <-d.ctx.Done():
-		return "", d.ctx.Err()
+		return d.ctx.Err()
 	default:
 	}
 	d.processUpdatesCond.Wait()
 	d.processUpdatesLock.Unlock()
 
+	return nil
+}
+
+// register is used for registration of node with particular dispatcher.
+func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) {
+	// prevent register until we're ready to accept it
+	if err := d.isRunningLocked(); err != nil {
+		return "", err
+	}
+
+	if err := d.nodes.CheckRateLimit(nodeID); err != nil {
+		return "", err
+	}
+
+	// TODO(stevvooe): Validate node specification.
+	var node *api.Node
+	d.store.View(func(tx store.ReadTx) {
+		node = store.GetNode(tx, nodeID)
+	})
+	if node == nil {
+		return "", ErrNodeNotFound
+	}
+
+	if err := d.updateNode(nodeID, description); err != nil {
+		return "", err
+	}
+
 	expireFunc := func() {
 		nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: "heartbeat failure"}
 		log.G(ctx).Debugf("heartbeat expiration")
@@ -657,14 +673,10 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
 		}
 
 		// bursty events should be processed in batches and sent out snapshot
-		const (
-			modificationBatchLimit = 200
-			eventPausedGap         = 50 * time.Millisecond
-		)
 		var (
-			modificationCnt    int
-			eventPausedTimer   *time.Timer
-			eventPausedTimeout <-chan time.Time
+			modificationCnt int
+			batchingTimer   *time.Timer
+			batchingTimeout <-chan time.Time
 		)
 
 	batchingLoop:
@@ -692,13 +704,189 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
 					delete(tasksMap, v.Task.ID)
 					modificationCnt++
 				}
-				if eventPausedTimer != nil {
-					eventPausedTimer.Reset(eventPausedGap)
+				if batchingTimer != nil {
+					batchingTimer.Reset(batchingWaitTime)
 				} else {
-					eventPausedTimer = time.NewTimer(eventPausedGap)
-					eventPausedTimeout = eventPausedTimer.C
+					batchingTimer = time.NewTimer(batchingWaitTime)
+					batchingTimeout = batchingTimer.C
+				}
+			case <-batchingTimeout:
+				break batchingLoop
+			case <-stream.Context().Done():
+				return stream.Context().Err()
+			case <-d.ctx.Done():
+				return d.ctx.Err()
+			}
+		}
+
+		if batchingTimer != nil {
+			batchingTimer.Stop()
+		}
+	}
+}
+
+// Assignments is a stream of assignments for a node. Each message contains
+// either full list of tasks and secrets for the node, or an incremental update.
+func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
+	nodeInfo, err := ca.RemoteNode(stream.Context())
+	if err != nil {
+		return err
+	}
+	nodeID := nodeInfo.NodeID
+
+	if err := d.isRunningLocked(); err != nil {
+		return err
+	}
+
+	fields := logrus.Fields{
+		"node.id":      nodeID,
+		"node.session": r.SessionID,
+		"method":       "(*Dispatcher).Assignments",
+	}
+	if nodeInfo.ForwardedBy != nil {
+		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+	}
+	log := log.G(stream.Context()).WithFields(fields)
+	log.Debugf("")
+
+	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
+		return err
+	}
+
+	var (
+		sequence  int64
+		appliesTo string
+		initial   api.AssignmentsMessage
+	)
+	tasksMap := make(map[string]*api.Task)
+
+	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
+		sequence++
+		msg.AppliesTo = appliesTo
+		msg.ResultsIn = strconv.FormatInt(sequence, 10)
+		appliesTo = msg.ResultsIn
+		msg.Type = assignmentType
+
+		if err := stream.Send(&msg); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	// TODO(aaronl): Also send node secrets that should be exposed to
+	// this node.
+	nodeTasks, cancel, err := store.ViewAndWatch(
+		d.store,
+		func(readTx store.ReadTx) error {
+			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
+			if err != nil {
+				return err
+			}
+
+			for _, t := range tasks {
+				// We only care about tasks that are ASSIGNED or
+				// higher. If the state is below ASSIGNED, the
+				// task may not meet the constraints for this
+				// node, so we have to be careful about sending
+				// secrets associated with it.
+				if t.Status.State < api.TaskStateAssigned {
+					continue
+				}
+
+				tasksMap[t.ID] = t
+				initial.UpdateTasks = append(initial.UpdateTasks, t)
+			}
+			return nil
+		},
+		state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
+			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
+		state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
+			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
+	)
+	if err != nil {
+		return err
+	}
+	defer cancel()
+
+	if err := sendMessage(initial, api.AssignmentsMessage_COMPLETE); err != nil {
+		return err
+	}
+
+	for {
+		// Check for session expiration
+		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
+			return err
+		}
+
+		// bursty events should be processed in batches and sent out together
+		var (
+			update          api.AssignmentsMessage
+			modificationCnt int
+			batchingTimer   *time.Timer
+			batchingTimeout <-chan time.Time
+			updateTasks     = make(map[string]*api.Task)
+			removeTasks     = make(map[string]struct{})
+		)
+
+		oneModification := func() {
+			modificationCnt++
+
+			if batchingTimer != nil {
+				batchingTimer.Reset(batchingWaitTime)
+			} else {
+				batchingTimer = time.NewTimer(batchingWaitTime)
+				batchingTimeout = batchingTimer.C
+			}
+		}
+
+		// The batching loop waits for batchingWaitTime (100 ms) after the
+		// most recent change, or until modificationBatchLimit is reached. The
+		// worst case latency is modificationBatchLimit * batchingWaitTime,
+		// which is 10 seconds.
+	batchingLoop:
+		for modificationCnt < modificationBatchLimit {
+			select {
+			case event := <-nodeTasks:
+				switch v := event.(type) {
+				// We don't monitor EventCreateTask because tasks are
+				// never created in the ASSIGNED state. First tasks are
+				// created by the orchestrator, then the scheduler moves
+				// them to ASSIGNED. If this ever changes, we will need
+				// to monitor task creations as well.
+				case state.EventUpdateTask:
+					// We only care about tasks that are ASSIGNED or
+					// higher.
+					if v.Task.Status.State < api.TaskStateAssigned {
+						continue
+					}
+
+					if oldTask, exists := tasksMap[v.Task.ID]; exists {
+						// States ASSIGNED and below are set by the orchestrator/scheduler,
+						// not the agent, so tasks in these states need to be sent to the
+						// agent even if nothing else has changed.
+						if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
+							// this update should not trigger a task change for the agent
+							tasksMap[v.Task.ID] = v.Task
+							continue
+						}
+					}
+					tasksMap[v.Task.ID] = v.Task
+					updateTasks[v.Task.ID] = v.Task
+
+					oneModification()
+				case state.EventDeleteTask:
+
+					if _, exists := tasksMap[v.Task.ID]; !exists {
+						continue
+					}
+
+					removeTasks[v.Task.ID] = struct{}{}
+
+					delete(tasksMap, v.Task.ID)
+
+					oneModification()
 				}
-			case <-eventPausedTimeout:
+			case <-batchingTimeout:
 				break batchingLoop
 			case <-stream.Context().Done():
 				return stream.Context().Err()
@@ -707,8 +895,22 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
 			}
 		}
 
-		if eventPausedTimer != nil {
-			eventPausedTimer.Stop()
+		if batchingTimer != nil {
+			batchingTimer.Stop()
+		}
+
+		if modificationCnt > 0 {
+			for id, task := range updateTasks {
+				if _, ok := removeTasks[id]; !ok {
+					update.UpdateTasks = append(update.UpdateTasks, task)
+				}
+			}
+			for id := range removeTasks {
+				update.RemoveTasks = append(update.RemoveTasks, id)
+			}
+			if err := sendMessage(update, api.AssignmentsMessage_INCREMENTAL); err != nil {
+				return err
+			}
 		}
 	}
 }
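
Tasks and the new Assignments stream share one batching scheme: accumulate events until modificationBatchLimit changes have been collected or batchingWaitTime has passed since the most recent change, then flush a single message. A self-contained sketch of that loop; the channel of strings and the flush callback are stand-ins for the real event and message types:

package main

import (
	"fmt"
	"time"
)

const (
	modificationBatchLimit = 100
	batchingWaitTime       = 100 * time.Millisecond
)

// batchEvents drains events until the batch limit is reached or no new event
// has arrived for batchingWaitTime, then hands the batch to flush.
func batchEvents(events <-chan string, done <-chan struct{}, flush func([]string)) {
	for {
		var (
			batch         []string
			batchingTimer *time.Timer
			batchingC     <-chan time.Time
		)
	batchingLoop:
		for len(batch) < modificationBatchLimit {
			select {
			case ev, ok := <-events:
				if !ok {
					if len(batch) > 0 {
						flush(batch)
					}
					return
				}
				batch = append(batch, ev)
				// Restart the quiet-period timer on every event.
				if batchingTimer != nil {
					batchingTimer.Reset(batchingWaitTime)
				} else {
					batchingTimer = time.NewTimer(batchingWaitTime)
					batchingC = batchingTimer.C
				}
			case <-batchingC:
				break batchingLoop
			case <-done:
				return
			}
		}
		if batchingTimer != nil {
			batchingTimer.Stop()
		}
		if len(batch) > 0 {
			flush(batch)
		}
	}
}

func main() {
	events := make(chan string, 2)
	events <- "task1"
	events <- "task2"
	close(events)
	batchEvents(events, nil, func(batch []string) {
		fmt.Println(batch) // [task1 task2]
	})
}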
@@ -787,6 +989,10 @@ func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_Sessio
 		}
 	} else {
 		sessionID = r.SessionID
+		// update the node description
+		if err := d.updateNode(nodeID, r.Description); err != nil {
+			return err
+		}
 	}
 
 	fields := logrus.Fields{

+ 7 - 33
vendor/src/github.com/docker/swarmkit/manager/manager.go

@@ -9,7 +9,6 @@ import (
 	"path/filepath"
 	"sync"
 	"syscall"
-	"time"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/go-events"
@@ -18,12 +17,10 @@ import (
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/manager/allocator"
 	"github.com/docker/swarmkit/manager/controlapi"
-	"github.com/docker/swarmkit/manager/controlapi/hackpicker"
 	"github.com/docker/swarmkit/manager/dispatcher"
 	"github.com/docker/swarmkit/manager/health"
 	"github.com/docker/swarmkit/manager/keymanager"
 	"github.com/docker/swarmkit/manager/orchestrator"
-	"github.com/docker/swarmkit/manager/raftpicker"
 	"github.com/docker/swarmkit/manager/resourceapi"
 	"github.com/docker/swarmkit/manager/scheduler"
 	"github.com/docker/swarmkit/manager/state/raft"
@@ -92,7 +89,6 @@ type Manager struct {
 	server                 *grpc.Server
 	localserver            *grpc.Server
 	RaftNode               *raft.Node
-	connSelector           *raftpicker.ConnSelector
 
 	mu sync.Mutex
 
@@ -250,25 +246,6 @@ func (m *Manager) Run(parent context.Context) error {
 
 	go m.handleLeadershipEvents(ctx, leadershipCh)
 
-	proxyOpts := []grpc.DialOption{
-		grpc.WithTimeout(5 * time.Second),
-		grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
-	}
-
-	cs := raftpicker.NewConnSelector(m.RaftNode, proxyOpts...)
-	m.connSelector = cs
-
-	// We need special connSelector for controlapi because it provides automatic
-	// leader tracking.
-	// Other APIs are using connSelector which errors out on leader change, but
-	// allows to react quickly to reelections.
-	controlAPIProxyOpts := []grpc.DialOption{
-		grpc.WithBackoffMaxDelay(time.Second),
-		grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
-	}
-
-	controlAPIConnSelector := hackpicker.NewConnSelector(m.RaftNode, controlAPIProxyOpts...)
-
 	authorize := func(ctx context.Context, roles []string) error {
 		// Authorize the remote roles, ensure they can only be forwarded by managers
 		_, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization())
@@ -289,11 +266,11 @@ func (m *Manager) Run(parent context.Context) error {
 	authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
 	authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.RaftNode, authorize)
 
-	proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
-	proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
-	proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
-	proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
-	proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+	proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+	proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+	proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+	proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
+	proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
 
 	// localProxyControlAPI is a special kind of proxy. It is only wired up
 	// to receive requests from a trusted local socket, and these requests
@@ -302,7 +279,7 @@ func (m *Manager) Run(parent context.Context) error {
 	// this manager rather than forwarded requests (it has no TLS
 	// information to put in the metadata map).
 	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
-	localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, controlAPIConnSelector, m.RaftNode, forwardAsOwnRequest)
+	localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, m.RaftNode, forwardAsOwnRequest)
 
 	// Everything registered on m.server should be an authenticated
 	// wrapper, or a proxy wrapping an authenticated wrapper!
@@ -318,7 +295,7 @@ func (m *Manager) Run(parent context.Context) error {
 	api.RegisterControlServer(m.localserver, localProxyControlAPI)
 	api.RegisterHealthServer(m.localserver, localHealthServer)
 
-	errServe := make(chan error, 2)
+	errServe := make(chan error, len(m.listeners))
 	for proto, l := range m.listeners {
 		go m.serveListener(ctx, errServe, proto, l)
 	}
@@ -433,9 +410,6 @@ func (m *Manager) Stop(ctx context.Context) {
 		m.keyManager.Stop()
 	}
 
-	if m.connSelector != nil {
-		m.connSelector.Stop()
-	}
 	m.RaftNode.Shutdown()
 	// some time after this point, Run will receive an error from one of these
 	m.server.Stop()

+ 4 - 3
vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go

@@ -346,7 +346,8 @@ func (r *RestartSupervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask
 			close(doneCh)
 		}()
 
-		oldTaskTimeout := time.After(r.taskTimeout)
+		oldTaskTimer := time.NewTimer(r.taskTimeout)
+		defer oldTaskTimer.Stop()
 
 		// Wait for the delay to elapse, if one is specified.
 		if delay != 0 {
@@ -357,10 +358,10 @@ func (r *RestartSupervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask
 			}
 		}
 
-		if waitStop {
+		if waitStop && oldTask != nil {
 			select {
 			case <-watch:
-			case <-oldTaskTimeout:
+			case <-oldTaskTimer.C:
 			case <-ctx.Done():
 				return
 			}
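
Swapping time.After for an explicit time.NewTimer with a deferred Stop matters because time.After's timer cannot be released early: it lives until it fires, even if DelayStart returns long before taskTimeout. A minimal sketch of the difference:

package main

import (
	"fmt"
	"time"
)

// waitOrCancel waits for either a long timeout or cancellation. Using
// time.NewTimer with a deferred Stop lets the timer be reclaimed as soon as
// this function returns; time.After would keep its timer alive for the full
// hour even after cancellation.
func waitOrCancel(cancel <-chan struct{}) string {
	timer := time.NewTimer(time.Hour)
	defer timer.Stop()

	select {
	case <-timer.C:
		return "timed out"
	case <-cancel:
		return "cancelled"
	}
}

func main() {
	cancel := make(chan struct{})
	close(cancel)
	fmt.Println(waitOrCancel(cancel)) // cancelled
}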

+ 195 - 41
vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go

@@ -1,6 +1,7 @@
 package orchestrator
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 	"sync"
@@ -17,6 +18,8 @@ import (
 	"github.com/docker/swarmkit/protobuf/ptypes"
 )
 
+const defaultMonitor = 30 * time.Second
+
 // UpdateSupervisor supervises a set of updates. It's responsible for keeping track of updates,
 // shutting them down and replacing them.
 type UpdateSupervisor struct {
@@ -49,7 +52,7 @@ func (u *UpdateSupervisor) Update(ctx context.Context, cluster *api.Cluster, ser
 	id := service.ID
 
 	if update, ok := u.updates[id]; ok {
-		if !update.isServiceDirty(service) {
+		if reflect.DeepEqual(service.Spec, update.newService.Spec) {
 			// There's already an update working towards this goal.
 			return
 		}
@@ -87,6 +90,9 @@ type Updater struct {
 	cluster    *api.Cluster
 	newService *api.Service
 
+	updatedTasks   map[string]time.Time // task ID to creation time
+	updatedTasksMu sync.Mutex
+
 	// stopChan signals to the state machine to stop running.
 	stopChan chan struct{}
 	// doneChan is closed when the state machine terminates.
@@ -96,13 +102,14 @@ type Updater struct {
 // NewUpdater creates a new Updater.
 func NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor, cluster *api.Cluster, newService *api.Service) *Updater {
 	return &Updater{
-		store:      store,
-		watchQueue: store.WatchQueue(),
-		restarts:   restartSupervisor,
-		cluster:    cluster.Copy(),
-		newService: newService.Copy(),
-		stopChan:   make(chan struct{}),
-		doneChan:   make(chan struct{}),
+		store:        store,
+		watchQueue:   store.WatchQueue(),
+		restarts:     restartSupervisor,
+		cluster:      cluster.Copy(),
+		newService:   newService.Copy(),
+		updatedTasks: make(map[string]time.Time),
+		stopChan:     make(chan struct{}),
+		doneChan:     make(chan struct{}),
 	}
 }
 
@@ -119,7 +126,9 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
 	service := u.newService
 
 	// If the update is in a PAUSED state, we should not do anything.
-	if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_PAUSED {
+	if service.UpdateStatus != nil &&
+		(service.UpdateStatus.State == api.UpdateStatus_PAUSED ||
+			service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_PAUSED) {
 		return
 	}
 
@@ -131,7 +140,9 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
 	}
 	// Abort immediately if all tasks are clean.
 	if len(dirtySlots) == 0 {
-		if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_UPDATING {
+		if service.UpdateStatus != nil &&
+			(service.UpdateStatus.State == api.UpdateStatus_UPDATING ||
+				service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED) {
 			u.completeUpdate(ctx, service.ID)
 		}
 		return
@@ -163,9 +174,26 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
 		}()
 	}
 
+	failureAction := api.UpdateConfig_PAUSE
+	allowedFailureFraction := float32(0)
+	monitoringPeriod := defaultMonitor
+
+	if service.Spec.Update != nil {
+		failureAction = service.Spec.Update.FailureAction
+		allowedFailureFraction = service.Spec.Update.AllowedFailureFraction
+
+		if service.Spec.Update.Monitor != nil {
+			var err error
+			monitoringPeriod, err = ptypes.Duration(service.Spec.Update.Monitor)
+			if err != nil {
+				monitoringPeriod = defaultMonitor
+			}
+		}
+	}
+
 	var failedTaskWatch chan events.Event
 
-	if service.Spec.Update == nil || service.Spec.Update.FailureAction == api.UpdateConfig_PAUSE {
+	if failureAction != api.UpdateConfig_CONTINUE {
 		var cancelWatch func()
 		failedTaskWatch, cancelWatch = state.Watch(
 			u.store.WatchQueue(),
@@ -178,6 +206,49 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
 	}
 
 	stopped := false
+	failedTasks := make(map[string]struct{})
+	totalFailures := 0
+
+	failureTriggersAction := func(failedTask *api.Task) bool {
+		// Ignore tasks we have already seen as failures.
+		if _, found := failedTasks[failedTask.ID]; found {
+			return false
+		}
+
+		// If this failed/completed task is one that we
+		// created as part of this update, we should
+		// follow the failure action.
+		u.updatedTasksMu.Lock()
+		startedAt, found := u.updatedTasks[failedTask.ID]
+		u.updatedTasksMu.Unlock()
+
+		if found && (startedAt.IsZero() || time.Since(startedAt) <= monitoringPeriod) {
+			failedTasks[failedTask.ID] = struct{}{}
+			totalFailures++
+			if float32(totalFailures)/float32(len(dirtySlots)) > allowedFailureFraction {
+				switch failureAction {
+				case api.UpdateConfig_PAUSE:
+					stopped = true
+					message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID)
+					u.pauseUpdate(ctx, service.ID, message)
+					return true
+				case api.UpdateConfig_ROLLBACK:
+					// Never roll back a rollback
+					if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
+						message := fmt.Sprintf("rollback paused due to failure or early termination of task %s", failedTask.ID)
+						u.pauseUpdate(ctx, service.ID, message)
+						return true
+					}
+					stopped = true
+					message := fmt.Sprintf("update rolled back due to failure or early termination of task %s", failedTask.ID)
+					u.rollbackUpdate(ctx, service.ID, message)
+					return true
+				}
+			}
+		}
+
+		return false
+	}
 
 slotsLoop:
 	for _, slot := range dirtySlots {
@@ -189,15 +260,7 @@ slotsLoop:
 				stopped = true
 				break slotsLoop
 			case ev := <-failedTaskWatch:
-				failedTask := ev.(state.EventUpdateTask).Task
-
-				// If this failed/completed task has a spec matching
-				// the one we're updating to, we should pause the
-				// update.
-				if !u.isTaskDirty(failedTask) {
-					stopped = true
-					message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID)
-					u.pauseUpdate(ctx, service.ID, message)
+				if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
 					break slotsLoop
 				}
 			case slotQueue <- slot:
@@ -209,6 +272,29 @@ slotsLoop:
 	close(slotQueue)
 	wg.Wait()
 
+	if !stopped {
+		// Keep watching for task failures for one more monitoringPeriod,
+		// before declaring the update complete.
+		doneMonitoring := time.After(monitoringPeriod)
+	monitorLoop:
+		for {
+			select {
+			case <-u.stopChan:
+				stopped = true
+				break monitorLoop
+			case <-doneMonitoring:
+				break monitorLoop
+			case ev := <-failedTaskWatch:
+				if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
+					break monitorLoop
+				}
+			}
+		}
+	}
+
+	// TODO(aaronl): Potentially roll back the service if not enough tasks
+	// have reached RUNNING by this point.
+
 	if !stopped {
 		u.completeUpdate(ctx, service.ID)
 	}
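
The failure handling added above relies on two pieces of bookkeeping: a per-task timestamp recorded when an updated task reaches RUNNING (zero until then), and a failure count compared against AllowedFailureFraction of the dirty slots. A simplified sketch of that check with stand-in types; the real code additionally distinguishes PAUSE from ROLLBACK and never rolls back a rollback:

package main

import (
	"fmt"
	"time"
)

const monitoringPeriod = 30 * time.Second

// failureCounter mirrors the updatedTasks / failedTasks bookkeeping above.
type failureCounter struct {
	startedAt     map[string]time.Time // task ID -> recorded timestamp (zero until the task reaches RUNNING)
	failed        map[string]struct{}
	totalUpdating int     // len(dirtySlots) in the real code
	threshold     float32 // AllowedFailureFraction
}

// observeFailure records a failed task and reports whether the failure
// fraction now exceeds the allowed threshold.
func (f *failureCounter) observeFailure(taskID string, now time.Time) bool {
	if _, seen := f.failed[taskID]; seen {
		return false // already counted
	}
	startedAt, created := f.startedAt[taskID]
	// Only tasks created by this update count, and only if they failed
	// before reaching RUNNING (zero timestamp) or within monitoringPeriod
	// of the recorded timestamp.
	if !created || (!startedAt.IsZero() && now.Sub(startedAt) > monitoringPeriod) {
		return false
	}
	f.failed[taskID] = struct{}{}
	return float32(len(f.failed))/float32(f.totalUpdating) > f.threshold
}

func main() {
	f := &failureCounter{
		startedAt:     map[string]time.Time{"a": {}, "b": {}, "c": {}, "d": {}},
		failed:        map[string]struct{}{},
		totalUpdating: 4,
		threshold:     0.25,
	}
	now := time.Now()
	fmt.Println(f.observeFailure("a", now)) // false: 1/4 == 0.25, not above the threshold
	fmt.Println(f.observeFailure("b", now)) // true:  2/4 > 0.25, invoke the failure action
}
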
@@ -237,9 +323,13 @@ func (u *Updater) worker(ctx context.Context, queue <-chan slot) {
 			}
 		}
 		if runningTask != nil {
-			u.useExistingTask(ctx, slot, runningTask)
+			if err := u.useExistingTask(ctx, slot, runningTask); err != nil {
+				log.G(ctx).WithError(err).Error("update failed")
+			}
 		} else if cleanTask != nil {
-			u.useExistingTask(ctx, slot, cleanTask)
+			if err := u.useExistingTask(ctx, slot, cleanTask); err != nil {
+				log.G(ctx).WithError(err).Error("update failed")
+			}
 		} else {
 			updated := newTask(u.cluster, u.newService, slot[0].Slot)
 			updated.DesiredState = api.TaskStateReady
@@ -275,10 +365,22 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
 	})
 	defer cancel()
 
+	// Create an empty entry for this task, so the updater knows a failure
+	// should count towards the failure count. The timestamp is added
+	// if/when the task reaches RUNNING.
+	u.updatedTasksMu.Lock()
+	u.updatedTasks[updated.ID] = time.Time{}
+	u.updatedTasksMu.Unlock()
+
 	var delayStartCh <-chan struct{}
 	// Atomically create the updated task and bring down the old one.
 	_, err := u.store.Batch(func(batch *store.Batch) error {
-		err := batch.Update(func(tx store.Tx) error {
+		oldTask, err := u.removeOldTasks(ctx, batch, slot)
+		if err != nil {
+			return err
+		}
+
+		err = batch.Update(func(tx store.Tx) error {
 			if err := store.CreateTask(tx, updated); err != nil {
 				return err
 			}
@@ -288,7 +390,6 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
 			return err
 		}
 
-		oldTask := u.removeOldTasks(ctx, batch, slot)
 		delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, updated.ID, 0, true)
 
 		return nil
@@ -309,6 +410,9 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
 		case e := <-taskUpdates:
 			updated = e.(state.EventUpdateTask).Task
 			if updated.Status.State >= api.TaskStateRunning {
+				u.updatedTasksMu.Lock()
+				u.updatedTasks[updated.ID] = time.Now()
+				u.updatedTasksMu.Unlock()
 				return nil
 			}
 		case <-u.stopChan:
@@ -317,7 +421,7 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
 	}
 }
 
-func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) {
+func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) error {
 	var removeTasks []*api.Task
 	for _, t := range slot {
 		if t != existing {
@@ -327,7 +431,14 @@ func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.
 	if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning {
 		var delayStartCh <-chan struct{}
 		_, err := u.store.Batch(func(batch *store.Batch) error {
-			oldTask := u.removeOldTasks(ctx, batch, removeTasks)
+			var oldTask *api.Task
+			if len(removeTasks) != 0 {
+				var err error
+				oldTask, err = u.removeOldTasks(ctx, batch, removeTasks)
+				if err != nil {
+					return err
+				}
+			}
 
 			if existing.DesiredState != api.TaskStateRunning {
 				delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, existing.ID, 0, true)
@@ -335,19 +446,24 @@ func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.
 			return nil
 		})
 		if err != nil {
-			log.G(ctx).WithError(err).Error("updater batch transaction failed")
+			return err
 		}
 
 		if delayStartCh != nil {
 			<-delayStartCh
 		}
 	}
+
+	return nil
 }
 
 // removeOldTasks shuts down the given tasks and returns one of the tasks that
-// was shut down, or nil.
-func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) *api.Task {
-	var removedTask *api.Task
+// was shut down, or an error.
+func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) (*api.Task, error) {
+	var (
+		lastErr     error
+		removedTask *api.Task
+	)
 	for _, original := range removeTasks {
 		err := batch.Update(func(tx store.Tx) error {
 			t := store.GetTask(tx, original.ID)
@@ -361,13 +477,16 @@ func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, remove
 			return store.UpdateTask(tx, t)
 		})
 		if err != nil {
-			log.G(ctx).WithError(err).Errorf("shutting down stale task %s failed", original.ID)
+			lastErr = err
 		} else {
 			removedTask = original
 		}
 	}
 
-	return removedTask
+	if removedTask == nil {
+		return nil, lastErr
+	}
+	return removedTask, nil
 }
 
 func (u *Updater) isTaskDirty(t *api.Task) bool {
@@ -375,11 +494,6 @@ func (u *Updater) isTaskDirty(t *api.Task) bool {
 		(t.Endpoint != nil && !reflect.DeepEqual(u.newService.Spec.Endpoint, t.Endpoint.Spec))
 }
 
-func (u *Updater) isServiceDirty(service *api.Service) bool {
-	return !reflect.DeepEqual(u.newService.Spec.Task, service.Spec.Task) ||
-		!reflect.DeepEqual(u.newService.Spec.Endpoint, service.Spec.Endpoint)
-}
-
 func (u *Updater) isSlotDirty(slot slot) bool {
 	return len(slot) > 1 || (len(slot) == 1 && u.isTaskDirty(slot[0]))
 }
@@ -421,7 +535,11 @@ func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
 			return nil
 		}
 
-		service.UpdateStatus.State = api.UpdateStatus_PAUSED
+		if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
+			service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_PAUSED
+		} else {
+			service.UpdateStatus.State = api.UpdateStatus_PAUSED
+		}
 		service.UpdateStatus.Message = message
 
 		return store.UpdateService(tx, service)
@@ -432,6 +550,38 @@ func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
 	}
 }
 
+func (u *Updater) rollbackUpdate(ctx context.Context, serviceID, message string) {
+	log.G(ctx).Debugf("starting rollback of service %s", serviceID)
+
+	var service *api.Service
+	err := u.store.Update(func(tx store.Tx) error {
+		service = store.GetService(tx, serviceID)
+		if service == nil {
+			return nil
+		}
+		if service.UpdateStatus == nil {
+			// The service was updated since we started this update
+			return nil
+		}
+
+		service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_STARTED
+		service.UpdateStatus.Message = message
+
+		if service.PreviousSpec == nil {
+			return errors.New("cannot roll back service because no previous spec is available")
+		}
+		service.Spec = *service.PreviousSpec
+		service.PreviousSpec = nil
+
+		return store.UpdateService(tx, service)
+	})
+
+	if err != nil {
+		log.G(ctx).WithError(err).Errorf("failed to start rollback of service %s", serviceID)
+		return
+	}
+}
+
 func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
 	log.G(ctx).Debugf("update of service %s complete", serviceID)
 
@@ -444,9 +594,13 @@ func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
 			// The service was changed since we started this update
 			return nil
 		}
-
-		service.UpdateStatus.State = api.UpdateStatus_COMPLETED
-		service.UpdateStatus.Message = "update completed"
+		if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
+			service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED
+			service.UpdateStatus.Message = "rollback completed"
+		} else {
+			service.UpdateStatus.State = api.UpdateStatus_COMPLETED
+			service.UpdateStatus.Message = "update completed"
+		}
 		service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now())
 
 		return store.UpdateService(tx, service)
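The monitoring loop added above keeps the updater watching failedTaskWatch for one extra monitoringPeriod before the update is declared complete, so late task failures can still pause or roll back the service. Below is a minimal, self-contained sketch of that select pattern in isolation; monitorFailures, stopCh, failures, and triggersAction are hypothetical names, not the swarmkit API.

```go
package main

import (
	"fmt"
	"time"
)

// monitorFailures is a hedged sketch of the post-update monitoring loop: it
// watches a failure channel for one more monitoring period and reports whether
// the caller stopped the update. A qualifying failure ends the loop early; in
// the real updater that is where pauseUpdate or rollbackUpdate kicks in.
func monitorFailures(stopCh <-chan struct{}, failures <-chan string, period time.Duration, triggersAction func(taskID string) bool) (stopped bool) {
	done := time.After(period)
	for {
		select {
		case <-stopCh:
			return true
		case <-done:
			return false
		case id := <-failures:
			if triggersAction(id) {
				return false
			}
		}
	}
}

func main() {
	failures := make(chan string, 1)
	failures <- "task-1"
	stopped := monitorFailures(nil, failures, 50*time.Millisecond, func(string) bool { return true })
	fmt.Println("stopped by caller:", stopped)
}
```

Receiving from the nil stopCh simply blocks, which is why passing nil is safe in this tiny demo.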

+ 0 - 12
vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go

@@ -1,12 +0,0 @@
-package raftpicker
-
-// AddrSelector is interface which should track cluster for its leader address.
-type AddrSelector interface {
-	LeaderAddr() (string, error)
-}
-
-// RaftCluster is interface which combines useful methods for clustering.
-type RaftCluster interface {
-	AddrSelector
-	IsLeader() bool
-}

+ 0 - 127
vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go

@@ -1,127 +0,0 @@
-package raftpicker
-
-import (
-	"sync"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-
-	"google.golang.org/grpc"
-)
-
-// Interface is interface to replace implementation with controlapi/hackpicker.
-// TODO: it should be done cooler.
-type Interface interface {
-	Conn() (*grpc.ClientConn, error)
-	Reset() error
-}
-
-// ConnSelector is struct for obtaining connection connected to cluster leader.
-type ConnSelector struct {
-	mu      sync.Mutex
-	cluster RaftCluster
-	opts    []grpc.DialOption
-
-	cc   *grpc.ClientConn
-	addr string
-
-	stop chan struct{}
-}
-
-// NewConnSelector returns new ConnSelector with cluster and grpc.DialOpts which
-// will be used for connection create.
-func NewConnSelector(cluster RaftCluster, opts ...grpc.DialOption) *ConnSelector {
-	cs := &ConnSelector{
-		cluster: cluster,
-		opts:    opts,
-		stop:    make(chan struct{}),
-	}
-	go cs.updateLoop()
-	return cs
-}
-
-// Conn returns *grpc.ClientConn which connected to cluster leader.
-// It can return error if cluster wasn't ready at the moment of initial call.
-func (c *ConnSelector) Conn() (*grpc.ClientConn, error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.cc != nil {
-		return c.cc, nil
-	}
-	addr, err := c.cluster.LeaderAddr()
-	if err != nil {
-		return nil, err
-	}
-	cc, err := grpc.Dial(addr, c.opts...)
-	if err != nil {
-		return nil, err
-	}
-	c.cc = cc
-	c.addr = addr
-	return cc, nil
-}
-
-// Reset recreates underlying connection.
-func (c *ConnSelector) Reset() error {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.cc != nil {
-		c.cc.Close()
-		c.cc = nil
-	}
-	addr, err := c.cluster.LeaderAddr()
-	if err != nil {
-		logrus.WithError(err).Errorf("error obtaining leader address")
-		return err
-	}
-	cc, err := grpc.Dial(addr, c.opts...)
-	if err != nil {
-		logrus.WithError(err).Errorf("error reestabilishing connection to leader")
-		return err
-	}
-	c.cc = cc
-	c.addr = addr
-	return nil
-}
-
-// Stop cancels updating connection loop.
-func (c *ConnSelector) Stop() {
-	close(c.stop)
-}
-
-func (c *ConnSelector) updateConn() error {
-	addr, err := c.cluster.LeaderAddr()
-	if err != nil {
-		return err
-	}
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.addr != addr {
-		if c.cc != nil {
-			c.cc.Close()
-			c.cc = nil
-		}
-		conn, err := grpc.Dial(addr, c.opts...)
-		if err != nil {
-			return err
-		}
-		c.cc = conn
-		c.addr = addr
-	}
-	return nil
-}
-
-func (c *ConnSelector) updateLoop() {
-	ticker := time.NewTicker(1 * time.Second)
-	defer ticker.Stop()
-	for {
-		select {
-		case <-ticker.C:
-			if err := c.updateConn(); err != nil {
-				logrus.WithError(err).Errorf("error reestabilishing connection to leader")
-			}
-		case <-c.stop:
-			return
-		}
-	}
-}

+ 20 - 0
vendor/src/github.com/docker/swarmkit/manager/raftselector/raftselector.go

@@ -0,0 +1,20 @@
+package raftselector
+
+import (
+	"errors"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/grpc"
+)
+
+// ConnProvider is a basic interface for connecting the API package (the raft
+// proxy in particular) to the manager/state/raft package without import cycles.
+// It provides only one method for obtaining a connection to the leader.
+type ConnProvider interface {
+	LeaderConn(ctx context.Context) (*grpc.ClientConn, error)
+}
+
+// ErrIsLeader is returned from the LeaderConn method when the current machine
+// is the leader. It's just a shim between packages to avoid import cycles.
+var ErrIsLeader = errors.New("current node is leader")
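A short usage sketch of the new ConnProvider contract: callers ask for a connection to the leader and treat ErrIsLeader as "handle the request locally". localOrForward and leaderProvider below are hypothetical illustrations, not the actual raft proxy code.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/docker/swarmkit/manager/raftselector"
)

// localOrForward shows the typical branch around LeaderConn: serve locally
// when this node is the leader, otherwise forward over the leader connection.
func localOrForward(ctx context.Context, cp raftselector.ConnProvider, local func() error, forward func(*grpc.ClientConn) error) error {
	conn, err := cp.LeaderConn(ctx)
	if err == raftselector.ErrIsLeader {
		return local()
	}
	if err != nil {
		return err
	}
	return forward(conn)
}

// leaderProvider is a stub ConnProvider that always reports that the current
// node is the leader.
type leaderProvider struct{}

func (leaderProvider) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
	return nil, raftselector.ErrIsLeader
}

func main() {
	err := localOrForward(context.Background(), leaderProvider{},
		func() error { fmt.Println("serving locally: this node is the leader"); return nil },
		func(*grpc.ClientConn) error { fmt.Println("forwarding request to the leader"); return nil },
	)
	fmt.Println("err:", err)
}
```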

+ 0 - 153
vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go

@@ -1,153 +0,0 @@
-package scheduler
-
-import (
-	"container/heap"
-	"errors"
-
-	"github.com/docker/swarmkit/api"
-)
-
-var errNodeNotFound = errors.New("node not found in scheduler heap")
-
-// A nodeHeap implements heap.Interface for nodes. It also includes an index
-// by node id.
-type nodeHeap struct {
-	heap  []NodeInfo
-	index map[string]int // map from node id to heap index
-}
-
-func (nh nodeHeap) Len() int {
-	return len(nh.heap)
-}
-
-func (nh nodeHeap) Less(i, j int) bool {
-	return len(nh.heap[i].Tasks) < len(nh.heap[j].Tasks)
-}
-
-func (nh nodeHeap) Swap(i, j int) {
-	nh.heap[i], nh.heap[j] = nh.heap[j], nh.heap[i]
-	nh.index[nh.heap[i].ID] = i
-	nh.index[nh.heap[j].ID] = j
-}
-
-func (nh *nodeHeap) Push(x interface{}) {
-	n := len(nh.heap)
-	item := x.(NodeInfo)
-	nh.index[item.ID] = n
-	nh.heap = append(nh.heap, item)
-}
-
-func (nh *nodeHeap) Pop() interface{} {
-	old := nh.heap
-	n := len(old)
-	item := old[n-1]
-	delete(nh.index, item.ID)
-	nh.heap = old[0 : n-1]
-	return item
-}
-
-func (nh *nodeHeap) alloc(n int) {
-	nh.heap = make([]NodeInfo, 0, n)
-	nh.index = make(map[string]int, n)
-}
-
-// nodeInfo returns the NodeInfo struct for a given node identified by its ID.
-func (nh *nodeHeap) nodeInfo(nodeID string) (NodeInfo, error) {
-	index, ok := nh.index[nodeID]
-	if ok {
-		return nh.heap[index], nil
-	}
-	return NodeInfo{}, errNodeNotFound
-}
-
-// addOrUpdateNode sets the number of tasks for a given node. It adds the node
-// to the heap if it wasn't already tracked.
-func (nh *nodeHeap) addOrUpdateNode(n NodeInfo) {
-	index, ok := nh.index[n.ID]
-	if ok {
-		nh.heap[index] = n
-		heap.Fix(nh, index)
-	} else {
-		heap.Push(nh, n)
-	}
-}
-
-// updateNode sets the number of tasks for a given node. It ignores the update
-// if the node isn't already tracked in the heap.
-func (nh *nodeHeap) updateNode(n NodeInfo) {
-	index, ok := nh.index[n.ID]
-	if ok {
-		nh.heap[index] = n
-		heap.Fix(nh, index)
-	}
-}
-
-func (nh *nodeHeap) remove(nodeID string) {
-	index, ok := nh.index[nodeID]
-	if ok {
-		heap.Remove(nh, index)
-	}
-}
-
-func (nh *nodeHeap) findMin(meetsConstraints func(*NodeInfo) bool, scanAllNodes bool) (*api.Node, int) {
-	if scanAllNodes {
-		return nh.scanAllToFindMin(meetsConstraints)
-	}
-	return nh.searchHeapToFindMin(meetsConstraints)
-}
-
-// Scan All nodes to find the best node which meets the constraints && has lightest workloads
-func (nh *nodeHeap) scanAllToFindMin(meetsConstraints func(*NodeInfo) bool) (*api.Node, int) {
-	var bestNode *api.Node
-	minTasks := int(^uint(0) >> 1) // max int
-
-	for i := 0; i < len(nh.heap); i++ {
-		heapEntry := &nh.heap[i]
-		if meetsConstraints(heapEntry) && len(heapEntry.Tasks) < minTasks {
-			bestNode = heapEntry.Node
-			minTasks = len(heapEntry.Tasks)
-		}
-	}
-
-	return bestNode, minTasks
-}
-
-// Search in heap to find the best node which meets the constraints && has lightest workloads
-func (nh *nodeHeap) searchHeapToFindMin(meetsConstraints func(*NodeInfo) bool) (*api.Node, int) {
-	var bestNode *api.Node
-	minTasks := int(^uint(0) >> 1) // max int
-
-	if nh == nil || len(nh.heap) == 0 {
-		return bestNode, minTasks
-	}
-
-	// push root to stack for search
-	stack := []int{0}
-
-	for len(stack) != 0 {
-		// pop an element
-		idx := stack[len(stack)-1]
-		stack = stack[0 : len(stack)-1]
-
-		heapEntry := &nh.heap[idx]
-
-		if len(heapEntry.Tasks) >= minTasks {
-			continue
-		}
-
-		if meetsConstraints(heapEntry) {
-			// meet constraints, update results
-			bestNode = heapEntry.Node
-			minTasks = len(heapEntry.Tasks)
-		} else {
-			// otherwise, push 2 children to stack for further search
-			if 2*idx+1 < len(nh.heap) {
-				stack = append(stack, 2*idx+1)
-			}
-			if 2*idx+2 < len(nh.heap) {
-				stack = append(stack, 2*idx+2)
-			}
-		}
-	}
-	return bestNode, minTasks
-}

+ 48 - 13
vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go

@@ -5,15 +5,18 @@ import "github.com/docker/swarmkit/api"
 // NodeInfo contains a node and some additional metadata.
 type NodeInfo struct {
 	*api.Node
-	Tasks              map[string]*api.Task
-	AvailableResources api.Resources
+	Tasks                             map[string]*api.Task
+	DesiredRunningTasksCount          int
+	DesiredRunningTasksCountByService map[string]int
+	AvailableResources                api.Resources
 }
 
 func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api.Resources) NodeInfo {
 	nodeInfo := NodeInfo{
-		Node:               n,
-		Tasks:              make(map[string]*api.Task),
-		AvailableResources: availableResources,
+		Node:  n,
+		Tasks: make(map[string]*api.Task),
+		DesiredRunningTasksCountByService: make(map[string]int),
+		AvailableResources:                availableResources,
 	}
 
 	for _, t := range tasks {
@@ -22,15 +25,23 @@ func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api
 	return nodeInfo
 }
 
+// removeTask removes a task from nodeInfo if it's tracked there, and returns true
+// if nodeInfo was modified.
 func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
 	if nodeInfo.Tasks == nil {
 		return false
 	}
-	if _, ok := nodeInfo.Tasks[t.ID]; !ok {
+	oldTask, ok := nodeInfo.Tasks[t.ID]
+	if !ok {
 		return false
 	}
 
 	delete(nodeInfo.Tasks, t.ID)
+	if oldTask.DesiredState == api.TaskStateRunning {
+		nodeInfo.DesiredRunningTasksCount--
+		nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]--
+	}
+
 	reservations := taskReservations(t.Spec)
 	nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
 	nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
@@ -38,19 +49,43 @@ func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
 	return true
 }
 
+// addTask adds or updates a task on nodeInfo, and returns true if nodeInfo was
+// modified.
 func (nodeInfo *NodeInfo) addTask(t *api.Task) bool {
 	if nodeInfo.Tasks == nil {
 		nodeInfo.Tasks = make(map[string]*api.Task)
 	}
-	if _, ok := nodeInfo.Tasks[t.ID]; !ok {
-		nodeInfo.Tasks[t.ID] = t
-		reservations := taskReservations(t.Spec)
-		nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
-		nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
-		return true
+	if nodeInfo.DesiredRunningTasksCountByService == nil {
+		nodeInfo.DesiredRunningTasksCountByService = make(map[string]int)
+	}
+
+	oldTask, ok := nodeInfo.Tasks[t.ID]
+	if ok {
+		if t.DesiredState == api.TaskStateRunning && oldTask.DesiredState != api.TaskStateRunning {
+			nodeInfo.Tasks[t.ID] = t
+			nodeInfo.DesiredRunningTasksCount++
+			nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]++
+			return true
+		} else if t.DesiredState != api.TaskStateRunning && oldTask.DesiredState == api.TaskStateRunning {
+			nodeInfo.Tasks[t.ID] = t
+			nodeInfo.DesiredRunningTasksCount--
+			nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]--
+			return true
+		}
+		return false
+	}
+
+	nodeInfo.Tasks[t.ID] = t
+	reservations := taskReservations(t.Spec)
+	nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
+	nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
+
+	if t.DesiredState == api.TaskStateRunning {
+		nodeInfo.DesiredRunningTasksCount++
+		nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]++
 	}
 
-	return false
+	return true
 }
 
 func taskReservations(spec api.TaskSpec) (reservations api.Resources) {
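The bookkeeping above gives each node two extra counters: the total number of its tasks whose desired state is RUNNING, and a per-service breakdown; the scheduler changes further down use them to spread a service's tasks. A simplified sketch of the same counter logic, with a hypothetical simpleNode type standing in for NodeInfo:

```go
package main

import "fmt"

// simpleNode is a stripped-down stand-in for NodeInfo that keeps the same two
// counters the diff introduces: total desired-running tasks and a per-service
// breakdown. Task and state handling are simplified to booleans.
type simpleNode struct {
	desiredRunning          int
	desiredRunningByService map[string]int
}

func (n *simpleNode) add(serviceID string, desiredRunning bool) {
	if n.desiredRunningByService == nil {
		n.desiredRunningByService = make(map[string]int)
	}
	if desiredRunning {
		n.desiredRunning++
		n.desiredRunningByService[serviceID]++
	}
}

func (n *simpleNode) remove(serviceID string, desiredRunning bool) {
	if desiredRunning {
		n.desiredRunning--
		n.desiredRunningByService[serviceID]--
	}
}

func main() {
	var n simpleNode
	n.add("web", true)
	n.add("web", true)
	n.add("db", false) // not desired-running, so it doesn't affect the counters
	fmt.Println(n.desiredRunning, n.desiredRunningByService["web"]) // 2 2
	n.remove("web", true)
	fmt.Println(n.desiredRunning, n.desiredRunningByService["web"]) // 1 1
}
```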

+ 115 - 0
vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeset.go

@@ -0,0 +1,115 @@
+package scheduler
+
+import (
+	"container/heap"
+	"errors"
+)
+
+var errNodeNotFound = errors.New("node not found in scheduler dataset")
+
+type nodeSet struct {
+	nodes map[string]NodeInfo // map from node id to node info
+}
+
+func (ns *nodeSet) alloc(n int) {
+	ns.nodes = make(map[string]NodeInfo, n)
+}
+
+// nodeInfo returns the NodeInfo struct for a given node identified by its ID.
+func (ns *nodeSet) nodeInfo(nodeID string) (NodeInfo, error) {
+	node, ok := ns.nodes[nodeID]
+	if ok {
+		return node, nil
+	}
+	return NodeInfo{}, errNodeNotFound
+}
+
+// addOrUpdateNode stores the NodeInfo for a given node, adding the node
+// to the set if it wasn't already tracked.
+func (ns *nodeSet) addOrUpdateNode(n NodeInfo) {
+	ns.nodes[n.ID] = n
+}
+
+// updateNode stores the NodeInfo for a given node. It ignores the update
+// if the node isn't already tracked in the set.
+func (ns *nodeSet) updateNode(n NodeInfo) {
+	_, ok := ns.nodes[n.ID]
+	if ok {
+		ns.nodes[n.ID] = n
+	}
+}
+
+func (ns *nodeSet) remove(nodeID string) {
+	delete(ns.nodes, nodeID)
+}
+
+type nodeMaxHeap struct {
+	nodes    []NodeInfo
+	lessFunc func(*NodeInfo, *NodeInfo) bool
+	length   int
+}
+
+func (h nodeMaxHeap) Len() int {
+	return h.length
+}
+
+func (h nodeMaxHeap) Swap(i, j int) {
+	h.nodes[i], h.nodes[j] = h.nodes[j], h.nodes[i]
+}
+
+func (h nodeMaxHeap) Less(i, j int) bool {
+	// reversed to make a max-heap
+	return h.lessFunc(&h.nodes[j], &h.nodes[i])
+}
+
+func (h *nodeMaxHeap) Push(x interface{}) {
+	h.nodes = append(h.nodes, x.(NodeInfo))
+	h.length++
+}
+
+func (h *nodeMaxHeap) Pop() interface{} {
+	h.length--
+	// return value is never used
+	return nil
+}
+
+// findBestNodes returns n nodes (or < n if fewer nodes are available) that
+// rank best (lowest) according to the sorting function.
+func (ns *nodeSet) findBestNodes(n int, meetsConstraints func(*NodeInfo) bool, nodeLess func(*NodeInfo, *NodeInfo) bool) []NodeInfo {
+	if n == 0 {
+		return []NodeInfo{}
+	}
+
+	nodeHeap := nodeMaxHeap{lessFunc: nodeLess}
+
+	// TODO(aaronl): Is it possible to avoid checking constraints on every
+	// node? Perhaps we should try to schedule with n*2 nodes that weren't
+	// prescreened, and repeat the selection if there weren't enough nodes
+	// meeting the constraints.
+	for _, node := range ns.nodes {
+		// If there are fewer than n nodes in the heap, we add this
+		// node if it meets the constraints. Otherwise, the heap has
+		// n nodes, and if this node is better than the worst node in
+		// the heap, we replace the worst node and then fix the heap.
+		if nodeHeap.Len() < n {
+			if meetsConstraints(&node) {
+				heap.Push(&nodeHeap, node)
+			}
+		} else if nodeLess(&node, &nodeHeap.nodes[0]) {
+			if meetsConstraints(&node) {
+				nodeHeap.nodes[0] = node
+				heap.Fix(&nodeHeap, 0)
+			}
+		}
+	}
+
+	// Popping every element orders the nodes from best to worst. The
+	// first pop gets the worst node (since this is a max-heap), and puts it
+	// at position n-1. Then the next pop puts the next-worst at n-2, and
+	// so on.
+	for nodeHeap.Len() > 0 {
+		heap.Pop(&nodeHeap)
+	}
+
+	return nodeHeap.nodes
+}
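findBestNodes keeps at most n candidates in a bounded max-heap, replacing the current worst whenever a better node shows up, then pops the heap to order the survivors from best to worst. The same top-k selection trick is shown below over plain ints, as a minimal standalone sketch (kSmallest is a hypothetical helper, not part of swarmkit):

```go
package main

import (
	"container/heap"
	"fmt"
)

// intMaxHeap is a max-heap of ints, used below to keep only the k smallest
// values seen so far, which is the same bounded-heap trick findBestNodes uses
// to pick the k least-loaded nodes without sorting the whole set.
type intMaxHeap []int

func (h intMaxHeap) Len() int            { return len(h) }
func (h intMaxHeap) Less(i, j int) bool  { return h[i] > h[j] } // reversed: max-heap
func (h intMaxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intMaxHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intMaxHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// kSmallest returns the k smallest values of vs in ascending order.
func kSmallest(vs []int, k int) []int {
	h := &intMaxHeap{}
	for _, v := range vs {
		if h.Len() < k {
			heap.Push(h, v)
		} else if v < (*h)[0] {
			// Better than the current worst of the k kept: replace the root.
			(*h)[0] = v
			heap.Fix(h, 0)
		}
	}
	// Popping from a max-heap yields descending order; reverse for ascending.
	out := make([]int, h.Len())
	for i := len(out) - 1; i >= 0; i-- {
		out[i] = heap.Pop(h).(int)
	}
	return out
}

func main() {
	fmt.Println(kSmallest([]int{7, 2, 9, 4, 1, 8}, 3)) // [1 2 4]
}
```

The payoff is that selecting the k best nodes costs O(N log k) rather than sorting the entire node list.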

+ 130 - 59
vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go

@@ -1,7 +1,6 @@
 package scheduler
 
 import (
-	"container/heap"
 	"container/list"
 	"time"
 
@@ -24,7 +23,7 @@ type Scheduler struct {
 	unassignedTasks *list.List
 	// preassignedTasks already have NodeID, need resource validation
 	preassignedTasks map[string]*api.Task
-	nodeHeap         nodeHeap
+	nodeSet          nodeSet
 	allTasks         map[string]*api.Task
 	pipeline         *Pipeline
 
@@ -32,11 +31,6 @@ type Scheduler struct {
 	stopChan chan struct{}
 	// doneChan is closed when the state machine terminates
 	doneChan chan struct{}
-
-	// This currently exists only for benchmarking. It tells the scheduler
-	// scan the whole heap instead of taking the minimum-valued node
-	// blindly.
-	scanAllNodes bool
 }
 
 // New creates a new scheduler.
@@ -83,7 +77,7 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
 		tasksByNode[t.NodeID][t.ID] = t
 	}
 
-	if err := s.buildNodeHeap(tx, tasksByNode); err != nil {
+	if err := s.buildNodeSet(tx, tasksByNode); err != nil {
 		return err
 	}
 
@@ -152,7 +146,7 @@ func (s *Scheduler) Run(ctx context.Context) error {
 				s.createOrUpdateNode(v.Node)
 				pendingChanges++
 			case state.EventDeleteNode:
-				s.nodeHeap.remove(v.Node.ID)
+				s.nodeSet.remove(v.Node.ID)
 			case state.EventCommit:
 				if commitDebounceTimer != nil {
 					if time.Since(debouncingStarted) > maxLatency {
@@ -210,9 +204,9 @@ func (s *Scheduler) createTask(ctx context.Context, t *api.Task) int {
 		return 0
 	}
 
-	nodeInfo, err := s.nodeHeap.nodeInfo(t.NodeID)
+	nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
 	if err == nil && nodeInfo.addTask(t) {
-		s.nodeHeap.updateNode(nodeInfo)
+		s.nodeSet.updateNode(nodeInfo)
 	}
 
 	return 0
@@ -257,9 +251,9 @@ func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
 	}
 
 	s.allTasks[t.ID] = t
-	nodeInfo, err := s.nodeHeap.nodeInfo(t.NodeID)
+	nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
 	if err == nil && nodeInfo.addTask(t) {
-		s.nodeHeap.updateNode(nodeInfo)
+		s.nodeSet.updateNode(nodeInfo)
 	}
 
 	return 0
@@ -268,14 +262,14 @@ func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
 func (s *Scheduler) deleteTask(ctx context.Context, t *api.Task) {
 	delete(s.allTasks, t.ID)
 	delete(s.preassignedTasks, t.ID)
-	nodeInfo, err := s.nodeHeap.nodeInfo(t.NodeID)
+	nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
 	if err == nil && nodeInfo.removeTask(t) {
-		s.nodeHeap.updateNode(nodeInfo)
+		s.nodeSet.updateNode(nodeInfo)
 	}
 }
 
 func (s *Scheduler) createOrUpdateNode(n *api.Node) {
-	nodeInfo, _ := s.nodeHeap.nodeInfo(n.ID)
+	nodeInfo, _ := s.nodeSet.nodeInfo(n.ID)
 	var resources api.Resources
 	if n.Description != nil && n.Description.Resources != nil {
 		resources = *n.Description.Resources
@@ -288,7 +282,7 @@ func (s *Scheduler) createOrUpdateNode(n *api.Node) {
 	}
 	nodeInfo.Node = n
 	nodeInfo.AvailableResources = resources
-	s.nodeHeap.addOrUpdateNode(nodeInfo)
+	s.nodeSet.addOrUpdateNode(nodeInfo)
 }
 
 func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
@@ -308,44 +302,60 @@ func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
 	}
 	for _, decision := range failed {
 		s.allTasks[decision.old.ID] = decision.old
-		nodeInfo, err := s.nodeHeap.nodeInfo(decision.new.NodeID)
+		nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID)
 		if err == nil && nodeInfo.removeTask(decision.new) {
-			s.nodeHeap.updateNode(nodeInfo)
+			s.nodeSet.updateNode(nodeInfo)
 		}
 	}
 }
 
 // tick attempts to schedule the queue.
 func (s *Scheduler) tick(ctx context.Context) {
+	tasksByCommonSpec := make(map[string]map[string]*api.Task)
 	schedulingDecisions := make(map[string]schedulingDecision, s.unassignedTasks.Len())
 
 	var next *list.Element
 	for e := s.unassignedTasks.Front(); e != nil; e = next {
 		next = e.Next()
-		id := e.Value.(*api.Task).ID
-		if _, ok := schedulingDecisions[id]; ok {
-			s.unassignedTasks.Remove(e)
-			continue
-		}
 		t := s.allTasks[e.Value.(*api.Task).ID]
 		if t == nil || t.NodeID != "" {
 			// task deleted or already assigned
 			s.unassignedTasks.Remove(e)
 			continue
 		}
-		if newT := s.scheduleTask(ctx, t); newT != nil {
-			schedulingDecisions[id] = schedulingDecision{old: t, new: newT}
-			s.unassignedTasks.Remove(e)
+
+		// Group tasks with common specs by marshalling the spec
+		// into taskGroupKey and using it as a map key.
+		// TODO(aaronl): Once specs are versioned, this will allow a
+		// much more efficient fast path.
+		fieldsToMarshal := api.Task{
+			ServiceID: t.ServiceID,
+			Spec:      t.Spec,
 		}
+		marshalled, err := fieldsToMarshal.Marshal()
+		if err != nil {
+			panic(err)
+		}
+		taskGroupKey := string(marshalled)
+
+		if tasksByCommonSpec[taskGroupKey] == nil {
+			tasksByCommonSpec[taskGroupKey] = make(map[string]*api.Task)
+		}
+		tasksByCommonSpec[taskGroupKey][t.ID] = t
+		s.unassignedTasks.Remove(e)
+	}
+
+	for _, taskGroup := range tasksByCommonSpec {
+		s.scheduleTaskGroup(ctx, taskGroup, schedulingDecisions)
 	}
 
 	_, failed := s.applySchedulingDecisions(ctx, schedulingDecisions)
 	for _, decision := range failed {
 		s.allTasks[decision.old.ID] = decision.old
 
-		nodeInfo, err := s.nodeHeap.nodeInfo(decision.new.NodeID)
+		nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID)
 		if err == nil && nodeInfo.removeTask(decision.new) {
-			s.nodeHeap.updateNode(nodeInfo)
+			s.nodeSet.updateNode(nodeInfo)
 		}
 
 		// enqueue task for next scheduling attempt
@@ -401,11 +411,11 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
 	return
 }
 
-// taskFitNode checks if a node has enough resource to accommodate a task
+// taskFitNode checks if a node has enough resources to accommodate a task.
 func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string) *api.Task {
-	nodeInfo, err := s.nodeHeap.nodeInfo(nodeID)
+	nodeInfo, err := s.nodeSet.nodeInfo(nodeID)
 	if err != nil {
-		// node does not exist in heap (it may have been deleted)
+		// node does not exist in set (it may have been deleted)
 		return nil
 	}
 	s.pipeline.SetTask(t)
@@ -422,57 +432,118 @@ func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string)
 	s.allTasks[t.ID] = &newT
 
 	if nodeInfo.addTask(&newT) {
-		s.nodeHeap.updateNode(nodeInfo)
+		s.nodeSet.updateNode(nodeInfo)
 	}
 	return &newT
 }
 
-// scheduleTask schedules a single task.
-func (s *Scheduler) scheduleTask(ctx context.Context, t *api.Task) *api.Task {
+// scheduleTaskGroup schedules a batch of tasks that are part of the same
+// service and share the same version of the spec.
+func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) {
+	// Pick a task at random from taskGroup to use for constraint
+	// evaluation. It doesn't matter which one we pick because all the
+	// tasks in the group are equal in terms of the fields the constraint
+	// filters consider.
+	var t *api.Task
+	for _, t = range taskGroup {
+		break
+	}
+
 	s.pipeline.SetTask(t)
-	n, _ := s.nodeHeap.findMin(s.pipeline.Process, s.scanAllNodes)
-	if n == nil {
-		log.G(ctx).WithField("task.id", t.ID).Debug("No suitable node available for task")
-		return nil
+
+	nodeLess := func(a *NodeInfo, b *NodeInfo) bool {
+		tasksByServiceA := a.DesiredRunningTasksCountByService[t.ServiceID]
+		tasksByServiceB := b.DesiredRunningTasksCountByService[t.ServiceID]
+
+		if tasksByServiceA < tasksByServiceB {
+			return true
+		}
+		if tasksByServiceA > tasksByServiceB {
+			return false
+		}
+
+		// Total number of tasks breaks ties.
+		return a.DesiredRunningTasksCount < b.DesiredRunningTasksCount
 	}
 
-	log.G(ctx).WithField("task.id", t.ID).Debugf("Assigning to node %s", n.ID)
-	newT := *t
-	newT.NodeID = n.ID
-	newT.Status = api.TaskStatus{
-		State:     api.TaskStateAssigned,
-		Timestamp: ptypes.MustTimestampProto(time.Now()),
-		Message:   "scheduler assigned task to node",
+	nodes := s.nodeSet.findBestNodes(len(taskGroup), s.pipeline.Process, nodeLess)
+	if len(nodes) == 0 {
+		for _, t := range taskGroup {
+			log.G(ctx).WithField("task.id", t.ID).Debug("no suitable node available for task")
+			s.enqueue(t)
+		}
+		return
 	}
-	s.allTasks[t.ID] = &newT
 
-	nodeInfo, err := s.nodeHeap.nodeInfo(n.ID)
-	if err == nil && nodeInfo.addTask(&newT) {
-		s.nodeHeap.updateNode(nodeInfo)
+	failedConstraints := make(map[int]bool) // key is index in nodes slice
+	nodeIter := 0
+	for taskID, t := range taskGroup {
+		n := &nodes[nodeIter%len(nodes)]
+
+		log.G(ctx).WithField("task.id", t.ID).Debugf("assigning to node %s", n.ID)
+		newT := *t
+		newT.NodeID = n.ID
+		newT.Status = api.TaskStatus{
+			State:     api.TaskStateAssigned,
+			Timestamp: ptypes.MustTimestampProto(time.Now()),
+			Message:   "scheduler assigned task to node",
+		}
+		s.allTasks[t.ID] = &newT
+
+		nodeInfo, err := s.nodeSet.nodeInfo(n.ID)
+		if err == nil && nodeInfo.addTask(&newT) {
+			s.nodeSet.updateNode(nodeInfo)
+			nodes[nodeIter%len(nodes)] = nodeInfo
+		}
+
+		schedulingDecisions[taskID] = schedulingDecision{old: t, new: &newT}
+		delete(taskGroup, taskID)
+
+		if nodeIter+1 < len(nodes) {
+			// First pass fills the nodes until they have the same
+			// number of tasks from this service.
+			nextNode := nodes[(nodeIter+1)%len(nodes)]
+			if nodeLess(&nextNode, &nodeInfo) {
+				nodeIter++
+				continue
+			}
+		} else {
+			// In later passes, we just assign one task at a time
+			// to each node that still meets the constraints.
+			nodeIter++
+		}
+
+		origNodeIter := nodeIter
+		for failedConstraints[nodeIter%len(nodes)] || !s.pipeline.Process(&nodes[nodeIter%len(nodes)]) {
+			failedConstraints[nodeIter%len(nodes)] = true
+			nodeIter++
+			if nodeIter-origNodeIter == len(nodes) {
+				// None of the nodes meet the constraints anymore.
+				for _, t := range taskGroup {
+					log.G(ctx).WithField("task.id", t.ID).Debug("no suitable node available for task")
+					s.enqueue(t)
+				}
+				return
+			}
+		}
 	}
-	return &newT
 }
 
-func (s *Scheduler) buildNodeHeap(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
+func (s *Scheduler) buildNodeSet(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
 	nodes, err := store.FindNodes(tx, store.All)
 	if err != nil {
 		return err
 	}
 
-	s.nodeHeap.alloc(len(nodes))
+	s.nodeSet.alloc(len(nodes))
 
-	i := 0
 	for _, n := range nodes {
 		var resources api.Resources
 		if n.Description != nil && n.Description.Resources != nil {
 			resources = *n.Description.Resources
 		}
-		s.nodeHeap.heap = append(s.nodeHeap.heap, newNodeInfo(n, tasksByNode[n.ID], resources))
-		s.nodeHeap.index[n.ID] = i
-		i++
+		s.nodeSet.addOrUpdateNode(newNodeInfo(n, tasksByNode[n.ID], resources))
 	}
 
-	heap.Init(&s.nodeHeap)
-
 	return nil
 }
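tick now buckets unassigned tasks by a key built from marshalling the task's ServiceID and Spec, so tasks that are interchangeable for scheduling purposes are handled as one group by scheduleTaskGroup. A hedged sketch of the grouping idea, using JSON instead of protobuf and a hypothetical task type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// task is a hypothetical stand-in for api.Task with only the fields that
// define the grouping key in the diff above. The ID is excluded from the key.
type task struct {
	ID        string `json:"-"`
	ServiceID string `json:"service_id"`
	Image     string `json:"image"` // stands in for the full TaskSpec
}

// groupByCommonSpec buckets tasks whose key fields serialize identically,
// mirroring how tick() builds tasksByCommonSpec with a marshalled api.Task
// as the map key (protobuf there, JSON here for brevity).
func groupByCommonSpec(tasks []task) map[string][]task {
	groups := make(map[string][]task)
	for _, t := range tasks {
		key, err := json.Marshal(t)
		if err != nil {
			panic(err) // matches the diff's treatment of a marshalling failure
		}
		groups[string(key)] = append(groups[string(key)], t)
	}
	return groups
}

func main() {
	tasks := []task{
		{ID: "t1", ServiceID: "web", Image: "nginx:1.11"},
		{ID: "t2", ServiceID: "web", Image: "nginx:1.11"},
		{ID: "t3", ServiceID: "db", Image: "postgres:9.5"},
	}
	for _, group := range groupByCommonSpec(tasks) {
		ids := make([]string, 0, len(group))
		for _, t := range group {
			ids = append(ids, t.ID)
		}
		fmt.Println(ids) // t1 and t2 land in the same group, t3 in its own
	}
}
```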

+ 3 - 0
vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go

@@ -74,6 +74,9 @@ func (c *Cluster) Tick() {
 		m.tick++
 		if m.tick > c.heartbeatTicks {
 			m.active = false
+			if m.Conn != nil {
+				m.Conn.Close()
+			}
 		}
 	}
 }

+ 114 - 56
vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go

@@ -26,6 +26,7 @@ import (
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/ca"
 	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/raftselector"
 	"github.com/docker/swarmkit/manager/state/raft/membership"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/docker/swarmkit/manager/state/watch"
@@ -82,7 +83,7 @@ type Node struct {
 	Server         *grpc.Server
 	Ctx            context.Context
 	cancel         func()
-	tlsCredentials credentials.TransportAuthenticator
+	tlsCredentials credentials.TransportCredentials
 
 	Address  string
 	StateDir string
@@ -152,7 +153,7 @@ type NewNodeOptions struct {
 	// SendTimeout is the timeout on the sending messages to other raft
 	// nodes. Leave this as 0 to get the default value.
 	SendTimeout    time.Duration
-	TLSCredentials credentials.TransportAuthenticator
+	TLSCredentials credentials.TransportCredentials
 }
 
 func init() {
@@ -176,7 +177,7 @@ func NewNode(ctx context.Context, opts NewNodeOptions) *Node {
 	n := &Node{
 		Ctx:            ctx,
 		cancel:         cancel,
-		cluster:        membership.NewCluster(cfg.ElectionTick),
+		cluster:        membership.NewCluster(2 * cfg.ElectionTick),
 		tlsCredentials: opts.TLSCredentials,
 		raftStore:      raftStore,
 		Address:        opts.Addr,
@@ -395,39 +396,55 @@ func (n *Node) Run(ctx context.Context) error {
 				n.confState = rd.Snapshot.Metadata.ConfState
 			}
 
-			// Process committed entries
-			for _, entry := range rd.CommittedEntries {
-				if err := n.processCommitted(entry); err != nil {
-					n.Config.Logger.Error(err)
-				}
-			}
+			// If we cease to be the leader, we must cancel any
+			// proposals that are currently waiting for a quorum to
+			// acknowledge them. It is still possible for these to
+			// become committed, but if that happens we will apply
+			// them as any follower would.
 
-			// Trigger a snapshot every once in awhile
-			if n.snapshotInProgress == nil &&
-				raftConfig.SnapshotInterval > 0 &&
-				n.appliedIndex-n.snapshotIndex >= raftConfig.SnapshotInterval {
-				n.doSnapshot(&raftConfig)
-			}
+			// It is important that we cancel these proposals before
+			// calling processCommitted, so processCommitted does
+			// not deadlock.
 
-			// If we cease to be the leader, we must cancel
-			// any proposals that are currently waiting for
-			// a quorum to acknowledge them. It is still
-			// possible for these to become committed, but
-			// if that happens we will apply them as any
-			// follower would.
 			if rd.SoftState != nil {
 				if wasLeader && rd.SoftState.RaftState != raft.StateLeader {
 					wasLeader = false
-					n.wait.cancelAll()
 					if atomic.LoadUint32(&n.signalledLeadership) == 1 {
 						atomic.StoreUint32(&n.signalledLeadership, 0)
 						n.leadershipBroadcast.Publish(IsFollower)
 					}
+
+					// It is important that we set n.signalledLeadership to 0
+					// before calling n.wait.cancelAll. When a new raft
+					// request is registered, it checks n.signalledLeadership
+					// afterwards, and cancels the registration if it is 0.
+					// If cancelAll was called first, this call might run
+					// before the new request registers, but
+					// signalledLeadership would be set after the check.
+					// Setting signalledLeadership before calling cancelAll
+					// ensures that if a new request is registered during
+					// this transition, it will either be cancelled by
+					// cancelAll, or by its own check of signalledLeadership.
+					n.wait.cancelAll()
 				} else if !wasLeader && rd.SoftState.RaftState == raft.StateLeader {
 					wasLeader = true
 				}
 			}
 
+			// Process committed entries
+			for _, entry := range rd.CommittedEntries {
+				if err := n.processCommitted(entry); err != nil {
+					n.Config.Logger.Error(err)
+				}
+			}
+
+			// Trigger a snapshot every once in a while
+			if n.snapshotInProgress == nil &&
+				raftConfig.SnapshotInterval > 0 &&
+				n.appliedIndex-n.snapshotIndex >= raftConfig.SnapshotInterval {
+				n.doSnapshot(&raftConfig)
+			}
+
 			if wasLeader && atomic.LoadUint32(&n.signalledLeadership) != 1 {
 				// If all the entries in the log have become
 				// committed, broadcast our leadership status.
@@ -539,11 +556,11 @@ func (n *Node) Leader() (uint64, error) {
 	defer n.stopMu.RUnlock()
 
 	if !n.IsMember() {
-		return 0, ErrNoRaftMember
+		return raft.None, ErrNoRaftMember
 	}
 	leader := n.leader()
-	if leader == 0 {
-		return 0, ErrNoClusterLeader
+	if leader == raft.None {
+		return raft.None, ErrNoClusterLeader
 	}
 
 	return leader, nil
@@ -658,6 +675,12 @@ func (n *Node) checkHealth(ctx context.Context, addr string, timeout time.Durati
 		return err
 	}
 
+	if timeout != 0 {
+		tctx, cancel := context.WithTimeout(ctx, timeout)
+		defer cancel()
+		ctx = tctx
+	}
+
 	client := api.NewHealthClient(conn)
 	defer conn.Close()
 
@@ -828,25 +851,54 @@ func (n *Node) ResolveAddress(ctx context.Context, msg *api.ResolveAddressReques
 	return &api.ResolveAddressResponse{Addr: member.Addr}, nil
 }
 
-// LeaderAddr returns address of current cluster leader.
-// With this method Node satisfies raftpicker.AddrSelector interface.
-func (n *Node) LeaderAddr() (string, error) {
-	ctx, cancel := context.WithTimeout(n.Ctx, 10*time.Second)
-	defer cancel()
-	if err := WaitForLeader(ctx, n); err != nil {
-		return "", ErrNoClusterLeader
+func (n *Node) getLeaderConn() (*grpc.ClientConn, error) {
+	leader, err := n.Leader()
+	if err != nil {
+		return nil, err
 	}
-	n.stopMu.RLock()
-	defer n.stopMu.RUnlock()
-	if !n.IsMember() {
-		return "", ErrNoRaftMember
+
+	if leader == n.Config.ID {
+		return nil, raftselector.ErrIsLeader
 	}
-	ms := n.cluster.Members()
-	l := ms[n.leader()]
+	l := n.cluster.GetMember(leader)
 	if l == nil {
-		return "", ErrNoClusterLeader
+		return nil, fmt.Errorf("no leader found")
+	}
+	if !n.cluster.Active(leader) {
+		return nil, fmt.Errorf("leader marked as inactive")
+	}
+	if l.Conn == nil {
+		return nil, fmt.Errorf("no connection to leader in member list")
+	}
+	return l.Conn, nil
+}
+
+// LeaderConn returns the current connection to the cluster leader, or
+// raftselector.ErrIsLeader if the current machine is the leader.
+func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
+	cc, err := n.getLeaderConn()
+	if err == nil {
+		return cc, nil
+	}
+	if err == raftselector.ErrIsLeader {
+		return nil, err
+	}
+	ticker := time.NewTicker(1 * time.Second)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			cc, err := n.getLeaderConn()
+			if err == nil {
+				return cc, nil
+			}
+			if err == raftselector.ErrIsLeader {
+				return nil, err
+			}
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
 	}
-	return l.Addr, nil
 }
 
 // registerNode registers a new node on the cluster memberlist
@@ -943,7 +995,7 @@ func (n *Node) GetMemberlist() map[uint64]*api.RaftMember {
 	members := n.cluster.Members()
 	leaderID, err := n.Leader()
 	if err != nil {
-		leaderID = 0
+		leaderID = raft.None
 	}
 
 	for id, member := range members {
@@ -1163,7 +1215,11 @@ func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRa
 
 	r.ID = n.reqIDGen.Next()
 
-	ch := n.wait.register(r.ID, cb)
+	// This must be derived from the context which is cancelled by stop()
+	// to avoid a deadlock on shutdown.
+	waitCtx, cancel := context.WithCancel(n.Ctx)
+
+	ch := n.wait.register(r.ID, cb, cancel)
 
 	// Do this check after calling register to avoid a race.
 	if atomic.LoadUint32(&n.signalledLeadership) != 1 {
@@ -1182,24 +1238,19 @@ func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRa
 		return nil, ErrRequestTooLarge
 	}
 
-	// This must use the context which is cancelled by stop() to avoid a
-	// deadlock on shutdown.
-	err = n.Propose(n.Ctx, data)
+	err = n.Propose(waitCtx, data)
 	if err != nil {
 		n.wait.cancel(r.ID)
 		return nil, err
 	}
 
 	select {
-	case x, ok := <-ch:
-		if ok {
-			res := x.(*applyResult)
-			return res.resp, res.err
-		}
-		return nil, ErrLostLeadership
-	case <-n.Ctx.Done():
+	case x := <-ch:
+		res := x.(*applyResult)
+		return res.resp, res.err
+	case <-waitCtx.Done():
 		n.wait.cancel(r.ID)
-		return nil, ErrStopped
+		return nil, ErrLostLeadership
 	case <-ctx.Done():
 		n.wait.cancel(r.ID)
 		return nil, ctx.Err()
@@ -1211,10 +1262,12 @@ func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRa
 // until the change is performed or there is an error.
 func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
 	cc.ID = n.reqIDGen.Next()
-	ch := n.wait.register(cc.ID, nil)
+
+	ctx, cancel := context.WithCancel(ctx)
+	ch := n.wait.register(cc.ID, nil, cancel)
 
 	if err := n.ProposeConfChange(ctx, cc); err != nil {
-		n.wait.trigger(cc.ID, nil)
+		n.wait.cancel(cc.ID)
 		return err
 	}
 
@@ -1228,7 +1281,7 @@ func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
 		}
 		return nil
 	case <-ctx.Done():
-		n.wait.trigger(cc.ID, nil)
+		n.wait.cancel(cc.ID)
 		return ctx.Err()
 	case <-n.Ctx.Done():
 		return ErrStopped
@@ -1271,6 +1324,11 @@ func (n *Node) processEntry(entry raftpb.Entry) error {
 		// position and cancelling the transaction. Create a new
 		// transaction to commit the data.
 
+		// It should not be possible for processInternalRaftRequest
+		// to be running in this situation, but out of caution we
+		// cancel any current invocations to avoid a deadlock.
+		n.wait.cancelAll()
+
 		err := n.memoryStore.ApplyStoreActions(r.Action)
 		if err != nil {
 			log.G(context.Background()).Errorf("error applying actions from raft: %v", err)

+ 1 - 1
vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go

@@ -13,7 +13,7 @@ import (
 )
 
 // dial returns a grpc client connection
-func dial(addr string, protocol string, creds credentials.TransportAuthenticator, timeout time.Duration) (*grpc.ClientConn, error) {
+func dial(addr string, protocol string, creds credentials.TransportCredentials, timeout time.Duration) (*grpc.ClientConn, error) {
 	grpcOptions := []grpc.DialOption{
 		grpc.WithBackoffMaxDelay(2 * time.Second),
 		grpc.WithTransportCredentials(creds),

+ 9 - 6
vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go

@@ -10,6 +10,8 @@ type waitItem struct {
 	ch chan interface{}
 	// callback which is called synchronously when the wait is triggered
 	cb func()
+	// callback which is called to cancel a waiter
+	cancel func()
 }
 
 type wait struct {
@@ -21,13 +23,13 @@ func newWait() *wait {
 	return &wait{m: make(map[uint64]waitItem)}
 }
 
-func (w *wait) register(id uint64, cb func()) <-chan interface{} {
+func (w *wait) register(id uint64, cb func(), cancel func()) <-chan interface{} {
 	w.l.Lock()
 	defer w.l.Unlock()
 	_, ok := w.m[id]
 	if !ok {
 		ch := make(chan interface{}, 1)
-		w.m[id] = waitItem{ch: ch, cb: cb}
+		w.m[id] = waitItem{ch: ch, cb: cb, cancel: cancel}
 		return ch
 	}
 	panic(fmt.Sprintf("duplicate id %x", id))
@@ -43,7 +45,6 @@ func (w *wait) trigger(id uint64, x interface{}) bool {
 			waitItem.cb()
 		}
 		waitItem.ch <- x
-		close(waitItem.ch)
 		return true
 	}
 	return false
@@ -54,8 +55,8 @@ func (w *wait) cancel(id uint64) {
 	waitItem, ok := w.m[id]
 	delete(w.m, id)
 	w.l.Unlock()
-	if ok {
-		close(waitItem.ch)
+	if ok && waitItem.cancel != nil {
+		waitItem.cancel()
 	}
 }
 
@@ -65,6 +66,8 @@ func (w *wait) cancelAll() {
 
 	for id, waitItem := range w.m {
 		delete(w.m, id)
-		close(waitItem.ch)
+		if waitItem.cancel != nil {
+			waitItem.cancel()
+		}
 	}
 }
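After this change the wait map stores a per-waiter cancel function and never closes the waiter's channel; cancellation is signalled through the caller's context instead, which is why processInternalRaftRequest above now selects on waitCtx.Done(). A simplified sketch of the register/trigger/cancel contract (waiter and waitItem here are stripped-down stand-ins, not the swarmkit types):

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/net/context"
)

// waitItem and waiter mirror the shape of the raft wait map after this diff:
// trigger delivers a value on the registered channel, while cancel invokes the
// stored cancel function instead of closing the channel.
type waitItem struct {
	ch     chan interface{}
	cancel func()
}

type waiter struct {
	mu sync.Mutex
	m  map[uint64]waitItem
}

func newWaiter() *waiter { return &waiter{m: make(map[uint64]waitItem)} }

func (w *waiter) register(id uint64, cancel func()) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = waitItem{ch: ch, cancel: cancel}
	return ch
}

func (w *waiter) trigger(id uint64, x interface{}) bool {
	w.mu.Lock()
	item, ok := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ok {
		item.ch <- x
		return true
	}
	return false
}

func (w *waiter) cancel(id uint64) {
	w.mu.Lock()
	item, ok := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ok && item.cancel != nil {
		item.cancel()
	}
}

func main() {
	w := newWaiter()

	ctx, cancelCtx := context.WithCancel(context.Background())
	ch := w.register(1, cancelCtx)
	w.cancel(1) // runs cancelCtx; the channel is intentionally left open
	select {
	case <-ctx.Done():
		fmt.Println("waiter 1 cancelled via its context")
	case <-ch:
		fmt.Println("unexpected value")
	}

	ch2 := w.register(2, func() {})
	w.trigger(2, "applied")
	fmt.Println("waiter 2 received:", <-ch2)
}
```

Leaving the channel open means trigger is its only writer, so a successful receive always carries a real result; shutdown and lost leadership surface through the context instead.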

+ 24 - 1
vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go

@@ -6,6 +6,29 @@ import (
 	"github.com/docker/go-events"
 )
 
+// dropErrClosed is a sink that suppresses ErrSinkClosed from Write, to avoid
+// debug log messages that may be confusing. It is possible that the queue
+// will try to write an event to its destination channel while the queue is
+// being removed from the broadcaster. Since the channel is closed before the
+// queue, there is a narrow window when this is possible. In some event-based
+// systems, dropping events when a sink is removed from a broadcaster would be a
+// problem, but for the usage in this watch package that's the expected behavior.
+type dropErrClosed struct {
+	sink events.Sink
+}
+
+func (s dropErrClosed) Write(event events.Event) error {
+	err := s.sink.Write(event)
+	if err == events.ErrSinkClosed {
+		return nil
+	}
+	return err
+}
+
+func (s dropErrClosed) Close() error {
+	return s.sink.Close()
+}
+
 // Queue is the structure used to publish events and watch for them.
 type Queue struct {
 	mu          sync.Mutex
@@ -35,7 +58,7 @@ func (q *Queue) Watch() (eventq chan events.Event, cancel func()) {
 // close the channel.
 func (q *Queue) CallbackWatch(matcher events.Matcher) (eventq chan events.Event, cancel func()) {
 	ch := events.NewChannel(0)
-	sink := events.Sink(events.NewQueue(ch))
+	sink := events.Sink(events.NewQueue(dropErrClosed{sink: ch}))
 
 	if matcher != nil {
 		sink = events.NewFilter(sink, matcher)

+ 9 - 4
vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go

@@ -33,7 +33,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type TLSAuthorization struct {
 	// Roles contains the acceptable TLS OU roles for the handler.
@@ -96,11 +98,12 @@ func valueToGoStringPlugin(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringPlugin(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringPlugin(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -110,7 +113,7 @@ func extensionToGoStringPlugin(e map[int32]github_com_gogo_protobuf_proto.Extens
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }
 func (m *TLSAuthorization) Marshal() (data []byte, err error) {
@@ -443,6 +446,8 @@ var (
 	ErrIntOverflowPlugin   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
+
 var fileDescriptorPlugin = []byte{
 	// 259 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,

+ 3 - 3
vendor/src/github.com/gogo/protobuf/LICENSE

@@ -1,7 +1,7 @@
-Extensions for Protocol Buffers to create more go like structures.
+Protocol Buffers for Go with Gadgets
 
-Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-http://github.com/gogo/protobuf/gogoproto
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+http://github.com/gogo/protobuf
 
 Go support for Protocol Buffers - Google's data interchange format
 

+ 3 - 3
vendor/src/github.com/gogo/protobuf/gogoproto/Makefile

@@ -1,7 +1,7 @@
-# Extensions for Protocol Buffers to create more go like structures.
+# Protocol Buffers for Go with Gadgets
 #
-# Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-# http://github.com/gogo/protobuf/gogoproto
+# Copyright (c) 2013, The GoGo Authors. All rights reserved.
+# http://github.com/gogo/protobuf
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are

+ 3 - 3
vendor/src/github.com/gogo/protobuf/gogoproto/doc.go

@@ -1,7 +1,7 @@
-// Extensions for Protocol Buffers to create more go like structures.
+// Protocol Buffers for Go with Gadgets
 //
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are

+ 75 - 71
vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go

@@ -24,7 +24,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
 	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
@@ -587,75 +589,77 @@ func init() {
 	proto.RegisterExtension(E_Castvalue)
 }
 
+func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) }
+
 var fileDescriptorGogo = []byte{
-	// 1096 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xcb, 0x6f, 0xdc, 0x54,
-	0x14, 0x87, 0x85, 0x48, 0x95, 0x99, 0x93, 0x17, 0x99, 0x84, 0x50, 0x2a, 0x10, 0xed, 0x8e, 0x55,
-	0xba, 0x42, 0xa8, 0xae, 0x10, 0x6a, 0xab, 0x34, 0x2a, 0x22, 0x10, 0x05, 0x52, 0x40, 0x2c, 0x46,
-	0x9e, 0xc9, 0x8d, 0x3b, 0xe0, 0xf1, 0x35, 0xbe, 0x76, 0xd5, 0xb0, 0x43, 0xe5, 0x21, 0x84, 0x78,
-	0x23, 0x41, 0x4b, 0xcb, 0x63, 0xc1, 0xfb, 0x59, 0x1e, 0x7b, 0x36, 0xc0, 0x9a, 0xff, 0x81, 0x0d,
-	0x10, 0x5e, 0x52, 0x76, 0xd9, 0xf4, 0x1e, 0xfb, 0x1c, 0xcf, 0xb5, 0x67, 0xa4, 0x7b, 0x67, 0xe7,
-	0x64, 0xee, 0xf7, 0xcd, 0xf5, 0x39, 0xbe, 0xe7, 0x37, 0x06, 0x08, 0x64, 0x20, 0x97, 0xe3, 0x44,
-	0xa6, 0xb2, 0xd5, 0xc4, 0xeb, 0xfc, 0xf2, 0xd0, 0xe1, 0x40, 0xca, 0x20, 0x14, 0x47, 0xf3, 0xbf,
-	0x3a, 0xd9, 0xf6, 0xd1, 0x2d, 0xa1, 0xba, 0x49, 0x2f, 0x4e, 0x65, 0x52, 0x2c, 0xf6, 0x1e, 0x80,
-	0x05, 0x5a, 0xdc, 0x16, 0x51, 0xd6, 0x6f, 0xc7, 0x89, 0xd8, 0xee, 0x5d, 0x68, 0xdd, 0xb6, 0x5c,
-	0x90, 0xcb, 0x4c, 0x2e, 0xaf, 0xe8, 0x4f, 0x1f, 0x8c, 0xd3, 0x9e, 0x8c, 0xd4, 0xc1, 0x6b, 0xbf,
-	0xdf, 0x78, 0xf8, 0x86, 0x3b, 0x1b, 0x1b, 0xf3, 0x84, 0xe2, 0x67, 0xeb, 0x39, 0xe8, 0x6d, 0xc0,
-	0xcd, 0x15, 0x9f, 0x4a, 0x93, 0x5e, 0x14, 0x88, 0xc4, 0x62, 0xfc, 0x99, 0x8c, 0x0b, 0x86, 0xf1,
-	0x21, 0x42, 0xbd, 0x53, 0x30, 0x33, 0x8e, 0xeb, 0x17, 0x72, 0x4d, 0x0b, 0x53, 0xb2, 0x0a, 0x73,
-	0xb9, 0xa4, 0x9b, 0xa9, 0x54, 0xf6, 0x23, 0xbf, 0x2f, 0x2c, 0x9a, 0x5f, 0x73, 0x4d, 0x73, 0x63,
-	0x16, 0xb1, 0x53, 0x25, 0xe5, 0x9d, 0x85, 0x45, 0xfc, 0xcf, 0x79, 0x3f, 0xcc, 0x84, 0x69, 0x3b,
-	0x32, 0xd2, 0x76, 0x16, 0x97, 0xb1, 0xf2, 0xb7, 0x8b, 0x13, 0xb9, 0x72, 0xa1, 0x14, 0x18, 0x5e,
-	0xa3, 0x13, 0x81, 0x48, 0x53, 0x91, 0xa8, 0xb6, 0x1f, 0x86, 0x23, 0x36, 0x79, 0xba, 0x17, 0x96,
-	0xc6, 0x4b, 0xbb, 0xd5, 0x4e, 0xac, 0x16, 0xe4, 0x89, 0x30, 0xf4, 0x36, 0xe1, 0x96, 0x11, 0x9d,
-	0x75, 0x70, 0x5e, 0x26, 0xe7, 0xe2, 0x50, 0x77, 0x51, 0xbb, 0x0e, 0xfc, 0xff, 0xb2, 0x1f, 0x0e,
-	0xce, 0x77, 0xc9, 0xd9, 0x22, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x60, 0xfe, 0xbc, 0x48, 0x3a, 0x52,
-	0x89, 0xb6, 0x78, 0x2a, 0xf3, 0x43, 0x07, 0xdd, 0x15, 0xd2, 0xcd, 0x11, 0xb8, 0x82, 0x1c, 0xba,
-	0x8e, 0x41, 0x63, 0xdb, 0xef, 0x0a, 0x07, 0xc5, 0x55, 0x52, 0x4c, 0xe2, 0x7a, 0x44, 0x4f, 0xc0,
-	0x74, 0x20, 0x8b, 0x5b, 0x72, 0xc0, 0xdf, 0x23, 0x7c, 0x8a, 0x19, 0x52, 0xc4, 0x32, 0xce, 0x42,
-	0x3f, 0x75, 0xd9, 0xc1, 0xfb, 0xac, 0x60, 0x86, 0x14, 0x63, 0x94, 0xf5, 0x03, 0x56, 0x28, 0xa3,
-	0x9e, 0xf7, 0xc2, 0x94, 0x8c, 0xc2, 0x1d, 0x19, 0xb9, 0x6c, 0xe2, 0x43, 0x32, 0x00, 0x21, 0x28,
-	0x38, 0x0e, 0x4d, 0xd7, 0x46, 0x7c, 0x44, 0x78, 0x43, 0x70, 0x07, 0xf4, 0x39, 0xe3, 0x21, 0xa3,
-	0x57, 0x38, 0x28, 0x3e, 0x26, 0xc5, 0xac, 0x81, 0xd1, 0x6d, 0xa4, 0x42, 0xa5, 0x81, 0x70, 0x91,
-	0x7c, 0xc2, 0xb7, 0x41, 0x08, 0x95, 0xb2, 0x23, 0xa2, 0xee, 0x39, 0x37, 0xc3, 0xa7, 0x5c, 0x4a,
-	0x66, 0x50, 0xa1, 0x27, 0x4f, 0xdf, 0x4f, 0xd4, 0x39, 0x3f, 0x74, 0x6a, 0xc7, 0x67, 0xe4, 0x98,
-	0x2e, 0x21, 0xaa, 0x48, 0x16, 0x8d, 0xa3, 0xf9, 0x9c, 0x2b, 0x62, 0x60, 0x74, 0xf4, 0x54, 0xea,
-	0x77, 0x42, 0xd1, 0x1e, 0xc7, 0xf6, 0x05, 0x1f, 0xbd, 0x82, 0x5d, 0x33, 0x8d, 0xba, 0xd3, 0xaa,
-	0xf7, 0xb4, 0x93, 0xe6, 0x4b, 0xee, 0x74, 0x0e, 0x20, 0xfc, 0x18, 0xdc, 0x3a, 0x72, 0xd4, 0x3b,
-	0xc8, 0xbe, 0x22, 0xd9, 0xd2, 0x88, 0x71, 0x4f, 0x23, 0x61, 0x5c, 0xe5, 0xd7, 0x3c, 0x12, 0x44,
-	0xcd, 0xa5, 0xab, 0x96, 0x45, 0xca, 0xdf, 0x1e, 0xaf, 0x6a, 0xdf, 0x70, 0xd5, 0x0a, 0xb6, 0x52,
-	0xb5, 0x87, 0x61, 0x89, 0x8c, 0xe3, 0xf5, 0xf5, 0x5b, 0x1e, 0xac, 0x05, 0xbd, 0x59, 0xed, 0xee,
-	0xe3, 0x70, 0xa8, 0x2c, 0xe7, 0x85, 0x54, 0x44, 0x0a, 0x19, 0xbd, 0xe7, 0xd8, 0xc1, 0x7c, 0x8d,
-	0xcc, 0x3c, 0xf1, 0x57, 0x4a, 0xc1, 0x9a, 0x1f, 0xa3, 0xfc, 0x51, 0x38, 0xc8, 0xf2, 0x2c, 0x4a,
-	0x44, 0x57, 0x06, 0x91, 0x6e, 0xe3, 0x96, 0x83, 0xfa, 0xbb, 0x5a, 0xab, 0x36, 0x0d, 0x1c, 0xcd,
-	0x67, 0xe0, 0xa6, 0xf2, 0xf7, 0x46, 0xbb, 0xd7, 0x8f, 0x65, 0x92, 0x5a, 0x8c, 0xdf, 0x73, 0xa7,
-	0x4a, 0xee, 0x4c, 0x8e, 0x79, 0x2b, 0x30, 0x9b, 0xff, 0xe9, 0xfa, 0x48, 0xfe, 0x40, 0xa2, 0x99,
-	0x01, 0x45, 0x83, 0xa3, 0x2b, 0xfb, 0xb1, 0x9f, 0xb8, 0xcc, 0xbf, 0x1f, 0x79, 0x70, 0x10, 0x52,
-	0x3c, 0x7d, 0x73, 0xb5, 0x24, 0x6e, 0xdd, 0x31, 0x24, 0x59, 0x13, 0x4a, 0xf9, 0x41, 0xe9, 0x79,
-	0x66, 0x8f, 0xce, 0x6c, 0x35, 0x88, 0xbd, 0xfb, 0xb1, 0x3c, 0xd5, 0xb8, 0xb4, 0xcb, 0x2e, 0xee,
-	0x95, 0x15, 0xaa, 0xa4, 0xa5, 0x77, 0x1a, 0x66, 0x2a, 0x51, 0x69, 0x57, 0x3d, 0x4b, 0xaa, 0x69,
-	0x33, 0x29, 0xbd, 0xbb, 0x60, 0x02, 0x63, 0xcf, 0x8e, 0x3f, 0x47, 0x78, 0xbe, 0xdc, 0xbb, 0x07,
-	0x1a, 0x1c, 0x77, 0x76, 0xf4, 0x79, 0x42, 0x4b, 0x04, 0x71, 0x8e, 0x3a, 0x3b, 0xfe, 0x02, 0xe3,
-	0x8c, 0x20, 0xee, 0x5e, 0xc2, 0x9f, 0x5e, 0x9a, 0xa0, 0x71, 0xc5, 0xb5, 0x3b, 0x0e, 0x93, 0x94,
-	0x71, 0x76, 0xfa, 0x45, 0xfa, 0x72, 0x26, 0xbc, 0xbb, 0xe1, 0x80, 0x63, 0xc1, 0x5f, 0x26, 0xb4,
-	0x58, 0xaf, 0x13, 0x64, 0xca, 0xc8, 0x35, 0x3b, 0xfe, 0x0a, 0xe1, 0x26, 0x85, 0x5b, 0xa7, 0x5c,
-	0xb3, 0x0b, 0x5e, 0xe5, 0xad, 0x13, 0x81, 0x65, 0xe3, 0x48, 0xb3, 0xd3, 0xaf, 0x71, 0xd5, 0x19,
-	0xd1, 0xa7, 0xa9, 0x59, 0x8e, 0x29, 0x3b, 0xff, 0x3a, 0xf1, 0x03, 0x06, 0x2b, 0x60, 0x8c, 0x49,
-	0xbb, 0xe2, 0x0d, 0xae, 0x80, 0x41, 0xe1, 0x31, 0xaa, 0x47, 0x9f, 0xdd, 0xf4, 0x26, 0x1f, 0xa3,
-	0x5a, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc5, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0xea,
-	0x59, 0x62, 0x77, 0xbc, 0xcd, 0xdb, 0xa8, 0x45, 0x89, 0x4e, 0xa6, 0xd6, 0x70, 0x8e, 0xd8, 0x7d,
-	0xef, 0x90, 0x6f, 0x7e, 0x28, 0x46, 0xbc, 0x47, 0x60, 0x69, 0x74, 0x86, 0xd8, 0xad, 0x97, 0xf6,
-	0x6a, 0xbf, 0xfa, 0xcd, 0x08, 0xd1, 0x91, 0xb7, 0x38, 0x2a, 0x3f, 0xec, 0xda, 0xcb, 0x7b, 0xd5,
-	0x17, 0x3b, 0x33, 0x3e, 0xf4, 0x2f, 0x34, 0x18, 0x8c, 0x6e, 0xbb, 0xeb, 0x0a, 0xb9, 0x0c, 0x08,
-	0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x95, 0x8f, 0x06, 0x11, 0x1a, 0x6e, 0x44, 0x59, 0x18, 0xe2,
-	0xc3, 0xd1, 0xba, 0x7d, 0x44, 0x4c, 0x88, 0x70, 0x8b, 0xd9, 0x3f, 0xf6, 0xe9, 0x60, 0x30, 0xa0,
-	0x67, 0xe8, 0x01, 0xd1, 0xef, 0xe8, 0x1a, 0x58, 0xc8, 0x3f, 0xf7, 0x79, 0x20, 0xe0, 0x6a, 0x7d,
-	0x9e, 0xa0, 0x78, 0x69, 0x4c, 0x77, 0x62, 0xeb, 0xb7, 0xfe, 0xb5, 0x5f, 0xbc, 0x83, 0x1a, 0xc8,
-	0x40, 0x90, 0xbf, 0x75, 0x5a, 0x04, 0xbb, 0x55, 0x41, 0xfe, 0xa2, 0x79, 0x0c, 0x26, 0x9f, 0x50,
-	0x32, 0x4a, 0xfd, 0xc0, 0x46, 0xff, 0x4d, 0x34, 0xaf, 0xc7, 0x82, 0xf5, 0x65, 0x22, 0xf4, 0xa5,
-	0xb2, 0xb1, 0xff, 0x10, 0x5b, 0x02, 0x08, 0x77, 0x7d, 0x95, 0xba, 0xdc, 0xf7, 0xbf, 0x0c, 0x33,
-	0x80, 0x9b, 0xc6, 0xeb, 0x27, 0xc5, 0x8e, 0x8d, 0xfd, 0x8f, 0x37, 0x4d, 0xeb, 0xf5, 0x00, 0x6c,
-	0xe2, 0x65, 0xfe, 0xbe, 0x6d, 0x83, 0xff, 0x27, 0x78, 0x40, 0x9c, 0x3c, 0x02, 0x0b, 0xfa, 0x79,
-	0xa9, 0x63, 0x27, 0x61, 0x55, 0xae, 0xca, 0xf5, 0xfc, 0x41, 0xbc, 0x1e, 0x00, 0x00, 0xff, 0xff,
-	0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00,
+	// 1098 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45,
+	0x14, 0x87, 0x85, 0x70, 0xe4, 0x99, 0xe7, 0x0d, 0x8f, 0x8d, 0x09, 0x11, 0x88, 0xe4, 0xc6, 0xc9,
+	0x39, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0xa3, 0x20, 0x0c, 0x23, 0x13, 0x07, 0x10, 0x87,
+	0x51, 0xcf, 0xb8, 0xdc, 0x19, 0xe8, 0xee, 0x6a, 0xba, 0xba, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21,
+	0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0,
+	0x02, 0x98, 0x4d, 0xf2, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x1e, 0x8f, 0x54, 0x35, 0xb7,
+	0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xbf, 0x69, 0x00, 0x5f, 0xf9, 0x6a, 0x31, 0x4e,
+	0x54, 0xaa, 0x1a, 0x75, 0xbc, 0xce, 0x2f, 0x8f, 0x1c, 0xf5, 0x95, 0xf2, 0x03, 0x79, 0x3c, 0xff,
+	0xab, 0x93, 0x6d, 0x1f, 0xdf, 0x92, 0xba, 0x9b, 0xf4, 0xe2, 0x54, 0x25, 0xc5, 0x62, 0xf1, 0x20,
+	0xcc, 0xd1, 0xe2, 0xb6, 0x8c, 0xb2, 0xb0, 0x1d, 0x27, 0x72, 0xbb, 0x77, 0xa9, 0x71, 0xd7, 0x62,
+	0x41, 0x2e, 0x32, 0xb9, 0xb8, 0x16, 0x65, 0xe1, 0x43, 0x71, 0xda, 0x53, 0x91, 0x3e, 0x7c, 0xf3,
+	0xb7, 0x5b, 0x8f, 0xde, 0x72, 0x6f, 0x6d, 0x63, 0x96, 0x50, 0xfc, 0x5f, 0x2b, 0x07, 0xc5, 0x06,
+	0xdc, 0x5e, 0xf1, 0xe9, 0x34, 0xe9, 0x45, 0xbe, 0x4c, 0x2c, 0xc6, 0x9f, 0xc8, 0x38, 0x67, 0x18,
+	0x1f, 0x26, 0x54, 0xac, 0xc2, 0xd4, 0x28, 0xae, 0x9f, 0xc9, 0x35, 0x29, 0x4d, 0x49, 0x13, 0x66,
+	0x72, 0x49, 0x37, 0xd3, 0xa9, 0x0a, 0x23, 0x2f, 0x94, 0x16, 0xcd, 0x2f, 0xb9, 0xa6, 0xbe, 0x31,
+	0x8d, 0xd8, 0x6a, 0x49, 0x89, 0xf3, 0x30, 0x8f, 0x9f, 0x5c, 0xf4, 0x82, 0x4c, 0x9a, 0xb6, 0x63,
+	0x43, 0x6d, 0xe7, 0x71, 0x19, 0x2b, 0x7f, 0xbd, 0x32, 0x96, 0x2b, 0xe7, 0x4a, 0x81, 0xe1, 0x35,
+	0x3a, 0xe1, 0xcb, 0x34, 0x95, 0x89, 0x6e, 0x7b, 0x41, 0x30, 0x64, 0x93, 0x67, 0x7a, 0x41, 0x69,
+	0xbc, 0xba, 0x5b, 0xed, 0x44, 0xb3, 0x20, 0x57, 0x82, 0x40, 0x6c, 0xc2, 0x1d, 0x43, 0x3a, 0xeb,
+	0xe0, 0xbc, 0x46, 0xce, 0xf9, 0x03, 0xdd, 0x45, 0x6d, 0x0b, 0xf8, 0xf3, 0xb2, 0x1f, 0x0e, 0xce,
+	0x77, 0xc8, 0xd9, 0x20, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x61, 0xf6, 0xa2, 0x4c, 0x3a, 0x4a, 0xcb,
+	0xb6, 0x7c, 0x2a, 0xf3, 0x02, 0x07, 0xdd, 0x75, 0xd2, 0xcd, 0x10, 0xb8, 0x86, 0x1c, 0xba, 0x4e,
+	0x42, 0x6d, 0xdb, 0xeb, 0x4a, 0x07, 0xc5, 0x0d, 0x52, 0x8c, 0xe3, 0x7a, 0x44, 0x57, 0x60, 0xd2,
+	0x57, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x12, 0x3e, 0xc1, 0x0c, 0x29, 0x62, 0x15, 0x67, 0x81, 0x97,
+	0xba, 0xec, 0xe0, 0x3d, 0x56, 0x30, 0x43, 0x8a, 0x11, 0xca, 0xfa, 0x3e, 0x2b, 0xb4, 0x51, 0xcf,
+	0x65, 0x98, 0x50, 0x51, 0xb0, 0xa3, 0x22, 0x97, 0x4d, 0x7c, 0x40, 0x06, 0x20, 0x04, 0x05, 0x4b,
+	0x50, 0x77, 0x6d, 0xc4, 0x87, 0x84, 0xd7, 0x24, 0x77, 0xa0, 0x09, 0x33, 0x3c, 0x64, 0x7a, 0x2a,
+	0x72, 0x50, 0x7c, 0x44, 0x8a, 0x69, 0x03, 0xa3, 0xdb, 0x48, 0xa5, 0x4e, 0x7d, 0xe9, 0x22, 0xf9,
+	0x98, 0x6f, 0x83, 0x10, 0x2a, 0x65, 0x47, 0x46, 0xdd, 0x0b, 0x6e, 0x86, 0x4f, 0xb8, 0x94, 0xcc,
+	0xa0, 0x62, 0x15, 0xa6, 0x42, 0x2f, 0xd1, 0x17, 0xbc, 0xc0, 0xa9, 0x1d, 0x9f, 0x92, 0x63, 0xb2,
+	0x84, 0xa8, 0x22, 0x59, 0x34, 0x8a, 0xe6, 0x33, 0xae, 0x88, 0x81, 0xd1, 0xd1, 0xd3, 0xa9, 0xd7,
+	0x09, 0x64, 0x7b, 0x14, 0xdb, 0xe7, 0x7c, 0xf4, 0x0a, 0x76, 0xdd, 0x34, 0x2e, 0x41, 0x5d, 0xf7,
+	0x2e, 0x3b, 0x69, 0xbe, 0xe0, 0x4e, 0xe7, 0x00, 0xc2, 0x8f, 0xc1, 0x9d, 0x43, 0x47, 0xbd, 0x83,
+	0xec, 0x4b, 0x92, 0x2d, 0x0c, 0x19, 0xf7, 0x34, 0x12, 0x46, 0x55, 0x7e, 0xc5, 0x23, 0x41, 0x0e,
+	0xb8, 0x5a, 0x30, 0x9f, 0x45, 0xda, 0xdb, 0x1e, 0xad, 0x6a, 0x5f, 0x73, 0xd5, 0x0a, 0xb6, 0x52,
+	0xb5, 0x73, 0xb0, 0x40, 0xc6, 0xd1, 0xfa, 0xfa, 0x0d, 0x0f, 0xd6, 0x82, 0xde, 0xac, 0x76, 0xf7,
+	0x71, 0x38, 0x52, 0x96, 0xf3, 0x52, 0x2a, 0x23, 0x8d, 0x4c, 0x3b, 0xf4, 0x62, 0x07, 0xf3, 0x4d,
+	0x32, 0xf3, 0xc4, 0x5f, 0x2b, 0x05, 0xeb, 0x5e, 0x8c, 0xf2, 0x47, 0xe1, 0x30, 0xcb, 0xb3, 0x28,
+	0x91, 0x5d, 0xe5, 0x47, 0xbd, 0xcb, 0x72, 0xcb, 0x41, 0xfd, 0xed, 0x40, 0xab, 0x36, 0x0d, 0x1c,
+	0xcd, 0x67, 0xe1, 0xb6, 0xf2, 0xf7, 0x46, 0xbb, 0x17, 0xc6, 0x2a, 0x49, 0x2d, 0xc6, 0xef, 0xb8,
+	0x53, 0x25, 0x77, 0x36, 0xc7, 0xc4, 0x1a, 0x4c, 0xe7, 0x7f, 0xba, 0x3e, 0x92, 0xdf, 0x93, 0x68,
+	0xaa, 0x4f, 0xd1, 0xe0, 0xe8, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xf9, 0xf7, 0x03, 0x0f, 0x0e, 0x42,
+	0x8a, 0xa7, 0x6f, 0x66, 0x20, 0x89, 0x1b, 0xf7, 0x1c, 0x90, 0xac, 0x4b, 0xad, 0x3d, 0xbf, 0xf4,
+	0x3c, 0xbd, 0x47, 0x67, 0xb6, 0x1a, 0xc4, 0xe2, 0x01, 0x2c, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x2b,
+	0x7b, 0x65, 0x85, 0x2a, 0x69, 0x29, 0xce, 0xc0, 0x54, 0x25, 0x2a, 0xed, 0xaa, 0x67, 0x48, 0x35,
+	0x69, 0x26, 0xa5, 0x38, 0x01, 0x63, 0x18, 0x7b, 0x76, 0xfc, 0x59, 0xc2, 0xf3, 0xe5, 0xe2, 0x14,
+	0xd4, 0x38, 0xee, 0xec, 0xe8, 0x73, 0x84, 0x96, 0x08, 0xe2, 0x1c, 0x75, 0x76, 0xfc, 0x79, 0xc6,
+	0x19, 0x41, 0xdc, 0xbd, 0x84, 0x3f, 0xbe, 0x38, 0x46, 0xe3, 0x8a, 0x6b, 0xb7, 0x04, 0xe3, 0x94,
+	0x71, 0x76, 0xfa, 0x05, 0xfa, 0x72, 0x26, 0xc4, 0x7d, 0x70, 0xc8, 0xb1, 0xe0, 0x2f, 0x11, 0x5a,
+	0xac, 0x17, 0xab, 0x30, 0x61, 0xe4, 0x9a, 0x1d, 0x7f, 0x99, 0x70, 0x93, 0xc2, 0xad, 0x53, 0xae,
+	0xd9, 0x05, 0xaf, 0xf0, 0xd6, 0x89, 0xc0, 0xb2, 0x71, 0xa4, 0xd9, 0xe9, 0x57, 0xb9, 0xea, 0x8c,
+	0x88, 0x65, 0xa8, 0x97, 0x63, 0xca, 0xce, 0xbf, 0x46, 0x7c, 0x9f, 0xc1, 0x0a, 0x18, 0x63, 0xd2,
+	0xae, 0x78, 0x9d, 0x2b, 0x60, 0x50, 0x78, 0x8c, 0x06, 0xa3, 0xcf, 0x6e, 0x7a, 0x83, 0x8f, 0xd1,
+	0x40, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc9, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0x06,
+	0xb3, 0xc4, 0xee, 0x78, 0x8b, 0xb7, 0x31, 0x10, 0x25, 0xa2, 0x05, 0x8d, 0x83, 0x39, 0x62, 0xf7,
+	0xbd, 0x4d, 0xbe, 0xd9, 0x03, 0x31, 0x22, 0x1e, 0x81, 0x85, 0xe1, 0x19, 0x62, 0xb7, 0x5e, 0xdd,
+	0x1b, 0xf8, 0xd5, 0x6f, 0x46, 0x88, 0x38, 0xd7, 0xff, 0xd5, 0x6f, 0xe6, 0x87, 0x5d, 0x7b, 0x6d,
+	0xaf, 0xfa, 0x62, 0x67, 0xc6, 0x87, 0x58, 0x01, 0xe8, 0x8f, 0x6e, 0xbb, 0xeb, 0x3a, 0xb9, 0x0c,
+	0x08, 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x83, 0x8f, 0x06, 0x11, 0x62, 0x09, 0x6a, 0x51, 0x16,
+	0x04, 0xf8, 0x70, 0x34, 0xee, 0x1e, 0x12, 0x13, 0x32, 0xd8, 0x62, 0xf6, 0xf7, 0x7d, 0x3a, 0x18,
+	0x0c, 0x88, 0x13, 0x70, 0x48, 0x86, 0x1d, 0xb9, 0x65, 0x23, 0xff, 0xd8, 0xe7, 0x81, 0x80, 0xab,
+	0xc5, 0x32, 0x40, 0xf1, 0xd2, 0x98, 0xee, 0xc4, 0xd6, 0x6f, 0xfd, 0x73, 0xbf, 0x78, 0x07, 0x35,
+	0x90, 0xbe, 0x20, 0x7f, 0xeb, 0xb4, 0x08, 0x76, 0xab, 0x82, 0xfc, 0x45, 0xf3, 0x24, 0x8c, 0x3f,
+	0xa1, 0x55, 0x94, 0x7a, 0xbe, 0x8d, 0xfe, 0x8b, 0x68, 0x5e, 0x8f, 0x05, 0x0b, 0x55, 0x22, 0x53,
+	0xcf, 0xd7, 0x36, 0xf6, 0x6f, 0x62, 0x4b, 0x00, 0xe1, 0xae, 0xa7, 0x53, 0x97, 0xfb, 0xfe, 0x87,
+	0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xdc, 0xb1, 0xb1, 0xff, 0xf2, 0xa6, 0x69, 0xbd, 0x38,
+	0x05, 0x75, 0xbc, 0xcc, 0xdf, 0xb7, 0x6d, 0xf0, 0x7f, 0x04, 0xf7, 0x89, 0xd3, 0xc7, 0x60, 0xae,
+	0xab, 0xc2, 0x41, 0xec, 0x34, 0x34, 0x55, 0x53, 0xb5, 0xf2, 0x07, 0xf1, 0xff, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00,
 }

+ 4 - 2
vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are

+ 3 - 1
vendor/src/github.com/gogo/protobuf/gogoproto/helper.go

@@ -1,4 +1,6 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
 // http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without

+ 10 - 4
vendor/src/github.com/gogo/protobuf/proto/clone.go

@@ -84,14 +84,20 @@ func mergeStruct(out, in reflect.Value) {
 		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
 	}
 
-	if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
-		emOut := out.Addr().Interface().(extensionsMap)
-		mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
-	} else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+	if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
 		emOut := out.Addr().Interface().(extensionsBytes)
 		bIn := emIn.GetExtensions()
 		bOut := emOut.GetExtensions()
 		*bOut = append(*bOut, *bIn...)
+	} else if emIn, ok := extendable(in.Addr().Interface()); ok {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
 	}
 
 	uf := in.FieldByName("XXX_unrecognized")

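The clone.go hunk above routes extension merging through the new extendable()/extensionsRead()/extensionsWrite() accessors instead of the removed extensionsMap interface. A minimal caller-side sketch, assuming a hypothetical generated package mypb with an Envelope message and an E_Note (*string) extension, none of which appear in this diff:

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/proto"

		mypb "example.com/mypb" // hypothetical generated package; Envelope and E_Note are illustrative
	)

	func main() {
		src := &mypb.Envelope{}
		if err := proto.SetExtension(src, mypb.E_Note, proto.String("hello")); err != nil {
			panic(err)
		}
		dst := &mypb.Envelope{}
		// Merge copies src's extensions into dst through extensionsRead/extensionsWrite,
		// holding the source map's lock while it does so.
		proto.Merge(dst, src)
		v, err := proto.GetExtension(dst, mypb.E_Note)
		fmt.Println(v, err)
	}
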
+ 22 - 12
vendor/src/github.com/gogo/protobuf/proto/decode.go

@@ -378,6 +378,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
 		wire := int(u & 0x7)
 		if wire == WireEndGroup {
 			if is_group {
+				if required > 0 {
+					// Not enough information to determine the exact field.
+					// (See below.)
+					return &RequiredNotSetError{"{Unknown}"}
+				}
 				return nil // input is satisfied
 			}
 			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
@@ -390,16 +395,20 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
 		if !ok {
 			// Maybe it's an extension?
 			if prop.extendable {
-				if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
-					if err = o.skip(st, tag, wire); err == nil {
-						if ee, eok := e.(extensionsMap); eok {
-							ext := ee.ExtensionMap()[int32(tag)] // may be missing
-							ext.enc = append(ext.enc, o.buf[oi:o.index]...)
-							ee.ExtensionMap()[int32(tag)] = ext
-						} else if ee, eok := e.(extensionsBytes); eok {
-							ext := ee.GetExtensions()
+				if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok {
+					if isExtensionField(e, int32(tag)) {
+						if err = o.skip(st, tag, wire); err == nil {
+							ext := e.GetExtensions()
 							*ext = append(*ext, o.buf[oi:o.index]...)
 						}
+						continue
+					}
+				} else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+					if err = o.skip(st, tag, wire); err == nil {
+						extmap := e.extensionsWrite()
+						ext := extmap[int32(tag)] // may be missing
+						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+						extmap[int32(tag)] = ext
 					}
 					continue
 				}
@@ -773,10 +782,11 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
 		}
 	}
 	keyelem, valelem := keyptr.Elem(), valptr.Elem()
-	if !keyelem.IsValid() || !valelem.IsValid() {
-		// We did not decode the key or the value in the map entry.
-		// Either way, it's an invalid map entry.
-		return fmt.Errorf("proto: bad map data: missing key/val")
+	if !keyelem.IsValid() {
+		keyelem = reflect.Zero(p.mtype.Key())
+	}
+	if !valelem.IsValid() {
+		valelem = reflect.Zero(p.mtype.Elem())
 	}
 
 	v.SetMapIndex(keyelem, valelem)

+ 4 - 2
vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are

+ 49 - 11
vendor/src/github.com/gogo/protobuf/proto/encode.go

@@ -64,8 +64,16 @@ var (
 	// a struct with a repeated field containing a nil element.
 	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
 
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
 	// ErrNil is the error returned if Marshal is called with nil.
 	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
 )
 
 // The fundamental encoders that put bytes on the wire.
@@ -74,6 +82,10 @@ var (
 
 const maxVarintBytes = 10 // maximum length of a varint
 
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
 // EncodeVarint returns the varint encoding of x.
 // This is the format for the
 // int32, int64, uint32, uint64, bool, and enum
@@ -273,6 +285,9 @@ func (p *Buffer) Marshal(pb Message) error {
 		stats.Encode++
 	}
 
+	if len(p.buf) > maxMarshalSize {
+		return ErrTooLarge
+	}
 	return err
 }
 
@@ -1058,10 +1073,25 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) {
 
 // Encode an extension map.
 func (o *Buffer) enc_map(p *Properties, base structPointer) error {
-	v := *structPointer_ExtMap(base, p.field)
-	if err := encodeExtensionMap(v); err != nil {
+	exts := structPointer_ExtMap(base, p.field)
+	if err := encodeExtensionsMap(*exts); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+	exts := structPointer_Extensions(base, p.field)
+	if err := encodeExtensions(exts); err != nil {
 		return err
 	}
+	v, _ := exts.extensionsRead()
+
+	return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
 	// Fast-path for common cases: zero or one extensions.
 	if len(v) <= 1 {
 		for _, e := range v {
@@ -1084,8 +1114,13 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
 }
 
 func size_map(p *Properties, base structPointer) int {
-	v := *structPointer_ExtMap(base, p.field)
-	return sizeExtensionMap(v)
+	v := structPointer_ExtMap(base, p.field)
+	return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+	v := structPointer_Extensions(base, p.field)
+	return extensionsSize(v)
 }
 
 // Encode a map field.
@@ -1114,7 +1149,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
 		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
 			return err
 		}
-		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
 			return err
 		}
 		return nil
@@ -1124,11 +1159,6 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
 	for _, key := range v.MapKeys() {
 		val := v.MapIndex(key)
 
-		// The only illegal map entry values are nil message pointers.
-		if val.Kind() == reflect.Ptr && val.IsNil() {
-			return errors.New("proto: map has nil element")
-		}
-
 		keycopy.Set(key)
 		valcopy.Set(val)
 
@@ -1216,13 +1246,18 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
 					return err
 				}
 			}
+			if len(o.buf) > maxMarshalSize {
+				return ErrTooLarge
+			}
 		}
 	}
 
 	// Do oneof fields.
 	if prop.oneofMarshaler != nil {
 		m := structPointer_Interface(base, prop.stype).(Message)
-		if err := prop.oneofMarshaler(m, o); err != nil {
+		if err := prop.oneofMarshaler(m, o); err == ErrNil {
+			return errOneofHasNil
+		} else if err != nil {
 			return err
 		}
 	}
@@ -1230,6 +1265,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
 	// Add unrecognized fields at the end.
 	if prop.unrecField.IsValid() {
 		v := *structPointer_Bytes(base, prop.unrecField)
+		if len(o.buf)+len(v) > maxMarshalSize {
+			return ErrTooLarge
+		}
 		if len(v) > 0 {
 			o.buf = append(o.buf, v...)
 		}

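The encode.go changes introduce a hard 2 GB cap (maxMarshalSize) and the exported sentinel ErrTooLarge, checked both per struct and for unrecognized bytes. A hedged sketch of how a caller might detect it, again assuming a hypothetical generated type; the 64 MB payload is only there to make the program self-contained and would not actually trip the limit:

	package main

	import (
		"log"

		"github.com/gogo/protobuf/proto"

		mypb "example.com/mypb" // hypothetical generated package; Blob and Payload are illustrative
	)

	func main() {
		msg := &mypb.Blob{Payload: make([]byte, 64<<20)}
		if _, err := proto.Marshal(msg); err == proto.ErrTooLarge {
			log.Fatal("message encodes to more than 2 GB; split it before marshaling")
		} else if err != nil {
			log.Fatal(err)
		}
	}
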
+ 3 - 3
vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go

@@ -1,7 +1,7 @@
-// Extensions for Protocol Buffers to create more go like structures.
+// Protocol Buffers for Go with Gadgets
 //
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Go support for Protocol Buffers - Google's data interchange format
 //

+ 23 - 3
vendor/src/github.com/gogo/protobuf/proto/equal.go

@@ -121,9 +121,16 @@ func equalStruct(v1, v2 reflect.Value) bool {
 		}
 	}
 
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
 	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
 		em2 := v2.FieldByName("XXX_extensions")
-		if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
 			return false
 		}
 	}
@@ -184,6 +191,13 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
 		}
 		return true
 	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
 		return equalAny(v1.Elem(), v2.Elem(), prop)
 	case reflect.Slice:
 		if v1.Type().Elem().Kind() == reflect.Uint8 {
@@ -223,8 +237,14 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
 }
 
 // base is the struct type that the extensions are based on.
-// em1 and em2 are extension maps.
-func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
 	if len(em1) != len(em2) {
 		return false
 	}

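equal.go now compares the new XXX_InternalExtensions field and checks pointer nil-ness explicitly, which matters for map values that hold nil message pointers. A small sketch under the same hypothetical-package assumption, here with an Index message carrying a map<string, Item> field:

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/proto"

		mypb "example.com/mypb" // hypothetical generated package; Index and Item are illustrative
	)

	func main() {
		a := &mypb.Index{Items: map[string]*mypb.Item{"x": nil}}
		b := &mypb.Index{Items: map[string]*mypb.Item{"x": nil}}
		// Nil message pointers inside map values are compared as values
		// rather than being dereferenced.
		fmt.Println(proto.Equal(a, b)) // true
	}
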
+ 290 - 119
vendor/src/github.com/gogo/protobuf/proto/extensions.go

@@ -52,23 +52,112 @@ type ExtensionRange struct {
 	Start, End int32 // both inclusive
 }
 
-// extendableProto is an interface implemented by any protocol buffer that may be extended.
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
 type extendableProto interface {
 	Message
 	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
 }
 
-type extensionsMap interface {
-	extendableProto
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
 	ExtensionMap() map[int32]Extension
 }
 
 type extensionsBytes interface {
-	extendableProto
+	Message
+	ExtensionRangeArray() []ExtensionRange
 	GetExtensions() *[]byte
 }
 
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+	if ep, ok := p.(extendableProto); ok {
+		return ep, ok
+	}
+	if ep, ok := p.(extendableProtoV1); ok {
+		return extensionAdapter{ep}, ok
+	}
+	return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing Elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+type extensionRange interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+}
+
 var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem()
+var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem()
 
 // ExtensionDesc represents an extension specification.
 // Used in generated code from the protocol compiler.
@@ -101,20 +190,23 @@ type Extension struct {
 }
 
 // SetRawExtension is for testing only.
-func SetRawExtension(base extendableProto, id int32, b []byte) {
-	if ebase, ok := base.(extensionsMap); ok {
-		ebase.ExtensionMap()[id] = Extension{enc: b}
-	} else if ebase, ok := base.(extensionsBytes); ok {
+func SetRawExtension(base Message, id int32, b []byte) {
+	if ebase, ok := base.(extensionsBytes); ok {
 		clearExtension(base, id)
 		ext := ebase.GetExtensions()
 		*ext = append(*ext, b...)
-	} else {
-		panic("unreachable")
+		return
+	}
+	epb, ok := extendable(base)
+	if !ok {
+		return
 	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
 }
 
 // isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
+func isExtensionField(pb extensionRange, field int32) bool {
 	for _, er := range pb.ExtensionRangeArray() {
 		if er.Start <= field && field <= er.End {
 			return true
@@ -125,8 +217,12 @@ func isExtensionField(pb extendableProto, field int32) bool {
 
 // checkExtensionTypes checks that the given extension is valid for pb.
 func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
 	// Check the extended type.
-	if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
 		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
 	}
 	// Check the range.
@@ -172,43 +268,57 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
 	return prop
 }
 
-// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
-func encodeExtensionMap(m map[int32]Extension) error {
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return nil // fast path
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	return encodeExtensionsMap(m)
+}
+
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensionsMap(m map[int32]Extension) error {
 	for k, e := range m {
-		err := encodeExtension(&e)
-		if err != nil {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		et := reflect.TypeOf(e.desc.ExtensionType)
+		props := extensionProperties(e.desc)
+
+		p := NewBuffer(nil)
+		// If e.value has type T, the encoder expects a *struct{ X T }.
+		// Pass a *T with a zero field and hope it all works out.
+		x := reflect.New(et)
+		x.Elem().Set(reflect.ValueOf(e.value))
+		if err := props.enc(p, props, toStructPointer(x)); err != nil {
 			return err
 		}
+		e.enc = p.buf
 		m[k] = e
 	}
 	return nil
 }
 
-func encodeExtension(e *Extension) error {
-	if e.value == nil || e.desc == nil {
-		// Extension is only in its encoded form.
-		return nil
-	}
-	// We don't skip extensions that have an encoded form set,
-	// because the extension value may have been mutated after
-	// the last time this function was called.
-
-	et := reflect.TypeOf(e.desc.ExtensionType)
-	props := extensionProperties(e.desc)
-
-	p := NewBuffer(nil)
-	// If e.value has type T, the encoder expects a *struct{ X T }.
-	// Pass a *T with a zero field and hope it all works out.
-	x := reflect.New(et)
-	x.Elem().Set(reflect.ValueOf(e.value))
-	if err := props.enc(p, props, toStructPointer(x)); err != nil {
-		return err
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return 0
 	}
-	e.enc = p.buf
-	return nil
+	mu.Lock()
+	defer mu.Unlock()
+	return extensionsMapSize(m)
 }
 
-func sizeExtensionMap(m map[int32]Extension) (n int) {
+func extensionsMapSize(m map[int32]Extension) (n int) {
 	for _, e := range m {
 		if e.value == nil || e.desc == nil {
 			// Extension is only in its encoded form.
@@ -233,12 +343,8 @@ func sizeExtensionMap(m map[int32]Extension) (n int) {
 }
 
 // HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
-	// TODO: Check types, field numbers, etc.?
-	if epb, doki := pb.(extensionsMap); doki {
-		_, ok := epb.ExtensionMap()[extension.Field]
-		return ok
-	} else if epb, doki := pb.(extensionsBytes); doki {
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	if epb, doki := pb.(extensionsBytes); doki {
 		ext := epb.GetExtensions()
 		buf := *ext
 		o := 0
@@ -258,7 +364,19 @@ func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
 		}
 		return false
 	}
-	panic("unreachable")
+	// TODO: Check types, field numbers, etc.?
+	epb, ok := extendable(pb)
+	if !ok {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok = extmap[extension.Field]
+	mu.Unlock()
+	return ok
 }
 
 func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
@@ -281,64 +399,32 @@ func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
 	return -1
 }
 
-func clearExtension(pb extendableProto, fieldNum int32) {
-	if epb, doki := pb.(extensionsMap); doki {
-		delete(epb.ExtensionMap(), fieldNum)
-	} else if epb, doki := pb.(extensionsBytes); doki {
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	clearExtension(pb, extension.Field)
+}
+
+func clearExtension(pb Message, fieldNum int32) {
+	if epb, doki := pb.(extensionsBytes); doki {
 		offset := 0
 		for offset != -1 {
 			offset = deleteExtension(epb, fieldNum, offset)
 		}
-	} else {
-		panic("unreachable")
+		return
+	}
+	epb, ok := extendable(pb)
+	if !ok {
+		return
 	}
-}
-
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
 	// TODO: Check types, field numbers, etc.?
-	clearExtension(pb, extension.Field)
+	extmap := epb.extensionsWrite()
+	delete(extmap, fieldNum)
 }
 
 // GetExtension parses and returns the given extension of pb.
-// If the extension is not present it returns ErrMissingExtension.
-func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
-	if err := checkExtensionTypes(pb, extension); err != nil {
-		return nil, err
-	}
-
-	if epb, doki := pb.(extensionsMap); doki {
-		emap := epb.ExtensionMap()
-		e, ok := emap[extension.Field]
-		if !ok {
-			// defaultExtensionValue returns the default value or
-			// ErrMissingExtension if there is no default.
-			return defaultExtensionValue(extension)
-		}
-		if e.value != nil {
-			// Already decoded. Check the descriptor, though.
-			if e.desc != extension {
-				// This shouldn't happen. If it does, it means that
-				// GetExtension was called twice with two different
-				// descriptors with the same field number.
-				return nil, errors.New("proto: descriptor conflict")
-			}
-			return e.value, nil
-		}
-
-		v, err := decodeExtension(e.enc, extension)
-		if err != nil {
-			return nil, err
-		}
-
-		// Remember the decoded version and drop the encoded version.
-		// That way it is safe to mutate what we return.
-		e.value = v
-		e.desc = extension
-		e.enc = nil
-		emap[extension.Field] = e
-		return e.value, nil
-	} else if epb, doki := pb.(extensionsBytes); doki {
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	if epb, doki := pb.(extensionsBytes); doki {
 		ext := epb.GetExtensions()
 		o := 0
 		for o < len(*ext) {
@@ -360,7 +446,50 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
 		}
 		return defaultExtensionValue(extension)
 	}
-	panic("unreachable")
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, errors.New("proto: not an extendable proto")
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return nil, err
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
 }
 
 // defaultExtensionValue returns the default value for extension.
@@ -434,14 +563,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
 // GetExtensions returns a slice of the extensions present in pb that are also listed in es.
 // The returned slice has the same length as es; missing extensions will appear as nil elements.
 func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
-	epb, ok := pb.(extendableProto)
-	if !ok {
-		err = errors.New("proto: not an extendable proto")
-		return
-	}
 	extensions = make([]interface{}, len(es))
 	for i, e := range es {
-		extensions[i], err = GetExtension(epb, e)
+		extensions[i], err = GetExtension(pb, e)
 		if err == ErrMissingExtension {
 			err = nil
 		}
@@ -452,9 +576,55 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
 	return
 }
 
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
 // SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
-	if err := checkExtensionTypes(pb, extension); err != nil {
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ClearExtension(pb, extension)
+		ext := epb.GetExtensions()
+		et := reflect.TypeOf(extension.ExtensionType)
+		props := extensionProperties(extension)
+		p := NewBuffer(nil)
+		x := reflect.New(et)
+		x.Elem().Set(reflect.ValueOf(value))
+		if err := props.enc(p, props, toStructPointer(x)); err != nil {
+			return err
+		}
+		*ext = append(*ext, p.buf...)
+		return nil
+	}
+	epb, ok := extendable(pb)
+	if !ok {
+		return errors.New("proto: not an extendable proto")
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
 		return err
 	}
 	typ := reflect.TypeOf(extension.ExtensionType)
@@ -469,26 +639,27 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
 	if reflect.ValueOf(value).IsNil() {
 		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
 	}
-	return setExtension(pb, extension, value)
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
+	return nil
 }
 
-func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
-	if epb, doki := pb.(extensionsMap); doki {
-		epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
-	} else if epb, doki := pb.(extensionsBytes); doki {
-		ClearExtension(pb, extension)
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	if epb, doki := pb.(extensionsBytes); doki {
 		ext := epb.GetExtensions()
-		et := reflect.TypeOf(extension.ExtensionType)
-		props := extensionProperties(extension)
-		p := NewBuffer(nil)
-		x := reflect.New(et)
-		x.Elem().Set(reflect.ValueOf(value))
-		if err := props.enc(p, props, toStructPointer(x)); err != nil {
-			return err
-		}
-		*ext = append(*ext, p.buf...)
+		*ext = []byte{}
+		return
+	}
+	epb, ok := extendable(pb)
+	if !ok {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
 	}
-	return nil
 }
 
 // A global registry of extensions.

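The extensions.go rewrite is the heart of this vendor bump: SetExtension, GetExtension, HasExtension, ClearExtension and the new ClearAllExtensions all accept a plain proto.Message, and extendable() adapts both the old ExtensionMap() style and the new XXX_InternalExtensions style behind that interface. A usage sketch, with the generated package, message and extension descriptor being illustrative assumptions rather than part of this diff:

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/proto"

		mypb "example.com/mypb" // hypothetical generated package; Envelope and E_Note (*string) are illustrative
	)

	func main() {
		m := &mypb.Envelope{}
		if err := proto.SetExtension(m, mypb.E_Note, proto.String("hi")); err != nil {
			panic(err)
		}
		fmt.Println(proto.HasExtension(m, mypb.E_Note)) // true
		v, err := proto.GetExtension(m, mypb.E_Note)
		if err != nil {
			panic(err)
		}
		fmt.Println(*v.(*string)) // "hi"
		proto.ClearAllExtensions(m) // helper added in this hunk
		fmt.Println(proto.HasExtension(m, mypb.E_Note)) // false
	}
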
+ 73 - 15
vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -33,9 +35,10 @@ import (
 	"reflect"
 	"sort"
 	"strings"
+	"sync"
 )
 
-func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
+func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
 	if reflect.ValueOf(pb).IsNil() {
 		return ifnotset
 	}
@@ -60,8 +63,12 @@ func (this *Extension) Compare(that *Extension) int {
 	return bytes.Compare(this.enc, that.enc)
 }
 
+func SizeOfInternalExtension(m extendableProto) (n int) {
+	return SizeOfExtensionMap(m.extensionsWrite())
+}
+
 func SizeOfExtensionMap(m map[int32]Extension) (n int) {
-	return sizeExtensionMap(m)
+	return extensionsMapSize(m)
 }
 
 type sortableMapElem struct {
@@ -94,6 +101,10 @@ func (this sortableExtensions) String() string {
 	return "map[" + strings.Join(ss, ",") + "]"
 }
 
+func StringFromInternalExtension(m extendableProto) string {
+	return StringFromExtensionsMap(m.extensionsWrite())
+}
+
 func StringFromExtensionsMap(m map[int32]Extension) string {
 	return newSortableExtensionsFromMap(m).String()
 }
@@ -106,8 +117,12 @@ func StringFromExtensionsBytes(ext []byte) string {
 	return StringFromExtensionsMap(m)
 }
 
+func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMap(m.extensionsWrite(), data)
+}
+
 func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
-	if err := encodeExtensionMap(m); err != nil {
+	if err := encodeExtensionsMap(m); err != nil {
 		return 0, err
 	}
 	keys := make([]int, 0, len(m))
@@ -125,7 +140,7 @@ func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
 	if m[id].value == nil || m[id].desc == nil {
 		return m[id].enc, nil
 	}
-	if err := encodeExtensionMap(m); err != nil {
+	if err := encodeExtensionsMap(m); err != nil {
 		return nil, err
 	}
 	return m[id].enc, nil
@@ -189,17 +204,44 @@ func NewExtension(e []byte) Extension {
 	return ee
 }
 
-func AppendExtension(e extendableProto, tag int32, buf []byte) {
-	if ee, eok := e.(extensionsMap); eok {
-		ext := ee.ExtensionMap()[int32(tag)] // may be missing
-		ext.enc = append(ext.enc, buf...)
-		ee.ExtensionMap()[int32(tag)] = ext
-	} else if ee, eok := e.(extensionsBytes); eok {
+func AppendExtension(e Message, tag int32, buf []byte) {
+	if ee, eok := e.(extensionsBytes); eok {
 		ext := ee.GetExtensions()
 		*ext = append(*ext, buf...)
+		return
+	}
+	if ee, eok := e.(extendableProto); eok {
+		m := ee.extensionsWrite()
+		ext := m[int32(tag)] // may be missing
+		ext.enc = append(ext.enc, buf...)
+		m[int32(tag)] = ext
 	}
 }
 
+func encodeExtension(e *Extension) error {
+	if e.value == nil || e.desc == nil {
+		// Extension is only in its encoded form.
+		return nil
+	}
+	// We don't skip extensions that have an encoded form set,
+	// because the extension value may have been mutated after
+	// the last time this function was called.
+
+	et := reflect.TypeOf(e.desc.ExtensionType)
+	props := extensionProperties(e.desc)
+
+	p := NewBuffer(nil)
+	// If e.value has type T, the encoder expects a *struct{ X T }.
+	// Pass a *T with a zero field and hope it all works out.
+	x := reflect.New(et)
+	x.Elem().Set(reflect.ValueOf(e.value))
+	if err := props.enc(p, props, toStructPointer(x)); err != nil {
+		return err
+	}
+	e.enc = p.buf
+	return nil
+}
+
 func (this Extension) GoString() string {
 	if this.enc == nil {
 		if err := encodeExtension(&this); err != nil {
@@ -209,7 +251,7 @@ func (this Extension) GoString() string {
 	return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
 }
 
-func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error {
+func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
 	typ := reflect.TypeOf(pb).Elem()
 	ext, ok := extensionMaps[typ]
 	if !ok {
@@ -219,10 +261,10 @@ func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) e
 	if !ok {
 		return errors.New("proto: bad extension number; not in declared ranges")
 	}
-	return setExtension(pb, desc, value)
+	return SetExtension(pb, desc, value)
 }
 
-func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) {
+func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
 	typ := reflect.TypeOf(pb).Elem()
 	ext, ok := extensionMaps[typ]
 	if !ok {
@@ -234,3 +276,19 @@ func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error)
 	}
 	return GetExtension(pb, desc)
 }
+
+func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
+	x := &XXX_InternalExtensions{
+		p: new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		}),
+	}
+	x.p.extensionMap = m
+	return *x
+}
+
+func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
+	pb := extendable.(extendableProto)
+	return pb.extensionsWrite()
+}

+ 4 - 0
vendor/src/github.com/gogo/protobuf/proto/lib.go

@@ -889,6 +889,10 @@ func isProto3Zero(v reflect.Value) bool {
 	return false
 }
 
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion2 = true
+
 // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
 // to assert that that code is compatible with this version of the proto package.
 const GoGoProtoPackageIsVersion1 = true

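GoGoProtoPackageIsVersion2 mirrors the upstream ProtoPackageIsVersion2 convention: files emitted by the updated generator can pin themselves to this runtime with a blank-identifier constant, roughly as below. This is an illustrative fragment of a generated file (assuming it imports this proto package), not code taken from the diff:

	// Compile-time assertion that the generated code is built against a
	// sufficiently new proto runtime; compilation fails if the constant is absent.
	const _ = proto.GoGoProtoPackageIsVersion2
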
+ 4 - 2
vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are

+ 37 - 6
vendor/src/github.com/gogo/protobuf/proto/message_set.go

@@ -149,9 +149,21 @@ func skipVarint(buf []byte) []byte {
 
 // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
 // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
-	if err := encodeExtensionMap(m); err != nil {
-		return nil, err
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		if err := encodeExtensions(exts); err != nil {
+			return nil, err
+		}
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		if err := encodeExtensionsMap(exts); err != nil {
+			return nil, err
+		}
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
 	}
 
 	// Sort extension IDs to provide a deterministic encoding.
@@ -178,7 +190,17 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
 
 // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
 // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
 	ms := new(messageSet)
 	if err := Unmarshal(buf, ms); err != nil {
 		return err
@@ -209,7 +231,16 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
 
 // MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
 // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
 	var b bytes.Buffer
 	b.WriteByte('{')
 
@@ -252,7 +283,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
 
 // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
 // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
 	// Common-case fast path.
 	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
 		return nil

+ 6 - 1
vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go

@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// +build appengine
+// +build appengine js
 
 // This file contains an implementation of proto field accesses using package reflect.
 // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@@ -139,6 +139,11 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
 	return structPointer_ifield(p, f).(*[]string)
 }
 
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
 // ExtMap returns the address of an extension map field in the struct.
 func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
 	return structPointer_ifield(p, f).(*map[int32]Extension)

+ 5 - 1
vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go

@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// +build !appengine
+// +build !appengine,!js
 
 // This file contains the implementation of the proto field accesses using package unsafe.
 
@@ -126,6 +126,10 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
 }
 
 // ExtMap returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
 func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
 	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
 }

+ 9 - 10
vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -70,16 +72,13 @@ func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
 
 func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
 	size := typ.Elem().Size()
+
 	oldHeader := structPointer_GetSliceHeader(base, f)
+	oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem()
 	newLen := oldHeader.Len + 1
-	slice := reflect.MakeSlice(typ, newLen, newLen)
-	bas := toStructPointer(slice)
-	for i := 0; i < oldHeader.Len; i++ {
-		newElemptr := uintptr(bas) + uintptr(i)*size
-		oldElemptr := oldHeader.Data + uintptr(i)*size
-		copyUintPtr(oldElemptr, newElemptr, int(size))
-	}
-
+	newSlice := reflect.MakeSlice(typ, newLen, newLen)
+	reflect.Copy(newSlice, oldSlice)
+	bas := toStructPointer(newSlice)
 	oldHeader.Data = uintptr(bas)
 	oldHeader.Len = newLen
 	oldHeader.Cap = newLen

+ 35 - 18
vendor/src/github.com/gogo/protobuf/proto/properties.go

@@ -1,7 +1,7 @@
-// Extensions for Protocol Buffers to create more go like structures.
+// Protocol Buffers for Go with Gadgets
 //
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Go support for Protocol Buffers - Google's data interchange format
 //
@@ -542,17 +542,13 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
 			p.dec = (*Buffer).dec_slice_int64
 			p.packedDec = (*Buffer).dec_slice_packed_int64
 		case reflect.Uint8:
-			p.enc = (*Buffer).enc_slice_byte
 			p.dec = (*Buffer).dec_slice_byte
-			p.size = size_slice_byte
-			// This is a []byte, which is either a bytes field,
-			// or the value of a map field. In the latter case,
-			// we always encode an empty []byte, so we should not
-			// use the proto3 enc/size funcs.
-			// f == nil iff this is the key/value of a map field.
-			if p.proto3 && f != nil {
+			if p.proto3 {
 				p.enc = (*Buffer).enc_proto3_slice_byte
 				p.size = size_proto3_slice_byte
+			} else {
+				p.enc = (*Buffer).enc_slice_byte
+				p.size = size_slice_byte
 			}
 		case reflect.Float32, reflect.Float64:
 			switch t2.Bits() {
@@ -744,7 +740,9 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	propertiesMap[t] = prop
 
 	// build properties
-	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+		reflect.PtrTo(t).Implements(extendableProtoV1Type) ||
+		reflect.PtrTo(t).Implements(extendableBytesType)
 	prop.unrecField = invalidField
 	prop.Prop = make([]*Properties, t.NumField())
 	prop.order = make([]int, t.NumField())
@@ -756,7 +754,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 		name := f.Name
 		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
 
-		if f.Name == "XXX_extensions" { // special case
+		if f.Name == "XXX_InternalExtensions" { // special case
+			p.enc = (*Buffer).enc_exts
+			p.dec = nil // not needed
+			p.size = size_exts
+		} else if f.Name == "XXX_extensions" { // special case
 			if len(f.Tag.Get("protobuf")) > 0 {
 				p.enc = (*Buffer).enc_ext_slice_byte
 				p.dec = nil // not needed
@@ -766,13 +768,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 				p.dec = nil // not needed
 				p.size = size_map
 			}
-		}
-		if f.Name == "XXX_unrecognized" { // special case
+		} else if f.Name == "XXX_unrecognized" { // special case
 			prop.unrecField = toField(&f)
 		}
-		oneof := f.Tag.Get("protobuf_oneof") != "" // special case
-		if oneof {
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
 			isOneofMessage = true
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
 		}
 		prop.Prop[i] = p
 		prop.order[i] = i
@@ -783,7 +786,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 			}
 			print("\n")
 		}
-		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
 			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
 		}
 	}
@@ -921,3 +924,17 @@ func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
 
 // MessageType returns the message type (pointer to struct) for a named message.
 func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }

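RegisterFile and FileDescriptor give generated code a registry of compressed descriptors. A sketch of reading one back; the file name is illustrative and only works if some generated init() has registered it:

	package main

	import (
		"bytes"
		"compress/gzip"
		"fmt"
		"io/ioutil"

		"github.com/gogo/protobuf/proto"
	)

	func main() {
		// Generated init() functions call proto.RegisterFile(name, gzippedDescriptor);
		// FileDescriptor hands those bytes back, or nil if nothing was registered.
		gz := proto.FileDescriptor("gogo.proto")
		if gz == nil {
			fmt.Println("descriptor not registered")
			return
		}
		zr, err := gzip.NewReader(bytes.NewReader(gz))
		if err != nil {
			panic(err)
		}
		raw, err := ioutil.ReadAll(zr) // a serialized FileDescriptorProto
		if err != nil {
			panic(err)
		}
		fmt.Printf("%d descriptor bytes\n", len(raw))
	}
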
+ 4 - 2
vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are

+ 4 - 2
vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are

+ 52 - 30
vendor/src/github.com/gogo/protobuf/proto/text.go

@@ -1,7 +1,7 @@
-// Extensions for Protocol Buffers to create more go like structures.
+// Protocol Buffers for Go with Gadgets
 //
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Go support for Protocol Buffers - Google's data interchange format
 //
@@ -50,6 +50,7 @@ import (
 	"reflect"
 	"sort"
 	"strings"
+	"sync"
 )
 
 var (
@@ -159,7 +160,7 @@ func (w *textWriter) indent() { w.ind++ }
 
 func (w *textWriter) unindent() {
 	if w.ind == 0 {
-		log.Printf("proto: textWriter unindented too far")
+		log.Print("proto: textWriter unindented too far")
 		return
 	}
 	w.ind--
@@ -335,7 +336,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 				}
 				inner := fv.Elem().Elem() // interface -> *T -> T
 				tag := inner.Type().Field(0).Tag.Get("protobuf")
-				props.Parse(tag) // Overwrite the outer props.
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
 				// Write the value in the oneof, not the oneof itself.
 				fv = inner.Field(0)
 
@@ -386,7 +388,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 		pv = reflect.New(sv.Type())
 		pv.Elem().Set(sv)
 	}
-	if pv.Type().Implements(extendableProtoType) {
+	if pv.Type().Implements(extensionRangeType) {
 		if err := writeExtensions(w, pv); err != nil {
 			return err
 		}
@@ -634,28 +636,37 @@ func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
 // pv is assumed to be a pointer to a protocol message struct that is extendable.
 func writeExtensions(w *textWriter, pv reflect.Value) error {
 	emap := extensionMaps[pv.Type().Elem()]
-	ep := pv.Interface().(extendableProto)
+	e := pv.Interface().(Message)
 
-	// Order the extensions by ID.
-	// This isn't strictly necessary, but it will give us
-	// canonical output, which will also make testing easier.
 	var m map[int32]Extension
-	if em, ok := ep.(extensionsMap); ok {
-		m = em.ExtensionMap()
-	} else if em, ok := ep.(extensionsBytes); ok {
+	var mu sync.Locker
+	if em, ok := e.(extensionsBytes); ok {
 		eb := em.GetExtensions()
 		var err error
 		m, err = BytesToExtensionsMap(*eb)
 		if err != nil {
 			return err
 		}
+		mu = notLocker{}
+	} else if _, ok := e.(extendableProto); ok {
+		ep, _ := extendable(e)
+		m, mu = ep.extensionsRead()
+		if m == nil {
+			return nil
+		}
 	}
 
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+
+	mu.Lock()
 	ids := make([]int32, 0, len(m))
 	for id := range m {
 		ids = append(ids, id)
 	}
 	sort.Sort(int32Slice(ids))
+	mu.Unlock()
 
 	for _, extNum := range ids {
 		ext := m[extNum]
@@ -671,7 +682,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
 			continue
 		}
 
-		pb, err := GetExtension(ep, desc)
+		pb, err := GetExtension(e, desc)
 		if err != nil {
 			return fmt.Errorf("failed getting extension: %v", err)
 		}
@@ -727,7 +738,14 @@ func (w *textWriter) writeIndent() {
 	w.complete = false
 }
 
-func marshalText(w io.Writer, pb Message, compact bool) error {
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact bool // use compact text format (one line).
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
 	val := reflect.ValueOf(pb)
 	if pb == nil || val.IsNil() {
 		w.Write([]byte("<nil>"))
@@ -742,7 +760,7 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
 	aw := &textWriter{
 		w:        ww,
 		complete: true,
-		compact:  compact,
+		compact:  m.Compact,
 	}
 
 	if tm, ok := pb.(encoding.TextMarshaler); ok {
@@ -769,25 +787,29 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
 	return nil
 }
 
+// Text is the same as Marshal, but returns the string directly.
+func (m *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	m.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
 // MarshalText writes a given protocol buffer in text format.
 // The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error {
-	return marshalText(w, pb, false)
-}
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
 
 // MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string {
-	var buf bytes.Buffer
-	marshalText(&buf, pb, false)
-	return buf.String()
-}
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
 
 // CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
 
 // CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string {
-	var buf bytes.Buffer
-	marshalText(&buf, pb, true)
-	return buf.String()
-}
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }

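text.go replaces the private marshalText helper with the exported TextMarshaler type; MarshalText, MarshalTextString, CompactText and CompactTextString are now thin wrappers over two package-level instances. A short sketch of the new type, with the generated message and its Subject field being illustrative assumptions:

	package main

	import (
		"os"

		"github.com/gogo/protobuf/proto"

		mypb "example.com/mypb" // hypothetical generated package; Envelope and Subject are illustrative
	)

	func main() {
		m := &mypb.Envelope{Subject: "hello"}
		compact := proto.TextMarshaler{Compact: true}
		if err := compact.Marshal(os.Stdout, m); err != nil { // one-line text format
			panic(err)
		}
		full := proto.TextMarshaler{}
		os.Stdout.WriteString(full.Text(m)) // multi-line text format as a string
	}
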
+ 4 - 2
vendor/src/github.com/gogo/protobuf/proto/text_gogo.go

@@ -1,5 +1,7 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are

+ 42 - 33
vendor/src/github.com/gogo/protobuf/proto/text_parser.go

@@ -1,7 +1,7 @@
-// Extensions for Protocol Buffers to create more go like structures.
+// Protocol Buffers for Go with Gadgets
 //
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
 //
 // Go support for Protocol Buffers - Google's data interchange format
 //
@@ -519,7 +519,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 				}
 				reqFieldErr = err
 			}
-			ep := sv.Addr().Interface().(extendableProto)
+			ep := sv.Addr().Interface().(Message)
 			if !rep {
 				SetExtension(ep, desc, ext.Interface())
 			} else {
@@ -571,8 +571,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 
 			// The map entry should be this sequence of tokens:
 			//	< key : KEY value : VALUE >
-			// Technically the "key" and "value" could come in any order,
-			// but in practice they won't.
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
 
 			tok := p.next()
 			var terminator string
@@ -584,32 +585,39 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 			default:
 				return p.errorf("expected '{' or '<', found %q", tok.value)
 			}
-			if err := p.consumeToken("key"); err != nil {
-				return err
-			}
-			if err := p.consumeToken(":"); err != nil {
-				return err
-			}
-			if err := p.readAny(key, props.mkeyprop); err != nil {
-				return err
-			}
-			if err := p.consumeOptionalSeparator(); err != nil {
-				return err
-			}
-			if err := p.consumeToken("value"); err != nil {
-				return err
-			}
-			if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
-				return err
-			}
-			if err := p.readAny(val, props.mvalprop); err != nil {
-				return err
-			}
-			if err := p.consumeOptionalSeparator(); err != nil {
-				return err
-			}
-			if err := p.consumeToken(terminator); err != nil {
-				return err
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.mkeyprop); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.mvalprop); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
 			}
 
 			dst.SetMapIndex(key, val)
@@ -632,7 +640,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 				return err
 			}
 			reqFieldErr = err
-		} else if props.Required {
+		}
+		if props.Required {
 			reqCount--
 		}
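
The rewritten map-entry loop above means text-format input no longer has to list "key" before "value", and either may be omitted. A rough illustration under stated assumptions — pb.Config and its attrs field are hypothetical stand-ins for a generated message with a map<string, int32> field:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	pb "example.com/myapp/mypb" // hypothetical package declaring: map<string, int32> attrs = 1;
)

func main() {
	var a, b pb.Config // hypothetical message type with the attrs map field
	// Both orders now parse; before this change the second form was rejected
	// because the parser insisted on consuming "key" before "value".
	_ = proto.UnmarshalText(`attrs: < key: "retries" value: 3 >`, &a)
	_ = proto.UnmarshalText(`attrs: < value: 3 key: "retries" >`, &b)
	fmt.Println(a.Attrs["retries"], b.Attrs["retries"]) // 3 3
}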
 

+ 3 - 0
vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile

@@ -30,4 +30,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 regenerate:
+	go install github.com/gogo/protobuf/protoc-gen-gogo
 	protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
+	go install github.com/gogo/protobuf/protoc-gen-gostring
+	protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto

+ 173 - 209
vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go

@@ -41,7 +41,9 @@ var _ = math.Inf
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type FieldDescriptorProto_Type int32
 
@@ -955,9 +957,9 @@ type FileOptions struct {
 	// suffixed package.
 	JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_extensions      map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized    []byte                    `json:"-"`
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
 func (m *FileOptions) Reset()                    { *m = FileOptions{} }
@@ -966,18 +968,12 @@ func (*FileOptions) ProtoMessage()               {}
 func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} }
 
 var extRange_FileOptions = []proto.ExtensionRange{
-	{1000, 536870911},
+	{Start: 1000, End: 536870911},
 }
 
 func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_FileOptions
 }
-func (m *FileOptions) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 const Default_FileOptions_JavaMultipleFiles bool = false
 const Default_FileOptions_JavaGenerateEqualsAndHash bool = false
@@ -1153,9 +1149,9 @@ type MessageOptions struct {
 	// parser.
 	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_extensions      map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized    []byte                    `json:"-"`
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
 func (m *MessageOptions) Reset()                    { *m = MessageOptions{} }
@@ -1164,18 +1160,12 @@ func (*MessageOptions) ProtoMessage()               {}
 func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} }
 
 var extRange_MessageOptions = []proto.ExtensionRange{
-	{1000, 536870911},
+	{Start: 1000, End: 536870911},
 }
 
 func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_MessageOptions
 }
-func (m *MessageOptions) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 const Default_MessageOptions_MessageSetWireFormat bool = false
 const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
@@ -1275,9 +1265,9 @@ type FieldOptions struct {
 	// For Google-internal migration only. Do not use.
 	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_extensions      map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized    []byte                    `json:"-"`
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
 func (m *FieldOptions) Reset()                    { *m = FieldOptions{} }
@@ -1286,18 +1276,12 @@ func (*FieldOptions) ProtoMessage()               {}
 func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} }
 
 var extRange_FieldOptions = []proto.ExtensionRange{
-	{1000, 536870911},
+	{Start: 1000, End: 536870911},
 }
 
 func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_FieldOptions
 }
-func (m *FieldOptions) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
 const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
@@ -1364,9 +1348,9 @@ type EnumOptions struct {
 	// is a formalization for deprecating enums.
 	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_extensions      map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized    []byte                    `json:"-"`
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
 func (m *EnumOptions) Reset()                    { *m = EnumOptions{} }
@@ -1375,18 +1359,12 @@ func (*EnumOptions) ProtoMessage()               {}
 func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} }
 
 var extRange_EnumOptions = []proto.ExtensionRange{
-	{1000, 536870911},
+	{Start: 1000, End: 536870911},
 }
 
 func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_EnumOptions
 }
-func (m *EnumOptions) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 const Default_EnumOptions_Deprecated bool = false
 
@@ -1418,9 +1396,9 @@ type EnumValueOptions struct {
 	// this is a formalization for deprecating enum values.
 	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_extensions      map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized    []byte                    `json:"-"`
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
 func (m *EnumValueOptions) Reset()                    { *m = EnumValueOptions{} }
@@ -1429,18 +1407,12 @@ func (*EnumValueOptions) ProtoMessage()               {}
 func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} }
 
 var extRange_EnumValueOptions = []proto.ExtensionRange{
-	{1000, 536870911},
+	{Start: 1000, End: 536870911},
 }
 
 func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_EnumValueOptions
 }
-func (m *EnumValueOptions) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 const Default_EnumValueOptions_Deprecated bool = false
 
@@ -1465,9 +1437,9 @@ type ServiceOptions struct {
 	// this is a formalization for deprecating services.
 	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_extensions      map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized    []byte                    `json:"-"`
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
 func (m *ServiceOptions) Reset()                    { *m = ServiceOptions{} }
@@ -1476,18 +1448,12 @@ func (*ServiceOptions) ProtoMessage()               {}
 func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} }
 
 var extRange_ServiceOptions = []proto.ExtensionRange{
-	{1000, 536870911},
+	{Start: 1000, End: 536870911},
 }
 
 func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_ServiceOptions
 }
-func (m *ServiceOptions) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 const Default_ServiceOptions_Deprecated bool = false
 
@@ -1512,9 +1478,9 @@ type MethodOptions struct {
 	// this is a formalization for deprecating methods.
 	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_extensions      map[int32]proto.Extension `json:"-"`
-	XXX_unrecognized    []byte                    `json:"-"`
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
 }
 
 func (m *MethodOptions) Reset()                    { *m = MethodOptions{} }
@@ -1523,18 +1489,12 @@ func (*MethodOptions) ProtoMessage()               {}
 func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} }
 
 var extRange_MethodOptions = []proto.ExtensionRange{
-	{1000, 536870911},
+	{Start: 1000, End: 536870911},
 }
 
 func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_MethodOptions
 }
-func (m *MethodOptions) ExtensionMap() map[int32]proto.Extension {
-	if m.XXX_extensions == nil {
-		m.XXX_extensions = make(map[int32]proto.Extension)
-	}
-	return m.XXX_extensions
-}
 
 const Default_MethodOptions_Deprecated bool = false
 
@@ -1875,143 +1835,147 @@ func init() {
 	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
 }
 
+func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) }
+
 var fileDescriptorDescriptor = []byte{
-	// 2192 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6,
-	0x11, 0x2f, 0xff, 0x8a, 0x5c, 0x52, 0x24, 0xf4, 0xac, 0xd8, 0xb4, 0x62, 0xc7, 0x31, 0x63, 0xc7,
-	0x8e, 0xd3, 0xd2, 0x19, 0xb7, 0x49, 0x5c, 0xa5, 0x93, 0x0e, 0x45, 0xc2, 0x0a, 0x3d, 0x94, 0xc8,
-	0x3e, 0x92, 0xad, 0x93, 0x0b, 0x06, 0x02, 0x1f, 0x29, 0xd8, 0x20, 0xc0, 0x02, 0xa0, 0x6d, 0xe5,
-	0xd4, 0x99, 0x9e, 0xfa, 0x0d, 0x3a, 0x6d, 0xa7, 0x87, 0x5c, 0x32, 0xd3, 0x0f, 0xd0, 0x43, 0xef,
-	0xbd, 0xf6, 0xd0, 0x73, 0x8f, 0x9d, 0x69, 0xbf, 0x41, 0xaf, 0xdd, 0xf7, 0x1e, 0x00, 0x02, 0x24,
-	0x15, 0xab, 0x99, 0x49, 0x13, 0x5d, 0xc4, 0xb7, 0xfb, 0xdb, 0xc5, 0xbe, 0x7d, 0xbf, 0xb7, 0xbb,
-	0x00, 0x28, 0x63, 0xe6, 0x19, 0xae, 0x39, 0xf7, 0x1d, 0xb7, 0x31, 0x77, 0x1d, 0xdf, 0x21, 0xd5,
-	0xa9, 0xe3, 0x4c, 0x2d, 0x26, 0x57, 0x27, 0x8b, 0x49, 0xfd, 0x08, 0x76, 0x1e, 0x99, 0x16, 0x6b,
-	0x47, 0xc0, 0x01, 0xf3, 0xc9, 0x43, 0xc8, 0x4e, 0x50, 0x58, 0x4b, 0xbd, 0x99, 0xb9, 0x5b, 0x7a,
-	0x70, 0xab, 0xb1, 0x62, 0xd4, 0x48, 0x5a, 0xf4, 0xb9, 0x98, 0x0a, 0x8b, 0xfa, 0x3f, 0xb3, 0x70,
-	0x69, 0x83, 0x96, 0x10, 0xc8, 0xda, 0xfa, 0x8c, 0x7b, 0x4c, 0xdd, 0x2d, 0x52, 0xf1, 0x9b, 0xd4,
-	0x60, 0x6b, 0xae, 0x1b, 0xcf, 0xf4, 0x29, 0xab, 0xa5, 0x85, 0x38, 0x5c, 0x92, 0x37, 0x00, 0xc6,
-	0x6c, 0xce, 0xec, 0x31, 0xb3, 0x8d, 0xb3, 0x5a, 0x06, 0xa3, 0x28, 0xd2, 0x98, 0x84, 0xbc, 0x0b,
-	0x3b, 0xf3, 0xc5, 0x89, 0x65, 0x1a, 0x5a, 0x0c, 0x06, 0x08, 0xcb, 0x51, 0x45, 0x2a, 0xda, 0x4b,
-	0xf0, 0x1d, 0xa8, 0xbe, 0x60, 0xfa, 0xb3, 0x38, 0xb4, 0x24, 0xa0, 0x15, 0x2e, 0x8e, 0x01, 0x5b,
-	0x50, 0x9e, 0x31, 0xcf, 0xc3, 0x00, 0x34, 0xff, 0x6c, 0xce, 0x6a, 0x59, 0xb1, 0xfb, 0x37, 0xd7,
-	0x76, 0xbf, 0xba, 0xf3, 0x52, 0x60, 0x35, 0x44, 0x23, 0xd2, 0x84, 0x22, 0xb3, 0x17, 0x33, 0xe9,
-	0x21, 0x77, 0x4e, 0xfe, 0x54, 0x44, 0xac, 0x7a, 0x29, 0x70, 0xb3, 0xc0, 0xc5, 0x96, 0xc7, 0xdc,
-	0xe7, 0xa6, 0xc1, 0x6a, 0x79, 0xe1, 0xe0, 0xce, 0x9a, 0x83, 0x81, 0xd4, 0xaf, 0xfa, 0x08, 0xed,
-	0x70, 0x2b, 0x45, 0xf6, 0xd2, 0x67, 0xb6, 0x67, 0x3a, 0x76, 0x6d, 0x4b, 0x38, 0xb9, 0xbd, 0xe1,
-	0x14, 0x99, 0x35, 0x5e, 0x75, 0xb1, 0xb4, 0x23, 0x1f, 0xc0, 0x96, 0x33, 0xf7, 0xf1, 0x97, 0x57,
-	0x2b, 0xe0, 0xf9, 0x94, 0x1e, 0x5c, 0xdb, 0x48, 0x84, 0x9e, 0xc4, 0xd0, 0x10, 0x4c, 0x3a, 0xa0,
-	0x78, 0xce, 0xc2, 0x35, 0x98, 0x66, 0x38, 0x63, 0xa6, 0x99, 0xf6, 0xc4, 0xa9, 0x15, 0x85, 0x83,
-	0x1b, 0xeb, 0x1b, 0x11, 0xc0, 0x16, 0xe2, 0x3a, 0x08, 0xa3, 0x15, 0x2f, 0xb1, 0x26, 0x97, 0x21,
-	0xef, 0x9d, 0xd9, 0xbe, 0xfe, 0xb2, 0x56, 0x16, 0x0c, 0x09, 0x56, 0xf5, 0xff, 0xe4, 0xa0, 0x7a,
-	0x11, 0x8a, 0x7d, 0x04, 0xb9, 0x09, 0xdf, 0x25, 0x12, 0xec, 0x7f, 0xc8, 0x81, 0xb4, 0x49, 0x26,
-	0x31, 0xff, 0x35, 0x93, 0xd8, 0x84, 0x92, 0xcd, 0x3c, 0x9f, 0x8d, 0x25, 0x23, 0x32, 0x17, 0xe4,
-	0x14, 0x48, 0xa3, 0x75, 0x4a, 0x65, 0xbf, 0x16, 0xa5, 0x9e, 0x40, 0x35, 0x0a, 0x49, 0x73, 0x75,
-	0x7b, 0x1a, 0x72, 0xf3, 0xfe, 0xab, 0x22, 0x69, 0xa8, 0xa1, 0x1d, 0xe5, 0x66, 0xb4, 0xc2, 0x12,
-	0x6b, 0xd2, 0x06, 0x70, 0x6c, 0xe6, 0x4c, 0xf0, 0x7a, 0x19, 0x16, 0xf2, 0x64, 0x73, 0x96, 0x7a,
-	0x1c, 0xb2, 0x96, 0x25, 0x47, 0x4a, 0x0d, 0x8b, 0xfc, 0x78, 0x49, 0xb5, 0xad, 0x73, 0x98, 0x72,
-	0x24, 0x2f, 0xd9, 0x1a, 0xdb, 0x46, 0x50, 0x71, 0x19, 0xe7, 0x3d, 0xa6, 0x58, 0xee, 0xac, 0x28,
-	0x82, 0x68, 0xbc, 0x72, 0x67, 0x34, 0x30, 0x93, 0x1b, 0xdb, 0x76, 0xe3, 0x4b, 0xf2, 0x16, 0x44,
-	0x02, 0x4d, 0xd0, 0x0a, 0x44, 0x15, 0x2a, 0x87, 0xc2, 0x63, 0x94, 0xed, 0x3d, 0x84, 0x4a, 0x32,
-	0x3d, 0x64, 0x17, 0x72, 0x9e, 0xaf, 0xbb, 0xbe, 0x60, 0x61, 0x8e, 0xca, 0x05, 0x51, 0x20, 0x83,
-	0x45, 0x46, 0x54, 0xb9, 0x1c, 0xe5, 0x3f, 0xf7, 0x3e, 0x84, 0xed, 0xc4, 0xe3, 0x2f, 0x6a, 0x58,
-	0xff, 0x6d, 0x1e, 0x76, 0x37, 0x71, 0x6e, 0x23, 0xfd, 0xf1, 0xfa, 0x20, 0x03, 0x4e, 0x98, 0x8b,
-	0xbc, 0xe3, 0x1e, 0x82, 0x15, 0x32, 0x2a, 0x67, 0xe9, 0x27, 0xcc, 0x42, 0x36, 0xa5, 0xee, 0x56,
-	0x1e, 0xbc, 0x7b, 0x21, 0x56, 0x37, 0xba, 0xdc, 0x84, 0x4a, 0x4b, 0xf2, 0x31, 0x64, 0x83, 0x12,
-	0xc7, 0x3d, 0xdc, 0xbb, 0x98, 0x07, 0xce, 0x45, 0x2a, 0xec, 0xc8, 0xeb, 0x50, 0xe4, 0xff, 0x65,
-	0x6e, 0xf3, 0x22, 0xe6, 0x02, 0x17, 0xf0, 0xbc, 0x92, 0x3d, 0x28, 0x08, 0x9a, 0x8d, 0x59, 0xd8,
-	0x1a, 0xa2, 0x35, 0x3f, 0x98, 0x31, 0x9b, 0xe8, 0x0b, 0xcb, 0xd7, 0x9e, 0xeb, 0xd6, 0x82, 0x09,
-	0xc2, 0xe0, 0xc1, 0x04, 0xc2, 0x9f, 0x73, 0x19, 0xb9, 0x01, 0x25, 0xc9, 0x4a, 0x13, 0x6d, 0x5e,
-	0x8a, 0xea, 0x93, 0xa3, 0x92, 0xa8, 0x1d, 0x2e, 0xe1, 0x8f, 0x7f, 0xea, 0xe1, 0x5d, 0x08, 0x8e,
-	0x56, 0x3c, 0x82, 0x0b, 0xc4, 0xe3, 0x3f, 0x5c, 0x2d, 0x7c, 0xd7, 0x37, 0x6f, 0x6f, 0x95, 0x8b,
-	0xf5, 0x3f, 0xa7, 0x21, 0x2b, 0xee, 0x5b, 0x15, 0x4a, 0xc3, 0x4f, 0xfb, 0xaa, 0xd6, 0xee, 0x8d,
-	0x0e, 0xba, 0xaa, 0x92, 0x22, 0x15, 0x00, 0x21, 0x78, 0xd4, 0xed, 0x35, 0x87, 0x4a, 0x3a, 0x5a,
-	0x77, 0x8e, 0x87, 0x1f, 0xfc, 0x48, 0xc9, 0x44, 0x06, 0x23, 0x29, 0xc8, 0xc6, 0x01, 0x3f, 0x7c,
-	0xa0, 0xe4, 0x90, 0x09, 0x65, 0xe9, 0xa0, 0xf3, 0x44, 0x6d, 0x23, 0x22, 0x9f, 0x94, 0x20, 0x66,
-	0x8b, 0x6c, 0x43, 0x51, 0x48, 0x0e, 0x7a, 0xbd, 0xae, 0x52, 0x88, 0x7c, 0x0e, 0x86, 0xb4, 0x73,
-	0x7c, 0xa8, 0x14, 0x23, 0x9f, 0x87, 0xb4, 0x37, 0xea, 0x2b, 0x10, 0x79, 0x38, 0x52, 0x07, 0x83,
-	0xe6, 0xa1, 0xaa, 0x94, 0x22, 0xc4, 0xc1, 0xa7, 0x43, 0x75, 0xa0, 0x94, 0x13, 0x61, 0xe1, 0x23,
-	0xb6, 0xa3, 0x47, 0xa8, 0xc7, 0xa3, 0x23, 0xa5, 0x42, 0x76, 0x60, 0x5b, 0x3e, 0x22, 0x0c, 0xa2,
-	0xba, 0x22, 0xc2, 0x48, 0x95, 0x65, 0x20, 0xd2, 0xcb, 0x4e, 0x42, 0x80, 0x08, 0x52, 0x6f, 0x41,
-	0x4e, 0xb0, 0x0b, 0x59, 0x5c, 0xe9, 0x36, 0x0f, 0xd4, 0xae, 0xd6, 0xeb, 0x0f, 0x3b, 0xbd, 0xe3,
-	0x66, 0x17, 0x73, 0x17, 0xc9, 0xa8, 0xfa, 0xb3, 0x51, 0x87, 0xaa, 0x6d, 0xcc, 0x5f, 0x4c, 0xd6,
-	0x57, 0x9b, 0x43, 0x94, 0x65, 0xea, 0xf7, 0x60, 0x77, 0x53, 0x9d, 0xd9, 0x74, 0x33, 0xea, 0x5f,
-	0xa4, 0xe0, 0xd2, 0x86, 0x92, 0xb9, 0xf1, 0x16, 0xfd, 0x14, 0x72, 0x92, 0x69, 0xb2, 0x89, 0xbc,
-	0xb3, 0xb1, 0xf6, 0x0a, 0xde, 0xad, 0x35, 0x12, 0x61, 0x17, 0x6f, 0xa4, 0x99, 0x73, 0x1a, 0x29,
-	0x77, 0xb1, 0x46, 0xa7, 0x5f, 0xa7, 0xa0, 0x76, 0x9e, 0xef, 0x57, 0xdc, 0xf7, 0x74, 0xe2, 0xbe,
-	0x7f, 0xb4, 0x1a, 0xc0, 0xcd, 0xf3, 0xf7, 0xb0, 0x16, 0xc5, 0x97, 0x29, 0xb8, 0xbc, 0x79, 0xde,
-	0xd8, 0x18, 0xc3, 0xc7, 0x90, 0x9f, 0x31, 0xff, 0xd4, 0x09, 0x7b, 0xee, 0xdb, 0x1b, 0x2a, 0x39,
-	0x57, 0xaf, 0xe6, 0x2a, 0xb0, 0x8a, 0xb7, 0x82, 0xcc, 0x79, 0x43, 0x83, 0x8c, 0x66, 0x2d, 0xd2,
-	0xdf, 0xa4, 0xe1, 0xb5, 0x8d, 0xce, 0x37, 0x06, 0x7a, 0x1d, 0xc0, 0xb4, 0xe7, 0x0b, 0x5f, 0xf6,
-	0x55, 0x59, 0x66, 0x8a, 0x42, 0x22, 0xae, 0x30, 0x2f, 0x21, 0x0b, 0x3f, 0xd2, 0x67, 0x84, 0x1e,
-	0xa4, 0x48, 0x00, 0x1e, 0x2e, 0x03, 0xcd, 0x8a, 0x40, 0xdf, 0x38, 0x67, 0xa7, 0x6b, 0x2d, 0xeb,
-	0x3d, 0x50, 0x0c, 0xcb, 0x64, 0xb6, 0xaf, 0x79, 0xbe, 0xcb, 0xf4, 0x99, 0x69, 0x4f, 0x45, 0x1d,
-	0x2d, 0xec, 0xe7, 0x26, 0xba, 0xe5, 0x31, 0x5a, 0x95, 0xea, 0x41, 0xa8, 0xe5, 0x16, 0xa2, 0x59,
-	0xb8, 0x31, 0x8b, 0x7c, 0xc2, 0x42, 0xaa, 0x23, 0x8b, 0xfa, 0xdf, 0xb7, 0xa0, 0x14, 0x9b, 0xce,
-	0xc8, 0x4d, 0x28, 0x3f, 0xd5, 0x9f, 0xeb, 0x5a, 0x38, 0x71, 0xcb, 0x4c, 0x94, 0xb8, 0xac, 0x1f,
-	0x4c, 0xdd, 0xef, 0xc1, 0xae, 0x80, 0xe0, 0x1e, 0xf1, 0x41, 0x86, 0xa5, 0x7b, 0x9e, 0x48, 0x5a,
-	0x41, 0x40, 0x09, 0xd7, 0xf5, 0xb8, 0xaa, 0x15, 0x6a, 0xc8, 0xfb, 0x70, 0x49, 0x58, 0xcc, 0xb0,
-	0xf0, 0x9a, 0x73, 0x8b, 0x69, 0xfc, 0x1d, 0xc0, 0x13, 0xf5, 0x34, 0x8a, 0x6c, 0x87, 0x23, 0x8e,
-	0x02, 0x00, 0x8f, 0xc8, 0x23, 0x87, 0x70, 0x5d, 0x98, 0x4d, 0x99, 0xcd, 0x5c, 0xdd, 0x67, 0x1a,
-	0xfb, 0xe5, 0x02, 0xb1, 0x9a, 0x6e, 0x8f, 0xb5, 0x53, 0xdd, 0x3b, 0xad, 0xed, 0xc6, 0x1d, 0x5c,
-	0xe5, 0xd8, 0xc3, 0x00, 0xaa, 0x0a, 0x64, 0xd3, 0x1e, 0x7f, 0x82, 0x38, 0xb2, 0x0f, 0x97, 0x85,
-	0x23, 0x4c, 0x0a, 0xee, 0x59, 0x33, 0x4e, 0x99, 0xf1, 0x4c, 0x5b, 0xf8, 0x93, 0x87, 0xb5, 0xd7,
-	0xe3, 0x1e, 0x44, 0x90, 0x03, 0x81, 0x69, 0x71, 0xc8, 0x08, 0x11, 0x64, 0x00, 0x65, 0x7e, 0x1e,
-	0x33, 0xf3, 0x73, 0x0c, 0xdb, 0x71, 0x45, 0x8f, 0xa8, 0x6c, 0xb8, 0xdc, 0xb1, 0x24, 0x36, 0x7a,
-	0x81, 0xc1, 0x11, 0xce, 0xa7, 0xfb, 0xb9, 0x41, 0x5f, 0x55, 0xdb, 0xb4, 0x14, 0x7a, 0x79, 0xe4,
-	0xb8, 0x9c, 0x53, 0x53, 0x27, 0xca, 0x71, 0x49, 0x72, 0x6a, 0xea, 0x84, 0x19, 0xc6, 0x7c, 0x19,
-	0x86, 0xdc, 0x36, 0xbe, 0xbb, 0x04, 0xc3, 0xba, 0x57, 0x53, 0x12, 0xf9, 0x32, 0x8c, 0x43, 0x09,
-	0x08, 0x68, 0xee, 0xe1, 0x95, 0x78, 0x6d, 0x99, 0xaf, 0xb8, 0xe1, 0xce, 0xda, 0x2e, 0x57, 0x4d,
-	0xf1, 0x89, 0xf3, 0xb3, 0x75, 0x43, 0x92, 0x78, 0xe2, 0xfc, 0x6c, 0xd5, 0xec, 0xb6, 0x78, 0x01,
-	0x73, 0x99, 0x81, 0x29, 0x1f, 0xd7, 0xae, 0xc4, 0xd1, 0x31, 0x05, 0xb9, 0x8f, 0x44, 0x36, 0x34,
-	0x66, 0xeb, 0x27, 0x78, 0xf6, 0xba, 0x8b, 0x3f, 0xbc, 0xda, 0x8d, 0x38, 0xb8, 0x62, 0x18, 0xaa,
-	0xd0, 0x36, 0x85, 0x92, 0xdc, 0x83, 0x1d, 0xe7, 0xe4, 0xa9, 0x21, 0xc9, 0xa5, 0xa1, 0x9f, 0x89,
-	0xf9, 0xb2, 0x76, 0x4b, 0xa4, 0xa9, 0xca, 0x15, 0x82, 0x5a, 0x7d, 0x21, 0x26, 0xef, 0xa0, 0x73,
-	0xef, 0x54, 0x77, 0xe7, 0xa2, 0x49, 0x7b, 0x98, 0x54, 0x56, 0xbb, 0x2d, 0xa1, 0x52, 0x7e, 0x1c,
-	0x8a, 0x89, 0x0a, 0x37, 0xf8, 0xe6, 0x6d, 0xdd, 0x76, 0xb4, 0x85, 0xc7, 0xb4, 0x65, 0x88, 0xd1,
-	0x59, 0xbc, 0xcd, 0xc3, 0xa2, 0xd7, 0x42, 0xd8, 0xc8, 0xc3, 0x62, 0x16, 0x82, 0xc2, 0xe3, 0x79,
-	0x02, 0xbb, 0x0b, 0xdb, 0xb4, 0x91, 0xe2, 0xa8, 0xe1, 0xc6, 0xf2, 0xc2, 0xd6, 0xfe, 0xb5, 0x75,
-	0xce, 0xd0, 0x3d, 0x8a, 0xa3, 0x25, 0x49, 0xe8, 0xa5, 0xc5, 0xba, 0xb0, 0xbe, 0x0f, 0xe5, 0x38,
-	0x77, 0x48, 0x11, 0x24, 0x7b, 0xb0, 0xbb, 0x61, 0x47, 0x6d, 0xf5, 0xda, 0xbc, 0x17, 0x7e, 0xa6,
-	0x62, 0x63, 0xc3, 0x9e, 0xdc, 0xed, 0x0c, 0x55, 0x8d, 0x8e, 0x8e, 0x87, 0x9d, 0x23, 0x55, 0xc9,
-	0xdc, 0x2b, 0x16, 0xfe, 0xbd, 0xa5, 0xfc, 0x0a, 0xff, 0xd2, 0xf5, 0xbf, 0xa6, 0xa1, 0x92, 0x9c,
-	0x83, 0xc9, 0x4f, 0xe0, 0x4a, 0xf8, 0xd2, 0xea, 0x31, 0x5f, 0x7b, 0x61, 0xba, 0x82, 0xce, 0x33,
-	0x5d, 0x4e, 0x92, 0xd1, 0x49, 0xec, 0x06, 0x28, 0x7c, 0xbd, 0xff, 0x05, 0x62, 0x1e, 0x09, 0x08,
-	0xe9, 0xc2, 0x0d, 0x4c, 0x19, 0xce, 0x9a, 0xf6, 0x58, 0x77, 0xc7, 0xda, 0xf2, 0x73, 0x81, 0xa6,
-	0x1b, 0xc8, 0x03, 0xcf, 0x91, 0x9d, 0x24, 0xf2, 0x72, 0xcd, 0x76, 0x06, 0x01, 0x78, 0x59, 0x62,
-	0x9b, 0x01, 0x74, 0x85, 0x35, 0x99, 0xf3, 0x58, 0x83, 0xb3, 0xd7, 0x4c, 0x9f, 0x23, 0x6d, 0x7c,
-	0xf7, 0x4c, 0x4c, 0x6f, 0x05, 0x5a, 0x40, 0x81, 0xca, 0xd7, 0xdf, 0xdc, 0x19, 0xc4, 0xf3, 0xf8,
-	0x8f, 0x0c, 0x94, 0xe3, 0x13, 0x1c, 0x1f, 0x88, 0x0d, 0x51, 0xe6, 0x53, 0xa2, 0x0a, 0xbc, 0xf5,
-	0x95, 0xf3, 0x5e, 0xa3, 0xc5, 0xeb, 0xff, 0x7e, 0x5e, 0xce, 0x55, 0x54, 0x5a, 0xf2, 0xde, 0xcb,
-	0xb9, 0xc6, 0xe4, 0xb4, 0x5e, 0xa0, 0xc1, 0x0a, 0x8b, 0x5d, 0xfe, 0xa9, 0x27, 0x7c, 0xe7, 0x85,
-	0xef, 0x5b, 0x5f, 0xed, 0xfb, 0xf1, 0x40, 0x38, 0x2f, 0x3e, 0x1e, 0x68, 0xc7, 0x3d, 0x7a, 0xd4,
-	0xec, 0xd2, 0xc0, 0x9c, 0x5c, 0x85, 0xac, 0xa5, 0x7f, 0x7e, 0x96, 0xec, 0x14, 0x42, 0x74, 0xd1,
-	0xc4, 0xa3, 0x07, 0xfe, 0xc9, 0x23, 0x59, 0x9f, 0x85, 0xe8, 0x1b, 0xa4, 0xfe, 0x7d, 0xc8, 0x89,
-	0x7c, 0x11, 0x80, 0x20, 0x63, 0xca, 0xf7, 0x48, 0x01, 0xb2, 0xad, 0x1e, 0xe5, 0xf4, 0x47, 0xbe,
-	0x4b, 0xa9, 0xd6, 0xef, 0xa8, 0x2d, 0xbc, 0x01, 0xf5, 0xf7, 0x21, 0x2f, 0x93, 0xc0, 0xaf, 0x46,
-	0x94, 0x06, 0x34, 0x92, 0xcb, 0xc0, 0x47, 0x2a, 0xd4, 0x8e, 0x8e, 0x0e, 0x54, 0xaa, 0xa4, 0xe3,
-	0xc7, 0xfb, 0x97, 0x14, 0x94, 0x62, 0x03, 0x15, 0x6f, 0xe5, 0xba, 0x65, 0x39, 0x2f, 0x34, 0xdd,
-	0x32, 0xb1, 0x42, 0xc9, 0xf3, 0x01, 0x21, 0x6a, 0x72, 0xc9, 0x45, 0xf3, 0xf7, 0x7f, 0xe1, 0xe6,
-	0x1f, 0x53, 0xa0, 0xac, 0x0e, 0x63, 0x2b, 0x01, 0xa6, 0xbe, 0xd5, 0x00, 0xff, 0x90, 0x82, 0x4a,
-	0x72, 0x02, 0x5b, 0x09, 0xef, 0xe6, 0xb7, 0x1a, 0xde, 0xef, 0x53, 0xb0, 0x9d, 0x98, 0xbb, 0xbe,
-	0x53, 0xd1, 0xfd, 0x2e, 0x03, 0x97, 0x36, 0xd8, 0x61, 0x01, 0x92, 0x03, 0xaa, 0x9c, 0x99, 0x7f,
-	0x70, 0x91, 0x67, 0x35, 0x78, 0xff, 0xeb, 0xeb, 0xae, 0x1f, 0xcc, 0xb3, 0xd8, 0x2f, 0xcd, 0x31,
-	0x16, 0x55, 0x73, 0x62, 0xe2, 0xf8, 0x26, 0xdf, 0x58, 0xe4, 0xd4, 0x5a, 0x5d, 0xca, 0xe5, 0xeb,
-	0xf1, 0xf7, 0x81, 0xcc, 0x1d, 0xcf, 0xf4, 0xcd, 0xe7, 0xfc, 0xf3, 0x5c, 0xf8, 0x22, 0xcd, 0xa7,
-	0xd8, 0x2c, 0x55, 0x42, 0x4d, 0xc7, 0xf6, 0x23, 0xb4, 0xcd, 0xa6, 0xfa, 0x0a, 0x9a, 0x97, 0xa1,
-	0x0c, 0x55, 0x42, 0x4d, 0x84, 0xc6, 0x41, 0x73, 0xec, 0x2c, 0xf8, 0x40, 0x20, 0x71, 0xbc, 0xea,
-	0xa5, 0x68, 0x49, 0xca, 0x22, 0x48, 0x30, 0xb1, 0x2d, 0xdf, 0xe0, 0xcb, 0xb4, 0x24, 0x65, 0x12,
-	0x72, 0x07, 0xaa, 0xfa, 0x74, 0xea, 0x72, 0xe7, 0xa1, 0x23, 0x39, 0x86, 0x56, 0x22, 0xb1, 0x00,
-	0xee, 0x3d, 0x86, 0x42, 0x98, 0x07, 0xde, 0x58, 0x78, 0x26, 0xb0, 0xe7, 0x8b, 0xef, 0x28, 0x69,
-	0xfe, 0x52, 0x6f, 0x87, 0x4a, 0x7c, 0xa8, 0xe9, 0x69, 0xcb, 0x0f, 0x7a, 0x69, 0xd4, 0x17, 0x68,
-	0xc9, 0xf4, 0xa2, 0x2f, 0x38, 0xf5, 0x2f, 0xb1, 0xbd, 0x26, 0x3f, 0x48, 0x92, 0x36, 0x14, 0x2c,
-	0x07, 0xf9, 0xc1, 0x2d, 0xe4, 0xd7, 0xf0, 0xbb, 0xaf, 0xf8, 0x86, 0xd9, 0xe8, 0x06, 0x78, 0x1a,
-	0x59, 0xee, 0xfd, 0x2d, 0x05, 0x85, 0x50, 0x8c, 0x8d, 0x22, 0x3b, 0xd7, 0xfd, 0x53, 0xe1, 0x2e,
-	0x77, 0x90, 0x56, 0x52, 0x54, 0xac, 0xb9, 0x1c, 0xa7, 0x19, 0x5b, 0x50, 0x20, 0x90, 0xf3, 0x35,
-	0x3f, 0x57, 0x8b, 0xe9, 0x63, 0x31, 0xe0, 0x3a, 0xb3, 0x19, 0x9e, 0xa4, 0x17, 0x9e, 0x6b, 0x20,
-	0x6f, 0x05, 0x62, 0xfe, 0x5d, 0xdc, 0x77, 0x75, 0xd3, 0x4a, 0x60, 0xb3, 0x02, 0xab, 0x84, 0x8a,
-	0x08, 0xbc, 0x0f, 0x57, 0x43, 0xbf, 0x63, 0xe6, 0xeb, 0x38, 0x3c, 0x8f, 0x97, 0x46, 0x79, 0xf1,
-	0xb5, 0xeb, 0x4a, 0x00, 0x68, 0x07, 0xfa, 0xd0, 0xf6, 0xe0, 0x09, 0x0e, 0xb2, 0xce, 0x6c, 0x35,
-	0x13, 0x07, 0xca, 0xca, 0x7b, 0x97, 0xf7, 0x49, 0xea, 0x33, 0x58, 0x0e, 0x15, 0x5f, 0xa4, 0x33,
-	0x87, 0xfd, 0x83, 0x3f, 0xa5, 0xf7, 0x0e, 0xa5, 0x5d, 0x3f, 0xcc, 0x20, 0x65, 0x13, 0x8b, 0x19,
-	0x3c, 0x3b, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, 0x18, 0x00, 0x00,
+	// 2211 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xc6,
+	0x15, 0x0f, 0xf8, 0x4f, 0xe4, 0x23, 0x45, 0xad, 0x56, 0x8a, 0x03, 0xcb, 0x76, 0x2c, 0x33, 0x76,
+	0x2c, 0xdb, 0xad, 0x9c, 0x91, 0xff, 0x44, 0x51, 0x3a, 0xe9, 0x50, 0x24, 0xac, 0xd0, 0x43, 0x89,
+	0x2c, 0x28, 0xb6, 0x4e, 0x2e, 0x98, 0x15, 0xb0, 0xa4, 0x60, 0x83, 0x0b, 0x14, 0x00, 0x6d, 0x2b,
+	0x27, 0xcf, 0xf4, 0xd4, 0x6f, 0xd0, 0x69, 0x3b, 0x3d, 0xe4, 0x92, 0x99, 0x7e, 0x80, 0x1e, 0x7a,
+	0xef, 0xb5, 0x87, 0x9e, 0x7b, 0xec, 0x4c, 0xfb, 0x0d, 0x7a, 0xed, 0xec, 0x2e, 0x00, 0x82, 0x7f,
+	0x14, 0xab, 0x99, 0x49, 0xd3, 0x93, 0xb4, 0xbf, 0xf7, 0x7b, 0x8f, 0x6f, 0xdf, 0xfe, 0xb0, 0xef,
+	0x01, 0x80, 0x2c, 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0xbf, 0xed, 0xf9, 0x6e, 0xe8, 0xe2, 0x95,
+	0xa1, 0xeb, 0x0e, 0x1d, 0x2a, 0x57, 0x27, 0xe3, 0x41, 0xed, 0x10, 0x56, 0x9f, 0xd8, 0x0e, 0x6d,
+	0x26, 0xc4, 0x1e, 0x0d, 0xf1, 0x2e, 0xe4, 0x06, 0xb6, 0x43, 0x55, 0x65, 0x33, 0xbb, 0x55, 0xde,
+	0xb9, 0xb9, 0x3d, 0xe3, 0xb4, 0x3d, 0xed, 0xd1, 0xe5, 0xb0, 0x2e, 0x3c, 0x6a, 0xff, 0xc8, 0xc1,
+	0xda, 0x02, 0x2b, 0xc6, 0x90, 0x63, 0x64, 0xc4, 0x23, 0x2a, 0x5b, 0x25, 0x5d, 0xfc, 0x8f, 0x55,
+	0x58, 0xf2, 0x88, 0xf9, 0x82, 0x0c, 0xa9, 0x9a, 0x11, 0x70, 0xbc, 0xc4, 0xef, 0x03, 0x58, 0xd4,
+	0xa3, 0xcc, 0xa2, 0xcc, 0x3c, 0x53, 0xb3, 0x9b, 0xd9, 0xad, 0x92, 0x9e, 0x42, 0xf0, 0x3d, 0x58,
+	0xf5, 0xc6, 0x27, 0x8e, 0x6d, 0x1a, 0x29, 0x1a, 0x6c, 0x66, 0xb7, 0xf2, 0x3a, 0x92, 0x86, 0xe6,
+	0x84, 0x7c, 0x1b, 0x56, 0x5e, 0x51, 0xf2, 0x22, 0x4d, 0x2d, 0x0b, 0x6a, 0x95, 0xc3, 0x29, 0x62,
+	0x03, 0x2a, 0x23, 0x1a, 0x04, 0x64, 0x48, 0x8d, 0xf0, 0xcc, 0xa3, 0x6a, 0x4e, 0xec, 0x7e, 0x73,
+	0x6e, 0xf7, 0xb3, 0x3b, 0x2f, 0x47, 0x5e, 0xc7, 0x67, 0x1e, 0xc5, 0x75, 0x28, 0x51, 0x36, 0x1e,
+	0xc9, 0x08, 0xf9, 0x73, 0xea, 0xa7, 0xb1, 0xf1, 0x68, 0x36, 0x4a, 0x91, 0xbb, 0x45, 0x21, 0x96,
+	0x02, 0xea, 0xbf, 0xb4, 0x4d, 0xaa, 0x16, 0x44, 0x80, 0xdb, 0x73, 0x01, 0x7a, 0xd2, 0x3e, 0x1b,
+	0x23, 0xf6, 0xc3, 0x0d, 0x28, 0xd1, 0xd7, 0x21, 0x65, 0x81, 0xed, 0x32, 0x75, 0x49, 0x04, 0xb9,
+	0xb5, 0xe0, 0x14, 0xa9, 0x63, 0xcd, 0x86, 0x98, 0xf8, 0xe1, 0xc7, 0xb0, 0xe4, 0x7a, 0xa1, 0xed,
+	0xb2, 0x40, 0x2d, 0x6e, 0x2a, 0x5b, 0xe5, 0x9d, 0xab, 0x0b, 0x85, 0xd0, 0x91, 0x1c, 0x3d, 0x26,
+	0xe3, 0x16, 0xa0, 0xc0, 0x1d, 0xfb, 0x26, 0x35, 0x4c, 0xd7, 0xa2, 0x86, 0xcd, 0x06, 0xae, 0x5a,
+	0x12, 0x01, 0xae, 0xcf, 0x6f, 0x44, 0x10, 0x1b, 0xae, 0x45, 0x5b, 0x6c, 0xe0, 0xea, 0xd5, 0x60,
+	0x6a, 0x8d, 0x2f, 0x41, 0x21, 0x38, 0x63, 0x21, 0x79, 0xad, 0x56, 0x84, 0x42, 0xa2, 0x55, 0xed,
+	0xdf, 0x79, 0x58, 0xb9, 0x88, 0xc4, 0x3e, 0x85, 0xfc, 0x80, 0xef, 0x52, 0xcd, 0xfc, 0x37, 0x35,
+	0x90, 0x3e, 0xd3, 0x45, 0x2c, 0x7c, 0xc7, 0x22, 0xd6, 0xa1, 0xcc, 0x68, 0x10, 0x52, 0x4b, 0x2a,
+	0x22, 0x7b, 0x41, 0x4d, 0x81, 0x74, 0x9a, 0x97, 0x54, 0xee, 0x3b, 0x49, 0xea, 0x19, 0xac, 0x24,
+	0x29, 0x19, 0x3e, 0x61, 0xc3, 0x58, 0x9b, 0xf7, 0xdf, 0x96, 0xc9, 0xb6, 0x16, 0xfb, 0xe9, 0xdc,
+	0x4d, 0xaf, 0xd2, 0xa9, 0x35, 0x6e, 0x02, 0xb8, 0x8c, 0xba, 0x03, 0xc3, 0xa2, 0xa6, 0xa3, 0x16,
+	0xcf, 0xa9, 0x52, 0x87, 0x53, 0xe6, 0xaa, 0xe4, 0x4a, 0xd4, 0x74, 0xf0, 0x27, 0x13, 0xa9, 0x2d,
+	0x9d, 0xa3, 0x94, 0x43, 0xf9, 0x90, 0xcd, 0xa9, 0xad, 0x0f, 0x55, 0x9f, 0x72, 0xdd, 0x53, 0x2b,
+	0xda, 0x59, 0x49, 0x24, 0xb1, 0xfd, 0xd6, 0x9d, 0xe9, 0x91, 0x9b, 0xdc, 0xd8, 0xb2, 0x9f, 0x5e,
+	0xe2, 0x0f, 0x20, 0x01, 0x0c, 0x21, 0x2b, 0x10, 0xb7, 0x50, 0x25, 0x06, 0x8f, 0xc8, 0x88, 0x6e,
+	0xec, 0x42, 0x75, 0xba, 0x3c, 0x78, 0x1d, 0xf2, 0x41, 0x48, 0xfc, 0x50, 0xa8, 0x30, 0xaf, 0xcb,
+	0x05, 0x46, 0x90, 0xa5, 0xcc, 0x12, 0xb7, 0x5c, 0x5e, 0xe7, 0xff, 0x6e, 0x7c, 0x0c, 0xcb, 0x53,
+	0x3f, 0x7f, 0x51, 0xc7, 0xda, 0x6f, 0x0a, 0xb0, 0xbe, 0x48, 0x73, 0x0b, 0xe5, 0x7f, 0x09, 0x0a,
+	0x6c, 0x3c, 0x3a, 0xa1, 0xbe, 0x9a, 0x15, 0x11, 0xa2, 0x15, 0xae, 0x43, 0xde, 0x21, 0x27, 0xd4,
+	0x51, 0x73, 0x9b, 0xca, 0x56, 0x75, 0xe7, 0xde, 0x85, 0x54, 0xbd, 0xdd, 0xe6, 0x2e, 0xba, 0xf4,
+	0xc4, 0x9f, 0x41, 0x2e, 0xba, 0xe2, 0x78, 0x84, 0xbb, 0x17, 0x8b, 0xc0, 0xb5, 0xa8, 0x0b, 0x3f,
+	0x7c, 0x05, 0x4a, 0xfc, 0xaf, 0xac, 0x6d, 0x41, 0xe4, 0x5c, 0xe4, 0x00, 0xaf, 0x2b, 0xde, 0x80,
+	0xa2, 0x90, 0x99, 0x45, 0xe3, 0xd6, 0x90, 0xac, 0xf9, 0xc1, 0x58, 0x74, 0x40, 0xc6, 0x4e, 0x68,
+	0xbc, 0x24, 0xce, 0x98, 0x0a, 0xc1, 0x94, 0xf4, 0x4a, 0x04, 0xfe, 0x9c, 0x63, 0xf8, 0x3a, 0x94,
+	0xa5, 0x2a, 0x6d, 0x66, 0xd1, 0xd7, 0xe2, 0xf6, 0xc9, 0xeb, 0x52, 0xa8, 0x2d, 0x8e, 0xf0, 0x9f,
+	0x7f, 0x1e, 0xb8, 0x2c, 0x3e, 0x5a, 0xf1, 0x13, 0x1c, 0x10, 0x3f, 0xff, 0xf1, 0xec, 0xc5, 0x77,
+	0x6d, 0xf1, 0xf6, 0x66, 0xb5, 0x58, 0xfb, 0x53, 0x06, 0x72, 0xe2, 0x79, 0x5b, 0x81, 0xf2, 0xf1,
+	0x17, 0x5d, 0xcd, 0x68, 0x76, 0xfa, 0xfb, 0x6d, 0x0d, 0x29, 0xb8, 0x0a, 0x20, 0x80, 0x27, 0xed,
+	0x4e, 0xfd, 0x18, 0x65, 0x92, 0x75, 0xeb, 0xe8, 0xf8, 0xf1, 0x43, 0x94, 0x4d, 0x1c, 0xfa, 0x12,
+	0xc8, 0xa5, 0x09, 0x0f, 0x76, 0x50, 0x1e, 0x23, 0xa8, 0xc8, 0x00, 0xad, 0x67, 0x5a, 0xf3, 0xf1,
+	0x43, 0x54, 0x98, 0x46, 0x1e, 0xec, 0xa0, 0x25, 0xbc, 0x0c, 0x25, 0x81, 0xec, 0x77, 0x3a, 0x6d,
+	0x54, 0x4c, 0x62, 0xf6, 0x8e, 0xf5, 0xd6, 0xd1, 0x01, 0x2a, 0x25, 0x31, 0x0f, 0xf4, 0x4e, 0xbf,
+	0x8b, 0x20, 0x89, 0x70, 0xa8, 0xf5, 0x7a, 0xf5, 0x03, 0x0d, 0x95, 0x13, 0xc6, 0xfe, 0x17, 0xc7,
+	0x5a, 0x0f, 0x55, 0xa6, 0xd2, 0x7a, 0xb0, 0x83, 0x96, 0x93, 0x9f, 0xd0, 0x8e, 0xfa, 0x87, 0xa8,
+	0x8a, 0x57, 0x61, 0x59, 0xfe, 0x44, 0x9c, 0xc4, 0xca, 0x0c, 0xf4, 0xf8, 0x21, 0x42, 0x93, 0x44,
+	0x64, 0x94, 0xd5, 0x29, 0xe0, 0xf1, 0x43, 0x84, 0x6b, 0x0d, 0xc8, 0x0b, 0x75, 0x61, 0x0c, 0xd5,
+	0x76, 0x7d, 0x5f, 0x6b, 0x1b, 0x9d, 0xee, 0x71, 0xab, 0x73, 0x54, 0x6f, 0x23, 0x65, 0x82, 0xe9,
+	0xda, 0xcf, 0xfa, 0x2d, 0x5d, 0x6b, 0xa2, 0x4c, 0x1a, 0xeb, 0x6a, 0xf5, 0x63, 0xad, 0x89, 0xb2,
+	0xb5, 0xbb, 0xb0, 0xbe, 0xe8, 0x9e, 0x59, 0xf4, 0x64, 0xd4, 0xbe, 0x56, 0x60, 0x6d, 0xc1, 0x95,
+	0xb9, 0xf0, 0x29, 0xfa, 0x29, 0xe4, 0xa5, 0xd2, 0x64, 0x13, 0xb9, 0xb3, 0xf0, 0xee, 0x15, 0xba,
+	0x9b, 0x6b, 0x24, 0xc2, 0x2f, 0xdd, 0x48, 0xb3, 0xe7, 0x34, 0x52, 0x1e, 0x62, 0x4e, 0x4e, 0xbf,
+	0x52, 0x40, 0x3d, 0x2f, 0xf6, 0x5b, 0x9e, 0xf7, 0xcc, 0xd4, 0xf3, 0xfe, 0xe9, 0x6c, 0x02, 0x37,
+	0xce, 0xdf, 0xc3, 0x5c, 0x16, 0xdf, 0x28, 0x70, 0x69, 0xf1, 0xbc, 0xb1, 0x30, 0x87, 0xcf, 0xa0,
+	0x30, 0xa2, 0xe1, 0xa9, 0x1b, 0xf7, 0xdc, 0x0f, 0x17, 0xdc, 0xe4, 0xdc, 0x3c, 0x5b, 0xab, 0xc8,
+	0x2b, 0xdd, 0x0a, 0xb2, 0xe7, 0x0d, 0x0d, 0x32, 0x9b, 0xb9, 0x4c, 0x7f, 0x9d, 0x81, 0x77, 0x17,
+	0x06, 0x5f, 0x98, 0xe8, 0x35, 0x00, 0x9b, 0x79, 0xe3, 0x50, 0xf6, 0x55, 0x79, 0xcd, 0x94, 0x04,
+	0x22, 0x1e, 0x61, 0x7e, 0x85, 0x8c, 0xc3, 0xc4, 0x9e, 0x15, 0x76, 0x90, 0x90, 0x20, 0xec, 0x4e,
+	0x12, 0xcd, 0x89, 0x44, 0xdf, 0x3f, 0x67, 0xa7, 0x73, 0x2d, 0xeb, 0x23, 0x40, 0xa6, 0x63, 0x53,
+	0x16, 0x1a, 0x41, 0xe8, 0x53, 0x32, 0xb2, 0xd9, 0x50, 0xdc, 0xa3, 0xc5, 0xbd, 0xfc, 0x80, 0x38,
+	0x01, 0xd5, 0x57, 0xa4, 0xb9, 0x17, 0x5b, 0xb9, 0x87, 0x68, 0x16, 0x7e, 0xca, 0xa3, 0x30, 0xe5,
+	0x21, 0xcd, 0x89, 0x47, 0xed, 0x6f, 0x4b, 0x50, 0x4e, 0x4d, 0x67, 0xf8, 0x06, 0x54, 0x9e, 0x93,
+	0x97, 0xc4, 0x88, 0x27, 0x6e, 0x59, 0x89, 0x32, 0xc7, 0xba, 0xd1, 0xd4, 0xfd, 0x11, 0xac, 0x0b,
+	0x8a, 0x3b, 0x0e, 0xa9, 0x6f, 0x98, 0x0e, 0x09, 0x02, 0x51, 0xb4, 0xa2, 0xa0, 0x62, 0x6e, 0xeb,
+	0x70, 0x53, 0x23, 0xb6, 0xe0, 0x47, 0xb0, 0x26, 0x3c, 0x46, 0x63, 0x27, 0xb4, 0x3d, 0x87, 0x1a,
+	0xfc, 0x1d, 0x20, 0x10, 0xf7, 0x69, 0x92, 0xd9, 0x2a, 0x67, 0x1c, 0x46, 0x04, 0x9e, 0x51, 0x80,
+	0x0f, 0xe0, 0x9a, 0x70, 0x1b, 0x52, 0x46, 0x7d, 0x12, 0x52, 0x83, 0xfe, 0x72, 0x4c, 0x9c, 0xc0,
+	0x20, 0xcc, 0x32, 0x4e, 0x49, 0x70, 0xaa, 0xae, 0xa7, 0x03, 0x5c, 0xe6, 0xdc, 0x83, 0x88, 0xaa,
+	0x09, 0x66, 0x9d, 0x59, 0x9f, 0x93, 0xe0, 0x14, 0xef, 0xc1, 0x25, 0x11, 0x28, 0x08, 0x7d, 0x9b,
+	0x0d, 0x0d, 0xf3, 0x94, 0x9a, 0x2f, 0x8c, 0x71, 0x38, 0xd8, 0x55, 0xaf, 0xa4, 0x23, 0x88, 0x24,
+	0x7b, 0x82, 0xd3, 0xe0, 0x94, 0x7e, 0x38, 0xd8, 0xc5, 0x3d, 0xa8, 0xf0, 0xf3, 0x18, 0xd9, 0x5f,
+	0x51, 0x63, 0xe0, 0xfa, 0xa2, 0x47, 0x54, 0x17, 0x3c, 0xdc, 0xa9, 0x22, 0x6e, 0x77, 0x22, 0x87,
+	0x43, 0xd7, 0xa2, 0x7b, 0xf9, 0x5e, 0x57, 0xd3, 0x9a, 0x7a, 0x39, 0x8e, 0xf2, 0xc4, 0xf5, 0xb9,
+	0xa6, 0x86, 0x6e, 0x52, 0xe3, 0xb2, 0xd4, 0xd4, 0xd0, 0x8d, 0x2b, 0xfc, 0x08, 0xd6, 0x4c, 0x53,
+	0x6e, 0xdb, 0x36, 0x8d, 0x68, 0x58, 0x0f, 0x54, 0x34, 0x55, 0x2f, 0xd3, 0x3c, 0x90, 0x84, 0x48,
+	0xe6, 0x01, 0xfe, 0x04, 0xde, 0x9d, 0xd4, 0x2b, 0xed, 0xb8, 0x3a, 0xb7, 0xcb, 0x59, 0xd7, 0x47,
+	0xb0, 0xe6, 0x9d, 0xcd, 0x3b, 0xe2, 0xa9, 0x5f, 0xf4, 0xce, 0x66, 0xdd, 0x6e, 0x89, 0x17, 0x30,
+	0x9f, 0x9a, 0x24, 0xa4, 0x96, 0xfa, 0x5e, 0x9a, 0x9d, 0x32, 0xe0, 0xfb, 0x80, 0x4c, 0xd3, 0xa0,
+	0x8c, 0x9c, 0x38, 0xd4, 0x20, 0x3e, 0x65, 0x24, 0x50, 0xaf, 0xa7, 0xc9, 0x55, 0xd3, 0xd4, 0x84,
+	0xb5, 0x2e, 0x8c, 0xf8, 0x2e, 0xac, 0xba, 0x27, 0xcf, 0x4d, 0x29, 0x2e, 0xc3, 0xf3, 0xe9, 0xc0,
+	0x7e, 0xad, 0xde, 0x14, 0x65, 0x5a, 0xe1, 0x06, 0x21, 0xad, 0xae, 0x80, 0xf1, 0x1d, 0x40, 0x66,
+	0x70, 0x4a, 0x7c, 0x4f, 0x34, 0xe9, 0xc0, 0x23, 0x26, 0x55, 0x6f, 0x49, 0xaa, 0xc4, 0x8f, 0x62,
+	0x18, 0x6b, 0x70, 0x9d, 0x6f, 0x9e, 0x11, 0xe6, 0x1a, 0xe3, 0x80, 0x1a, 0x93, 0x14, 0x93, 0xb3,
+	0xf8, 0x90, 0xa7, 0xa5, 0x5f, 0x8d, 0x69, 0xfd, 0x80, 0x36, 0x13, 0x52, 0x7c, 0x3c, 0xcf, 0x60,
+	0x7d, 0xcc, 0x6c, 0x16, 0x52, 0xdf, 0xf3, 0x29, 0x77, 0x96, 0x0f, 0xac, 0xfa, 0xcf, 0xa5, 0x73,
+	0x86, 0xee, 0x7e, 0x9a, 0x2d, 0x45, 0xa2, 0xaf, 0x8d, 0xe7, 0xc1, 0xda, 0x1e, 0x54, 0xd2, 0xda,
+	0xc1, 0x25, 0x90, 0xea, 0x41, 0x0a, 0xef, 0xa8, 0x8d, 0x4e, 0x93, 0xf7, 0xc2, 0x2f, 0x35, 0x94,
+	0xe1, 0x3d, 0xb9, 0xdd, 0x3a, 0xd6, 0x0c, 0xbd, 0x7f, 0x74, 0xdc, 0x3a, 0xd4, 0x50, 0xf6, 0x6e,
+	0xa9, 0xf8, 0xaf, 0x25, 0xf4, 0xe6, 0xcd, 0x9b, 0x37, 0x99, 0xda, 0x5f, 0x32, 0x50, 0x9d, 0x9e,
+	0x83, 0xf1, 0x4f, 0xe0, 0xbd, 0xf8, 0xa5, 0x35, 0xa0, 0xa1, 0xf1, 0xca, 0xf6, 0x85, 0x9c, 0x47,
+	0x44, 0x4e, 0x92, 0xc9, 0x49, 0xac, 0x47, 0xac, 0x1e, 0x0d, 0x7f, 0x61, 0xfb, 0x5c, 0xac, 0x23,
+	0x12, 0xe2, 0x36, 0x5c, 0x67, 0xae, 0x11, 0x84, 0x84, 0x59, 0xc4, 0xb7, 0x8c, 0xc9, 0xe7, 0x02,
+	0x83, 0x98, 0x26, 0x0d, 0x02, 0x57, 0x76, 0x92, 0x24, 0xca, 0x55, 0xe6, 0xf6, 0x22, 0xf2, 0xe4,
+	0x8a, 0xad, 0x47, 0xd4, 0x19, 0xd5, 0x64, 0xcf, 0x53, 0xcd, 0x15, 0x28, 0x8d, 0x88, 0x67, 0x50,
+	0x16, 0xfa, 0x67, 0x62, 0x7a, 0x2b, 0xea, 0xc5, 0x11, 0xf1, 0x34, 0xbe, 0xfe, 0xfe, 0xce, 0x20,
+	0x5d, 0xc7, 0xbf, 0x67, 0xa1, 0x92, 0x9e, 0xe0, 0xf8, 0x40, 0x6c, 0x8a, 0x6b, 0x5e, 0x11, 0xb7,
+	0xc0, 0x07, 0xdf, 0x3a, 0xef, 0x6d, 0x37, 0xf8, 0xfd, 0xbf, 0x57, 0x90, 0x73, 0x95, 0x2e, 0x3d,
+	0x79, 0xef, 0xe5, 0x5a, 0xa3, 0x72, 0x5a, 0x2f, 0xea, 0xd1, 0x0a, 0x1f, 0x40, 0xe1, 0x79, 0x20,
+	0x62, 0x17, 0x44, 0xec, 0x9b, 0xdf, 0x1e, 0xfb, 0x69, 0x4f, 0x04, 0x2f, 0x3d, 0xed, 0x19, 0x47,
+	0x1d, 0xfd, 0xb0, 0xde, 0xd6, 0x23, 0x77, 0x7c, 0x19, 0x72, 0x0e, 0xf9, 0xea, 0x6c, 0xba, 0x53,
+	0x08, 0xe8, 0xa2, 0x85, 0xbf, 0x0c, 0xb9, 0x57, 0x94, 0xbc, 0x98, 0xbe, 0x9f, 0x05, 0xf4, 0x3d,
+	0x4a, 0xff, 0x3e, 0xe4, 0x45, 0xbd, 0x30, 0x40, 0x54, 0x31, 0xf4, 0x0e, 0x2e, 0x42, 0xae, 0xd1,
+	0xd1, 0xb9, 0xfc, 0x11, 0x54, 0x24, 0x6a, 0x74, 0x5b, 0x5a, 0x43, 0x43, 0x99, 0xda, 0x23, 0x28,
+	0xc8, 0x22, 0xf0, 0x47, 0x23, 0x29, 0x03, 0x7a, 0x27, 0x5a, 0x46, 0x31, 0x94, 0xd8, 0xda, 0x3f,
+	0xdc, 0xd7, 0x74, 0x94, 0x49, 0x1f, 0xef, 0x9f, 0x15, 0x28, 0xa7, 0x06, 0x2a, 0xde, 0xca, 0x89,
+	0xe3, 0xb8, 0xaf, 0x0c, 0xe2, 0xd8, 0x24, 0x88, 0xce, 0x07, 0x04, 0x54, 0xe7, 0xc8, 0x45, 0xeb,
+	0xf7, 0x3f, 0xd1, 0xe6, 0x1f, 0x14, 0x40, 0xb3, 0xc3, 0xd8, 0x4c, 0x82, 0xca, 0x0f, 0x9a, 0xe0,
+	0xef, 0x15, 0xa8, 0x4e, 0x4f, 0x60, 0x33, 0xe9, 0xdd, 0xf8, 0x41, 0xd3, 0xfb, 0x9d, 0x02, 0xcb,
+	0x53, 0x73, 0xd7, 0xff, 0x55, 0x76, 0xbf, 0xcd, 0xc2, 0xda, 0x02, 0x3f, 0x5c, 0x8f, 0x06, 0x54,
+	0x39, 0x33, 0xff, 0xf8, 0x22, 0xbf, 0xb5, 0xcd, 0xfb, 0x5f, 0x97, 0xf8, 0x61, 0x34, 0xcf, 0xde,
+	0x01, 0x64, 0x5b, 0x94, 0x85, 0xf6, 0xc0, 0xa6, 0x7e, 0xf4, 0x6e, 0x2c, 0xa7, 0xd6, 0x95, 0x09,
+	0x2e, 0x5f, 0x8f, 0x7f, 0x04, 0xd8, 0x73, 0x03, 0x3b, 0xb4, 0x5f, 0x52, 0xc3, 0x66, 0xf1, 0x8b,
+	0x34, 0x9f, 0x62, 0x73, 0x3a, 0x8a, 0x2d, 0x2d, 0x16, 0x26, 0x6c, 0x46, 0x87, 0x64, 0x86, 0xcd,
+	0xaf, 0xa1, 0xac, 0x8e, 0x62, 0x4b, 0xc2, 0xbe, 0x01, 0x15, 0xcb, 0x1d, 0xf3, 0x81, 0x40, 0xf2,
+	0xf8, 0xad, 0xa7, 0xe8, 0x65, 0x89, 0x25, 0x94, 0x68, 0x62, 0x9b, 0xbc, 0xc1, 0x57, 0xf4, 0xb2,
+	0xc4, 0x24, 0xe5, 0x36, 0xac, 0x90, 0xe1, 0xd0, 0xe7, 0xc1, 0xe3, 0x40, 0x72, 0x0c, 0xad, 0x26,
+	0xb0, 0x20, 0x6e, 0x3c, 0x85, 0x62, 0x5c, 0x07, 0xde, 0x58, 0x78, 0x25, 0x0c, 0x4f, 0x7e, 0x47,
+	0xc9, 0xf0, 0x97, 0x7a, 0x16, 0x1b, 0x6f, 0x40, 0xc5, 0x0e, 0x8c, 0xc9, 0x07, 0xbd, 0xcc, 0x66,
+	0x66, 0xab, 0xa8, 0x97, 0xed, 0x20, 0xf9, 0x82, 0x53, 0xfb, 0x26, 0x03, 0xd5, 0xe9, 0x0f, 0x92,
+	0xb8, 0x09, 0x45, 0xc7, 0x35, 0x89, 0x10, 0x82, 0xfc, 0x1a, 0xbe, 0xf5, 0x96, 0x6f, 0x98, 0xdb,
+	0xed, 0x88, 0xaf, 0x27, 0x9e, 0x1b, 0x7f, 0x55, 0xa0, 0x18, 0xc3, 0xf8, 0x12, 0xe4, 0x3c, 0x12,
+	0x9e, 0x8a, 0x70, 0xf9, 0xfd, 0x0c, 0x52, 0x74, 0xb1, 0xe6, 0x78, 0xe0, 0x11, 0x26, 0x24, 0x10,
+	0xe1, 0x7c, 0xcd, 0xcf, 0xd5, 0xa1, 0xc4, 0x12, 0x03, 0xae, 0x3b, 0x1a, 0x51, 0x16, 0x06, 0xf1,
+	0xb9, 0x46, 0x78, 0x23, 0x82, 0xf1, 0x3d, 0x58, 0x0d, 0x7d, 0x62, 0x3b, 0x53, 0xdc, 0x9c, 0xe0,
+	0xa2, 0xd8, 0x90, 0x90, 0xf7, 0xe0, 0x72, 0x1c, 0xd7, 0xa2, 0x21, 0x31, 0x4f, 0xa9, 0x35, 0x71,
+	0x2a, 0x88, 0xaf, 0x5d, 0xef, 0x45, 0x84, 0x66, 0x64, 0x8f, 0x7d, 0xf7, 0x9f, 0xc1, 0x9a, 0xe9,
+	0x8e, 0x66, 0x2b, 0xb1, 0x8f, 0x66, 0xde, 0xbb, 0x82, 0xcf, 0x95, 0x2f, 0x61, 0x32, 0x54, 0x7c,
+	0x9d, 0xc9, 0x1e, 0x74, 0xf7, 0xff, 0x98, 0xd9, 0x38, 0x90, 0x7e, 0xdd, 0xb8, 0x82, 0x3a, 0x1d,
+	0x38, 0xd4, 0xe4, 0xd5, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3,
+	0x18, 0x00, 0x00,
 }

+ 48 - 25
vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go → vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go

@@ -1,12 +1,48 @@
+// Code generated by protoc-gen-gogo.
+// source: descriptor.proto
+// DO NOT EDIT!
+
+/*
+Package descriptor is a generated protocol buffer package.
+
+It is generated from these files:
+	descriptor.proto
+
+It has these top-level messages:
+	FileDescriptorSet
+	FileDescriptorProto
+	DescriptorProto
+	FieldDescriptorProto
+	OneofDescriptorProto
+	EnumDescriptorProto
+	EnumValueDescriptorProto
+	ServiceDescriptorProto
+	MethodDescriptorProto
+	FileOptions
+	MessageOptions
+	FieldOptions
+	EnumOptions
+	EnumValueOptions
+	ServiceOptions
+	MethodOptions
+	UninterpretedOption
+	SourceCodeInfo
+*/
 package descriptor
 
 import fmt "fmt"
-
 import strings "strings"
 import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
 import sort "sort"
 import strconv "strconv"
 import reflect "reflect"
+import proto "github.com/gogo/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
 
 func (this *FileDescriptorSet) GoString() string {
 	if this == nil {
@@ -353,9 +389,7 @@ func (this *FileOptions) GoString() string {
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
-	if this.XXX_extensions != nil {
-		s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
-	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
 	if this.XXX_unrecognized != nil {
 		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
 	}
@@ -383,9 +417,7 @@ func (this *MessageOptions) GoString() string {
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
-	if this.XXX_extensions != nil {
-		s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
-	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
 	if this.XXX_unrecognized != nil {
 		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
 	}
@@ -419,9 +451,7 @@ func (this *FieldOptions) GoString() string {
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
-	if this.XXX_extensions != nil {
-		s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
-	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
 	if this.XXX_unrecognized != nil {
 		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
 	}
@@ -443,9 +473,7 @@ func (this *EnumOptions) GoString() string {
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
-	if this.XXX_extensions != nil {
-		s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
-	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
 	if this.XXX_unrecognized != nil {
 		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
 	}
@@ -464,9 +492,7 @@ func (this *EnumValueOptions) GoString() string {
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
-	if this.XXX_extensions != nil {
-		s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
-	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
 	if this.XXX_unrecognized != nil {
 		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
 	}
@@ -485,9 +511,7 @@ func (this *ServiceOptions) GoString() string {
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
-	if this.XXX_extensions != nil {
-		s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
-	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
 	if this.XXX_unrecognized != nil {
 		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
 	}
@@ -506,9 +530,7 @@ func (this *MethodOptions) GoString() string {
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
-	if this.XXX_extensions != nil {
-		s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n")
-	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
 	if this.XXX_unrecognized != nil {
 		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
 	}
@@ -616,11 +638,12 @@ func valueToGoStringDescriptor(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringDescriptor(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
+func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
 	if e == nil {
 		return "nil"
 	}
-	s := "map[int32]proto.Extension{"
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
 	keys := make([]int, 0, len(e))
 	for k := range e {
 		keys = append(keys, int(k))
@@ -630,6 +653,6 @@ func extensionToGoStringDescriptor(e map[int32]github_com_gogo_protobuf_proto.Ex
 	for _, k := range keys {
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
 	}
-	s += strings.Join(ss, ",") + "}"
+	s += strings.Join(ss, ",") + "})"
 	return s
 }

+ 3 - 1
vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go

@@ -1,4 +1,6 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
 // http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without

+ 3 - 1
vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go

@@ -1,4 +1,6 @@
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
 // http://github.com/gogo/protobuf
 //
 // Redistribution and use in source and binary forms, with or without

+ 1 - 1
vendor/src/github.com/golang/protobuf/proto/Makefile

@@ -39,5 +39,5 @@ test: install generate-test-pbs
 generate-test-pbs:
 	make install
 	make -C testdata
-	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
+	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
 	make

+ 9 - 3
vendor/src/github.com/golang/protobuf/proto/clone.go

@@ -84,9 +84,15 @@ func mergeStruct(out, in reflect.Value) {
 		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
 	}
 
-	if emIn, ok := in.Addr().Interface().(extendableProto); ok {
-		emOut := out.Addr().Interface().(extendableProto)
-		mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+	if emIn, ok := extendable(in.Addr().Interface()); ok {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
 	}
 
 	uf := in.FieldByName("XXX_unrecognized")
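The mergeStruct change above routes extension copying through the new extendable() accessor and its locked read/write maps rather than the old ExtensionMap method, so proto.Clone and proto.Merge keep copying extensions for both old- and new-style generated code. A hedged sketch, with pb.Base and pb.E_Opt standing in for a hypothetical extendable message and its registered extension:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/myapp/mypb" // hypothetical: extendable message Base and extension E_Opt
)

func main() {
	src := &pb.Base{}
	if err := proto.SetExtension(src, pb.E_Opt, proto.String("x")); err != nil {
		log.Fatal(err)
	}

	// Clone (which calls Merge under the hood) copies the extension set
	// through the locked accessors shown in the hunk above.
	dst := proto.Clone(src).(*pb.Base)
	val, _ := proto.GetExtension(dst, pb.E_Opt)
	fmt.Println(*(val.(*string))) // "x"
}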

+ 14 - 7
vendor/src/github.com/golang/protobuf/proto/decode.go

@@ -378,6 +378,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
 		wire := int(u & 0x7)
 		if wire == WireEndGroup {
 			if is_group {
+				if required > 0 {
+					// Not enough information to determine the exact field.
+					// (See below.)
+					return &RequiredNotSetError{"{Unknown}"}
+				}
 				return nil // input is satisfied
 			}
 			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
@@ -390,11 +395,12 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
 		if !ok {
 			// Maybe it's an extension?
 			if prop.extendable {
-				if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+				if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
 					if err = o.skip(st, tag, wire); err == nil {
-						ext := e.ExtensionMap()[int32(tag)] // may be missing
+						extmap := e.extensionsWrite()
+						ext := extmap[int32(tag)] // may be missing
 						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
-						e.ExtensionMap()[int32(tag)] = ext
+						extmap[int32(tag)] = ext
 					}
 					continue
 				}
@@ -768,10 +774,11 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
 		}
 	}
 	keyelem, valelem := keyptr.Elem(), valptr.Elem()
-	if !keyelem.IsValid() || !valelem.IsValid() {
-		// We did not decode the key or the value in the map entry.
-		// Either way, it's an invalid map entry.
-		return fmt.Errorf("proto: bad map data: missing key/val")
+	if !keyelem.IsValid() {
+		keyelem = reflect.Zero(p.mtype.Key())
+	}
+	if !valelem.IsValid() {
+		valelem = reflect.Zero(p.mtype.Elem())
 	}
 
 	v.SetMapIndex(keyelem, valelem)
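
With the change above, a wire-encoded map entry that omits its key or value no longer fails decoding; the missing side is filled with its zero value. A sketch under stated assumptions — pb.MapHolder is a hypothetical generated type with `map<int32, int32> m = 1;`, and the raw bytes below encode one entry carrying only the key:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/myapp/mypb" // hypothetical message: map<int32, int32> m = 1;
)

func main() {
	// One map entry that encodes only the key (5) and omits the value.
	raw := []byte{0x0a, 0x02, 0x08, 0x05}

	var msg pb.MapHolder // hypothetical generated type
	if err := proto.Unmarshal(raw, &msg); err != nil {
		log.Fatal(err) // the removed code returned "proto: bad map data: missing key/val" here
	}
	fmt.Println(msg.M[5]) // 0 — the missing value defaults to its zero value
}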

+ 49 - 11
vendor/src/github.com/golang/protobuf/proto/encode.go

@@ -64,8 +64,16 @@ var (
 	// a struct with a repeated field containing a nil element.
 	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
 
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
 	// ErrNil is the error returned if Marshal is called with nil.
 	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
 )
 
 // The fundamental encoders that put bytes on the wire.
@@ -74,6 +82,10 @@ var (
 
 const maxVarintBytes = 10 // maximum length of a varint
 
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
 // EncodeVarint returns the varint encoding of x.
 // This is the format for the
 // int32, int64, uint32, uint64, bool, and enum
@@ -273,6 +285,9 @@ func (p *Buffer) Marshal(pb Message) error {
 		stats.Encode++
 	}
 
+	if len(p.buf) > maxMarshalSize {
+		return ErrTooLarge
+	}
 	return err
 }
 
@@ -1058,10 +1073,25 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) {
 
 // Encode an extension map.
 func (o *Buffer) enc_map(p *Properties, base structPointer) error {
-	v := *structPointer_ExtMap(base, p.field)
-	if err := encodeExtensionMap(v); err != nil {
+	exts := structPointer_ExtMap(base, p.field)
+	if err := encodeExtensionsMap(*exts); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+	exts := structPointer_Extensions(base, p.field)
+	if err := encodeExtensions(exts); err != nil {
 		return err
 	}
+	v, _ := exts.extensionsRead()
+
+	return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
 	// Fast-path for common cases: zero or one extensions.
 	if len(v) <= 1 {
 		for _, e := range v {
@@ -1084,8 +1114,13 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
 }
 
 func size_map(p *Properties, base structPointer) int {
-	v := *structPointer_ExtMap(base, p.field)
-	return sizeExtensionMap(v)
+	v := structPointer_ExtMap(base, p.field)
+	return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+	v := structPointer_Extensions(base, p.field)
+	return extensionsSize(v)
 }
 
 // Encode a map field.
@@ -1114,7 +1149,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
 		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
 			return err
 		}
-		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
 			return err
 		}
 		return nil
@@ -1124,11 +1159,6 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
 	for _, key := range v.MapKeys() {
 		val := v.MapIndex(key)
 
-		// The only illegal map entry values are nil message pointers.
-		if val.Kind() == reflect.Ptr && val.IsNil() {
-			return errors.New("proto: map has nil element")
-		}
-
 		keycopy.Set(key)
 		valcopy.Set(val)
 
@@ -1216,13 +1246,18 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
 					return err
 				}
 			}
+			if len(o.buf) > maxMarshalSize {
+				return ErrTooLarge
+			}
 		}
 	}
 
 	// Do oneof fields.
 	if prop.oneofMarshaler != nil {
 		m := structPointer_Interface(base, prop.stype).(Message)
-		if err := prop.oneofMarshaler(m, o); err != nil {
+		if err := prop.oneofMarshaler(m, o); err == ErrNil {
+			return errOneofHasNil
+		} else if err != nil {
 			return err
 		}
 	}
@@ -1230,6 +1265,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
 	// Add unrecognized fields at the end.
 	if prop.unrecField.IsValid() {
 		v := *structPointer_Bytes(base, prop.unrecField)
+		if len(o.buf)+len(v) > maxMarshalSize {
+			return ErrTooLarge
+		}
 		if len(v) > 0 {
 			o.buf = append(o.buf, v...)
 		}
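
The new ErrTooLarge sentinel above is returned whenever the encoded output would exceed maxMarshalSize (2^31 - 1 bytes). A small sketch of how a caller might distinguish that failure, assuming a hypothetical generated type pb.Huge:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/myapp/mypb" // hypothetical generated package
)

func encode(msg *pb.Huge) ([]byte, error) {
	data, err := proto.Marshal(msg)
	switch {
	case err == proto.ErrTooLarge:
		// Encoded size would exceed 2 GB; split or trim the payload.
		return nil, err
	case err != nil:
		return nil, err
	}
	return data, nil
}

func main() {
	if _, err := encode(&pb.Huge{}); err != nil {
		log.Fatal(err)
	}
}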

+ 29 - 5
vendor/src/github.com/golang/protobuf/proto/equal.go

@@ -54,13 +54,17 @@ Equality is defined in this way:
     in a proto3 .proto file, fields are not "set"; specifically,
     zero length proto3 "bytes" fields are equal (nil == {}).
   - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal (a "bytes" field,
-    although represented by []byte, is not a repeated field)
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
   - Two unset fields are equal.
   - Two unknown field sets are equal if their current
     encoded state is equal.
   - Two extension sets are equal iff they have corresponding
     elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
   - Every other combination of things are not equal.
 
 The return value is undefined if a and b are not protocol buffers.
@@ -121,9 +125,16 @@ func equalStruct(v1, v2 reflect.Value) bool {
 		}
 	}
 
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
 	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
 		em2 := v2.FieldByName("XXX_extensions")
-		if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
 			return false
 		}
 	}
@@ -184,6 +195,13 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
 		}
 		return true
 	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
 		return equalAny(v1.Elem(), v2.Elem(), prop)
 	case reflect.Slice:
 		if v1.Type().Elem().Kind() == reflect.Uint8 {
@@ -223,8 +241,14 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
 }
 
 // base is the struct type that the extensions are based on.
-// em1 and em2 are extension maps.
-func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
 	if len(em1) != len(em2) {
 		return false
 	}
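
Editor's note (not part of the vendored diff): the revised equality rules above make zero-length proto3 bytes equal to nil, compare map fields element-wise, and explicitly handle nil message pointers inside maps. The sketch below exercises those rules; examplepb.Item, with a bytes field Data and a map<string, Item> field Children, is a hypothetical proto3-generated type.

// Sketch, assuming a hypothetical generated type examplepb.Item; only
// proto.Equal itself is taken as given.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/examplepb" // hypothetical generated package
)

func main() {
	// Zero-length proto3 bytes compare equal to nil.
	fmt.Println(proto.Equal(
		&examplepb.Item{Data: nil},
		&examplepb.Item{Data: []byte{}},
	)) // true

	// Map values may be nil message pointers; nil vs. non-nil is unequal.
	a := &examplepb.Item{Children: map[string]*examplepb.Item{"x": nil}}
	b := &examplepb.Item{Children: map[string]*examplepb.Item{"x": &examplepb.Item{}}}
	fmt.Println(proto.Equal(a, b)) // false
}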

+ 207 - 20
vendor/src/github.com/golang/protobuf/proto/extensions.go

@@ -52,14 +52,99 @@ type ExtensionRange struct {
 	Start, End int32 // both inclusive
 }
 
-// extendableProto is an interface implemented by any protocol buffer that may be extended.
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
 type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
 	Message
 	ExtensionRangeArray() []ExtensionRange
 	ExtensionMap() map[int32]Extension
 }
 
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+	if ep, ok := p.(extendableProto); ok {
+		return ep, ok
+	}
+	if ep, ok := p.(extendableProtoV1); ok {
+		return extensionAdapter{ep}, ok
+	}
+	return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing Elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
 var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
 
 // ExtensionDesc represents an extension specification.
 // Used in generated code from the protocol compiler.
@@ -92,8 +177,13 @@ type Extension struct {
 }
 
 // SetRawExtension is for testing only.
-func SetRawExtension(base extendableProto, id int32, b []byte) {
-	base.ExtensionMap()[id] = Extension{enc: b}
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, ok := extendable(base)
+	if !ok {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
 }
 
 // isExtensionField returns true iff the given field number is in an extension range.
@@ -108,8 +198,12 @@ func isExtensionField(pb extendableProto, field int32) bool {
 
 // checkExtensionTypes checks that the given extension is valid for pb.
 func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
 	// Check the extended type.
-	if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
 		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
 	}
 	// Check the range.
@@ -155,8 +249,19 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
 	return prop
 }
 
-// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
-func encodeExtensionMap(m map[int32]Extension) error {
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return nil // fast path
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	return encodeExtensionsMap(m)
+}
+
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensionsMap(m map[int32]Extension) error {
 	for k, e := range m {
 		if e.value == nil || e.desc == nil {
 			// Extension is only in its encoded form.
@@ -184,7 +289,17 @@ func encodeExtensionMap(m map[int32]Extension) error {
 	return nil
 }
 
-func sizeExtensionMap(m map[int32]Extension) (n int) {
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
 	for _, e := range m {
 		if e.value == nil || e.desc == nil {
 			// Extension is only in its encoded form.
@@ -209,26 +324,51 @@ func sizeExtensionMap(m map[int32]Extension) (n int) {
 }
 
 // HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
 	// TODO: Check types, field numbers, etc.?
-	_, ok := pb.ExtensionMap()[extension.Field]
+	epb, ok := extendable(pb)
+	if !ok {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok = extmap[extension.Field]
+	mu.Unlock()
 	return ok
 }
 
 // ClearExtension removes the given extension from pb.
-func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return
+	}
 	// TODO: Check types, field numbers, etc.?
-	delete(pb.ExtensionMap(), extension.Field)
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
 }
 
 // GetExtension parses and returns the given extension of pb.
 // If the extension is not present and has no default value it returns ErrMissingExtension.
-func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
-	if err := checkExtensionTypes(pb, extension); err != nil {
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, errors.New("proto: not an extendable proto")
+	}
+
+	if err := checkExtensionTypes(epb, extension); err != nil {
 		return nil, err
 	}
 
-	emap := pb.ExtensionMap()
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
 	e, ok := emap[extension.Field]
 	if !ok {
 		// defaultExtensionValue returns the default value or
@@ -332,10 +472,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
 // GetExtensions returns a slice of the extensions present in pb that are also listed in es.
 // The returned slice has the same length as es; missing extensions will appear as nil elements.
 func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
-	epb, ok := pb.(extendableProto)
+	epb, ok := extendable(pb)
 	if !ok {
-		err = errors.New("proto: not an extendable proto")
-		return
+		return nil, errors.New("proto: not an extendable proto")
 	}
 	extensions = make([]interface{}, len(es))
 	for i, e := range es {
@@ -350,9 +489,44 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
 	return
 }
 
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
 // SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
-	if err := checkExtensionTypes(pb, extension); err != nil {
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, ok := extendable(pb)
+	if !ok {
+		return errors.New("proto: not an extendable proto")
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
 		return err
 	}
 	typ := reflect.TypeOf(extension.ExtensionType)
@@ -368,10 +542,23 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
 		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
 	}
 
-	pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
 	return nil
 }
 
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
 // A global registry of extensions.
 // The generated code will register the generated descriptors by calling RegisterExtension.
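
Editor's note (not part of the vendored diff): the extension helpers above now accept a plain proto.Message and adapt old-style ExtensionMap() messages internally via extensionAdapter, so callers pass generated messages directly. A sketch of the user-facing API under the assumption of a hypothetical generated message examplepb.Base and an optional string extension descriptor examplepb.E_Color:

// Sketch; examplepb.Base and examplepb.E_Color are hypothetical generated
// identifiers, while the proto.* helpers are the ones changed in the hunks above.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/examplepb" // hypothetical generated package
)

func main() {
	msg := &examplepb.Base{}

	// SetExtension now takes any proto.Message; it reports
	// "not an extendable proto" if the type supports neither extension layout.
	if err := proto.SetExtension(msg, examplepb.E_Color, proto.String("blue")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.HasExtension(msg, examplepb.E_Color)) // true

	v, err := proto.GetExtension(msg, examplepb.E_Color)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // "blue"

	proto.ClearExtension(msg, examplepb.E_Color)
	proto.ClearAllExtensions(msg) // helper added in this change
}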
 

+ 5 - 1
vendor/src/github.com/golang/protobuf/proto/lib.go

@@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
 // temporary Buffer and are fine for most applications.
 type Buffer struct {
 	buf   []byte // encode/decode byte stream
-	index int    // write point
+	index int    // read point
 
 	// pools of basic types to amortize allocation.
 	bools   []bool
@@ -889,6 +889,10 @@ func isProto3Zero(v reflect.Value) bool {
 	return false
 }
 
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
 // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
 // to assert that that code is compatible with this version of the proto package.
 const ProtoPackageIsVersion1 = true
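
Editor's note (not part of the vendored diff): ProtoPackageIsVersion2 exists so that .pb.go files emitted by the matching protoc-gen-go fail to compile against an older proto runtime instead of misbehaving at run time. Roughly what such a generated file references (package name and the rest of the generated code are hypothetical/elided):

// Sketch of the compile-time guard a newer protoc-gen-go emits.
package examplepb

import proto "github.com/golang/protobuf/proto"

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion2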

+ 37 - 6
vendor/src/github.com/golang/protobuf/proto/message_set.go

@@ -149,9 +149,21 @@ func skipVarint(buf []byte) []byte {
 
 // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
 // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
-	if err := encodeExtensionMap(m); err != nil {
-		return nil, err
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		if err := encodeExtensions(exts); err != nil {
+			return nil, err
+		}
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		if err := encodeExtensionsMap(exts); err != nil {
+			return nil, err
+		}
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
 	}
 
 	// Sort extension IDs to provide a deterministic encoding.
@@ -178,7 +190,17 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
 
 // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
 // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
 	ms := new(messageSet)
 	if err := Unmarshal(buf, ms); err != nil {
 		return err
@@ -209,7 +231,16 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
 
 // MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
 // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
 	var b bytes.Buffer
 	b.WriteByte('{')
 
@@ -252,7 +283,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
 
 // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
 // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
 	// Common-case fast path.
 	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
 		return nil
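
Editor's note (not part of the vendored diff): the message-set helpers above now accept either the new embedded XXX_InternalExtensions struct or a bare map[int32]Extension, so both generations of generated code keep working. A hedged sketch of the new-style call sites a message generated with the message_set_wire_format option might contain; MyMessageSet and its extension range are hypothetical, and real generated code includes more than shown here.

// Sketch of a hypothetical new-style generated message-set type.
package examplepb

import proto "github.com/golang/protobuf/proto"

type MyMessageSet struct {
	proto.XXX_InternalExtensions `json:"-"`
	XXX_unrecognized             []byte `json:"-"`
}

func (m *MyMessageSet) Reset()         { *m = MyMessageSet{} }
func (m *MyMessageSet) String() string { return proto.CompactTextString(m) }
func (*MyMessageSet) ProtoMessage()    {}

func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange {
	return []proto.ExtensionRange{{Start: 4, End: 1<<29 - 1}} // hypothetical range
}

// New-style methods hand the internal extensions struct to the helpers
// instead of a raw map[int32]Extension.
func (m *MyMessageSet) MarshalJSON() ([]byte, error) {
	return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
}

func (m *MyMessageSet) UnmarshalJSON(buf []byte) error {
	return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
}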

+ 6 - 1
vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go

@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// +build appengine
+// +build appengine js
 
 // This file contains an implementation of proto field accesses using package reflect.
 // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@@ -139,6 +139,11 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
 	return structPointer_ifield(p, f).(*[]string)
 }
 
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
 // ExtMap returns the address of an extension map field in the struct.
 func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
 	return structPointer_ifield(p, f).(*map[int32]Extension)

+ 5 - 1
vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go

@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// +build !appengine
+// +build !appengine,!js
 
 // This file contains the implementation of the proto field accesses using package unsafe.
 
@@ -126,6 +126,10 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
 }
 
 // ExtMap returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
 func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
 	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
 }

+ 41 - 15
vendor/src/github.com/golang/protobuf/proto/properties.go

@@ -473,17 +473,13 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
 			p.dec = (*Buffer).dec_slice_int64
 			p.packedDec = (*Buffer).dec_slice_packed_int64
 		case reflect.Uint8:
-			p.enc = (*Buffer).enc_slice_byte
 			p.dec = (*Buffer).dec_slice_byte
-			p.size = size_slice_byte
-			// This is a []byte, which is either a bytes field,
-			// or the value of a map field. In the latter case,
-			// we always encode an empty []byte, so we should not
-			// use the proto3 enc/size funcs.
-			// f == nil iff this is the key/value of a map field.
-			if p.proto3 && f != nil {
+			if p.proto3 {
 				p.enc = (*Buffer).enc_proto3_slice_byte
 				p.size = size_proto3_slice_byte
+			} else {
+				p.enc = (*Buffer).enc_slice_byte
+				p.size = size_slice_byte
 			}
 		case reflect.Float32, reflect.Float64:
 			switch t2.Bits() {
@@ -682,7 +678,8 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	propertiesMap[t] = prop
 
 	// build properties
-	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+		reflect.PtrTo(t).Implements(extendableProtoV1Type)
 	prop.unrecField = invalidField
 	prop.Prop = make([]*Properties, t.NumField())
 	prop.order = make([]int, t.NumField())
@@ -693,15 +690,22 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 		name := f.Name
 		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
 
-		if f.Name == "XXX_extensions" { // special case
+		if f.Name == "XXX_InternalExtensions" { // special case
+			p.enc = (*Buffer).enc_exts
+			p.dec = nil // not needed
+			p.size = size_exts
+		} else if f.Name == "XXX_extensions" { // special case
 			p.enc = (*Buffer).enc_map
 			p.dec = nil // not needed
 			p.size = size_map
-		}
-		if f.Name == "XXX_unrecognized" { // special case
+		} else if f.Name == "XXX_unrecognized" { // special case
 			prop.unrecField = toField(&f)
 		}
-		oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
 		prop.Prop[i] = p
 		prop.order[i] = i
 		if debug {
@@ -711,7 +715,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 			}
 			print("\n")
 		}
-		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
 			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
 		}
 	}
@@ -840,7 +844,29 @@ func RegisterType(x Message, name string) {
 }
 
 // MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
 
 // MessageType returns the message type (pointer to struct) for a named message.
 func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
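
Editor's note (not part of the vendored diff): the new file registry above maps a .proto path to its compressed FileDescriptorProto, and MessageName now prefers a message's own XXX_MessageName method before falling back to the type registry. The sketch below looks both up; examplepb.Item and the "example/example.proto" path are hypothetical, and the descriptor bytes are assumed to be gzip-compressed as the doc comment's "compressed" suggests.

// Sketch; only proto.MessageName and proto.FileDescriptor from the hunk
// above are taken as given.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/examplepb" // hypothetical generated package
)

func main() {
	msg := &examplepb.Item{}
	fmt.Println(proto.MessageName(msg)) // e.g. "example.Item"

	gz := proto.FileDescriptor("example/example.proto") // hypothetical path
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(raw), "bytes of serialized FileDescriptorProto")
}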

Some files were not shown because too many files changed in this diff