Merge pull request #25058 from tiborvass/additional-cherrypicks-for-1.12.0-rc5

[bump_v1.12.0] Additional cherrypicks for 1.12.0 rc5
Tibor Vass, 9 years ago
Parent commit: 4ed7f63f0d

+ 1 - 0
api/client/service/create.go

@@ -30,6 +30,7 @@ func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command {
 	addServiceFlags(cmd, opts)
 
 	flags.VarP(&opts.labels, flagLabel, "l", "Service labels")
+	flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels")
 	flags.VarP(&opts.env, flagEnv, "e", "Set environment variables")
 	flags.Var(&opts.mounts, flagMount, "Attach a mount to the service")
 	flags.StringSliceVar(&opts.constraints, flagConstraint, []string{}, "Placement constraints")
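
The new --container-label flag is registered with flags.Var, which accepts any type implementing pflag's Value interface; each occurrence of the flag appends to the underlying list. A minimal standalone sketch of that mechanism, with a simplified listOpts standing in for the real opts.ListOpts:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// listOpts is a simplified stand-in for opts.ListOpts: it satisfies
// pflag.Value, so every --container-label occurrence appends a value.
type listOpts struct{ values []string }

func (l *listOpts) String() string     { return fmt.Sprint(l.values) }
func (l *listOpts) Set(v string) error { l.values = append(l.values, v); return nil }
func (l *listOpts) Type() string       { return "list" }

func main() {
	var labels listOpts
	flags := pflag.NewFlagSet("create", pflag.ContinueOnError)
	flags.Var(&labels, "container-label", "Container labels")

	// Repeated flags accumulate rather than overwrite.
	_ = flags.Parse([]string{
		"--container-label", "com.example.team=web",
		"--container-label", "com.example.tier=frontend",
	})
	fmt.Println(labels.values) // [com.example.team=web com.example.tier=frontend]
}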

+ 54 - 48
api/client/service/opts.go

@@ -392,14 +392,15 @@ func ValidatePort(value string) (string, error) {
 }
 
 type serviceOptions struct {
-	name    string
-	labels  opts.ListOpts
-	image   string
-	args    []string
-	env     opts.ListOpts
-	workdir string
-	user    string
-	mounts  MountOpt
+	name            string
+	labels          opts.ListOpts
+	containerLabels opts.ListOpts
+	image           string
+	args            []string
+	env             opts.ListOpts
+	workdir         string
+	user            string
+	mounts          MountOpt
 
 	resources resourceOptions
 	stopGrace DurationOpt
@@ -420,8 +421,9 @@ type serviceOptions struct {
 
 func newServiceOptions() *serviceOptions {
 	return &serviceOptions{
-		labels: opts.NewListOpts(runconfigopts.ValidateEnv),
-		env:    opts.NewListOpts(runconfigopts.ValidateEnv),
+		labels:          opts.NewListOpts(runconfigopts.ValidateEnv),
+		containerLabels: opts.NewListOpts(runconfigopts.ValidateEnv),
+		env:             opts.NewListOpts(runconfigopts.ValidateEnv),
 		endpoint: endpointOptions{
 			ports: opts.NewListOpts(ValidatePort),
 		},
@@ -442,6 +444,7 @@ func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) {
 				Image:           opts.image,
 				Args:            opts.args,
 				Env:             opts.env.GetAll(),
+				Labels:          runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()),
 				Dir:             opts.workdir,
 				User:            opts.user,
 				Mounts:          opts.mounts.Value(),
@@ -516,42 +519,45 @@ func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
 }
 
 const (
-	flagConstraint          = "constraint"
-	flagConstraintRemove    = "constraint-rm"
-	flagConstraintAdd       = "constraint-add"
-	flagEndpointMode        = "endpoint-mode"
-	flagEnv                 = "env"
-	flagEnvRemove           = "env-rm"
-	flagEnvAdd              = "env-add"
-	flagLabel               = "label"
-	flagLabelRemove         = "label-rm"
-	flagLabelAdd            = "label-add"
-	flagLimitCPU            = "limit-cpu"
-	flagLimitMemory         = "limit-memory"
-	flagMode                = "mode"
-	flagMount               = "mount"
-	flagMountRemove         = "mount-rm"
-	flagMountAdd            = "mount-add"
-	flagName                = "name"
-	flagNetwork             = "network"
-	flagNetworkRemove       = "network-rm"
-	flagNetworkAdd          = "network-add"
-	flagPublish             = "publish"
-	flagPublishRemove       = "publish-rm"
-	flagPublishAdd          = "publish-add"
-	flagReplicas            = "replicas"
-	flagReserveCPU          = "reserve-cpu"
-	flagReserveMemory       = "reserve-memory"
-	flagRestartCondition    = "restart-condition"
-	flagRestartDelay        = "restart-delay"
-	flagRestartMaxAttempts  = "restart-max-attempts"
-	flagRestartWindow       = "restart-window"
-	flagStopGracePeriod     = "stop-grace-period"
-	flagUpdateDelay         = "update-delay"
-	flagUpdateFailureAction = "update-failure-action"
-	flagUpdateParallelism   = "update-parallelism"
-	flagUser                = "user"
-	flagRegistryAuth        = "with-registry-auth"
-	flagLogDriver           = "log-driver"
-	flagLogOpt              = "log-opt"
+	flagConstraint           = "constraint"
+	flagConstraintRemove     = "constraint-rm"
+	flagConstraintAdd        = "constraint-add"
+	flagContainerLabel       = "container-label"
+	flagContainerLabelRemove = "container-label-rm"
+	flagContainerLabelAdd    = "container-label-add"
+	flagEndpointMode         = "endpoint-mode"
+	flagEnv                  = "env"
+	flagEnvRemove            = "env-rm"
+	flagEnvAdd               = "env-add"
+	flagLabel                = "label"
+	flagLabelRemove          = "label-rm"
+	flagLabelAdd             = "label-add"
+	flagLimitCPU             = "limit-cpu"
+	flagLimitMemory          = "limit-memory"
+	flagMode                 = "mode"
+	flagMount                = "mount"
+	flagMountRemove          = "mount-rm"
+	flagMountAdd             = "mount-add"
+	flagName                 = "name"
+	flagNetwork              = "network"
+	flagNetworkRemove        = "network-rm"
+	flagNetworkAdd           = "network-add"
+	flagPublish              = "publish"
+	flagPublishRemove        = "publish-rm"
+	flagPublishAdd           = "publish-add"
+	flagReplicas             = "replicas"
+	flagReserveCPU           = "reserve-cpu"
+	flagReserveMemory        = "reserve-memory"
+	flagRestartCondition     = "restart-condition"
+	flagRestartDelay         = "restart-delay"
+	flagRestartMaxAttempts   = "restart-max-attempts"
+	flagRestartWindow        = "restart-window"
+	flagStopGracePeriod      = "stop-grace-period"
+	flagUpdateDelay          = "update-delay"
+	flagUpdateFailureAction  = "update-failure-action"
+	flagUpdateParallelism    = "update-parallelism"
+	flagUser                 = "user"
+	flagRegistryAuth         = "with-registry-auth"
+	flagLogDriver            = "log-driver"
+	flagLogOpt               = "log-opt"
 )
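
In ToService above, the accumulated key=value strings become the Labels map on the container spec. A standalone sketch of the conversion that runconfigopts.ConvertKVStringsToMap performs (assuming split-on-first-'=' semantics, with a bare key mapped to an empty value):

package main

import (
	"fmt"
	"strings"
)

// convertKVStringsToMap mirrors runconfigopts.ConvertKVStringsToMap:
// split each entry on the first '=', bare keys get an empty value.
func convertKVStringsToMap(values []string) map[string]string {
	result := make(map[string]string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = ""
		} else {
			result[kv[0]] = kv[1]
		}
	}
	return result
}

func main() {
	labels := []string{"com.example.team=web", "com.example.debug"}
	fmt.Println(convertKVStringsToMap(labels))
	// map[com.example.debug: com.example.team:web]
}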

+ 37 - 11
api/client/service/update.go

@@ -38,11 +38,13 @@ func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
 
 	flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable")
 	flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key")
+	flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key")
 	flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path")
 	flags.Var(newListOptsVar(), flagPublishRemove, "Remove a published port by its target port")
 	flags.Var(newListOptsVar(), flagNetworkRemove, "Remove a network by name")
 	flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint")
 	flags.Var(&opts.labels, flagLabelAdd, "Add or update service labels")
+	flags.Var(&opts.containerLabels, flagContainerLabelAdd, "Add or update container labels")
 	flags.Var(&opts.env, flagEnvAdd, "Add or update environment variables")
 	flags.Var(&opts.mounts, flagMountAdd, "Add or update a mount on a service")
 	flags.StringSliceVar(&opts.constraints, flagConstraintAdd, []string{}, "Add or update placement constraints")
@@ -96,7 +98,6 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID stri
 }
 
 func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
-
 	updateString := func(flag string, field *string) {
 		if flags.Changed(flag) {
 			*field, _ = flags.GetString(flag)
@@ -115,9 +116,10 @@ func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
 		}
 	}
 
-	updateDurationOpt := func(flag string, field *time.Duration) {
+	updateDurationOpt := func(flag string, field **time.Duration) {
 		if flags.Changed(flag) {
-			*field = *flags.Lookup(flag).Value.(*DurationOpt).Value()
+			val := *flags.Lookup(flag).Value.(*DurationOpt).Value()
+			*field = &val
 		}
 	}
 
@@ -127,9 +129,10 @@ func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
 		}
 	}
 
-	updateUint64Opt := func(flag string, field *uint64) {
+	updateUint64Opt := func(flag string, field **uint64) {
 		if flags.Changed(flag) {
-			*field = *flags.Lookup(flag).Value.(*Uint64Opt).Value()
+			val := *flags.Lookup(flag).Value.(*Uint64Opt).Value()
+			*field = &val
 		}
 	}
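
The signature change from *time.Duration to **time.Duration (and likewise *uint64 to **uint64) is a nil-safety fix: fields such as StopGracePeriod and RestartPolicy.Delay are themselves pointers that start out nil, so the old code dereferenced nil the first time the flag was set. Passing the field's address lets the helper allocate a fresh value instead. A minimal sketch of the pattern:

package main

import (
	"fmt"
	"time"
)

// setDuration writes through a pointer-to-pointer, so a nil field
// (e.g. spec.StopGracePeriod before any update) can be populated safely.
func setDuration(field **time.Duration, val time.Duration) {
	v := val // copy, so the field doesn't alias the caller's variable
	*field = &v
}

type containerSpec struct {
	StopGracePeriod *time.Duration // nil until explicitly set
}

func main() {
	var spec containerSpec
	// With the old *time.Duration signature this would have required
	// dereferencing spec.StopGracePeriod while it is still nil.
	setDuration(&spec.StopGracePeriod, 10*time.Second)
	fmt.Println(*spec.StopGracePeriod) // 10s
}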
 
@@ -145,6 +148,7 @@ func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
 
 	updateString(flagName, &spec.Name)
 	updateLabels(flags, &spec.Labels)
+	updateContainerLabels(flags, &cspec.Labels)
 	updateString("image", &cspec.Image)
 	updateStringToSlice(flags, "args", &cspec.Args)
 	updateEnvironment(flags, &cspec.Env)
@@ -156,7 +160,6 @@ func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
 		taskResources().Limits = &swarm.Resources{}
 		updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs)
 		updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes)
-
 	}
 	if flags.Changed(flagReserveCPU) || flags.Changed(flagReserveMemory) {
 		taskResources().Reservations = &swarm.Resources{}
@@ -164,7 +167,7 @@ func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
 		updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes)
 	}
 
-	updateDurationOpt(flagStopGracePeriod, cspec.StopGracePeriod)
+	updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod)
 
 	if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) {
 		if task.RestartPolicy == nil {
@@ -175,9 +178,9 @@ func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
 			value, _ := flags.GetString(flagRestartCondition)
 			task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value)
 		}
-		updateDurationOpt(flagRestartDelay, task.RestartPolicy.Delay)
-		updateUint64Opt(flagRestartMaxAttempts, task.RestartPolicy.MaxAttempts)
-		updateDurationOpt((flagRestartWindow), task.RestartPolicy.Window)
+		updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay)
+		updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts)
+		updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window)
 	}
 
 	if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) {
@@ -203,6 +206,9 @@ func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
 	updateNetworks(flags, &spec.Networks)
 	if flags.Changed(flagEndpointMode) {
 		value, _ := flags.GetString(flagEndpointMode)
+		if spec.EndpointSpec == nil {
+			spec.EndpointSpec = &swarm.EndpointSpec{}
+		}
 		spec.EndpointSpec.Mode = swarm.ResolutionMode(value)
 	}
 
@@ -248,6 +254,26 @@ func updatePlacement(flags *pflag.FlagSet, placement *swarm.Placement) {
 	placement.Constraints = removeItems(placement.Constraints, toRemove, itemKey)
 }
 
+func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) {
+	if flags.Changed(flagContainerLabelAdd) {
+		if *field == nil {
+			*field = map[string]string{}
+		}
+
+		values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll()
+		for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
+			(*field)[key] = value
+		}
+	}
+
+	if *field != nil && flags.Changed(flagContainerLabelRemove) {
+		toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll()
+		for _, label := range toRemove {
+			delete(*field, label)
+		}
+	}
+}
+
 func updateLabels(flags *pflag.FlagSet, field *map[string]string) {
 	if flags.Changed(flagLabelAdd) {
 		if *field == nil {
@@ -386,7 +412,7 @@ func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error
 		return nil
 	}
 
-	if serviceMode.Replicated == nil {
+	if serviceMode == nil || serviceMode.Replicated == nil {
 		return fmt.Errorf("replicas can only be used with replicated mode")
 	}
 	serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value()
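
updateContainerLabels merges additions into the (lazily allocated) label map before processing removals, so adding and removing the same key in one update leaves it removed. A standalone sketch of those semantics, with a hypothetical applyLabelUpdates helper:

package main

import "fmt"

// applyLabelUpdates mimics updateContainerLabels: merge the adds into the
// (possibly nil) map, then delete the removed keys.
func applyLabelUpdates(field *map[string]string, adds map[string]string, removes []string) {
	if len(adds) > 0 {
		if *field == nil {
			*field = map[string]string{}
		}
		for k, v := range adds {
			(*field)[k] = v
		}
	}
	if *field != nil {
		for _, k := range removes {
			delete(*field, k)
		}
	}
}

func main() {
	var labels map[string]string // starts nil, like a fresh ContainerSpec
	applyLabelUpdates(&labels,
		map[string]string{"com.example.a": "1", "com.example.b": "2"},
		[]string{"com.example.b"})
	fmt.Println(labels) // map[com.example.a:1]
}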

+ 0 - 1
api/client/swarm/cmd.go

@@ -25,7 +25,6 @@ func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command {
 		newJoinTokenCommand(dockerCli),
 		newUpdateCommand(dockerCli),
 		newLeaveCommand(dockerCli),
-		newInspectCommand(dockerCli),
 	)
 	return cmd
 }

+ 0 - 47
api/client/swarm/inspect.go

@@ -1,47 +0,0 @@
-package swarm
-
-import (
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/api/client/inspect"
-	"github.com/docker/docker/cli"
-	"github.com/spf13/cobra"
-)
-
-type inspectOptions struct {
-	format string
-}
-
-func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
-	var opts inspectOptions
-
-	cmd := &cobra.Command{
-		Use:   "inspect [OPTIONS]",
-		Short: "Inspect the swarm",
-		Args:  cli.NoArgs,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runInspect(dockerCli, opts)
-		},
-	}
-
-	flags := cmd.Flags()
-	flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template")
-	return cmd
-}
-
-func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
-	client := dockerCli.Client()
-	ctx := context.Background()
-
-	swarm, err := client.SwarmInspect(ctx)
-	if err != nil {
-		return err
-	}
-
-	getRef := func(_ string) (interface{}, []byte, error) {
-		return swarm, nil, nil
-	}
-
-	return inspect.Inspect(dockerCli.Out(), []string{""}, opts.format, getRef)
-}

+ 14 - 1
api/client/system/info.go

@@ -3,6 +3,7 @@ package system
 import (
 	"fmt"
 	"strings"
+	"time"
 
 	"golang.org/x/net/context"
 
@@ -30,7 +31,8 @@ func NewInfoCommand(dockerCli *client.DockerCli) *cobra.Command {
 }
 
 func runInfo(dockerCli *client.DockerCli) error {
-	info, err := dockerCli.Client().Info(context.Background())
+	ctx := context.Background()
+	info, err := dockerCli.Client().Info(ctx)
 	if err != nil {
 		return err
 	}
@@ -83,8 +85,19 @@ func runInfo(dockerCli *client.DockerCli) error {
 		}
 		fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable)
 		if info.Swarm.ControlAvailable {
+			fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID)
 			fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
 			fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
+			fmt.Fprintf(dockerCli.Out(), " Orchestration:\n")
+			fmt.Fprintf(dockerCli.Out(), "  Task History Retention Limit: %d\n", info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit)
+			fmt.Fprintf(dockerCli.Out(), " Raft:\n")
+			fmt.Fprintf(dockerCli.Out(), "  Snapshot interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval)
+			fmt.Fprintf(dockerCli.Out(), "  Heartbeat tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick)
+			fmt.Fprintf(dockerCli.Out(), "  Election tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick)
+			fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n")
+			fmt.Fprintf(dockerCli.Out(), "  Heartbeat period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod)))
+			fmt.Fprintf(dockerCli.Out(), " CA configuration:\n")
+			fmt.Fprintf(dockerCli.Out(), "  Expiry duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry))
 		}
 		fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr)
 	}
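
The new Swarm block prints durations via go-units. Dispatcher.HeartbeatPeriod arrives as a raw nanosecond count and is cast to time.Duration first, while CAConfig.NodeCertExpiry is already a time.Duration. A small sketch of both conversions (the 5-second and 90-day values are illustrative):

package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	// HeartbeatPeriod is a plain integer nanosecond count,
	// so it needs a cast before formatting.
	heartbeatPeriod := int64(5 * time.Second)
	fmt.Println(units.HumanDuration(time.Duration(heartbeatPeriod))) // "5 seconds"

	// NodeCertExpiry is already a time.Duration.
	certExpiry := 2160 * time.Hour // 90 days
	fmt.Println(units.HumanDuration(certExpiry)) // "3 months"
}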

+ 5 - 0
daemon/cluster/cluster.go

@@ -729,6 +729,11 @@ func (c *Cluster) Info() types.Info {
 
 	if c.isActiveManager() {
 		info.ControlAvailable = true
+		swarm, err := c.Inspect()
+		if err != nil {
+			info.Error = err.Error()
+		}
+		info.Cluster = swarm
 		if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil {
 			info.Nodes = len(r.Nodes)
 			for _, n := range r.Nodes {

+ 53 - 17
daemon/cluster/executor/container/controller.go

@@ -23,6 +23,10 @@ type controller struct {
 	adapter *containerAdapter
 	closed  chan struct{}
 	err     error
+
+	pulled     chan struct{} // closed after pull
+	cancelPull func()        // cancels pull context if not nil
+	pullErr    error         // pull error, only read after pulled closed
 }
 
 var _ exec.Controller = &controller{}
@@ -84,24 +88,40 @@ func (r *controller) Prepare(ctx context.Context) error {
 		return err
 	}
 
-	if err := r.adapter.pullImage(ctx); err != nil {
-		cause := errors.Cause(err)
-		if cause == context.Canceled || cause == context.DeadlineExceeded {
-			return err
-		}
+	if r.pulled == nil {
+		// Fork the pull to a different context to allow pull to continue
+		// on re-entrant calls to Prepare. This ensures that Prepare can be
+		// idempotent and not incur the extra cost of pulling when
+		// cancelled on updates.
+		var pctx context.Context
+
+		r.pulled = make(chan struct{})
+		pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller.
+
+		go func() {
+			defer close(r.pulled)
+			r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled
+		}()
+	}
 
-		// NOTE(stevvooe): We always try to pull the image to make sure we have
-		// the most up to date version. This will return an error, but we only
-		// log it. If the image truly doesn't exist, the create below will
-		// error out.
-		//
-		// This gives us some nice behavior where we use up to date versions of
-		// mutable tags, but will still run if the old image is available but a
-		// registry is down.
-		//
-		// If you don't want this behavior, lock down your image to an
-		// immutable tag or digest.
-		log.G(ctx).WithError(err).Error("pulling image failed")
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-r.pulled:
+		if r.pullErr != nil {
+			// NOTE(stevvooe): We always try to pull the image to make sure we have
+			// the most up to date version. This will return an error, but we only
+			// log it. If the image truly doesn't exist, the create below will
+			// error out.
+			//
+			// This gives us some nice behavior where we use up to date versions of
+			// mutable tags, but will still run if the old image is available but a
+			// registry is down.
+			//
+			// If you don't want this behavior, lock down your image to an
+			// immutable tag or digest.
+			log.G(ctx).WithError(r.pullErr).Error("pulling image failed")
+		}
 	}
 
 	if err := r.adapter.create(ctx, r.backend); err != nil {
@@ -249,6 +269,10 @@ func (r *controller) Shutdown(ctx context.Context) error {
 		return err
 	}
 
+	if r.cancelPull != nil {
+		r.cancelPull()
+	}
+
 	if err := r.adapter.shutdown(ctx); err != nil {
 		if isUnknownContainer(err) || isStoppedContainer(err) {
 			return nil
@@ -266,6 +290,10 @@ func (r *controller) Terminate(ctx context.Context) error {
 		return err
 	}
 
+	if r.cancelPull != nil {
+		r.cancelPull()
+	}
+
 	if err := r.adapter.terminate(ctx); err != nil {
 		if isUnknownContainer(err) {
 			return nil
@@ -283,6 +311,10 @@ func (r *controller) Remove(ctx context.Context) error {
 		return err
 	}
 
+	if r.cancelPull != nil {
+		r.cancelPull()
+	}
+
 	// It may be necessary to shut down the task before removing it.
 	if err := r.Shutdown(ctx); err != nil {
 		if isUnknownContainer(err) {
@@ -317,6 +349,10 @@ func (r *controller) Close() error {
 	case <-r.closed:
 		return r.err
 	default:
+		if r.cancelPull != nil {
+			r.cancelPull()
+		}
+
 		r.err = exec.ErrControllerClosed
 		close(r.closed)
 	}
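
The reworked Prepare forks the image pull onto a detached background context exactly once, then waits on either the caller's context or pull completion; re-entrant Prepare calls (e.g. after a cancelled update) reuse the in-flight pull instead of restarting it, and Shutdown/Terminate/Remove/Close cancel it via cancelPull. A condensed standalone sketch of the pattern, with pullImage stubbed:

package main

import (
	"context"
	"fmt"
	"time"
)

type puller struct {
	pulled     chan struct{} // closed when the background pull finishes
	cancelPull context.CancelFunc
	pullErr    error // only read after pulled is closed
}

func (p *puller) pullImage(ctx context.Context) error {
	time.Sleep(50 * time.Millisecond) // stand-in for the real pull
	return nil
}

// Prepare is safe to call repeatedly: the pull is forked exactly once onto
// a background context, so cancelling one Prepare call doesn't abort it.
func (p *puller) Prepare(ctx context.Context) error {
	if p.pulled == nil {
		var pctx context.Context
		p.pulled = make(chan struct{})
		pctx, p.cancelPull = context.WithCancel(context.Background())
		go func() {
			defer close(p.pulled)
			p.pullErr = p.pullImage(pctx)
		}()
	}
	select {
	case <-ctx.Done():
		return ctx.Err() // caller gave up; the pull keeps running
	case <-p.pulled:
		if p.pullErr != nil {
			fmt.Println("pull failed, continuing anyway:", p.pullErr)
		}
	}
	return nil
}

func main() {
	p := &puller{}
	defer func() {
		if p.cancelPull != nil {
			p.cancelPull() // mirrors Shutdown/Terminate/Remove/Close
		}
	}()
	fmt.Println(p.Prepare(context.Background())) // <nil>
}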

+ 1 - 0
docs/reference/commandline/service_create.md

@@ -20,6 +20,7 @@ Create a new service
 
 Options:
       --constraint value               Placement constraints (default [])
+      --container-label value          Service container labels (default [])
       --endpoint-mode string           Endpoint mode (vip or dnsrr)
   -e, --env value                      Set environment variables (default [])
       --help                           Print usage

+ 2 - 0
docs/reference/commandline/service_update.md

@@ -21,6 +21,8 @@ Options:
       --args string                    Service command args
       --constraint-add value           Add or update placement constraints (default [])
       --constraint-rm value            Remove a constraint (default [])
+      --container-label-add value      Add or update container labels (default [])
+      --container-label-rm value       Remove a container label by its key (default [])
       --endpoint-mode string           Endpoint mode (vip or dnsrr)
       --env-add value                  Add or update environment variables (default [])
       --env-rm value                   Remove an environment variable (default [])
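
Taken together with the create-side flag, a typical flow looks like (illustrative service name and label keys): docker service create --name web --container-label com.example.tier=frontend nginx, followed later by docker service update --container-label-add com.example.team=web --container-label-rm com.example.tier web.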

+ 2 - 2
hack/vendor.sh

@@ -60,12 +60,12 @@ clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://gith
 clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
 clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
 clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d
-clone git github.com/docker/engine-api 53b6b19ee622c8584c28fdde0e3893383b290da3
+clone git github.com/docker/engine-api a52656d77f09d394104c1639824eada038bfdb89
 clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
 clone git github.com/imdario/mergo 0.2.1
 
 #get libnetwork packages
-clone git github.com/docker/libnetwork 6a3feece4ede9473439f0c835a13e666dc2ab857
+clone git github.com/docker/libnetwork c7dc6dc476a5f00f9b28efebe591347dd64264fc
 clone git github.com/docker/go-events afb2b9f2c23f33ada1a22b03651775fdc65a5089
 clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec

+ 6 - 2
integration-cli/daemon_swarm.go

@@ -246,18 +246,22 @@ func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service {
 	return services
 }
 
-func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
+func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm {
 	var sw swarm.Swarm
 	status, out, err := d.SockRequest("GET", "/swarm", nil)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
 	c.Assert(json.Unmarshal(out, &sw), checker.IsNil)
+	return sw
+}
 
+func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
+	sw := d.getSwarm(c)
 	for _, fn := range f {
 		fn(&sw.Spec)
 	}
 	url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index)
-	status, out, err = d.SockRequest("POST", url, sw.Spec)
+	status, out, err := d.SockRequest("POST", url, sw.Spec)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
 }

+ 4 - 13
integration-cli/docker_cli_swarm_test.go

@@ -3,7 +3,6 @@
 package main
 
 import (
-	"encoding/json"
 	"io/ioutil"
 	"strings"
 	"time"
@@ -17,12 +16,8 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	getSpec := func() swarm.Spec {
-		out, err := d.Cmd("swarm", "inspect")
-		c.Assert(err, checker.IsNil)
-		var sw []swarm.Swarm
-		c.Assert(json.Unmarshal([]byte(out), &sw), checker.IsNil)
-		c.Assert(len(sw), checker.Equals, 1)
-		return sw[0].Spec
+		sw := d.getSwarm(c)
+		return sw.Spec
 	}
 
 	out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s")
@@ -44,12 +39,8 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {
 	d := s.AddDaemon(c, false, false)
 
 	getSpec := func() swarm.Spec {
-		out, err := d.Cmd("swarm", "inspect")
-		c.Assert(err, checker.IsNil)
-		var sw []swarm.Swarm
-		c.Assert(json.Unmarshal([]byte(out), &sw), checker.IsNil)
-		c.Assert(len(sw), checker.Equals, 1)
-		return sw[0].Spec
+		sw := d.getSwarm(c)
+		return sw.Spec
 	}
 
 	out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s")

+ 2 - 0
vendor/src/github.com/docker/engine-api/types/swarm/swarm.go

@@ -118,6 +118,8 @@ type Info struct {
 	RemoteManagers []Peer
 	Nodes          int
 	Managers       int
+
+	Cluster Swarm
 }
 
 // Peer represents a peer.

+ 1 - 0
vendor/src/github.com/docker/engine-api/types/types.go

@@ -256,6 +256,7 @@ type Info struct {
 	Runtimes           map[string]Runtime
 	DefaultRuntime     string
 	Swarm              swarm.Info
+	LiveRestore        bool
 }
 
 // PluginsInfo is a temp struct holding Plugins name

+ 2 - 2
vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go

@@ -95,7 +95,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
 	switch {
 	case isLocal:
 		if err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
-			if !lIP.Equal(pEntry.vtep) {
+			if !aIP.Equal(pEntry.vtep) {
 				nodes[pEntry.vtep.String()] = pEntry.vtep
 			}
 			return false
@@ -488,7 +488,7 @@ func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx,
 
 	if delIdx != -1 {
 		// -rSA0
-		programSA(rIP, lIP, spis[delIdx], nil, reverse, false)
+		programSA(lIP, rIP, spis[delIdx], nil, reverse, false)
 	}
 
 	if newIdx > -1 {