Allow swarm init with `--availability=drain`
This fix adds a new flag `--availability` to `swarm init`.

Related documentation has been updated.

An integration test has been added.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

Yong Tang 8 years ago
parent commit 0f30c64444

+ 1 - 0
api/types/swarm/swarm.go

@@ -135,6 +135,7 @@ type InitRequest struct {
 	ForceNewCluster  bool
 	Spec             Spec
 	AutoLockManagers bool
+	Availability     NodeAvailability
 }
 
 // JoinRequest is the request used to join a swarm.
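With the new field in place, a Go API client can request a drained first node directly. A minimal sketch (not part of this commit) using `github.com/docker/docker/client`; the listen address and error handling are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Mirror what `docker swarm init --availability=drain` sends to the
	// daemon: the first node comes up as a manager but accepts no tasks.
	nodeID, err := cli.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr:   "0.0.0.0:2377", // assumed default listen address
		Availability: swarm.NodeAvailabilityDrain,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("swarm initialized, node ID:", nodeID)
}
```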

+ 11 - 0
cli/command/swarm/init.go

@@ -20,6 +20,7 @@ type initOptions struct {
 	// Not a NodeAddrOption because it has no default port.
 	advertiseAddr   string
 	forceNewCluster bool
+	availability    string
 }
 
 func newInitCommand(dockerCli command.Cli) *cobra.Command {
@@ -41,6 +42,7 @@ func newInitCommand(dockerCli command.Cli) *cobra.Command {
 	flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
 	flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state")
 	flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)")
+	flags.StringVar(&opts.availability, flagAvailability, "active", "Availability of the node (active/pause/drain)")
 	addSwarmFlags(flags, &opts.swarmOptions)
 	return cmd
 }
@@ -56,6 +58,15 @@ func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) erro
 		Spec:             opts.swarmOptions.ToSpec(flags),
 		AutoLockManagers: opts.swarmOptions.autolock,
 	}
+	if flags.Changed(flagAvailability) {
+		availability := swarm.NodeAvailability(strings.ToLower(opts.availability))
+		switch availability {
+		case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain:
+			req.Availability = availability
+		default:
+			return fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
+		}
+	}
 
 	nodeID, err := client.SwarmInit(ctx, req)
 	if err != nil {
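One subtlety: even though the flag declares a default of "active", the request field is only populated when the user explicitly passes the flag. A standalone sketch (hypothetical flag set, for illustration) of the pflag behavior that makes `flags.Changed` the right gate:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("init", pflag.ContinueOnError)
	avail := fs.String("availability", "active", "Availability of the node (active/pause/drain)")

	_ = fs.Parse([]string{}) // user passed nothing
	fmt.Println(*avail, fs.Changed("availability")) // active false: default value, not user intent

	_ = fs.Parse([]string{"--availability=drain"})
	fmt.Println(*avail, fs.Changed("availability")) // drain true: safe to set req.Availability
}
```

Gating on `Changed` leaves the daemon's own default in effect when the flag is absent, rather than having the CLI always force `active`.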

+ 1 - 0
daemon/cluster/cluster.go

@@ -320,6 +320,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
 		LocalAddr:       localAddr,
 		ListenAddr:      net.JoinHostPort(listenHost, listenPort),
 		AdvertiseAddr:   net.JoinHostPort(advertiseHost, advertisePort),
+		availability:    req.Availability,
 	})
 	if err != nil {
 		return "", err

+ 11 - 0
docs/reference/commandline/swarm_init.md

@@ -23,6 +23,7 @@ Initialize a swarm
 Options:
       --advertise-addr string           Advertised address (format: <ip|interface>[:port])
       --autolock                        Enable manager autolocking (requiring an unlock key to start a stopped manager)
+      --availability string             Availability of the node (active/pause/drain) (default "active")
       --cert-expiry duration            Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s)
       --dispatcher-heartbeat duration   Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s)
       --external-ca external-ca         Specifications of one or more certificate signing endpoints
@@ -133,6 +134,16 @@ Snapshots compact the Raft log and allow for more efficient transfer of the
 state to new managers. However, there is a performance cost to taking snapshots
 frequently.
 
+### `--availability`
+
+This flag specifies the availability of the node at the time the swarm is
+initialized. Possible availability values are `active`, `pause`, or `drain`.
+
+This flag is useful in certain situations. For example, a cluster may want to
+have dedicated manager nodes that do not serve as worker nodes. This can be
+achieved by passing `--availability=drain` to `docker swarm init`.
+
 ## Related information
 
 * [swarm join](swarm_join.md)
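Not part of the commit, but a usage sketch consistent with the documented flag (node ID and `node ls` output are abridged and illustrative):

```console
$ docker swarm init --availability drain
Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.

$ docker node ls
ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
dxn1zf6l61qsb1josjja83ngz *  node-1    Ready   Drain         Leader
```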

+ 11 - 0
integration-cli/docker_cli_swarm_test.go

@@ -1619,3 +1619,14 @@ func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *check.C) {
 	c.Assert(err, checker.IsNil)
 	c.Assert(out, checker.Contains, "Drain")
 }
+
+func (s *DockerSwarmSuite) TestSwarmInitWithDrain(c *check.C) {
+	d := s.AddDaemon(c, false, false)
+
+	out, err := d.Cmd("swarm", "init", "--availability", "drain")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
+
+	out, err = d.Cmd("node", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Drain")
+}
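Assuming the repository's usual integration-cli workflow at the time, the new test could be run in isolation with a go-check filter (the Makefile target and TESTFLAGS usage here are an assumption, not part of this commit):

```console
$ make TESTFLAGS='-check.f TestSwarmInitWithDrain' test-integration-cli
```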