Merge pull request #24993 from yongtang/24596-swarm-join-with-drain
Allow swarm join with `--availability=drain`
commit 66aba12df2
9 changed files with 106 additions and 5 deletions
@@ -135,6 +135,7 @@ type InitRequest struct {
 	ForceNewCluster  bool
 	Spec             Spec
 	AutoLockManagers bool
+	Availability     NodeAvailability
 }
 
 // JoinRequest is the request used to join a swarm.
@@ -143,6 +144,7 @@ type JoinRequest struct {
 	AdvertiseAddr string
 	RemoteAddrs   []string
 	JoinToken     string // accept by secret
+	Availability  NodeAvailability
 }
 
 // UnlockRequest is the request used to unlock a swarm.
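For reference, the new `Availability` field is usable straight from the Go engine API client. A minimal sketch, assuming the `client.NewEnvClient` constructor of this era; the remote address and join token are placeholders:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Join an existing swarm with the node drained from the start.
	err = cli.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:   "0.0.0.0:2377",
		RemoteAddrs:  []string{"192.168.99.100:2377"}, // placeholder manager address
		JoinToken:    "<worker-or-manager-token>",     // placeholder token
		Availability: swarm.NodeAvailabilityDrain,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("joined with availability=drain")
}
```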
@@ -20,6 +20,7 @@ type initOptions struct {
 	// Not a NodeAddrOption because it has no default port.
 	advertiseAddr   string
 	forceNewCluster bool
+	availability    string
 }
 
 func newInitCommand(dockerCli command.Cli) *cobra.Command {
@@ -41,6 +42,7 @@ func newInitCommand(dockerCli command.Cli) *cobra.Command {
 	flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
 	flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state")
 	flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)")
+	flags.StringVar(&opts.availability, flagAvailability, "active", "Availability of the node (active/pause/drain)")
 	addSwarmFlags(flags, &opts.swarmOptions)
 	return cmd
 }
@@ -56,6 +58,15 @@ func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) erro
 		Spec:             opts.swarmOptions.ToSpec(flags),
 		AutoLockManagers: opts.swarmOptions.autolock,
 	}
+	if flags.Changed(flagAvailability) {
+		availability := swarm.NodeAvailability(strings.ToLower(opts.availability))
+		switch availability {
+		case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain:
+			req.Availability = availability
+		default:
+			return fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
+		}
+	}
 
 	nodeID, err := client.SwarmInit(ctx, req)
 	if err != nil {
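The validation switch above appears verbatim again in `runJoin` below. Factored into a shared helper it would look like the following sketch; `validateAvailability` is hypothetical and not part of this commit:

```go
package swarm // hypothetical: would sit alongside init.go and join.go

import (
	"fmt"
	"strings"

	"github.com/docker/docker/api/types/swarm"
)

// validateAvailability mirrors the switch inlined by runInit and runJoin:
// it normalizes case and accepts only the three supported states.
func validateAvailability(value string) (swarm.NodeAvailability, error) {
	availability := swarm.NodeAvailability(strings.ToLower(value))
	switch availability {
	case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain:
		return availability, nil
	default:
		return "", fmt.Errorf("invalid availability %q, only active, pause and drain are supported", value)
	}
}
```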
@@ -2,12 +2,15 @@ package swarm
 
 import (
 	"fmt"
+	"strings"
+
+	"golang.org/x/net/context"
 
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/spf13/cobra"
-	"golang.org/x/net/context"
+	"github.com/spf13/pflag"
 )
 
 type joinOptions struct {
@@ -16,6 +19,7 @@ type joinOptions struct {
 	// Not a NodeAddrOption because it has no default port.
 	advertiseAddr string
 	token         string
+	availability  string
 }
 
 func newJoinCommand(dockerCli command.Cli) *cobra.Command {
@@ -29,7 +33,7 @@ func newJoinCommand(dockerCli command.Cli) *cobra.Command {
 		Args: cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			opts.remote = args[0]
-			return runJoin(dockerCli, opts)
+			return runJoin(dockerCli, cmd.Flags(), opts)
 		},
 	}
 
@@ -37,10 +41,11 @@ func newJoinCommand(dockerCli command.Cli) *cobra.Command {
 	flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: <ip|interface>[:port])")
 	flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
 	flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm")
+	flags.StringVar(&opts.availability, flagAvailability, "active", "Availability of the node (active/pause/drain)")
 	return cmd
 }
 
-func runJoin(dockerCli command.Cli, opts joinOptions) error {
+func runJoin(dockerCli command.Cli, flags *pflag.FlagSet, opts joinOptions) error {
 	client := dockerCli.Client()
 	ctx := context.Background()
 
@@ -50,6 +55,16 @@ func runJoin(dockerCli command.Cli, opts joinOptions) error {
 		AdvertiseAddr: opts.advertiseAddr,
 		RemoteAddrs:   []string{opts.remote},
 	}
+	if flags.Changed(flagAvailability) {
+		availability := swarm.NodeAvailability(strings.ToLower(opts.availability))
+		switch availability {
+		case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain:
+			req.Availability = availability
+		default:
+			return fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
+		}
+	}
+
 	err := client.SwarmJoin(ctx, req)
 	if err != nil {
 		return err
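Note the `flags.Changed(flagAvailability)` gate in both commands: `req.Availability` is populated only when the user explicitly set the flag, so an untouched default leaves the field empty and the daemon's own handling applies (see the `conf.availability != ""` check in the node runner further down). A self-contained illustration of the `pflag` behavior this relies on:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	avail := fs.String("availability", "active", "Availability of the node")

	_ = fs.Parse([]string{}) // user passed nothing
	fmt.Println(fs.Changed("availability"), *avail) // false active

	_ = fs.Parse([]string{"--availability=drain"}) // explicit flag
	fmt.Println(fs.Changed("availability"), *avail) // true drain
}
```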
@@ -28,6 +28,7 @@ const (
 	flagSnapshotInterval = "snapshot-interval"
 	flagLockKey          = "lock-key"
 	flagAutolock         = "autolock"
+	flagAvailability     = "availability"
 )
 
 type swarmOptions struct {
@@ -320,6 +320,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
 		LocalAddr:     localAddr,
 		ListenAddr:    net.JoinHostPort(listenHost, listenPort),
 		AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort),
+		availability:  req.Availability,
 	})
 	if err != nil {
 		return "", err
@@ -389,6 +390,7 @@ func (c *Cluster) Join(req types.JoinRequest) error {
 		AdvertiseAddr: advertiseAddr,
 		joinAddr:      req.RemoteAddrs[0],
 		joinToken:     req.JoinToken,
+		availability:  req.Availability,
 	})
 	if err != nil {
 		return err
@@ -1,6 +1,7 @@
 package cluster
 
 import (
+	"fmt"
 	"path/filepath"
 	"runtime"
 	"strings"
@@ -51,6 +52,7 @@ type nodeStartConfig struct {
 	joinToken    string
 	lockKey      []byte
 	autolock     bool
+	availability types.NodeAvailability
 }
 
 func (n *nodeRunner) Ready() chan error {
@@ -92,7 +94,7 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
 		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
 	}
 
-	node, err := swarmnode.New(&swarmnode.Config{
+	swarmnodeConfig := swarmnode.Config{
 		Hostname:         n.cluster.config.Name,
 		ForceNewCluster:  conf.forceNewCluster,
 		ListenControlAPI: control,
@@ -106,7 +108,15 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
 		ElectionTick:     3,
 		UnlockKey:        conf.lockKey,
 		AutoLockManagers: conf.autolock,
-	})
+	}
+	if conf.availability != "" {
+		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
+		if !ok {
+			return fmt.Errorf("invalid Availability: %q", conf.availability)
+		}
+		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
+	}
+	node, err := swarmnode.New(&swarmnodeConfig)
 	if err != nil {
 		return err
 	}
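At the daemon layer, the engine's string-typed availability is mapped onto swarmkit's generated protobuf enum through the `NodeSpec_Availability_value` name-to-value map. A standalone sketch of that conversion, assuming swarmkit's generated values (ACTIVE=0, PAUSE=1, DRAIN=2):

```go
package main

import (
	"fmt"
	"strings"

	swarmapi "github.com/docker/swarmkit/api"
)

func main() {
	conf := "drain" // stands in for nodeStartConfig.availability

	// Protobuf generates an uppercase name-to-value map for each enum.
	avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(conf)]
	if !ok {
		panic(fmt.Sprintf("invalid Availability: %q", conf))
	}
	fmt.Println(swarmapi.NodeSpec_Availability(avail)) // DRAIN
}
```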
@@ -23,6 +23,7 @@ Initialize a swarm
 Options:
       --advertise-addr string           Advertised address (format: <ip|interface>[:port])
       --autolock                        Enable manager autolocking (requiring an unlock key to start a stopped manager)
+      --availability string             Availability of the node (active/pause/drain) (default "active")
       --cert-expiry duration            Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s)
       --dispatcher-heartbeat duration   Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s)
       --external-ca external-ca         Specifications of one or more certificate signing endpoints
@@ -133,6 +134,16 @@ Snapshots compact the Raft log and allow for more efficient transfer of the
 state to new managers. However, there is a performance cost to taking snapshots
 frequently.
 
+### `--availability`
+
+This flag specifies the availability of the node at the time the swarm is created.
+Possible availability values are `active`, `pause`, or `drain`.
+
+For example, a cluster may want dedicated manager nodes that do not also serve
+as worker nodes. This can be achieved by passing `--availability=drain` to
+`docker swarm init`.
+
+
 ## Related information
 
 * [swarm join](swarm_join.md)
@@ -22,6 +22,7 @@ Join a swarm as a node and/or manager
 
 Options:
       --advertise-addr string   Advertised address (format: <ip|interface>[:port])
+      --availability string     Availability of the node (active/pause/drain) (default "active")
       --help                    Print usage
      --listen-addr node-addr   Listen address (format: <ip|interface>[:port]) (default 0.0.0.0:2377)
       --token string            Token for entry into the swarm
@@ -94,6 +95,15 @@ This flag is generally not necessary when joining an existing swarm.
 
 Secret value required for nodes to join the swarm
 
+### `--availability`
+
+This flag specifies the availability of the node at the time it joins the swarm.
+Possible availability values are `active`, `pause`, or `drain`.
+
+For example, a cluster may want dedicated manager nodes that do not also serve
+as worker nodes. This can be achieved by passing `--availability=drain` to
+`docker swarm join`.
+
 ## Related information
@@ -1591,3 +1591,42 @@ func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *check.C) {
 	c.Assert(out, checker.Contains, "{ tcp 80 5000 ingress}")
 	c.Assert(out, checker.Contains, "{ tcp 80 5001 ingress}")
 }
+
+func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	out, err := d.Cmd("node", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Not(checker.Contains), "Drain")
+
+	out, err = d.Cmd("swarm", "join-token", "-q", "manager")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	token := strings.TrimSpace(out)
+
+	d1 := s.AddDaemon(c, false, false)
+
+	out, err = d1.Cmd("swarm", "join", "--availability=drain", "--token", token, d.ListenAddr)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	out, err = d.Cmd("node", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Drain")
+
+	out, err = d1.Cmd("node", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Drain")
+}
+
+func (s *DockerSwarmSuite) TestSwarmInitWithDrain(c *check.C) {
+	d := s.AddDaemon(c, false, false)
+
+	out, err := d.Cmd("swarm", "init", "--availability", "drain")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
+
+	out, err = d.Cmd("node", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Drain")
+}
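The tests above assert on a "Drain" substring in `docker node ls` output, which couples them to the table format. A hypothetical, less format-sensitive variant (not part of the commit) would inspect the node's spec directly:

```go
// Hypothetical variant of TestSwarmInitWithDrain: assert on the node's spec
// rather than on "node ls" table output.
func (s *DockerSwarmSuite) TestSwarmInitWithDrainInspect(c *check.C) {
	d := s.AddDaemon(c, false, false)

	out, err := d.Cmd("swarm", "init", "--availability", "drain")
	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))

	// "self" resolves to the node the daemon runs on; the availability is
	// rendered in lowercase by the template.
	out, err = d.Cmd("node", "inspect", "--format", "{{ .Spec.Availability }}", "self")
	c.Assert(err, checker.IsNil)
	c.Assert(strings.TrimSpace(out), checker.Equals, "drain")
}
```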