Replace secrets with join tokens
Implement the proposal from https://github.com/docker/docker/issues/24430#issuecomment-233100121

Removes the acceptance policy and secret in favor of an automatically generated join token that combines the secret, CA hash, and manager/worker role into a single opaque string. Adds a docker swarm join-token subcommand to inspect and rotate the tokens.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
Parent: c435045c30
Commit: 2cc5bd33ee
46 changed files with 451 additions and 893 deletions
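Before the per-file diffs, a minimal sketch of the client-side flow this change enables. It mirrors what the new join-token subcommand does through engine-api: read the current tokens off the swarm object, then push the unchanged spec back with a rotation flag. The NewEnvClient setup and error handling are illustrative glue; only SwarmInspect, SwarmUpdate, JoinTokens, and UpdateFlags come from this change.

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
	"golang.org/x/net/context"
)

func main() {
	// Assumed setup: env-based client talking to the local daemon.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Both join tokens are carried on the swarm object returned by SwarmInspect.
	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("worker token: ", sw.JoinTokens.Worker)
	fmt.Println("manager token:", sw.JoinTokens.Manager)

	// Rotation is an update with an unchanged spec plus a rotation flag,
	// the same call the new join-token --rotate code path makes.
	if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{RotateWorkerToken: true}); err != nil {
		log.Fatal(err)
	}
}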
@@ -1,32 +0,0 @@
-package node
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/docker/engine-api/types/swarm"
-	"github.com/spf13/cobra"
-)
-
-func newAcceptCommand(dockerCli *client.DockerCli) *cobra.Command {
-	return &cobra.Command{
-		Use:   "accept NODE [NODE...]",
-		Short: "Accept a node in the swarm",
-		Args:  cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return runAccept(dockerCli, args)
-		},
-	}
-}
-
-func runAccept(dockerCli *client.DockerCli, nodes []string) error {
-	accept := func(node *swarm.Node) error {
-		node.Spec.Membership = swarm.NodeMembershipAccepted
-		return nil
-	}
-	success := func(nodeID string) {
-		fmt.Fprintf(dockerCli.Out(), "Node %s accepted in the swarm.\n", nodeID)
-	}
-	return updateNodes(dockerCli, nodes, accept, success)
-}
@@ -23,7 +23,6 @@ func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command {
 		},
 	}
 	cmd.AddCommand(
-		newAcceptCommand(dockerCli),
 		newDemoteCommand(dockerCli),
 		newInspectCommand(dockerCli),
 		newListCommand(dockerCli),
@@ -16,7 +16,7 @@ import (
 )

 const (
-	listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\n"
+	listItemFmt = "%s\t%s\t%s\t%s\t%s\n"
 )

 type listOptions struct {
@@ -74,11 +74,10 @@ func printTable(out io.Writer, nodes []swarm.Node, info types.Info) {
 	// Ignore flushing errors
 	defer writer.Flush()

-	fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS")
+	fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "STATUS", "AVAILABILITY", "MANAGER STATUS")
 	for _, node := range nodes {
 		name := node.Description.Hostname
 		availability := string(node.Spec.Availability)
-		membership := string(node.Spec.Membership)

 		reachability := ""
 		if node.ManagerStatus != nil {
@@ -99,7 +98,6 @@ func printTable(out io.Writer, nodes []swarm.Node, info types.Info) {
 			listItemFmt,
 			ID,
 			name,
-			client.PrettyPrint(membership),
 			client.PrettyPrint(string(node.Status.State)),
 			client.PrettyPrint(availability),
 			client.PrettyPrint(reachability))
@@ -12,7 +12,6 @@ import (
 type nodeOptions struct {
 	annotations
 	role         string
-	membership   string
 	availability string
 }

@@ -45,14 +44,6 @@ func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) {
 		return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role)
 	}

-	switch swarm.NodeMembership(strings.ToLower(opts.membership)) {
-	case swarm.NodeMembershipAccepted:
-		spec.Membership = swarm.NodeMembershipAccepted
-	case "":
-	default:
-		return swarm.NodeSpec{}, fmt.Errorf("invalid membership %q, only accepted is supported", opts.membership)
-	}
-
 	switch swarm.NodeAvailability(strings.ToLower(opts.availability)) {
 	case swarm.NodeAvailabilityActive:
 		spec.Availability = swarm.NodeAvailabilityActive
@@ -27,7 +27,6 @@ func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {

 	flags := cmd.Flags()
 	flags.StringVar(&nodeOpts.role, flagRole, "", "Role of the node (worker/manager)")
-	flags.StringVar(&nodeOpts.membership, flagMembership, "", "Membership of the node (accepted/rejected)")
 	flags.StringVar(&nodeOpts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)")
 	flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)")
 	labelKeys := opts.NewListOpts(nil)
@@ -76,13 +75,6 @@ func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error {
 		}
 		spec.Role = swarm.NodeRole(str)
 	}
-	if flags.Changed(flagMembership) {
-		str, err := flags.GetString(flagMembership)
-		if err != nil {
-			return err
-		}
-		spec.Membership = swarm.NodeMembership(str)
-	}
 	if flags.Changed(flagAvailability) {
 		str, err := flags.GetString(flagAvailability)
 		if err != nil {
@@ -115,7 +107,6 @@ func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error {

 const (
 	flagRole         = "role"
-	flagMembership   = "membership"
 	flagAvailability = "availability"
 	flagLabelAdd     = "label-add"
 	flagLabelRemove  = "label-rm"
@@ -506,7 +506,7 @@ func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {

 	flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)")

-	flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to Swarm agents")
+	flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents")

 	flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service")
 	flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options")
@@ -25,6 +25,7 @@ func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command {
 		newUpdateCommand(dockerCli),
 		newLeaveCommand(dockerCli),
 		newInspectCommand(dockerCli),
+		newJoinTokenCommand(dockerCli),
 	)
 	return cmd
 }
@@ -28,14 +28,11 @@ type initOptions struct {
 func newInitCommand(dockerCli *client.DockerCli) *cobra.Command {
 	opts := initOptions{
 		listenAddr: NewListenAddrOption(),
-		swarmOptions: swarmOptions{
-			autoAccept: NewAutoAcceptOption(),
-		},
 	}

 	cmd := &cobra.Command{
 		Use:   "init [OPTIONS]",
-		Short: "Initialize a Swarm",
+		Short: "Initialize a swarm",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runInit(dockerCli, cmd.Flags(), opts)
@@ -53,12 +50,6 @@ func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions) error {
 	client := dockerCli.Client()
 	ctx := context.Background()

-	// If no secret was specified, we create a random one
-	if !flags.Changed("secret") {
-		opts.secret = generateRandomSecret()
-		fmt.Fprintf(dockerCli.Out(), "No --secret provided. Generated random secret:\n %s\n\n", opts.secret)
-	}
-
 	req := swarm.InitRequest{
 		ListenAddr:      opts.listenAddr.String(),
 		ForceNewCluster: opts.forceNewCluster,
@@ -72,24 +63,5 @@ func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions) error {

 	fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID)

-	// Fetch CAHash and Address from the API
-	info, err := client.Info(ctx)
-	if err != nil {
-		return err
-	}
-
-	node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
-	if err != nil {
-		return err
-	}
-
-	if node.ManagerStatus != nil && info.Swarm.CACertHash != "" {
-		var secretArgs string
-		if opts.secret != "" {
-			secretArgs = "--secret " + opts.secret
-		}
-		fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n docker swarm join %s \\\n --ca-hash %s \\\n %s\n", secretArgs, info.Swarm.CACertHash, node.ManagerStatus.Addr)
-	}
-
-	return nil
+	return printJoinCommand(ctx, dockerCli, nodeID, true, true)
 }
@@ -18,7 +18,7 @@ func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {

 	cmd := &cobra.Command{
 		Use:   "inspect [OPTIONS]",
-		Short: "Inspect the Swarm",
+		Short: "Inspect the swarm",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runInspect(dockerCli, opts)
@@ -2,6 +2,7 @@ package swarm

 import (
 	"fmt"
+	"strings"

 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
@@ -13,9 +14,7 @@ import (
 type joinOptions struct {
 	remote     string
 	listenAddr NodeAddrOption
-	manager    bool
-	secret     string
-	CACertHash string
+	token      string
 }

 func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
@@ -25,7 +24,7 @@ func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {

 	cmd := &cobra.Command{
 		Use:   "join [OPTIONS] HOST:PORT",
-		Short: "Join a Swarm as a node and/or manager",
+		Short: "Join a swarm as a node and/or manager",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			opts.remote = args[0]
@@ -35,9 +34,7 @@ func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {

 	flags := cmd.Flags()
 	flags.Var(&opts.listenAddr, flagListenAddr, "Listen address")
-	flags.BoolVar(&opts.manager, "manager", false, "Try joining as a manager.")
-	flags.StringVar(&opts.secret, flagSecret, "", "Secret for node acceptance")
-	flags.StringVar(&opts.CACertHash, "ca-hash", "", "Hash of the Root Certificate Authority certificate used for trusted join")
+	flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm")
 	return cmd
 }

@@ -46,20 +43,29 @@ func runJoin(dockerCli *client.DockerCli, opts joinOptions) error {
 	ctx := context.Background()

 	req := swarm.JoinRequest{
-		Manager:     opts.manager,
-		Secret:      opts.secret,
+		JoinToken:   opts.token,
 		ListenAddr:  opts.listenAddr.String(),
 		RemoteAddrs: []string{opts.remote},
-		CACertHash:  opts.CACertHash,
 	}
 	err := client.SwarmJoin(ctx, req)
 	if err != nil {
 		return err
 	}
-	if opts.manager {
-		fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a manager.")
-	} else {
-		fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a worker.")
-	}

+	info, err := client.Info(ctx)
+	if err != nil {
+		return err
+	}
+
+	_, _, err = client.NodeInspectWithRaw(ctx, info.Swarm.NodeID)
+	if err != nil {
+		// TODO(aaronl): is there a better way to do this?
+		if strings.Contains(err.Error(), "This node is not a swarm manager.") {
+			fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.")
+		}
+	} else {
+		fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.")
+	}
+
 	return nil
 }
api/client/swarm/join_token.go (new file, 110 lines)
@@ -0,0 +1,110 @@
+package swarm
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/spf13/cobra"
+
+	"github.com/docker/docker/api/client"
+	"github.com/docker/docker/cli"
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+const (
+	flagRotate = "rotate"
+	flagQuiet  = "quiet"
+)
+
+func newJoinTokenCommand(dockerCli *client.DockerCli) *cobra.Command {
+	var rotate, quiet bool
+
+	cmd := &cobra.Command{
+		Use:   "join-token [-q] [--rotate] (worker|manager)",
+		Short: "Manage join tokens",
+		Args:  cli.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if args[0] != "worker" && args[0] != "manager" {
+				return errors.New("unknown role " + args[0])
+			}
+
+			client := dockerCli.Client()
+			ctx := context.Background()
+
+			if rotate {
+				var flags swarm.UpdateFlags
+
+				swarm, err := client.SwarmInspect(ctx)
+				if err != nil {
+					return err
+				}
+
+				if args[0] == "worker" {
+					flags.RotateWorkerToken = true
+				} else if args[0] == "manager" {
+					flags.RotateManagerToken = true
+				}
+
+				err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags)
+				if err != nil {
+					return err
+				}
+			}
+
+			swarm, err := client.SwarmInspect(ctx)
+			if err != nil {
+				return err
+			}
+
+			if quiet {
+				if args[0] == "worker" {
+					fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Worker)
+				} else if args[0] == "manager" {
+					fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Manager)
+				}
+			} else {
+				info, err := client.Info(ctx)
+				if err != nil {
+					return err
+				}
+				return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, args[0] == "worker", args[0] == "manager")
+			}
+			return nil
+		},
+	}
+
+	flags := cmd.Flags()
+	flags.BoolVar(&rotate, flagRotate, false, "Rotate join token")
+	flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token")
+
+	return cmd
+}
+
+func printJoinCommand(ctx context.Context, dockerCli *client.DockerCli, nodeID string, worker bool, manager bool) error {
+	client := dockerCli.Client()
+
+	swarm, err := client.SwarmInspect(ctx)
+	if err != nil {
+		return err
+	}
+
+	node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
+	if err != nil {
+		return err
+	}
+
+	if node.ManagerStatus != nil {
+		if worker {
+			fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n docker swarm join \\\n --token %s \\\n %s\n", swarm.JoinTokens.Worker, node.ManagerStatus.Addr)
+		}
+		if manager {
+			if worker {
+				fmt.Fprintln(dockerCli.Out())
+			}
+			fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n docker swarm join \\\n --token %s \\\n %s\n", swarm.JoinTokens.Manager, node.ManagerStatus.Addr)
+		}
+	}

+	return nil
+}
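The intended user-facing flow of the new subcommand, for reference. The manager address shown is a placeholder, and the token is the illustrative value from the API documentation further below:

$ docker swarm join-token worker
To add a worker to this swarm, run the following command:
 docker swarm join \
 --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \
 192.168.99.100:2377

$ docker swarm join-token -q worker        # print only the token
$ docker swarm join-token --rotate worker  # invalidate the old worker token and issue a new one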
@@ -19,7 +19,7 @@ func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command {

 	cmd := &cobra.Command{
 		Use:   "leave [OPTIONS]",
-		Short: "Leave a Swarm",
+		Short: "Leave a swarm",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runLeave(dockerCli, opts)
@@ -15,29 +15,15 @@ import (
 const (
 	defaultListenAddr = "0.0.0.0:2377"

-	worker  = "WORKER"
-	manager = "MANAGER"
-	none    = "NONE"
-
-	flagAutoAccept          = "auto-accept"
 	flagCertExpiry          = "cert-expiry"
 	flagDispatcherHeartbeat = "dispatcher-heartbeat"
 	flagListenAddr          = "listen-addr"
-	flagSecret              = "secret"
+	flagToken               = "token"
 	flagTaskHistoryLimit    = "task-history-limit"
 	flagExternalCA          = "external-ca"
 )

-var (
-	defaultPolicies = []swarm.Policy{
-		{Role: worker, Autoaccept: true},
-		{Role: manager, Autoaccept: false},
-	}
-)
-
 type swarmOptions struct {
-	autoAccept          AutoAcceptOption
-	secret              string
 	taskHistoryLimit    int64
 	dispatcherHeartbeat time.Duration
 	nodeCertExpiry      time.Duration
@@ -84,71 +70,6 @@ func NewListenAddrOption() NodeAddrOption {
 	return NewNodeAddrOption(defaultListenAddr)
 }

-// AutoAcceptOption is a value type for auto-accept policy
-type AutoAcceptOption struct {
-	values map[string]struct{}
-}
-
-// String prints a string representation of this option
-func (o *AutoAcceptOption) String() string {
-	keys := []string{}
-	for key := range o.values {
-		keys = append(keys, fmt.Sprintf("%s=true", strings.ToLower(key)))
-	}
-	return strings.Join(keys, ", ")
-}
-
-// Set sets a new value on this option
-func (o *AutoAcceptOption) Set(acceptValues string) error {
-	for _, value := range strings.Split(acceptValues, ",") {
-		value = strings.ToUpper(value)
-		switch value {
-		case none, worker, manager:
-			o.values[value] = struct{}{}
-		default:
-			return fmt.Errorf("must be one / combination of %s, %s; or NONE", worker, manager)
-		}
-	}
-	// NONE must stand alone, so if any non-NONE setting exist with it, error with conflict
-	if o.isPresent(none) && len(o.values) > 1 {
-		return fmt.Errorf("value NONE cannot be specified alongside other node types")
-	}
-	return nil
-}
-
-// Type returns the type of this option
-func (o *AutoAcceptOption) Type() string {
-	return "auto-accept"
-}
-
-// Policies returns a representation of this option for the api
-func (o *AutoAcceptOption) Policies(secret *string) []swarm.Policy {
-	policies := []swarm.Policy{}
-	for _, p := range defaultPolicies {
-		if len(o.values) != 0 {
-			if _, ok := o.values[string(p.Role)]; ok {
-				p.Autoaccept = true
-			} else {
-				p.Autoaccept = false
-			}
-		}
-		p.Secret = secret
-		policies = append(policies, p)
-	}
-	return policies
-}
-
-// isPresent returns whether the key exists in the set or not
-func (o *AutoAcceptOption) isPresent(key string) bool {
-	_, c := o.values[key]
-	return c
-}
-
-// NewAutoAcceptOption returns a new auto-accept option
-func NewAutoAcceptOption() AutoAcceptOption {
-	return AutoAcceptOption{values: make(map[string]struct{})}
-}
-
 // ExternalCAOption is a Value type for parsing external CA specifications.
 type ExternalCAOption struct {
 	values []*swarm.ExternalCA
@@ -239,8 +160,6 @@ func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) {
 }

 func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
-	flags.Var(&opts.autoAccept, flagAutoAccept, "Auto acceptance policy (worker, manager or none)")
-	flags.StringVar(&opts.secret, flagSecret, "", "Set secret value needed to join a cluster")
 	flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 10, "Task history retention limit")
 	flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period")
 	flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates")
@@ -249,11 +168,6 @@ func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {

 func (opts *swarmOptions) ToSpec() swarm.Spec {
 	spec := swarm.Spec{}
-	if opts.secret != "" {
-		spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(&opts.secret)
-	} else {
-		spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(nil)
-	}
 	spec.Orchestration.TaskHistoryRetentionLimit = opts.taskHistoryLimit
 	spec.Dispatcher.HeartbeatPeriod = uint64(opts.dispatcherHeartbeat.Nanoseconds())
 	spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry
@@ -4,7 +4,6 @@ import (
 	"testing"

 	"github.com/docker/docker/pkg/testutil/assert"
-	"github.com/docker/engine-api/types/swarm"
 )

 func TestNodeAddrOptionSetHostAndPort(t *testing.T) {
@@ -36,101 +35,3 @@ func TestNodeAddrOptionSetInvalidFormat(t *testing.T) {
 	opt := NewListenAddrOption()
 	assert.Error(t, opt.Set("http://localhost:4545"), "Invalid")
 }
-
-func TestAutoAcceptOptionSetWorker(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.NilError(t, opt.Set("worker"))
-	assert.Equal(t, opt.isPresent(worker), true)
-}
-
-func TestAutoAcceptOptionSetManager(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.NilError(t, opt.Set("manager"))
-	assert.Equal(t, opt.isPresent(manager), true)
-}
-
-func TestAutoAcceptOptionSetInvalid(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.Error(t, opt.Set("bogus"), "must be one / combination")
-}
-
-func TestAutoAcceptOptionSetEmpty(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.Error(t, opt.Set(""), "must be one / combination")
-}
-
-func TestAutoAcceptOptionSetNone(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.NilError(t, opt.Set("none"))
-	assert.Equal(t, opt.isPresent(manager), false)
-	assert.Equal(t, opt.isPresent(worker), false)
-}
-
-func TestAutoAcceptOptionSetTwo(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.NilError(t, opt.Set("worker,manager"))
-	assert.Equal(t, opt.isPresent(manager), true)
-	assert.Equal(t, opt.isPresent(worker), true)
-}
-
-func TestAutoAcceptOptionSetConflict(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.Error(t, opt.Set("none,manager"), "value NONE cannot be specified alongside other node types")
-
-	opt = NewAutoAcceptOption()
-	assert.Error(t, opt.Set("none,worker"), "value NONE cannot be specified alongside other node types")
-
-	opt = NewAutoAcceptOption()
-	assert.Error(t, opt.Set("worker,none,manager"), "value NONE cannot be specified alongside other node types")
-
-	opt = NewAutoAcceptOption()
-	assert.Error(t, opt.Set("worker,manager,none"), "value NONE cannot be specified alongside other node types")
-}
-
-func TestAutoAcceptOptionPoliciesDefault(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	secret := "thesecret"
-
-	policies := opt.Policies(&secret)
-	assert.Equal(t, len(policies), 2)
-	assert.Equal(t, policies[0], swarm.Policy{
-		Role:       worker,
-		Autoaccept: true,
-		Secret:     &secret,
-	})
-	assert.Equal(t, policies[1], swarm.Policy{
-		Role:       manager,
-		Autoaccept: false,
-		Secret:     &secret,
-	})
-}
-
-func TestAutoAcceptOptionPoliciesWithManager(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	secret := "thesecret"
-
-	assert.NilError(t, opt.Set("manager"))
-
-	policies := opt.Policies(&secret)
-	assert.Equal(t, len(policies), 2)
-	assert.Equal(t, policies[0], swarm.Policy{
-		Role:       worker,
-		Autoaccept: false,
-		Secret:     &secret,
-	})
-	assert.Equal(t, policies[1], swarm.Policy{
-		Role:       manager,
-		Autoaccept: true,
-		Secret:     &secret,
-	})
-}
-
-func TestAutoAcceptOptionString(t *testing.T) {
-	opt := NewAutoAcceptOption()
-	assert.NilError(t, opt.Set("manager"))
-	assert.NilError(t, opt.Set("worker"))
-
-	repr := opt.String()
-	assert.Contains(t, repr, "worker=true")
-	assert.Contains(t, repr, "manager=true")
-}
@@ -1,19 +0,0 @@
-package swarm
-
-import (
-	cryptorand "crypto/rand"
-	"fmt"
-	"math/big"
-)
-
-func generateRandomSecret() string {
-	var secretBytes [generatedSecretEntropyBytes]byte
-
-	if _, err := cryptorand.Read(secretBytes[:]); err != nil {
-		panic(fmt.Errorf("failed to read random bytes: %v", err))
-	}
-
-	var nn big.Int
-	nn.SetBytes(secretBytes[:])
-	return fmt.Sprintf("%0[1]*s", maxGeneratedSecretLength, nn.Text(generatedSecretBase))
-}
@@ -13,11 +13,11 @@ import (
 )

 func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
-	opts := swarmOptions{autoAccept: NewAutoAcceptOption()}
+	opts := swarmOptions{}

 	cmd := &cobra.Command{
 		Use:   "update [OPTIONS]",
-		Short: "Update the Swarm",
+		Short: "Update the swarm",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runUpdate(dockerCli, cmd.Flags(), opts)
@@ -32,6 +32,8 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error {
 	client := dockerCli.Client()
 	ctx := context.Background()

+	var updateFlags swarm.UpdateFlags
+
 	swarm, err := client.SwarmInspect(ctx)
 	if err != nil {
 		return err
@@ -42,7 +44,7 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error {
 		return err
 	}

-	err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec)
+	err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags)
 	if err != nil {
 		return err
 	}
@@ -55,21 +57,6 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error {
 func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
 	spec := &swarm.Spec

-	if flags.Changed(flagAutoAccept) {
-		value := flags.Lookup(flagAutoAccept).Value.(*AutoAcceptOption)
-		spec.AcceptancePolicy.Policies = value.Policies(nil)
-	}
-
-	var psecret *string
-	if flags.Changed(flagSecret) {
-		secret, _ := flags.GetString(flagSecret)
-		psecret = &secret
-	}
-
-	for i := range spec.AcceptancePolicy.Policies {
-		spec.AcceptancePolicy.Policies[i].Secret = psecret
-	}
-
 	if flags.Changed(flagTaskHistoryLimit) {
 		spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64(flagTaskHistoryLimit)
 	}
@@ -85,7 +85,6 @@ func runInfo(dockerCli *client.DockerCli) error {
 		if info.Swarm.ControlAvailable {
 			fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
 			fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
-			ioutils.FprintfIfNotEmpty(dockerCli.Out(), " CA Certificate Hash: %s\n", info.Swarm.CACertHash)
 		}
 	}

@@ -11,7 +11,7 @@ type Backend interface {
 	Join(req types.JoinRequest) error
 	Leave(force bool) error
 	Inspect() (types.Swarm, error)
-	Update(uint64, types.Spec) error
+	Update(uint64, types.Spec, types.UpdateFlags) error
 	GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
 	GetService(string) (types.Service, error)
 	CreateService(types.ServiceSpec, string) (string, error)
@@ -66,7 +66,15 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 		return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error())
 	}

-	if err := sr.backend.Update(version, swarm); err != nil {
+	var flags types.UpdateFlags
+	if r.URL.Query().Get("rotate_worker_token") == "true" {
+		flags.RotateWorkerToken = true
+	}
+	if r.URL.Query().Get("rotate_manager_token") == "true" {
+		flags.RotateManagerToken = true
+	}
+
+	if err := sr.backend.Update(version, swarm, flags); err != nil {
 		logrus.Errorf("Error configuring swarm: %v", err)
 		return err
 	}
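On the wire, rotation rides on the existing swarm-update endpoint as query parameters, so any HTTP client can trigger it without engine-api. The endpoint path and version value below are illustrative; only the rotate_worker_token / rotate_manager_token parameters are defined by this change:

POST /swarm/update?version=<current-version>&rotate_worker_token=true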
@@ -1087,7 +1087,7 @@ __docker_service_subcommand() {
         "($help)--name=[Service name]:name: "
         "($help)*--network=[Network attachments]:network: "
         "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: "
-        "($help)--registry-auth[Send registry authentication details to Swarm agents]"
+        "($help)--registry-auth[Send registry authentication details to swarm agents]"
         "($help)--replicas=[Number of tasks]:replicas: "
         "($help)--reserve-cpu=[Reserve CPUs]:value: "
        "($help)--reserve-memory=[Reserve Memory]:value: "
@@ -1185,11 +1185,11 @@ __docker_service_subcommand() {
 __docker_swarm_commands() {
     local -a _docker_swarm_subcommands
     _docker_swarm_subcommands=(
-        "init:Initialize a Swarm"
-        "inspect:Inspect the Swarm"
-        "join:Join a Swarm as a node and/or manager"
-        "leave:Leave a Swarm"
-        "update:Update the Swarm"
+        "init:Initialize a swarm"
+        "inspect:Inspect the swarm"
+        "join:Join a swarm as a node and/or manager"
+        "leave:Leave a swarm"
+        "update:Update the swarm"
     )
     _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands
 }
@@ -13,7 +13,6 @@ import (
 	"google.golang.org/grpc"

 	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/daemon/cluster/convert"
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
 	"github.com/docker/docker/daemon/cluster/executor/container"
@@ -42,16 +41,16 @@ const (
 )

 // ErrNoSwarm is returned on leaving a cluster that was never initialized
-var ErrNoSwarm = fmt.Errorf("This node is not part of Swarm")
+var ErrNoSwarm = fmt.Errorf("This node is not part of swarm")

 // ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
-var ErrSwarmExists = fmt.Errorf("This node is already part of a Swarm cluster. Use \"docker swarm leave\" to leave this cluster and join another one.")
+var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm cluster. Use \"docker swarm leave\" to leave this cluster and join another one.")

 // ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet.
 var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.")

 // ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
-var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. Attempt to join the cluster will continue in the background. Use \"docker info\" command to see the current Swarm status of your node.")
+var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. Attempt to join the cluster will continue in the background. Use \"docker info\" command to see the current swarm status of your node.")

 // defaultSpec contains some sane defaults if cluster options are missing on init
 var defaultSpec = types.Spec{
@@ -127,7 +126,7 @@ func New(config Config) (*Cluster, error) {
 		return nil, err
 	}

-	n, err := c.startNewNode(false, st.ListenAddr, "", "", "", false)
+	n, err := c.startNewNode(false, st.ListenAddr, "", "")
 	if err != nil {
 		return nil, err
 	}
@@ -196,7 +195,7 @@ func (c *Cluster) reconnectOnFailure(n *node) {
 		return
 	}
 	var err error
-	n, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
+	n, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "")
 	if err != nil {
 		c.err = err
 		close(n.done)
@@ -205,7 +204,7 @@ func (c *Cluster) reconnectOnFailure(n *node) {
 	}
 }

-func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*node, error) {
+func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, joinToken string) (*node, error) {
 	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
 		return nil, err
 	}
|
||||||
ListenRemoteAPI: listenAddr,
|
ListenRemoteAPI: listenAddr,
|
||||||
JoinAddr: joinAddr,
|
JoinAddr: joinAddr,
|
||||||
StateDir: c.root,
|
StateDir: c.root,
|
||||||
CAHash: cahash,
|
JoinToken: joinToken,
|
||||||
Secret: secret,
|
|
||||||
Executor: container.NewExecutor(c.config.Backend),
|
Executor: container.NewExecutor(c.config.Backend),
|
||||||
HeartbeatTick: 1,
|
HeartbeatTick: 1,
|
||||||
ElectionTick: 3,
|
ElectionTick: 3,
|
||||||
IsManager: ismanager,
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -291,7 +288,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
|
||||||
if node := c.node; node != nil {
|
if node := c.node; node != nil {
|
||||||
if !req.ForceNewCluster {
|
if !req.ForceNewCluster {
|
||||||
c.Unlock()
|
c.Unlock()
|
||||||
return "", errSwarmExists(node)
|
return "", ErrSwarmExists
|
||||||
}
|
}
|
||||||
if err := c.stopNode(); err != nil {
|
if err := c.stopNode(); err != nil {
|
||||||
c.Unlock()
|
c.Unlock()
|
||||||
|
@ -305,7 +302,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo: check current state existing
|
// todo: check current state existing
|
||||||
n, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "", "", false)
|
n, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.Unlock()
|
c.Unlock()
|
||||||
return "", err
|
return "", err
|
||||||
|
@@ -336,28 +333,21 @@ func (c *Cluster) Join(req types.JoinRequest) error {
 	c.Lock()
 	if node := c.node; node != nil {
 		c.Unlock()
-		return errSwarmExists(node)
+		return ErrSwarmExists
 	}
 	if err := validateAndSanitizeJoinRequest(&req); err != nil {
 		c.Unlock()
 		return err
 	}
 	// todo: check current state existing
-	n, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.Secret, req.CACertHash, req.Manager)
+	n, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.JoinToken)
 	if err != nil {
 		c.Unlock()
 		return err
 	}
 	c.Unlock()

-	certificateRequested := n.CertificateRequested()
-	for {
 	select {
-	case <-certificateRequested:
-		if n.NodeMembership() == swarmapi.NodeMembershipPending {
-			return fmt.Errorf("Your node is in the process of joining the cluster but needs to be accepted by existing cluster member.\nTo accept this node into cluster run \"docker node accept %v\" in an existing cluster manager. Use \"docker info\" command to see the current Swarm status of your node.", n.NodeID())
-		}
-		certificateRequested = nil
 	case <-time.After(swarmConnectTimeout):
 		// attempt to connect will continue in background, also reconnecting
 		go c.reconnectOnFailure(n)
@@ -370,7 +360,6 @@ func (c *Cluster) Join(req types.JoinRequest) error {
 		defer c.RUnlock()
 		return c.err
 	}
-	}
 }

 // stopNode is a helper that stops the active c.node and waits until it has
@@ -489,7 +478,7 @@ func (c *Cluster) Inspect() (types.Swarm, error) {
 }

 // Update updates configuration of a managed swarm cluster.
-func (c *Cluster) Update(version uint64, spec types.Spec) error {
+func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
 	c.RLock()
 	defer c.RUnlock()

@@ -505,7 +494,7 @@ func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
 		return err
 	}

-	swarmSpec, err := convert.SwarmSpecToGRPCandMerge(spec, &swarm.Spec)
+	swarmSpec, err := convert.SwarmSpecToGRPC(spec)
 	if err != nil {
 		return err
 	}
@@ -518,6 +507,10 @@ func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
 			ClusterVersion: &swarmapi.Version{
 				Index: version,
 			},
+			Rotation: swarmapi.JoinTokenRotation{
+				RotateWorkerToken:  flags.RotateWorkerToken,
+				RotateManagerToken: flags.RotateManagerToken,
+			},
 		},
 	)
 	return err
@@ -611,10 +604,6 @@ func (c *Cluster) Info() types.Info {
 			}
 		}
 	}
-
-	if swarm, err := getSwarm(ctx, c.client); err == nil && swarm != nil {
-		info.CACertHash = swarm.RootCA.CACertHash
-	}
 	}

 	if c.node != nil {
@@ -636,12 +625,12 @@ func (c *Cluster) isActiveManager() bool {
 // Call with read lock.
 func (c *Cluster) errNoManager() error {
 	if c.node == nil {
-		return fmt.Errorf("This node is not a Swarm manager. Use \"docker swarm init\" or \"docker swarm join --manager\" to connect this node to Swarm and try again.")
+		return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join --manager\" to connect this node to swarm and try again.")
 	}
 	if c.node.Manager() != nil {
-		return fmt.Errorf("This node is not a Swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
+		return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
 	}
-	return fmt.Errorf("This node is not a Swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
+	return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
 }

 // GetServices returns all services of a managed swarm cluster.
@@ -1219,11 +1208,6 @@ func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
 			return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err)
 		}
 	}
-	if req.CACertHash != "" {
-		if _, err := digest.ParseDigest(req.CACertHash); err != nil {
-			return fmt.Errorf("invalid CACertHash %q, %v", req.CACertHash, err)
-		}
-	}
 	return nil
 }

@@ -1238,13 +1222,6 @@ func validateAddr(addr string) (string, error) {
 	return strings.TrimPrefix(newaddr, "tcp://"), nil
 }

-func errSwarmExists(node *node) error {
-	if node.NodeMembership() != swarmapi.NodeMembershipAccepted {
-		return ErrPendingSwarmExists
-	}
-	return ErrSwarmExists
-}
-
 func initClusterSpec(node *node, spec types.Spec) error {
 	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
 	for conn := range node.ListenControlSocket(ctx) {
@@ -1269,7 +1246,7 @@ func initClusterSpec(node *node, spec types.Spec) error {
 			cluster = lcr.Clusters[0]
 			break
 		}
-		newspec, err := convert.SwarmSpecToGRPCandMerge(spec, &cluster.Spec)
+		newspec, err := convert.SwarmSpecToGRPC(spec)
 		if err != nil {
 			return fmt.Errorf("error updating cluster settings: %v", err)
 		}
@@ -15,7 +15,6 @@ func NodeFromGRPC(n swarmapi.Node) types.Node {
 		ID: n.ID,
 		Spec: types.NodeSpec{
 			Role:         types.NodeRole(strings.ToLower(n.Spec.Role.String())),
-			Membership:   types.NodeMembership(strings.ToLower(n.Spec.Membership.String())),
 			Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
 		},
 		Status: types.NodeStatus{
@@ -79,12 +78,6 @@ func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) {
 		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role)
 	}

-	if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(string(s.Membership))]; ok {
-		spec.Membership = swarmapi.NodeSpec_Membership(membership)
-	} else {
-		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Membership: %q", s.Membership)
-	}
-
 	if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok {
 		spec.Availability = swarmapi.NodeSpec_Availability(availability)
 	} else {
@@ -5,8 +5,6 @@ import (
 	"strings"
 	"time"

-	"golang.org/x/crypto/bcrypt"
-
 	types "github.com/docker/engine-api/types/swarm"
 	swarmapi "github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/protobuf/ptypes"
@@ -28,6 +26,10 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
 				ElectionTick:  c.Spec.Raft.ElectionTick,
 			},
 		},
+		JoinTokens: types.JoinTokens{
+			Worker:  c.RootCA.JoinTokens.Worker,
+			Manager: c.RootCA.JoinTokens.Manager,
+		},
 	}

 	heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod)
@@ -52,23 +54,11 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
 	swarm.Spec.Name = c.Spec.Annotations.Name
 	swarm.Spec.Labels = c.Spec.Annotations.Labels

-	for _, policy := range c.Spec.AcceptancePolicy.Policies {
-		p := types.Policy{
-			Role:       types.NodeRole(strings.ToLower(policy.Role.String())),
-			Autoaccept: policy.Autoaccept,
-		}
-		if policy.Secret != nil {
-			secret := string(policy.Secret.Data)
-			p.Secret = &secret
-		}
-		swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
-	}
-
 	return swarm
 }

-// SwarmSpecToGRPCandMerge converts a Spec to a grpc ClusterSpec and merge AcceptancePolicy from an existing grpc ClusterSpec if provided.
-func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
+// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
+func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
 	spec := swarmapi.ClusterSpec{
 		Annotations: swarmapi.Annotations{
 			Name: s.Name,
@ -104,63 +94,5 @@ func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy, existingSpec); err != nil {
|
|
||||||
return swarmapi.ClusterSpec{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return spec, nil
|
return spec, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SwarmSpecUpdateAcceptancePolicy updates a grpc ClusterSpec using AcceptancePolicy.
|
|
||||||
func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy, oldSpec *swarmapi.ClusterSpec) error {
|
|
||||||
spec.AcceptancePolicy.Policies = nil
|
|
||||||
hashs := make(map[string][]byte)
|
|
||||||
|
|
||||||
for _, p := range acceptancePolicy.Policies {
|
|
||||||
role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(p.Role))]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("invalid Role: %q", p.Role)
|
|
||||||
}
|
|
||||||
|
|
||||||
policy := &swarmapi.AcceptancePolicy_RoleAdmissionPolicy{
|
|
||||||
Role: swarmapi.NodeRole(role),
|
|
||||||
Autoaccept: p.Autoaccept,
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Secret != nil {
|
|
||||||
if *p.Secret == "" { // if provided secret is empty, it means erase previous secret.
|
|
||||||
policy.Secret = nil
|
|
||||||
} else { // if provided secret is not empty, we generate a new one.
|
|
||||||
hashPwd, ok := hashs[*p.Secret]
|
|
||||||
if !ok {
|
|
||||||
hashPwd, _ = bcrypt.GenerateFromPassword([]byte(*p.Secret), 0)
|
|
||||||
hashs[*p.Secret] = hashPwd
|
|
||||||
}
|
|
||||||
policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
|
|
||||||
Data: hashPwd,
|
|
||||||
Alg: "bcrypt",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if oldSecret := getOldSecret(oldSpec, policy.Role); oldSecret != nil { // else use the old one.
|
|
||||||
policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
|
|
||||||
Data: oldSecret.Data,
|
|
||||||
Alg: oldSecret.Alg,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
spec.AcceptancePolicy.Policies = append(spec.AcceptancePolicy.Policies, policy)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getOldSecret(oldSpec *swarmapi.ClusterSpec, role swarmapi.NodeRole) *swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret {
|
|
||||||
if oldSpec == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for _, p := range oldSpec.AcceptancePolicy.Policies {
|
|
||||||
if p.Role == role {
|
|
||||||
return p.Secret
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
|
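The tokens surfaced by this conversion are opaque strings to callers, but the tests later in this diff treat them as dash-separated fields (the CA hash is spliced in at index 2). A minimal sketch of that layout follows; the `joinToken` type and field names are illustrative, not swarmkit's own:

```go
package main

import (
	"fmt"
	"strings"
)

// joinToken captures the fields implied by the token layout used in this
// commit: SWMTKN-<version>-<CA certificate hash>-<role-specific secret>.
type joinToken struct {
	Version string
	CAHash  string
	Secret  string
}

func parseJoinToken(s string) (joinToken, error) {
	parts := strings.Split(s, "-")
	if len(parts) != 4 || parts[0] != "SWMTKN" {
		return joinToken{}, fmt.Errorf("not a swarm join token: %q", s)
	}
	return joinToken{Version: parts[1], CAHash: parts[2], Secret: parts[3]}, nil
}

func main() {
	tok, err := parseJoinToken("SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx")
	if err != nil {
		panic(err)
	}
	// The CA hash half lets a joining node verify the manager's root CA;
	// the secret half selects the worker or manager role.
	fmt.Println(tok.CAHash, tok.Secret)
}
```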
@@ -3351,7 +3351,6 @@ List nodes
       "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
       "Spec": {
         "Role": "MANAGER",
-        "Membership": "ACCEPTED",
         "Availability": "ACTIVE"
       },
       "Description": {
@@ -3481,7 +3480,6 @@ Return low-level information on the node `id`
       "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
       "Spec": {
         "Role": "MANAGER",
-        "Membership": "ACCEPTED",
         "Availability": "ACTIVE"
       },
       "Description": {
@@ -3595,18 +3593,6 @@ Initialize a new Swarm
       "ListenAddr": "0.0.0.0:4500",
       "ForceNewCluster": false,
       "Spec": {
-        "AcceptancePolicy": {
-          "Policies": [
-            {
-              "Role": "MANAGER",
-              "Autoaccept": false
-            },
-            {
-              "Role": "WORKER",
-              "Autoaccept": true
-            }
-          ]
-        },
         "Orchestration": {},
         "Raft": {},
         "Dispatcher": {},
@@ -3676,9 +3662,7 @@ Join an existing new Swarm
     {
       "ListenAddr": "0.0.0.0:4500",
       "RemoteAddrs": ["node1:4500"],
-      "Secret": "",
-      "CACertHash": "",
-      "Manager": false
+      "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
     }

 **Example response**:
@@ -3698,9 +3682,7 @@ JSON Parameters:
 - **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to
   manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
 - **RemoteAddr** – Address of any manager node already participating in the Swarm to join.
-- **Secret** – Secret token for joining this Swarm.
-- **CACertHash** – Optional hash of the root CA to avoid relying on trust on first use.
-- **Manager** – Directly join as a manager (only for a Swarm configured to autoaccept managers).
+- **JoinToken** – Secret token for joining this Swarm.

 ### Leave a Swarm
@@ -3741,18 +3723,6 @@ Update a Swarm

     {
       "Name": "default",
-      "AcceptancePolicy": {
-        "Policies": [
-          {
-            "Role": "WORKER",
-            "Autoaccept": false
-          },
-          {
-            "Role": "MANAGER",
-            "Autoaccept": false
-          }
-        ]
-      },
       "Orchestration": {
         "TaskHistoryRetentionLimit": 10
       },
@@ -3767,6 +3737,10 @@ Update a Swarm
       },
       "CAConfig": {
         "NodeCertExpiry": 7776000000000000
+      },
+      "JoinTokens": {
+        "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx",
+        "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
       }
     }
@@ -3777,6 +3751,13 @@ Update a Swarm
     Content-Length: 0
     Content-Type: text/plain; charset=utf-8

+**Query parameters**:
+
+- **version** – The version number of the swarm object being updated. This is
+  required to avoid conflicting writes.
+- **rotate_worker_token** - Set to `true` to rotate the worker join token.
+- **rotate_manager_token** - Set to `true` to rotate the manager join token.
+
 **Status codes**:

 - **200** – no error
@@ -3785,11 +3766,6 @@ Update a Swarm

 JSON Parameters:

-- **Policies** – An array of acceptance policies.
-  - **Role** – The role that policy applies to (`MANAGER` or `WORKER`)
-  - **Autoaccept** – A boolean indicating whether nodes joining for that role should be
-    automatically accepted in the Swarm.
-  - **Secret** – An optional secret to provide for nodes to join the Swarm.
 - **Orchestration** – Configuration settings for the orchestration aspects of the Swarm.
   - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored.
 - **Raft** – Raft related configuration.
@@ -3811,6 +3787,9 @@ JSON Parameters:
 - **URL** - URL where certificate signing requests should be sent.
 - **Options** - An object with key/value pairs that are interpreted
   as protocol-specific options for the external CA driver.
+- **JoinTokens** - Tokens that can be used by other nodes to join the Swarm.
+  - **Worker** - Token to use for joining as a worker.
+  - **Manager** - Token to use for joining as a manager.

 ## 3.8 Services
@@ -4292,6 +4271,10 @@ Update the service `id`.
   of: `"Ports": { "<port>/<tcp|udp>: {}" }`
 - **VirtualIPs**

+**Query parameters**:
+
+- **version** – The version number of the service object being updated. This is
+  required to avoid conflicting writes.
+
 **Status codes**:
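Taken together, the `GET /swarm` response and the new `rotate_*` query parameters make rotation a two-step call: read the swarm object for its version and spec, then re-post the spec with the rotate flags. A sketch in Go against the local daemon socket, mirroring the `rotateTokens` test helper further down in this diff; the socket path is an assumption:

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Dial the local daemon over its unix socket (path is an assumption).
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}

	// Step 1: read the current swarm object for its version and spec.
	resp, err := client.Get("http://localhost/swarm")
	if err != nil {
		panic(err)
	}
	var sw struct {
		Version struct{ Index uint64 }
		Spec    json.RawMessage
	}
	err = json.NewDecoder(resp.Body).Decode(&sw)
	resp.Body.Close()
	if err != nil {
		panic(err)
	}

	// Step 2: re-post the unchanged spec with the rotate_* query parameters.
	url := fmt.Sprintf("http://localhost/swarm/update?version=%d&rotate_worker_token=true&rotate_manager_token=true", sw.Version.Index)
	resp, err = client.Post(url, "application/json", bytes.NewReader(sw.Spec))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(resp.Status) // expect "200 OK" per the status codes above
}
```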
The identical changes are applied to a second copy of the API reference, with each hunk one line lower:

@@ -3352,7 +3352,6 @@ List nodes
@@ -3482,7 +3481,6 @@ Return low-level information on the node `id`
@@ -3596,18 +3594,6 @@ Initialize a new Swarm
@@ -3677,9 +3663,7 @@ Join an existing new Swarm
@@ -3699,9 +3683,7 @@ JSON Parameters:
@@ -3742,18 +3724,6 @@ Update a Swarm
@@ -3768,6 +3738,10 @@ Update a Swarm
@@ -3778,6 +3752,13 @@ Update a Swarm
@@ -3786,11 +3767,6 @@ Update a Swarm
@@ -3812,6 +3788,9 @@ JSON Parameters:
@@ -4293,6 +4272,10 @@ Update the service `id`.
@@ -19,7 +19,7 @@ Create and update a stack from a Distributed Application Bundle (DAB)
 Options:
       --file string     Path to a Distributed Application Bundle file (Default: STACK.dab)
       --help            Print usage
-      --registry-auth   Send registry authentication details to Swarm agents
+      --registry-auth   Send registry authentication details to swarm agents
 ```

 Create and update a stack from a `dab` file. This command has to be
@@ -111,7 +111,6 @@ read the [`dockerd`](dockerd.md) reference page.

 | Command | Description |
 |:--------|:-------------------------------------------------------------------|
-| [node accept](node_accept.md) | Accept a node into the swarm |
 | [node promote](node_promote.md) | Promote a node that is pending a promotion to manager |
 | [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager |
 | [node inspect](node_inspect.md) | Inspect a node in the swarm |
@@ -124,10 +123,11 @@ read the [`dockerd`](dockerd.md) reference page.

 | Command | Description |
 |:--------|:-------------------------------------------------------------------|
-| [swarm init](swarm_init.md) | Initialize a Swarm |
-| [swarm join](swarm_join.md) | Join a Swarm as a manager node or worker node |
+| [swarm init](swarm_init.md) | Initialize a swarm |
+| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node |
 | [swarm leave](swarm_leave.md) | Remove the current node from the swarm |
 | [swarm update](swarm_update.md) | Update attributes of a swarm |
+| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens |

 ### Swarm service commands
@@ -38,7 +38,7 @@ available on the volume where `/var/lib/docker` is mounted.
 ## Display Docker system information

 Here is a sample output for a daemon running on Ubuntu, using the overlay
-storage driver and a node that is part of a 2 node Swarm cluster:
+storage driver and a node that is part of a 2 node swarm cluster:

     $ docker -D info
     Containers: 14
@@ -1,32 +0,0 @@
-<!--[metadata]>
-+++
-title = "node accept"
-description = "The node accept command description and usage"
-keywords = ["node, accept"]
-[menu.main]
-parent = "smn_cli"
-+++
-<![end-metadata]-->
-
-# node accept
-
-```markdown
-Usage:	docker node accept NODE [NODE...]
-
-Accept a node in the swarm
-
-Options:
-      --help   Print usage
-```
-
-Accept a node into the swarm. This command targets a docker engine that is a manager in the swarm cluster.
-
-
-```bash
-$ docker node accept <node name>
-```
-
-## Related information
-
-* [node promote](node_promote.md)
-* [node demote](node_demote.md)
@@ -29,5 +29,4 @@ $ docker node demote <node name>

 ## Related information

-* [node accept](node_accept.md)
 * [node promote](node_promote.md)
@@ -41,7 +41,6 @@ Example output:
       "UpdatedAt": "2016-06-16T22:52:45.230878043Z",
       "Spec": {
         "Role": "manager",
-        "Membership": "accepted",
         "Availability": "active"
       },
       "Description": {
@@ -30,10 +30,10 @@ Lists all the nodes that the Docker Swarm manager knows about. You can filter us
 Example output:

     $ docker node ls
-    ID                           HOSTNAME        MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-    1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Accepted    Ready   Active
-    38ciaotwjuritcdtn9npbnkuz    swarm-worker1   Accepted    Ready   Active
-    e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Accepted    Ready   Active        Reachable       Yes
+    ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+    1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Ready   Active
+    38ciaotwjuritcdtn9npbnkuz    swarm-worker1   Ready   Active
+    e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader


 ## Filtering
@@ -54,16 +54,16 @@ The `name` filter matches on all or part of a node name.

 The following filter matches the node with a name equal to `swarm-master` string.

     $ docker node ls -f name=swarm-manager1
-    ID                           HOSTNAME        MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-    e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Accepted    Ready   Active        Reachable       Yes
+    ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+    e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader

 ### id

 The `id` filter matches all or part of a node's id.

     $ docker node ls -f id=1
-    ID                           HOSTNAME        MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-    1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Accepted    Ready   Active
+    ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+    1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Ready   Active


 #### label
@@ -75,8 +75,8 @@ The following filter matches nodes with the `usage` label regardless of its valu

 ```bash
 $ docker node ls -f "label=foo"
-ID                           HOSTNAME        MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Accepted    Ready   Active
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Ready   Active
 ```
@@ -28,5 +28,4 @@ $ docker node promote <node name>

 ## Related information

-* [node accept](node_accept.md)
 * [node demote](node_demote.md)
@@ -23,14 +23,13 @@ Options:
       --help   Print usage
 ```

-Removes specified nodes from a swarm. Rejects nodes with `Pending`
-membership from the swarm.
+Removes specified nodes from a swarm.


 Example output:

     $ docker node rm swarm-node-02
-    Node swarm-node-02 removed from Swarm
+    Node swarm-node-02 removed from swarm


 ## Related information
@@ -21,7 +21,6 @@ Options:
       --help                Print usage
       --label-add value     Add or update a node label (key=value) (default [])
       --label-rm value      Remove a node label if exists (default [])
-      --membership string   Membership of the node (accepted/rejected)
       --role string         Role of the node (worker/manager)
 ```
@@ -31,7 +31,7 @@ Options:
       --name string             Service name
       --network value           Network attachments (default [])
   -p, --publish value           Publish a port as a node port (default [])
-      --registry-auth           Send registry authentication details to Swarm agents
+      --registry-auth           Send registry authentication details to swarm agents
       --replicas value          Number of tasks (default none)
       --reserve-cpu value       Reserve CPUs (default 0.000)
       --reserve-memory value    Reserve Memory (default 0 B)
@@ -38,7 +38,7 @@ Options:
       --network-rm value        Remove a network by name (default [])
       --publish-add value       Add or update a published port (default [])
       --publish-rm value        Remove a published port by its target port (default [])
-      --registry-auth           Send registry authentication details to Swarm agents
+      --registry-auth           Send registry authentication details to swarm agents
       --replicas value          Number of tasks (default none)
       --reserve-cpu value       Reserve CPUs (default 0.000)
       --reserve-memory value    Reserve Memory (default 0 B)
@@ -14,74 +14,43 @@ parent = "smn_cli"
 ```markdown
 Usage:	docker swarm init [OPTIONS]

-Initialize a Swarm
+Initialize a swarm

 Options:
-      --auto-accept value              Auto acceptance policy (default worker)
       --cert-expiry duration           Validity period for node certificates (default 2160h0m0s)
       --dispatcher-heartbeat duration  Dispatcher heartbeat period (default 5s)
       --external-ca value              Specifications of one or more certificate signing endpoints
       --force-new-cluster              Force create a new cluster from current state.
       --help                           Print usage
       --listen-addr value              Listen address (default 0.0.0.0:2377)
-      --secret string                  Set secret value needed to accept nodes into cluster
       --task-history-limit int         Task history retention limit (default 10)
 ```

-Initialize a Swarm cluster. The docker engine targeted by this command becomes a manager
-in the newly created one node Swarm cluster.
+Initialize a swarm cluster. The docker engine targeted by this command becomes a manager
+in the newly created one node swarm cluster.


 ```bash
 $ docker swarm init --listen-addr 192.168.99.121:2377
-No --secret provided. Generated random secret:
-	4ao565v9jsuogtq5t8s379ulb
-
-Swarm initialized: current node (1ujecd0j9n3ro9i6628smdmth) is now a manager.
+Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager.

 To add a worker to this swarm, run the following command:
-	docker swarm join --secret 4ao565v9jsuogtq5t8s379ulb \
-	--ca-hash sha256:07ce22bd1a7619f2adc0d63bd110479a170e7c4e69df05b67a1aa2705c88ef09 \
-	192.168.99.121:2377
-$ docker node ls
-ID                           HOSTNAME  MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-1ujecd0j9n3ro9i6628smdmth *  manager1  Accepted    Ready   Active        Reachable       Yes
+	docker swarm join \
+	--token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \
+	172.17.0.2:2377
+
+To add a manager to this swarm, run the following command:
+	docker swarm join \
+	--token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \
+	172.17.0.2:2377
 ```

-If a secret for joining new nodes is not provided with `--secret`, `docker swarm init` will
-generate a random one and print it to the terminal (as seen in the example above). To initialize
-a swarm with no secret, use `--secret ""`.
+`docker swarm init` generates two random tokens, a worker token and a manager token. When you join
+a new node to the swarm, the node joins as a worker or manager node based upon the token you pass
+to [swarm join](swarm_join.md).

-### `--auto-accept value`
-
-This flag controls node acceptance into the cluster. By default, `worker` nodes are
-automatically accepted by the cluster. This can be changed by specifying what kinds of nodes
-can be auto-accepted into the cluster. If auto-accept is not turned on, then
-[node accept](node_accept.md) can be used to explicitly accept a node into the cluster.
-
-For example, the following initializes a cluster with auto-acceptance of workers, but not managers
-
-```bash
-$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept worker
-```
-
-It is possible to pass a comma-separated list of node types. The following initializes a cluster
-with auto-acceptance of both `worker` and `manager` nodes
-
-```bash
-$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept worker,manager
-```
-
-To disable auto acceptance, use the `none` option. Note that this option cannot
-be combined with other values. When disabling auto acceptance, nodes must be
-manually accepted or rejected using `docker node accept` or `docker node rm`.
-
-The following example enables swarm mode with auto acceptance disabled:
-
-```bash
-$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept none
-```
+After you create the swarm, you can display or rotate the token using
+[swarm join-token](swarm_join_token.md).

 ### `--cert-expiry`
@@ -105,11 +74,7 @@ This flag forces an existing node that was part of a quorum that was lost to res
 ### `--listen-addr value`

-The node listens for inbound Swarm manager traffic on this IP:PORT
+The node listens for inbound swarm manager traffic on this IP:PORT

-### `--secret string`
-
-Secret value needed to accept nodes into the Swarm
-
 ### `--task-history-limit`
@@ -120,5 +85,5 @@ This flag sets up task history retention limit.
 * [swarm join](swarm_join.md)
 * [swarm leave](swarm_leave.md)
 * [swarm update](swarm_update.md)
-* [node accept](node_accept.md)
+* [swarm join-token](swarm_join_token.md)
 * [node rm](node_rm.md)
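Since `docker swarm init` now prints ready-made join commands, scripting a cluster bring-up reduces to capturing a token on the manager. A short sketch using the documented `join-token -q` flag; the manager address is a placeholder:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Run on a manager: fetch only the worker token via the -q flag.
	out, err := exec.Command("docker", "swarm", "join-token", "-q", "worker").Output()
	if err != nil {
		panic(err)
	}
	token := strings.TrimSpace(string(out))

	// Compose the command a new node would run (manager address is a placeholder).
	fmt.Printf("docker swarm join --token %s 192.168.99.121:2377\n", token)
}
```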
@@ -14,55 +14,54 @@ parent = "smn_cli"
 ```markdown
 Usage:	docker swarm join [OPTIONS] HOST:PORT

-Join a Swarm as a node and/or manager
+Join a swarm as a node and/or manager

 Options:
-      --ca-hash string      Hash of the Root Certificate Authority certificate used for trusted join
       --help                Print usage
       --listen-addr value   Listen address (default 0.0.0.0:2377)
-      --manager             Try joining as a manager.
-      --secret string       Secret for node acceptance
+      --token string        Token for entry into the swarm
 ```

-Join a node to a Swarm cluster. If the `--manager` flag is specified, the docker engine
-targeted by this command becomes a `manager`. If it is not specified, it becomes a `worker`.
+Join a node to a swarm. The node joins as a manager node or worker node based upon the token you
+pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you
+pass a worker token, the node joins as a worker.

 ### Join a node to swarm as a manager

+The example below demonstrates joining a manager node using a manager token.
+
 ```bash
-$ docker swarm join --secret 4ao565v9jsuogtq5t8s379ulb --manager --listen-addr 192.168.99.122:2377 192.168.99.121:2377
-This node joined a Swarm as a manager.
+$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 --listen-addr 192.168.99.122:2377 192.168.99.121:2377
+This node joined a swarm as a manager.
 $ docker node ls
-ID                           HOSTNAME  MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-dkp8vy1dq1kxleu9g4u78tlag *  manager2  Accepted    Ready   Active        Reachable
-dvfxp4zseq4s0rih1selh0d20    manager1  Accepted    Ready   Active        Reachable       Yes
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+dkp8vy1dq1kxleu9g4u78tlag *  manager2  Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20    manager1  Ready   Active        Leader
 ```

+A cluster should only have 3-7 managers at most, because a majority of managers must be available
+for the cluster to function. Nodes that aren't meant to participate in this management quorum
+should join as workers instead. Managers should be stable hosts that have static IP addresses.
+
 ### Join a node to swarm as a worker

+The example below demonstrates joining a worker node using a worker token.
+
 ```bash
-$ docker swarm join --secret 4ao565v9jsuogtq5t8s379ulb --listen-addr 192.168.99.123:2377 192.168.99.121:2377
-This node joined a Swarm as a worker.
+$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx --listen-addr 192.168.99.123:2377 192.168.99.121:2377
+This node joined a swarm as a worker.
 $ docker node ls
-ID                           HOSTNAME  MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-7ln70fl22uw2dvjn2ft53m3q5    worker2   Accepted    Ready   Active
-dkp8vy1dq1kxleu9g4u78tlag    worker1   Accepted    Ready   Active        Reachable
-dvfxp4zseq4s0rih1selh0d20 *  manager1  Accepted    Ready   Active        Reachable       Yes
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+7ln70fl22uw2dvjn2ft53m3q5    worker2   Ready   Active
+dkp8vy1dq1kxleu9g4u78tlag    worker1   Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20 *  manager1  Ready   Active        Leader
 ```

-### `--ca-hash`
-
-Hash of the Root Certificate Authority certificate used for trusted join.
-
 ### `--listen-addr value`

-The node listens for inbound Swarm manager traffic on this IP:PORT
+The node listens for inbound swarm manager traffic on this IP:PORT

-### `--manager`
-
-Joins the node as a manager
-
-### `--secret string`
+### `--token string`

 Secret value required for nodes to join the swarm
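The same join can be driven through the API documented above: `POST /swarm/join` with a `JoinToken` lets the server decide the role. A hedged Go sketch; the socket path, addresses, and token are placeholders taken from the examples:

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Talk to the local daemon that should join the swarm (path is an assumption).
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}

	// The token picks the role: a worker token joins as a worker, a manager
	// token as a manager (placeholder values from the docs above).
	req := map[string]interface{}{
		"ListenAddr":  "0.0.0.0:2377",
		"RemoteAddrs": []string{"192.168.99.121:2377"},
		"JoinToken":   "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx",
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	resp, err := client.Post("http://localhost/swarm/join", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 on success
}
```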
docs/reference/commandline/swarm_join_token.md (new file, 76 lines)
@@ -0,0 +1,76 @@
+<!--[metadata]>
++++
+title = "swarm join-token"
+description = "The swarm join-token command description and usage"
+keywords = ["swarm, join-token"]
+advisory = "rc"
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# swarm join-token
+
+```markdown
+Usage:	docker swarm join-token [--rotate] (worker|manager)
+
+Manage join tokens
+
+Options:
+      --help     Print usage
+  -q, --quiet    Only display token
+      --rotate   Rotate join token
+```
+
+Join tokens are secrets that determine whether or not a node will join the swarm as a manager node
+or a worker node. You pass the token using the `--token` flag when you run
+[swarm join](swarm_join.md). You can access the current tokens or rotate the tokens using
+`swarm join-token`.
+
+Run with only a single `worker` or `manager` argument, it will print a command for joining a new
+node to the swarm, including the necessary token:
+
+```bash
+$ docker swarm join-token worker
+To add a worker to this swarm, run the following command:
+	docker swarm join \
+	--token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \
+	172.17.0.2:2377
+
+$ docker swarm join-token manager
+To add a manager to this swarm, run the following command:
+	docker swarm join \
+	--token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \
+	172.17.0.2:2377
+```
+
+Use the `--rotate` flag to generate a new join token for the specified role:
+
+```bash
+$ docker swarm join-token --rotate worker
+To add a worker to this swarm, run the following command:
+	docker swarm join \
+	--token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \
+	172.17.0.2:2377
+```
+
+After using `--rotate`, only the new token will be valid for joining with the specified role.
+
+The `-q` (or `--quiet`) flag only prints the token:
+
+```bash
+$ docker swarm join-token -q worker
+SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t
+```
+
+### `--rotate`
+
+Update the join token for a specified role with a new token and print the token.
+
+### `--quiet`
+
+Only print the token. Do not print a complete command for joining.
+
+## Related information
+
+* [swarm join](swarm_join.md)
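Rotation and the quiet flag compose naturally for credential-hygiene scripts: invalidate the old token, then capture only the fresh one. A sketch assuming `--rotate` and `-q` can be combined as the option list suggests:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Rotate the worker token and print only the replacement.
	out, err := exec.Command("docker", "swarm", "join-token", "--rotate", "-q", "worker").Output()
	if err != nil {
		panic(err)
	}
	fmt.Println("new worker token:", strings.TrimSpace(string(out)))
}
```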
@@ -14,7 +14,7 @@ parent = "smn_cli"
 ```markdown
 Usage:	docker swarm leave [OPTIONS]

-Leave a Swarm
+Leave a swarm

 Options:
       --force   Force leave ignoring warnings.
@@ -26,10 +26,10 @@ This command causes the node to leave the swarm.
 On a manager node:
 ```bash
 $ docker node ls
-ID                           HOSTNAME  MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-7ln70fl22uw2dvjn2ft53m3q5    worker2   Accepted    Ready   Active
-dkp8vy1dq1kxleu9g4u78tlag    worker1   Accepted    Ready   Active        Reachable
-dvfxp4zseq4s0rih1selh0d20 *  manager1  Accepted    Ready   Active        Reachable       Yes
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+7ln70fl22uw2dvjn2ft53m3q5    worker2   Ready   Active
+dkp8vy1dq1kxleu9g4u78tlag    worker1   Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20 *  manager1  Ready   Active        Leader
 ```

 On a worker node:
@@ -41,10 +41,10 @@ Node left the default swarm.
 On a manager node:
 ```bash
 $ docker node ls
-ID                           HOSTNAME  MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
-7ln70fl22uw2dvjn2ft53m3q5    worker2   Accepted    Down    Active
-dkp8vy1dq1kxleu9g4u78tlag    worker1   Accepted    Ready   Active        Reachable
-dvfxp4zseq4s0rih1selh0d20 *  manager1  Accepted    Ready   Active        Reachable       Yes
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+7ln70fl22uw2dvjn2ft53m3q5    worker2   Down    Active
+dkp8vy1dq1kxleu9g4u78tlag    worker1   Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20 *  manager1  Ready   Active        Leader
 ```

 ## Related information
@@ -14,23 +14,21 @@ parent = "smn_cli"
 ```markdown
 Usage:	docker swarm update [OPTIONS]

-Update the Swarm
+Update the swarm

 Options:
-      --auto-accept value              Auto acceptance policy (worker, manager or none)
       --cert-expiry duration           Validity period for node certificates (default 2160h0m0s)
       --dispatcher-heartbeat duration  Dispatcher heartbeat period (default 5s)
       --external-ca value              Specifications of one or more certificate signing endpoints
       --help                           Print usage
-      --secret string                  Set secret value needed to accept nodes into cluster
       --task-history-limit int         Task history retention limit (default 10)
 ```

-Updates a Swarm cluster with new parameter values. This command must target a manager node.
+Updates a swarm cluster with new parameter values. This command must target a manager node.


 ```bash
-$ docker swarm update --auto-accept manager
+$ docker swarm update --cert-expiry 4000h0m0s
 ```

 ## Related information
@@ -216,15 +216,17 @@ func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *Swarm

 	if joinSwarm == true {
 		if len(s.daemons) > 0 {
+			tokens := s.daemons[0].joinTokens(c)
+			token := tokens.Worker
+			if manager {
+				token = tokens.Manager
+			}
 			c.Assert(d.Join(swarm.JoinRequest{
 				RemoteAddrs: []string{s.daemons[0].listenAddr},
-				Manager:     manager}), check.IsNil)
-		} else {
-			c.Assert(d.Init(swarm.InitRequest{
-				Spec: swarm.Spec{
-					AcceptancePolicy: autoAcceptPolicy,
-				},
+				JoinToken:   token,
 			}), check.IsNil)
+		} else {
+			c.Assert(d.Init(swarm.InitRequest{}), check.IsNil)
 		}
 	}
@@ -22,14 +22,6 @@ type SwarmDaemon struct {
 	listenAddr string
 }

-// default policy in tests is allow-all
-var autoAcceptPolicy = swarm.AcceptancePolicy{
-	Policies: []swarm.Policy{
-		{Role: swarm.NodeRoleWorker, Autoaccept: true},
-		{Role: swarm.NodeRoleManager, Autoaccept: true},
-	},
-}
-
 // Init initializes a new swarm cluster.
 func (d *SwarmDaemon) Init(req swarm.InitRequest) error {
 	if req.ListenAddr == "" {
@@ -271,6 +263,28 @@ func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
 	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
 }

+func (d *SwarmDaemon) rotateTokens(c *check.C) {
+	var sw swarm.Swarm
+	status, out, err := d.SockRequest("GET", "/swarm", nil)
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+	c.Assert(json.Unmarshal(out, &sw), checker.IsNil)
+
+	url := fmt.Sprintf("/swarm/update?version=%d&rotate_worker_token=true&rotate_manager_token=true", sw.Version.Index)
+	status, out, err = d.SockRequest("POST", url, sw.Spec)
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+}
+
+func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens {
+	var sw swarm.Swarm
+	status, out, err := d.SockRequest("GET", "/swarm", nil)
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+	c.Assert(json.Unmarshal(out, &sw), checker.IsNil)
+	return sw.JoinTokens
+}
+
 func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
 	info, err := d.info()
 	c.Assert(err, checker.IsNil)
@ -43,7 +43,7 @@ func (s *DockerSwarmSuite) TestApiSwarmInit(c *check.C) {
|
||||||
c.Assert(info.ControlAvailable, checker.False)
|
c.Assert(info.ControlAvailable, checker.False)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
||||||
|
|
||||||
c.Assert(d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
||||||
|
|
||||||
info, err = d2.info()
|
info, err = d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
@ -72,89 +72,29 @@ func (s *DockerSwarmSuite) TestApiSwarmInit(c *check.C) {
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DockerSwarmSuite) TestApiSwarmManualAcceptance(c *check.C) {
|
func (s *DockerSwarmSuite) TestApiSwarmJoinToken(c *check.C) {
|
||||||
testRequires(c, Network)
|
testRequires(c, Network)
|
||||||
s.testAPISwarmManualAcceptance(c, "")
|
|
||||||
}
|
|
||||||
func (s *DockerSwarmSuite) TestApiSwarmManualAcceptanceSecret(c *check.C) {
|
|
||||||
testRequires(c, Network)
|
|
||||||
s.testAPISwarmManualAcceptance(c, "foobaz")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *DockerSwarmSuite) testAPISwarmManualAcceptance(c *check.C, secret string) {
|
|
||||||
d1 := s.AddDaemon(c, false, false)
|
d1 := s.AddDaemon(c, false, false)
|
||||||
c.Assert(d1.Init(swarm.InitRequest{
|
c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
|
||||||
Spec: swarm.Spec{
|
|
||||||
AcceptancePolicy: swarm.AcceptancePolicy{
|
|
||||||
Policies: []swarm.Policy{
|
|
||||||
{Role: swarm.NodeRoleWorker, Secret: &secret},
|
|
||||||
{Role: swarm.NodeRoleManager, Secret: &secret},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}), checker.IsNil)
|
|
||||||
|
|
||||||
d2 := s.AddDaemon(c, false, false)
|
d2 := s.AddDaemon(c, false, false)
|
||||||
err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
||||||
c.Assert(err, checker.NotNil)
|
c.Assert(err, checker.NotNil)
|
||||||
if secret == "" {
|
c.Assert(err.Error(), checker.Contains, "join token is necessary")
|
||||||
c.Assert(err.Error(), checker.Contains, "needs to be accepted")
|
|
||||||
info, err := d2.info()
|
|
||||||
c.Assert(err, checker.IsNil)
|
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
|
|
||||||
c.Assert(d2.Leave(false), checker.IsNil)
|
|
||||||
info, err = d2.info()
|
|
||||||
c.Assert(err, checker.IsNil)
|
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
|
||||||
} else {
|
|
||||||
c.Assert(err.Error(), checker.Contains, "valid secret token is necessary")
|
|
||||||
info, err := d2.info()
|
info, err := d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
||||||
}
|
|
||||||
d3 := s.AddDaemon(c, false, false)
|
|
||||||
c.Assert(d3.Join(swarm.JoinRequest{Secret: secret, RemoteAddrs: []string{d1.listenAddr}}), checker.NotNil)
|
|
||||||
info, err := d3.info()
|
|
||||||
c.Assert(err, checker.IsNil)
|
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
|
|
||||||
c.Assert(len(info.NodeID), checker.GreaterThan, 5)
|
|
||||||
d1.updateNode(c, info.NodeID, func(n *swarm.Node) {
|
|
||||||
n.Spec.Membership = swarm.NodeMembershipAccepted
|
|
||||||
})
|
|
||||||
waitAndAssert(c, defaultReconciliationTimeout, d3.checkLocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
|
err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}})
|
||||||
testRequires(c, Network)
|
|
||||||
d1 := s.AddDaemon(c, false, false)
|
|
||||||
secret := "foobar"
|
|
||||||
c.Assert(d1.Init(swarm.InitRequest{
|
|
||||||
Spec: swarm.Spec{
|
|
||||||
AcceptancePolicy: swarm.AcceptancePolicy{
|
|
||||||
Policies: []swarm.Policy{
|
|
||||||
{Role: swarm.NodeRoleWorker, Autoaccept: true, Secret: &secret},
|
|
||||||
{Role: swarm.NodeRoleManager, Secret: &secret},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}), checker.IsNil)
|
|
||||||
|
|
||||||
d2 := s.AddDaemon(c, false, false)
|
|
||||||
err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
|
||||||
c.Assert(err, checker.NotNil)
|
c.Assert(err, checker.NotNil)
|
||||||
c.Assert(err.Error(), checker.Contains, "secret token is necessary")
|
c.Assert(err.Error(), checker.Contains, "join token is necessary")
|
||||||
info, err := d2.info()
|
|
||||||
c.Assert(err, checker.IsNil)
|
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
|
||||||
|
|
||||||
err = d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}})
|
|
||||||
c.Assert(err, checker.NotNil)
|
|
||||||
c.Assert(err.Error(), checker.Contains, "secret token is necessary")
|
|
||||||
info, err = d2.info()
|
info, err = d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
||||||
|
|
||||||
c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobar", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
workerToken := d1.joinTokens(c).Worker
|
||||||
|
|
||||||
|
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
||||||
info, err = d2.info()
|
info, err = d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
||||||
|
@ -163,22 +103,19 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
||||||
|
|
||||||
// change secret
|
// change tokens
|
||||||
d1.updateSwarm(c, func(s *swarm.Spec) {
|
d1.rotateTokens(c)
|
||||||
for i := range s.AcceptancePolicy.Policies {
|
|
||||||
p := "foobaz"
|
|
||||||
s.AcceptancePolicy.Policies[i].Secret = &p
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
err = d2.Join(swarm.JoinRequest{Secret: "foobar", RemoteAddrs: []string{d1.listenAddr}})
|
err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}})
|
||||||
c.Assert(err, checker.NotNil)
|
c.Assert(err, checker.NotNil)
|
||||||
c.Assert(err.Error(), checker.Contains, "secret token is necessary")
|
c.Assert(err.Error(), checker.Contains, "join token is necessary")
|
||||||
info, err = d2.info()
|
info, err = d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
||||||
|
|
||||||
c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
workerToken = d1.joinTokens(c).Worker
|
||||||
|
|
||||||
|
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
||||||
info, err = d2.info()
|
info, err = d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
||||||
|
@ -187,24 +124,17 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
||||||
|
|
||||||
// change policy, don't change secret
|
// change spec, don't change tokens
|
||||||
d1.updateSwarm(c, func(s *swarm.Spec) {
|
d1.updateSwarm(c, func(s *swarm.Spec) {})
|
||||||
for i, p := range s.AcceptancePolicy.Policies {
|
|
||||||
if p.Role == swarm.NodeRoleManager {
|
|
||||||
s.AcceptancePolicy.Policies[i].Autoaccept = false
|
|
||||||
}
|
|
||||||
s.AcceptancePolicy.Policies[i].Secret = nil
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
||||||
c.Assert(err, checker.NotNil)
|
c.Assert(err, checker.NotNil)
|
||||||
c.Assert(err.Error(), checker.Contains, "secret token is necessary")
|
c.Assert(err.Error(), checker.Contains, "join token is necessary")
|
||||||
info, err = d2.info()
|
info, err = d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
|
||||||
|
|
||||||
c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
||||||
info, err = d2.info()
|
info, err = d2.info()
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
|
||||||
|
@@ -212,51 +142,24 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
 	info, err = d2.info()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

-	// clear secret
-	d1.updateSwarm(c, func(s *swarm.Spec) {
-		for i := range s.AcceptancePolicy.Policies {
-			p := ""
-			s.AcceptancePolicy.Policies[i].Secret = &p
-		}
-	})
-
-	c.Assert(d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
-	info, err = d2.info()
-	c.Assert(err, checker.IsNil)
-	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
-	c.Assert(d2.Leave(false), checker.IsNil)
-	info, err = d2.info()
-	c.Assert(err, checker.IsNil)
-	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
-
 }

 func (s *DockerSwarmSuite) TestApiSwarmCAHash(c *check.C) {
 	testRequires(c, Network)
 	d1 := s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, false, false)
-	err := d2.Join(swarm.JoinRequest{CACertHash: "foobar", RemoteAddrs: []string{d1.listenAddr}})
+	splitToken := strings.Split(d1.joinTokens(c).Worker, "-")
+	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
+	replacementToken := strings.Join(splitToken, "-")
+	err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}})
 	c.Assert(err, checker.NotNil)
-	c.Assert(err.Error(), checker.Contains, "invalid checksum digest format")
-
-	c.Assert(len(d1.CACertHash), checker.GreaterThan, 0)
-	c.Assert(d2.Join(swarm.JoinRequest{CACertHash: d1.CACertHash, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
+	c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
 }

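The rewritten TestApiSwarmCAHash depends on the token's dash-separated layout: splitting the worker token on "-" and overwriting index 2 corrupts the CA fingerprint segment, which is why the join now fails with "remote CA does not match fingerprint" rather than a parse error. A small sketch of that parsing, assuming the four-field <prefix>-<version>-<fingerprint>-<secret> layout inferred from the test (field names are assumptions; uses the strings and fmt packages):

    // parseJoinToken is illustrative only; it mirrors the four-field,
    // dash-separated layout the test above manipulates:
    // <prefix>-<version>-<CA fingerprint>-<secret>.
    func parseJoinToken(token string) (fingerprint, secret string, err error) {
    	parts := strings.Split(token, "-")
    	if len(parts) != 4 {
    		return "", "", fmt.Errorf("unexpected join token format: %d fields", len(parts))
    	}
    	// parts[2] is the segment the test overwrites to force the
    	// "remote CA does not match fingerprint" error on join.
    	return parts[2], parts[3], nil
    }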
 func (s *DockerSwarmSuite) TestApiSwarmPromoteDemote(c *check.C) {
 	testRequires(c, Network)
 	d1 := s.AddDaemon(c, false, false)
-	c.Assert(d1.Init(swarm.InitRequest{
-		Spec: swarm.Spec{
-			AcceptancePolicy: swarm.AcceptancePolicy{
-				Policies: []swarm.Policy{
-					{Role: swarm.NodeRoleWorker, Autoaccept: true},
-					{Role: swarm.NodeRoleManager},
-				},
-			},
-		},
-	}), checker.IsNil)
+	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
 	d2 := s.AddDaemon(c, true, false)

 	info, err := d2.info()
@@ -838,9 +741,7 @@ func (s *DockerSwarmSuite) TestApiSwarmForceNewCluster(c *check.C) {

 	c.Assert(d1.Init(swarm.InitRequest{
 		ForceNewCluster: true,
-		Spec: swarm.Spec{
-			AcceptancePolicy: autoAcceptPolicy,
-		},
+		Spec:            swarm.Spec{},
 	}), checker.IsNil)

 	waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances)
@@ -937,7 +838,6 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount
 	for _, n := range d.listNodes(c) {
 		c.Assert(n.Status.State, checker.Equals, swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID))
 		c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID))
-		c.Assert(n.Spec.Membership, checker.Equals, swarm.NodeMembershipAccepted, check.Commentf("membership of node %s, reported by %s", n.ID, d.Info.NodeID))
 		if n.Spec.Role == swarm.NodeRoleManager {
 			c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID))
 			if n.ManagerStatus.Leader {
@@ -25,50 +25,13 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) {
 		return sw[0].Spec
 	}

-	out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", "--auto-accept", "manager", "--auto-accept", "worker", "--secret", "foo")
+	out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s")
 	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))

 	spec := getSpec()
 	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
 	c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second))

-	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
-
-	for _, p := range spec.AcceptancePolicy.Policies {
-		c.Assert(p.Autoaccept, checker.Equals, true)
-		c.Assert(p.Secret, checker.NotNil)
-		c.Assert(*p.Secret, checker.Not(checker.Equals), "")
-	}
-
-	out, err = d.Cmd("swarm", "update", "--auto-accept", "none")
-	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
-
-	spec = getSpec()
-	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
-	c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second))
-
-	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
-
-	for _, p := range spec.AcceptancePolicy.Policies {
-		c.Assert(p.Autoaccept, checker.Equals, false)
-		// secret is still set
-		c.Assert(p.Secret, checker.NotNil)
-		c.Assert(*p.Secret, checker.Not(checker.Equals), "")
-	}
-
-	out, err = d.Cmd("swarm", "update", "--auto-accept", "manager", "--secret", "")
-	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
-
-	spec = getSpec()
-
-	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
-
-	for _, p := range spec.AcceptancePolicy.Policies {
-		c.Assert(p.Autoaccept, checker.Equals, p.Role == swarm.NodeRoleManager)
-		// secret has been removed
-		c.Assert(p.Secret, checker.IsNil)
-	}
-
 	// setting anything under 30m for cert-expiry is not allowed
 	out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m")
 	c.Assert(err, checker.NotNil)
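The CLI hunks above drop --auto-accept and --secret from swarm update and swarm init entirely; inspecting or replacing credentials now happens through the join-token subcommand instead. A hedged sketch of how that flow might be exercised in this suite's idiom (the argument shape and --rotate flag are assumptions, not shown in this diff):

    // Illustrative only: assumes "docker swarm join-token worker" prints
    // the current worker join token and "--rotate" generates a new one.
    out, err := d.Cmd("swarm", "join-token", "worker")
    c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))

    rotated, err := d.Cmd("swarm", "join-token", "--rotate", "worker")
    c.Assert(err, checker.IsNil, check.Commentf("out: %v", rotated))
    // After rotation the printed token should differ from the original.
    c.Assert(rotated, checker.Not(checker.Equals), out)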
@@ -89,37 +52,21 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {
 		return sw[0].Spec
 	}

-	out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", "--auto-accept", "manager", "--auto-accept", "worker", "--secret", "foo")
+	out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s")
 	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))

 	spec := getSpec()
 	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
 	c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second))

-	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
-
-	for _, p := range spec.AcceptancePolicy.Policies {
-		c.Assert(p.Autoaccept, checker.Equals, true)
-		c.Assert(p.Secret, checker.NotNil)
-		c.Assert(*p.Secret, checker.Not(checker.Equals), "")
-	}
-
 	c.Assert(d.Leave(true), checker.IsNil)

-	out, err = d.Cmd("swarm", "init", "--auto-accept", "none", "--secret", "")
+	out, err = d.Cmd("swarm", "init")
 	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))

 	spec = getSpec()
 	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour)
 	c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(5*time.Second))

-	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
-
-	for _, p := range spec.AcceptancePolicy.Policies {
-		c.Assert(p.Autoaccept, checker.Equals, false)
-		c.Assert(p.Secret, checker.IsNil)
-	}
-
 }

 func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) {