Revise swarm init/update flags, add unlocking capability
- Neither swarm init nor swarm update should take an unlock key
- Add an --autolock flag to turn on autolock
- Make the necessary Docker API changes
- Add a SwarmGetUnlockKey API call and use it when turning on autolock
- Add a swarm unlock-key subcommand

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
parent 8b1f72ad44 · commit 0f9fc54df9
16 changed files with 209 additions and 94 deletions
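To make the commit's intent concrete, here is a rough sketch of the client-side flow it enables, written against the Go client surface shown later in this diff. This is not code from the commit: the NewEnvClient constructor and panic-based error handling are only stand-ins for real setup.

package main

import (
    "fmt"

    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/client"
    "golang.org/x/net/context"
)

func main() {
    cli, err := client.NewEnvClient() // placeholder client setup
    if err != nil {
        panic(err)
    }
    ctx := context.Background()

    // Flip the new EncryptionConfig field and push the spec back; this is the
    // API-level equivalent of `docker swarm update --autolock`.
    sw, err := cli.SwarmInspect(ctx)
    if err != nil {
        panic(err)
    }
    sw.Spec.EncryptionConfig.AutoLockManagers = true
    if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{}); err != nil {
        panic(err)
    }

    // With autolock on, the daemon can hand back the current unlock key
    // through the new SwarmGetUnlockKey call.
    resp, err := cli.SwarmGetUnlockKey(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println("unlock key:", resp.UnlockKey)
}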
@@ -64,7 +64,7 @@ func maskSecretKeys(inp interface{}) {
     if form, ok := inp.(map[string]interface{}); ok {
     loop0:
         for k, v := range form {
-            for _, m := range []string{"password", "secret", "jointoken", "lockkey"} {
+            for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} {
                 if strings.EqualFold(m, k) {
                     form[k] = "*****"
                     continue loop0
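The hunk above only swaps "lockkey" for "unlockkey" in the debug middleware's list of masked form fields. For reference, a self-contained sketch of that masking behaviour; the real function takes an interface{} and also handles nested request bodies, which this toy version skips, and the sample form values are made up.

package main

import (
    "fmt"
    "strings"
)

// maskSecretKeys mirrors the loop above: any top-level field whose name matches
// one of the sensitive keys (now including "unlockkey") is replaced with
// asterisks before the request body is logged.
func maskSecretKeys(form map[string]interface{}) {
loop0:
    for k := range form {
        for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} {
            if strings.EqualFold(m, k) {
                form[k] = "*****"
                continue loop0
            }
        }
    }
}

func main() {
    form := map[string]interface{}{"UnlockKey": "not-for-the-logs", "ListenAddr": "0.0.0.0:2377"}
    maskSecretKeys(form)
    fmt.Println(form["UnlockKey"]) // prints *****
}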
@@ -12,6 +12,7 @@ type Backend interface {
     Leave(force bool) error
     Inspect() (types.Swarm, error)
     Update(uint64, types.Spec, types.UpdateFlags) error
+    GetUnlockKey() (string, error)
     UnlockSwarm(req types.UnlockRequest) error
     GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
     GetService(string) (types.Service, error)
@@ -28,6 +28,7 @@ func (sr *swarmRouter) initRoutes() {
         router.NewPostRoute("/swarm/join", sr.joinCluster),
         router.NewPostRoute("/swarm/leave", sr.leaveCluster),
         router.NewGetRoute("/swarm", sr.inspectCluster),
+        router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey),
         router.NewPostRoute("/swarm/update", sr.updateCluster),
         router.NewPostRoute("/swarm/unlock", sr.unlockCluster),
         router.NewGetRoute("/services", sr.getServices),
@@ -101,12 +101,24 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
     }

     if err := sr.backend.UnlockSwarm(req); err != nil {
-        logrus.Errorf("Error unlocking swarm: %+v", err)
+        logrus.Errorf("Error unlocking swarm: %v", err)
         return err
     }
     return nil
 }

+func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+    unlockKey, err := sr.backend.GetUnlockKey()
+    if err != nil {
+        logrus.WithError(err).Errorf("Error retrieving swarm unlock key")
+        return err
+    }
+
+    return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{
+        UnlockKey: unlockKey,
+    })
+}
+
 func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
     if err := httputils.ParseForm(r); err != nil {
         return err
@@ -349,3 +349,10 @@ type SecretRequestOption struct {
     GID  string
     Mode os.FileMode
 }
+
+// SwarmUnlockKeyResponse contains the response for Remote API:
+// GET /swarm/unlockkey
+type SwarmUnlockKeyResponse struct {
+    // UnlockKey is the unlock key in ASCII-armored format.
+    UnlockKey string
+}
@@ -33,6 +33,7 @@ type Spec struct {
     Dispatcher       DispatcherConfig `json:",omitempty"`
     CAConfig         CAConfig         `json:",omitempty"`
     TaskDefaults     TaskDefaults     `json:",omitempty"`
+    EncryptionConfig EncryptionConfig `json:",omitempty"`
 }

 // OrchestrationConfig represents orchestration configuration.
@@ -53,6 +54,14 @@ type TaskDefaults struct {
     LogDriver *Driver `json:",omitempty"`
 }

+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+    // AutoLockManagers specifies whether or not managers TLS keys and raft data
+    // should be encrypted at rest in such a way that they must be unlocked
+    // before the manager node starts up again.
+    AutoLockManagers bool
+}
+
 // RaftConfig represents raft configuration.
 type RaftConfig struct {
     // SnapshotInterval is the number of log entries between snapshots.
@@ -125,7 +134,7 @@ type InitRequest struct {
     AdvertiseAddr    string
     ForceNewCluster  bool
     Spec             Spec
-    LockKey          string
+    AutoLockManagers bool
 }

 // JoinRequest is the request used to join a swarm.
@@ -138,7 +147,8 @@ type JoinRequest struct {

 // UnlockRequest is the request used to unlock a swarm.
 type UnlockRequest struct {
-    LockKey string
+    // UnlockKey is the unlock key in ASCII-armored format.
+    UnlockKey string
 }

 // LocalNodeState represents the state of the local node.
@@ -183,4 +193,5 @@ type Peer struct {
 type UpdateFlags struct {
     RotateWorkerToken      bool
     RotateManagerToken     bool
+    RotateManagerUnlockKey bool
 }
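Together, the new Spec.EncryptionConfig and UpdateFlags.RotateManagerUnlockKey fields give API clients what they need to rotate the key. A hedged sketch of that flow follows; the package and helper names are illustrative, and note that the CLI side of rotation is still a FIXME later in this diff.

package swarmexample

import (
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/client"
    "golang.org/x/net/context"
)

// rotateUnlockKey sets the new RotateManagerUnlockKey flag on a SwarmUpdate
// call and then reads the freshly generated key back with SwarmGetUnlockKey.
func rotateUnlockKey(ctx context.Context, cli *client.Client) (string, error) {
    sw, err := cli.SwarmInspect(ctx)
    if err != nil {
        return "", err
    }
    flags := swarm.UpdateFlags{RotateManagerUnlockKey: true}
    if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil {
        return "", err
    }
    resp, err := cli.SwarmGetUnlockKey(ctx)
    if err != nil {
        return "", err
    }
    return resp.UnlockKey, nil
}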
@@ -22,6 +22,7 @@ func NewSwarmCommand(dockerCli *command.DockerCli) *cobra.Command {
         newInitCommand(dockerCli),
         newJoinCommand(dockerCli),
         newJoinTokenCommand(dockerCli),
+        newUnlockKeyCommand(dockerCli),
         newUpdateCommand(dockerCli),
         newLeaveCommand(dockerCli),
         newUnlockCommand(dockerCli),
@@ -1,20 +1,15 @@
 package swarm

 import (
-    "bufio"
-    "crypto/rand"
-    "errors"
     "fmt"
-    "io"
-    "math/big"
-    "strings"

-    "golang.org/x/crypto/ssh/terminal"
     "golang.org/x/net/context"

     "github.com/docker/docker/api/types/swarm"
     "github.com/docker/docker/cli"
     "github.com/docker/docker/cli/command"
+    "github.com/pkg/errors"
     "github.com/spf13/cobra"
     "github.com/spf13/pflag"
 )
@@ -25,7 +20,6 @@ type initOptions struct {
     // Not a NodeAddrOption because it has no default port.
     advertiseAddr   string
     forceNewCluster bool
-    lockKey         bool
 }

 func newInitCommand(dockerCli *command.DockerCli) *cobra.Command {
@@ -45,7 +39,6 @@ func newInitCommand(dockerCli *command.DockerCli) *cobra.Command {
     flags := cmd.Flags()
     flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: <ip|interface>[:port])")
     flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
-    flags.BoolVar(&opts.lockKey, flagLockKey, false, "Encrypt swarm with optionally provided key from stdin")
     flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state")
     addSwarmFlags(flags, &opts.swarmOptions)
     return cmd
@@ -55,31 +48,12 @@ func runInit(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts initOption
     client := dockerCli.Client()
     ctx := context.Background()

-    var lockKey string
-    if opts.lockKey {
-        var err error
-        lockKey, err = readKey(dockerCli.In(), "Please enter key for encrypting swarm(leave empty to generate): ")
-        if err != nil {
-            return err
-        }
-        if len(lockKey) == 0 {
-            randBytes := make([]byte, 16)
-            if _, err := rand.Read(randBytes[:]); err != nil {
-                panic(fmt.Errorf("failed to general random lock key: %v", err))
-            }
-
-            var n big.Int
-            n.SetBytes(randBytes[:])
-            lockKey = n.Text(36)
-        }
-    }
-
     req := swarm.InitRequest{
         ListenAddr:       opts.listenAddr.String(),
         AdvertiseAddr:    opts.advertiseAddr,
         ForceNewCluster:  opts.forceNewCluster,
         Spec:             opts.swarmOptions.ToSpec(flags),
-        LockKey:          lockKey,
+        AutoLockManagers: opts.swarmOptions.autolock,
     }

     nodeID, err := client.SwarmInit(ctx, req)
@@ -92,29 +66,19 @@ func runInit(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts initOption

     fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID)

-    if len(lockKey) > 0 {
-        fmt.Fprintf(dockerCli.Out(), "Swarm is encrypted. When a node is restarted it needs to be unlocked by running command:\n\n echo '%s' | docker swarm unlock\n\n", lockKey)
-    }
-
     if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil {
         return err
     }

     fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n")

+    if req.AutoLockManagers {
+        unlockKeyResp, err := client.SwarmGetUnlockKey(ctx)
+        if err != nil {
+            return errors.Wrap(err, "could not fetch unlock key")
+        }
+        printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey)
+    }
+
     return nil
 }
-
-func readKey(in *command.InStream, prompt string) (string, error) {
-    if in.IsTerminal() {
-        fmt.Print(prompt)
-        dt, err := terminal.ReadPassword(int(in.FD()))
-        fmt.Println()
-        return string(dt), err
-    } else {
-        key, err := bufio.NewReader(in).ReadString('\n')
-        if err == io.EOF {
-            err = nil
-        }
-        return strings.TrimSpace(key), err
-    }
-}
@@ -27,6 +27,7 @@ const (
     flagMaxSnapshots     = "max-snapshots"
     flagSnapshotInterval = "snapshot-interval"
     flagLockKey          = "lock-key"
+    flagAutolock         = "autolock"
 )

 type swarmOptions struct {
@@ -36,6 +37,7 @@ type swarmOptions struct {
     externalCA       ExternalCAOption
     maxSnapshots     uint64
     snapshotInterval uint64
+    autolock         bool
 }

 // NodeAddrOption is a pflag.Value for listening addresses
@@ -174,6 +176,7 @@ func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
     flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints")
     flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain")
     flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots")
+    flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable or disable manager autolocking (requiring an unlock key to start a stopped manager)")
 }

 func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet) {
@@ -195,6 +198,9 @@ func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet)
     if flags.Changed(flagSnapshotInterval) {
         spec.Raft.SnapshotInterval = opts.snapshotInterval
     }
+    if flags.Changed(flagAutolock) {
+        spec.EncryptionConfig.AutoLockManagers = opts.autolock
+    }
 }

 func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec {
@@ -1,9 +1,14 @@
 package swarm

 import (
+    "bufio"
+    "context"
     "fmt"
+    "io"
+    "strings"

     "github.com/spf13/cobra"
+    "golang.org/x/crypto/ssh/terminal"

     "github.com/docker/docker/api/types/swarm"
     "github.com/docker/docker/cli"
@@ -24,7 +29,7 @@ func newUnlockCommand(dockerCli *command.DockerCli) *cobra.Command {
                 return err
             }
             req := swarm.UnlockRequest{
-                LockKey: string(key),
+                UnlockKey: key,
             }

             return client.SwarmUnlock(ctx, req)
@@ -33,3 +38,17 @@ func newUnlockCommand(dockerCli *command.DockerCli) *cobra.Command {

     return cmd
 }
+
+func readKey(in *command.InStream, prompt string) (string, error) {
+    if in.IsTerminal() {
+        fmt.Print(prompt)
+        dt, err := terminal.ReadPassword(int(in.FD()))
+        fmt.Println()
+        return string(dt), err
+    }
+    key, err := bufio.NewReader(in).ReadString('\n')
+    if err == io.EOF {
+        err = nil
+    }
+    return strings.TrimSpace(key), err
+}
cli/command/swarm/unlock_key.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+package swarm
+
+import (
+    "fmt"
+
+    "github.com/spf13/cobra"
+
+    "github.com/docker/docker/cli"
+    "github.com/docker/docker/cli/command"
+    "github.com/pkg/errors"
+    "golang.org/x/net/context"
+)
+
+func newUnlockKeyCommand(dockerCli *command.DockerCli) *cobra.Command {
+    var rotate, quiet bool
+
+    cmd := &cobra.Command{
+        Use:   "unlock-key [OPTIONS]",
+        Short: "Manage the unlock key",
+        Args:  cli.NoArgs,
+        RunE: func(cmd *cobra.Command, args []string) error {
+            client := dockerCli.Client()
+            ctx := context.Background()
+
+            if rotate {
+                // FIXME(aaronl)
+            }
+
+            unlockKeyResp, err := client.SwarmGetUnlockKey(ctx)
+            if err != nil {
+                return errors.Wrap(err, "could not fetch unlock key")
+            }
+
+            if quiet {
+                fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey)
+            } else {
+                printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey)
+            }
+            return nil
+        },
+    }
+
+    flags := cmd.Flags()
+    flags.BoolVar(&rotate, flagRotate, false, "Rotate unlock key")
+    flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token")
+
+    return cmd
+}
+
+func printUnlockCommand(ctx context.Context, dockerCli *command.DockerCli, unlockKey string) {
+    if len(unlockKey) == 0 {
+        return
+    }
+
+    fmt.Fprintf(dockerCli.Out(), "To unlock a swarm manager after it restarts, run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\nPlease remember to store this key in a password manager, since without it you\nwill not be able to restart the manager.\n", unlockKey)
+    return
+}
@@ -8,6 +8,7 @@ import (
     "github.com/docker/docker/api/types/swarm"
     "github.com/docker/docker/cli"
     "github.com/docker/docker/cli/command"
+    "github.com/pkg/errors"
     "github.com/spf13/cobra"
     "github.com/spf13/pflag"
 )
@@ -39,8 +40,12 @@ func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts swarmOpt
         return err
     }

+    prevAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers
+
     opts.mergeSwarmSpec(&swarm.Spec, flags)

+    curAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers
+
     err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags)
     if err != nil {
         return err
@@ -48,5 +53,13 @@ func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts swarmOpt

     fmt.Fprintln(dockerCli.Out(), "Swarm updated.")

+    if curAutoLock && !prevAutoLock {
+        unlockKeyResp, err := client.SwarmGetUnlockKey(ctx)
+        if err != nil {
+            return errors.Wrap(err, "could not fetch unlock key")
+        }
+        printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey)
+    }
+
     return nil
 }
@@ -119,6 +119,7 @@ type ServiceAPIClient interface {
 type SwarmAPIClient interface {
     SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
     SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+    SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
     SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
     SwarmLeave(ctx context.Context, force bool) error
     SwarmInspect(ctx context.Context) (swarm.Swarm, error)
client/swarm_get_unlock_key.go (new file, 21 lines)
@@ -0,0 +1,21 @@
+package client
+
+import (
+    "encoding/json"
+
+    "github.com/docker/docker/api/types"
+    "golang.org/x/net/context"
+)
+
+// SwarmGetUnlockKey retrieves the swarm's unlock key.
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
+    serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
+    if err != nil {
+        return types.SwarmUnlockKeyResponse{}, err
+    }
+
+    var response types.SwarmUnlockKeyResponse
+    err = json.NewDecoder(serverResp.body).Decode(&response)
+    ensureReaderClosed(serverResp)
+    return response, err
+}
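As a counterpart to the new SwarmGetUnlockKey helper above, feeding a saved key back to a locked manager goes through the existing SwarmUnlock call with the reworked UnlockRequest. A minimal sketch, with illustrative package and function names:

package swarmexample

import (
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/client"
    "golang.org/x/net/context"
)

// unlockManager submits a previously stored unlock key so a restarted, locked
// manager can decrypt its TLS key material and raft store and finish starting.
func unlockManager(ctx context.Context, cli *client.Client, key string) error {
    return cli.SwarmUnlock(ctx, swarm.UnlockRequest{UnlockKey: key})
}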
@@ -1,7 +1,6 @@
 package cluster

 import (
-    "crypto/x509"
     "encoding/json"
     "fmt"
     "io/ioutil"
@@ -27,6 +26,7 @@ import (
     "github.com/docker/docker/pkg/signal"
     "github.com/docker/docker/runconfig"
     swarmapi "github.com/docker/swarmkit/api"
+    "github.com/docker/swarmkit/manager/encryption"
     swarmnode "github.com/docker/swarmkit/node"
     "github.com/pkg/errors"
     "golang.org/x/net/context"
@@ -140,6 +140,7 @@ type nodeStartConfig struct {
     forceNewCluster bool
     joinToken       string
     lockKey         []byte
+    autolock        bool
 }

 // New creates a new Cluster instance using provided config.
@@ -172,12 +173,6 @@ func New(config Config) (*Cluster, error) {

     n, err := c.startNewNode(*nodeConfig)
     if err != nil {
-        if errors.Cause(err) == ErrSwarmLocked {
-            logrus.Warnf("swarm component could not be started: %v", err)
-            c.locked = true
-            c.lastNodeConfig = nodeConfig
-            return c, nil
-        }
         return nil, err
     }

@@ -186,6 +181,10 @@ func New(config Config) (*Cluster, error) {
         logrus.Error("swarm component could not be started before timeout was reached")
     case <-n.Ready():
     case <-n.done:
+        if errors.Cause(c.err) == ErrSwarmLocked {
+            return c, nil
+        }
+
         return nil, fmt.Errorf("swarm component could not be started: %v", c.err)
     }
     go c.reconnectOnFailure(n)
@@ -314,15 +313,10 @@ func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) {
         HeartbeatTick:    1,
         ElectionTick:     3,
         UnlockKey:        conf.lockKey,
+        AutoLockManagers: conf.autolock,
     })

     if err != nil {
-        err = detectLockedError(err)
-        if errors.Cause(err) == ErrSwarmLocked {
-            c.locked = true
-            confClone := conf
-            c.lastNodeConfig = &confClone
-        }
         return nil, err
     }
     ctx := context.Background()
@@ -341,13 +335,18 @@ func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) {

     c.config.Backend.SetClusterProvider(c)
     go func() {
-        err := n.Err(ctx)
+        err := detectLockedError(n.Err(ctx))
         if err != nil {
             logrus.Errorf("cluster exited with error: %v", err)
         }
         c.Lock()
         c.node = nil
         c.err = err
+        if errors.Cause(err) == ErrSwarmLocked {
+            c.locked = true
+            confClone := conf
+            c.lastNodeConfig = &confClone
+        }
         c.Unlock()
         close(node.done)
     }()
@@ -443,18 +442,13 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
         localAddr = advertiseIP.String()
     }

-    var key []byte
-    if len(req.LockKey) > 0 {
-        key = []byte(req.LockKey)
-    }
-
     // todo: check current state existing
     n, err := c.startNewNode(nodeStartConfig{
         forceNewCluster: req.ForceNewCluster,
+        autolock:        req.AutoLockManagers,
         LocalAddr:       localAddr,
         ListenAddr:      net.JoinHostPort(listenHost, listenPort),
         AdvertiseAddr:   net.JoinHostPort(advertiseHost, advertisePort),
-        lockKey:         key,
     })
     if err != nil {
         c.Unlock()
@@ -569,8 +563,9 @@ func (c *Cluster) GetUnlockKey() (string, error) {

 // UnlockSwarm provides a key to decrypt data that is encrypted at rest.
 func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
-    if len(req.LockKey) == 0 {
-        return errors.New("unlock key can't be empty")
+    key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
+    if err != nil {
+        return err
     }

     c.Lock()
@@ -580,7 +575,7 @@ func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
     }

     config := *c.lastNodeConfig
-    config.lockKey = []byte(req.LockKey)
+    config.lockKey = key
     n, err := c.startNewNode(config)
     if err != nil {
         c.Unlock()
@@ -779,9 +774,10 @@ func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlag
         ClusterVersion: &swarmapi.Version{
             Index: version,
         },
-        Rotation: swarmapi.JoinTokenRotation{
-            RotateWorkerToken:  flags.RotateWorkerToken,
-            RotateManagerToken: flags.RotateManagerToken,
+        Rotation: swarmapi.KeyRotation{
+            WorkerJoinToken:  flags.RotateWorkerToken,
+            ManagerJoinToken: flags.RotateManagerToken,
+            ManagerUnlockKey: flags.RotateManagerUnlockKey,
         },
     },
 )
@@ -1708,7 +1704,7 @@ func initClusterSpec(node *node, spec types.Spec) error {
 }

 func detectLockedError(err error) error {
-    if errors.Cause(err) == x509.IncorrectPasswordError || errors.Cause(err).Error() == "tls: failed to parse private key" { // todo: better to export typed error
+    if err == swarmnode.ErrInvalidUnlockKey {
         return errors.WithStack(ErrSwarmLocked)
     }
     return err
@@ -26,6 +26,9 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
             HeartbeatTick: int(c.Spec.Raft.HeartbeatTick),
             ElectionTick:  int(c.Spec.Raft.ElectionTick),
         },
+        EncryptionConfig: types.EncryptionConfig{
+            AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers,
+        },
     },
 },
 JoinTokens: types.JoinTokens{
@@ -113,5 +116,7 @@ func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.Clu
         })
     }

+    spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers
+
     return spec, nil
 }