2016-06-14 02:52:49 +00:00
package cluster
2016-11-16 22:17:18 +00:00
//
// ## Swarmkit integration
//
// Cluster - static configurable object for accessing everything swarm related.
// Contains methods for connecting and controlling the cluster. Exists always,
// even if swarm mode is not enabled.
//
// NodeRunner - Manager for starting the swarmkit node. Is present only and
// always if swarm mode is enabled. Implements backoff restart loop in case of
// errors.
//
// NodeState - Information about the current node status including access to
// gRPC clients if a manager is active.
//
// ### Locking
//
// `cluster.controlMutex` - taken for the whole lifecycle of the processes that
// can reconfigure the cluster (init/join/leave, etc.). Ensures that one
// reconfiguration action has fully completed before another can start.
//
// `cluster.mu` - taken when the actual changes in cluster configurations
// happen. Different from `controlMutex` because in some cases we need to
// access current cluster state even if the long-running reconfiguration is
// going on. For example network stack may ask for the current cluster state in
// the middle of the shutdown. Any time current cluster state is asked you
// should take the read lock of `cluster.mu`. If you are writing an API
// responder that returns synchronously, hold `cluster.mu.RLock()` for the
// duration of the whole handler function. That ensures that node will not be
// shut down until the handler has finished.
//
// NodeRunner implements its own internal locks that should not be used outside
// of the struct. Instead, you should just call the `nodeRunner.State()` method
// to get the current state of the cluster (you still need `cluster.mu.RLock()`
// to access the `cluster.nr` reference itself). Most of the changes in
// NodeRunner happen because of an external event (network problem, unexpected
// swarmkit error) and Docker shouldn't take any locks that delay these changes
// from happening.
//
2016-06-14 02:52:49 +00:00
import (
2016-11-09 02:03:47 +00:00
"crypto/x509"
2016-06-14 02:52:49 +00:00
"fmt"
2016-07-01 01:07:35 +00:00
"net"
2016-06-14 02:52:49 +00:00
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
2016-10-22 01:07:55 +00:00
apierrors "github.com/docker/docker/api/errors"
2016-09-06 18:18:12 +00:00
apitypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
2016-08-23 23:50:15 +00:00
"github.com/docker/docker/api/types/network"
2016-09-06 18:18:12 +00:00
types "github.com/docker/docker/api/types/swarm"
2016-06-14 02:52:49 +00:00
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
2016-06-21 21:27:04 +00:00
"github.com/docker/docker/opts"
2016-08-30 21:17:32 +00:00
"github.com/docker/docker/pkg/signal"
2016-06-14 02:52:49 +00:00
swarmapi "github.com/docker/swarmkit/api"
2016-10-28 01:50:49 +00:00
"github.com/docker/swarmkit/manager/encryption"
2016-10-20 18:26:04 +00:00
swarmnode "github.com/docker/swarmkit/node"
2016-10-22 01:07:55 +00:00
"github.com/pkg/errors"
2016-06-14 02:52:49 +00:00
"golang.org/x/net/context"
)
// Filesystem layout, network defaults and timing parameters for the
// swarm subsystem.
const (
	swarmDirName  = "swarm"
	controlSocket = "control.sock"
	stateFile     = "docker-state.json"
	defaultAddr   = "0.0.0.0:2377"

	swarmConnectTimeout = 20 * time.Second
	swarmRequestTimeout = 20 * time.Second

	initialReconnectDelay = 100 * time.Millisecond
	maxReconnectDelay     = 30 * time.Second

	contextPrefix = "com.docker.swarm"
)
2016-12-02 09:14:32 +00:00
// Sentinel errors returned by cluster lifecycle operations.
var (
	// errNoSwarm is returned on leaving a cluster that was never initialized.
	errNoSwarm = errors.New("This node is not part of a swarm")

	// errSwarmExists is returned on initialize or join request for a cluster that has already been activated.
	errSwarmExists = errors.New("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.")

	// errSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
	errSwarmJoinTimeoutReached = errors.New("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.")

	// errSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it.
	errSwarmLocked = errors.New("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.")

	// errSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically.
	errSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.")
)
2016-11-09 02:03:47 +00:00
2016-07-01 01:07:35 +00:00
// NetworkSubnetsProvider exposes functions for retrieving the subnets
// of networks managed by Docker, so they can be filtered.
type NetworkSubnetsProvider interface {
	// V4Subnets returns the IPv4 subnets of managed networks.
	V4Subnets() []net.IPNet
	// V6Subnets returns the IPv6 subnets of managed networks.
	V6Subnets() []net.IPNet
}
// Config provides values for Cluster.
type Config struct {
	// Root is the daemon state directory; swarm state lives in a "swarm"
	// subdirectory created under it (see New).
	Root string
	Name string
	// Backend is the daemon backend used for cluster operations:
	// compatibility checks, container listing/removal and join/leave
	// notifications (see newNodeRunner and Leave).
	Backend executorpkg.Backend
	// NetworkSubnetsProvider reports the subnets of Docker-managed
	// networks so they can be filtered.
	NetworkSubnetsProvider NetworkSubnetsProvider
	// DefaultAdvertiseAddr is the default host/IP or network interface to use
	// if no AdvertiseAddr value is specified.
	DefaultAdvertiseAddr string
	// path to store runtime state, such as the swarm control socket
	RuntimeRoot string
}
2016-06-24 18:52:28 +00:00
// Cluster provides capabilities to participate in a cluster as a worker or a
// manager.
type Cluster struct {
	mu           sync.RWMutex
	controlMutex sync.RWMutex // protect init/join/leave user operations
	nr           *nodeRunner  // current node runner; nil when this daemon is not part of a swarm
	root         string       // swarm state directory (<Config.Root>/swarm)
	runtimeRoot  string       // runtime state directory (control socket etc.)
	config       Config
	configEvent  chan struct{} // todo: make this array and goroutine safe
	attachers    map[string]*attacher
}
// attacher manages the in-memory attachment state of a container
// attachment to a global scope network managed by swarm manager. It
// helps in identifying the attachment ID via the taskID and the
// corresponding attachment configuration obtained from the manager.
type attacher struct {
	// taskID identifies the swarm task backing this attachment.
	taskID string
	// config is the attachment configuration obtained from the manager.
	config *network.NetworkingConfig
	// attachWaitCh delivers the networking config once available;
	// attachCompleteCh and detachWaitCh signal lifecycle transitions
	// (their producers live elsewhere in this package).
	attachWaitCh     chan *network.NetworkingConfig
	attachCompleteCh chan struct{}
	detachWaitCh     chan struct{}
}
2016-06-14 02:52:49 +00:00
// New creates a new Cluster instance using provided config. It restores any
// previously persisted swarm state from disk and, if present, starts the
// swarm node in the background, waiting up to swarmConnectTimeout for it to
// become ready.
func New(config Config) (*Cluster, error) {
	root := filepath.Join(config.Root, swarmDirName)
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	if config.RuntimeRoot == "" {
		config.RuntimeRoot = root
	}
	if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil {
		return nil, err
	}
	c := &Cluster{
		root:        root,
		config:      config,
		configEvent: make(chan struct{}, 10),
		runtimeRoot: config.RuntimeRoot,
		attachers:   make(map[string]*attacher),
	}

	// Restore the previous swarm state, if any. A missing state file just
	// means this daemon was never part of a swarm: not an error.
	nodeConfig, err := loadPersistentState(root)
	if err != nil {
		if os.IsNotExist(err) {
			return c, nil
		}
		return nil, err
	}

	nr, err := c.newNodeRunner(*nodeConfig)
	if err != nil {
		return nil, err
	}
	c.nr = nr

	// Wait (bounded) for the restored node to come up. A locked swarm or
	// expired certificates are not fatal at daemon startup: the daemon
	// starts and the user can unlock/rejoin later.
	select {
	case <-time.After(swarmConnectTimeout):
		logrus.Error("swarm component could not be started before timeout was reached")
	case err := <-nr.Ready():
		if err != nil {
			if errors.Cause(err) == errSwarmLocked {
				return c, nil
			}
			// NOTE(review): this inspects c.nr.err (the runner's latest
			// recorded error) rather than the err received from Ready() —
			// confirm that is intentional.
			if err, ok := errors.Cause(c.nr.err).(x509.CertificateInvalidError); ok && err.Reason == x509.Expired {
				return c, nil
			}
			return nil, errors.Wrap(err, "swarm component could not be started")
		}
	}
	return c, nil
}
2016-11-16 22:17:18 +00:00
// newNodeRunner resolves the node's actual local address, starts a swarmkit
// node runner with the given start config, and notifies the backend that the
// daemon has joined a cluster. Callers are responsible for storing the
// returned runner in c.nr under c.mu.
func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) {
	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
		return nil, err
	}

	actualLocalAddr := conf.LocalAddr
	if actualLocalAddr == "" {
		// If localAddr was not specified, resolve it automatically
		// based on the route to joinAddr. localAddr can only be left
		// empty on "join".
		listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
		if err != nil {
			return nil, fmt.Errorf("could not parse listen address: %v", err)
		}
		listenAddrIP := net.ParseIP(listenHost)
		if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
			// Listen address is a concrete host/IP: use it directly.
			actualLocalAddr = listenHost
		} else {
			if conf.RemoteAddr == "" {
				// Should never happen except using swarms created by
				// old versions that didn't save remoteAddr.
				conf.RemoteAddr = "8.8.8.8:53"
			}
			// Dialing UDP sends no packets; it only asks the kernel which
			// local source address would be used to reach the remote.
			conn, err := net.Dial("udp", conf.RemoteAddr)
			if err != nil {
				return nil, fmt.Errorf("could not find local IP address: %v", err)
			}
			localHostPort := conn.LocalAddr().String()
			actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
			conn.Close()
		}
	}

	nr := &nodeRunner{cluster: c}
	nr.actualLocalAddr = actualLocalAddr

	if err := nr.Start(conf); err != nil {
		return nil, err
	}

	c.config.Backend.DaemonJoinsCluster(c)

	return nr, nil
}
// Init initializes new cluster from user provided request.
func ( c * Cluster ) Init ( req types . InitRequest ) ( string , error ) {
2016-11-16 22:17:18 +00:00
c . controlMutex . Lock ( )
defer c . controlMutex . Unlock ( )
c . mu . Lock ( )
if c . nr != nil {
if req . ForceNewCluster {
if err := c . nr . Stop ( ) ; err != nil {
c . mu . Unlock ( )
return "" , err
}
} else {
c . mu . Unlock ( )
2016-12-02 09:14:32 +00:00
return "" , errSwarmExists
2016-06-14 02:52:49 +00:00
}
}
2016-11-16 22:17:18 +00:00
c . mu . Unlock ( )
2016-06-21 21:27:04 +00:00
if err := validateAndSanitizeInitRequest ( & req ) ; err != nil {
2016-09-14 17:23:11 +00:00
return "" , apierrors . NewBadRequestError ( err )
2016-06-21 21:27:04 +00:00
}
2016-07-01 01:07:35 +00:00
listenHost , listenPort , err := resolveListenAddr ( req . ListenAddr )
if err != nil {
return "" , err
}
advertiseHost , advertisePort , err := c . resolveAdvertiseAddr ( req . AdvertiseAddr , listenPort )
if err != nil {
return "" , err
}
localAddr := listenHost
2016-11-10 21:45:32 +00:00
// If the local address is undetermined, the advertise address
// will be used as local address, if it belongs to this system.
// If the advertise address is not local, then we try to find
// a system address to use as local address. If this fails,
2016-12-21 06:43:28 +00:00
// we give up and ask the user to pass the listen address.
2016-11-10 21:45:32 +00:00
if net . ParseIP ( localAddr ) . IsUnspecified ( ) {
2016-07-01 01:07:35 +00:00
advertiseIP := net . ParseIP ( advertiseHost )
found := false
2016-11-10 21:45:32 +00:00
for _ , systemIP := range listSystemIPs ( ) {
2016-07-01 01:07:35 +00:00
if systemIP . Equal ( advertiseIP ) {
2016-11-10 21:45:32 +00:00
localAddr = advertiseIP . String ( )
2016-07-01 01:07:35 +00:00
found = true
break
}
}
2016-11-10 21:45:32 +00:00
2016-07-01 01:07:35 +00:00
if ! found {
2016-11-10 21:45:32 +00:00
ip , err := c . resolveSystemAddr ( )
if err != nil {
logrus . Warnf ( "Could not find a local address: %v" , err )
return "" , errMustSpecifyListenAddr
}
localAddr = ip . String ( )
2016-07-01 01:07:35 +00:00
}
}
2016-11-16 22:17:18 +00:00
if ! req . ForceNewCluster {
clearPersistentState ( c . root )
}
nr , err := c . newNodeRunner ( nodeStartConfig {
2016-10-21 20:31:45 +00:00
forceNewCluster : req . ForceNewCluster ,
2016-10-28 01:50:49 +00:00
autolock : req . AutoLockManagers ,
2016-10-21 20:31:45 +00:00
LocalAddr : localAddr ,
ListenAddr : net . JoinHostPort ( listenHost , listenPort ) ,
AdvertiseAddr : net . JoinHostPort ( advertiseHost , advertisePort ) ,
2016-12-22 02:13:31 +00:00
availability : req . Availability ,
2016-10-21 20:31:45 +00:00
} )
2016-06-14 02:52:49 +00:00
if err != nil {
return "" , err
}
2016-11-16 22:17:18 +00:00
c . mu . Lock ( )
c . nr = nr
c . mu . Unlock ( )
2016-06-14 02:52:49 +00:00
2016-11-16 22:17:18 +00:00
if err := <- nr . Ready ( ) ; err != nil {
2016-06-20 23:35:33 +00:00
if ! req . ForceNewCluster { // if failure on first attempt don't keep state
2016-11-16 22:17:18 +00:00
if err := clearPersistentState ( c . root ) ; err != nil {
2016-06-20 23:35:33 +00:00
return "" , err
2016-06-14 02:52:49 +00:00
}
}
2016-11-16 22:17:18 +00:00
if err != nil {
c . mu . Lock ( )
c . nr = nil
c . mu . Unlock ( )
}
return "" , err
2016-06-14 02:52:49 +00:00
}
2016-11-16 22:17:18 +00:00
state := nr . State ( )
if state . swarmNode == nil { // should never happen but protect from panic
return "" , errors . New ( "invalid cluster state for spec initialization" )
}
if err := initClusterSpec ( state . swarmNode , req . Spec ) ; err != nil {
return "" , err
}
return state . NodeID ( ) , nil
2016-06-14 02:52:49 +00:00
}
// Join makes current Cluster part of an existing swarm cluster. Unlike Init,
// a join that does not finish within swarmConnectTimeout is not aborted: it
// keeps running in the background and errSwarmJoinTimeoutReached is returned
// so the user can check progress via "docker info".
func (c *Cluster) Join(req types.JoinRequest) error {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()
	c.mu.Lock()
	if c.nr != nil {
		c.mu.Unlock()
		return errSwarmExists
	}
	c.mu.Unlock()

	if err := validateAndSanitizeJoinRequest(&req); err != nil {
		return apierrors.NewBadRequestError(err)
	}

	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
	if err != nil {
		return err
	}

	var advertiseAddr string
	if req.AdvertiseAddr != "" {
		advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
		// For joining, we don't need to provide an advertise address,
		// since the remote side can detect it.
		if err == nil {
			advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
		}
	}

	// Joining always starts from a clean slate: discard any stale local
	// swarm state before starting the runner.
	clearPersistentState(c.root)

	nr, err := c.newNodeRunner(nodeStartConfig{
		RemoteAddr:    req.RemoteAddrs[0],
		ListenAddr:    net.JoinHostPort(listenHost, listenPort),
		AdvertiseAddr: advertiseAddr,
		joinAddr:      req.RemoteAddrs[0],
		joinToken:     req.JoinToken,
		availability:  req.Availability,
	})
	if err != nil {
		return err
	}
	c.mu.Lock()
	c.nr = nr
	c.mu.Unlock()

	select {
	case <-time.After(swarmConnectTimeout):
		// Join continues in the background; see function comment.
		return errSwarmJoinTimeoutReached
	case err := <-nr.Ready():
		if err != nil {
			// Failed to join: drop the runner reference.
			c.mu.Lock()
			c.nr = nil
			c.mu.Unlock()
		}
		return err
	}
}
2016-10-22 01:07:55 +00:00
// GetUnlockKey returns the unlock key for the swarm.
func ( c * Cluster ) GetUnlockKey ( ) ( string , error ) {
2016-11-16 22:17:18 +00:00
c . mu . RLock ( )
defer c . mu . RUnlock ( )
2016-10-22 01:07:55 +00:00
2016-11-16 22:17:18 +00:00
state := c . currentNodeState ( )
if ! state . IsActiveManager ( ) {
return "" , c . errNoManager ( state )
2016-10-22 01:07:55 +00:00
}
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-11-16 22:17:18 +00:00
client := swarmapi . NewCAClient ( state . grpcConn )
2016-10-22 01:07:55 +00:00
r , err := client . GetUnlockKey ( ctx , & swarmapi . GetUnlockKeyRequest { } )
if err != nil {
return "" , err
}
2016-10-28 23:35:49 +00:00
if len ( r . UnlockKey ) == 0 {
// no key
return "" , nil
}
2016-10-22 01:07:55 +00:00
return encryption . HumanReadableKey ( r . UnlockKey ) , nil
}
// UnlockSwarm provides a key to decrypt data that is encrypted at rest. The
// runner is restarted with the same start config plus the parsed key; an
// errSwarmLocked result from the restarted node means the key was wrong.
func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()

	c.mu.RLock()
	state := c.currentNodeState()

	if !state.IsActiveManager() {
		// when manager is not active,
		// unless it is locked, otherwise return error.
		if err := c.errNoManager(state); err != errSwarmLocked {
			c.mu.RUnlock()
			return err
		}
	} else {
		// when manager is active, return an error of "not locked"
		c.mu.RUnlock()
		return errors.New("swarm is not locked")
	}

	// only when swarm is locked, code running reaches here
	nr := c.nr
	c.mu.RUnlock()

	key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
	if err != nil {
		return err
	}

	// Restart the node runner with the same config plus the unlock key.
	config := nr.config
	config.lockKey = key
	if err := nr.Stop(); err != nil {
		return err
	}
	nr, err = c.newNodeRunner(config)
	if err != nil {
		return err
	}

	c.mu.Lock()
	c.nr = nr
	c.mu.Unlock()

	if err := <-nr.Ready(); err != nil {
		// Still locked after supplying the key => the key was invalid.
		if errors.Cause(err) == errSwarmLocked {
			return errors.New("swarm could not be unlocked: invalid key provided")
		}
		return fmt.Errorf("swarm component could not be started: %v", err)
	}
	return nil
}
2016-06-14 02:52:49 +00:00
// Leave shuts down Cluster and removes current state. Without force, leaving
// is refused when it would destroy a locked swarm's state or cost the cluster
// its Raft quorum. After the node stops, this node's swarm-managed containers
// are force-removed and persistent swarm state is cleared.
func (c *Cluster) Leave(force bool) error {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()

	c.mu.Lock()
	nr := c.nr
	if nr == nil {
		c.mu.Unlock()
		return errNoSwarm
	}

	state := c.currentNodeState()

	if errors.Cause(state.err) == errSwarmLocked && !force {
		// leave a locked swarm without --force is not allowed
		c.mu.Unlock()
		return errors.New("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.")
	}

	// Refuse (without --force) when leaving as a manager could lose the
	// cluster's Raft quorum or erase the last copy of its state.
	if state.IsManager() && !force {
		msg := "You are attempting to leave the swarm on a node that is participating as a manager. "
		if state.IsActiveManager() {
			active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID())
			if err == nil {
				if active && removingManagerCausesLossOfQuorum(reachable, unreachable) {
					if isLastManager(reachable, unreachable) {
						msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. "
						c.mu.Unlock()
						return errors.New(msg)
					}
					msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable)
				}
			}
		} else {
			msg += "Doing so may lose the consensus of your cluster. "
		}

		msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message."
		c.mu.Unlock()
		return errors.New(msg)
	}
	// release readers in here
	if err := nr.Stop(); err != nil {
		logrus.Errorf("failed to shut down cluster node: %v", err)
		signal.DumpStacks("")
		c.mu.Unlock()
		return err
	}
	c.nr = nil
	c.mu.Unlock()

	// Remove this node's swarm-managed containers. Failures to remove
	// individual containers are logged but do not abort the leave.
	if nodeID := state.NodeID(); nodeID != "" {
		nodeContainers, err := c.listContainerForNode(nodeID)
		if err != nil {
			return err
		}
		for _, id := range nodeContainers {
			if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
				logrus.Errorf("error removing %v: %v", id, err)
			}
		}
	}

	c.configEvent <- struct{}{}
	// todo: cleanup optional?
	if err := clearPersistentState(c.root); err != nil {
		return err
	}
	c.config.Backend.DaemonLeavesCluster()
	return nil
}
2016-08-20 12:14:26 +00:00
func ( c * Cluster ) listContainerForNode ( nodeID string ) ( [ ] string , error ) {
var ids [ ] string
filters := filters . NewArgs ( )
filters . Add ( "label" , fmt . Sprintf ( "com.docker.swarm.node.id=%s" , nodeID ) )
containers , err := c . config . Backend . Containers ( & apitypes . ContainerListOptions {
2016-11-01 14:01:16 +00:00
Filters : filters ,
2016-08-20 12:14:26 +00:00
} )
if err != nil {
return [ ] string { } , err
}
for _ , c := range containers {
ids = append ( ids , c . ID )
}
return ids , nil
}
2016-07-15 17:58:21 +00:00
func ( c * Cluster ) getRequestContext ( ) ( context . Context , func ( ) ) { // TODO: not needed when requests don't block on qourum lost
return context . WithTimeout ( context . Background ( ) , swarmRequestTimeout )
2016-06-14 02:52:49 +00:00
}
2016-06-24 18:52:28 +00:00
// Inspect retrieves the configuration properties of a managed swarm cluster.
2016-06-14 02:52:49 +00:00
func ( c * Cluster ) Inspect ( ) ( types . Swarm , error ) {
2016-11-16 22:17:18 +00:00
c . mu . RLock ( )
defer c . mu . RUnlock ( )
2016-06-14 02:52:49 +00:00
2016-11-16 22:17:18 +00:00
state := c . currentNodeState ( )
if ! state . IsActiveManager ( ) {
return types . Swarm { } , c . errNoManager ( state )
2016-06-14 02:52:49 +00:00
}
2016-07-15 17:58:21 +00:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-11-16 22:17:18 +00:00
swarm , err := getSwarm ( ctx , state . controlClient )
2016-06-14 02:52:49 +00:00
if err != nil {
return types . Swarm { } , err
}
return convert . SwarmFromGRPC ( * swarm ) , nil
}
// Update updates configuration of a managed swarm cluster. The version
// argument is the cluster version the caller read; swarmkit rejects the
// update if the cluster has changed since (optimistic concurrency).
func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	if !state.IsActiveManager() {
		return c.errNoManager(state)
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	swarm, err := getSwarm(ctx, state.controlClient)
	if err != nil {
		return err
	}

	// In update, client should provide the complete spec of the swarm, including
	// Name and Labels. If a field is specified with 0 or nil, then the default value
	// will be used to swarmkit.
	clusterSpec, err := convert.SwarmSpecToGRPC(spec)
	if err != nil {
		return apierrors.NewBadRequestError(err)
	}

	_, err = state.controlClient.UpdateCluster(
		ctx,
		&swarmapi.UpdateClusterRequest{
			ClusterID: swarm.ID,
			Spec:      &clusterSpec,
			ClusterVersion: &swarmapi.Version{
				Index: version,
			},
			// Rotation optionally rotates join tokens and the manager
			// unlock key as part of the same update.
			Rotation: swarmapi.KeyRotation{
				WorkerJoinToken:  flags.RotateWorkerToken,
				ManagerJoinToken: flags.RotateManagerToken,
				ManagerUnlockKey: flags.RotateManagerUnlockKey,
			},
		},
	)
	return err
}
2016-06-24 18:52:28 +00:00
// IsManager returns true if Cluster is participating as a manager.
2016-06-14 02:52:49 +00:00
func ( c * Cluster ) IsManager ( ) bool {
2016-11-16 22:17:18 +00:00
c . mu . RLock ( )
defer c . mu . RUnlock ( )
return c . currentNodeState ( ) . IsActiveManager ( )
2016-06-14 02:52:49 +00:00
}
2016-06-24 18:52:28 +00:00
// IsAgent returns true if Cluster is participating as a worker/agent.
2016-06-14 02:52:49 +00:00
func ( c * Cluster ) IsAgent ( ) bool {
2016-11-16 22:17:18 +00:00
c . mu . RLock ( )
defer c . mu . RUnlock ( )
return c . currentNodeState ( ) . status == types . LocalNodeStateActive
2016-06-14 02:52:49 +00:00
}
2016-07-01 01:07:35 +00:00
// GetLocalAddress returns the local address.
func ( c * Cluster ) GetLocalAddress ( ) string {
2016-11-16 22:17:18 +00:00
c . mu . RLock ( )
defer c . mu . RUnlock ( )
return c . currentNodeState ( ) . actualLocalAddr
2016-07-01 01:07:35 +00:00
}
2016-09-23 01:43:54 +00:00
// GetListenAddress returns the listen address.
func ( c * Cluster ) GetListenAddress ( ) string {
2016-11-16 22:17:18 +00:00
c . mu . RLock ( )
defer c . mu . RUnlock ( )
if c . nr != nil {
return c . nr . config . ListenAddr
2016-10-21 20:31:45 +00:00
}
return ""
2016-09-23 01:43:54 +00:00
}
2016-07-01 01:07:35 +00:00
// GetAdvertiseAddress returns the remotely reachable address of this node.
func ( c * Cluster ) GetAdvertiseAddress ( ) string {
2016-11-16 22:17:18 +00:00
c . mu . RLock ( )
defer c . mu . RUnlock ( )
if c . nr != nil && c . nr . config . AdvertiseAddr != "" {
advertiseHost , _ , _ := net . SplitHostPort ( c . nr . config . AdvertiseAddr )
2016-07-01 01:07:35 +00:00
return advertiseHost
2016-06-14 02:52:49 +00:00
}
2016-11-16 22:17:18 +00:00
return c . currentNodeState ( ) . actualLocalAddr
2016-06-14 02:52:49 +00:00
}
2016-06-24 18:52:28 +00:00
// GetRemoteAddress returns a known advertise address of a remote manager if
// available.
// todo: change to array/connect with info
func (c *Cluster) GetRemoteAddress() string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	// Delegates to the lock-free helper; the read lock is held here.
	return c.getRemoteAddress()
}
func ( c * Cluster ) getRemoteAddress ( ) string {
2016-11-16 22:17:18 +00:00
state := c . currentNodeState ( )
if state . swarmNode == nil {
2016-06-14 02:52:49 +00:00
return ""
}
2016-11-16 22:17:18 +00:00
nodeID := state . swarmNode . NodeID ( )
for _ , r := range state . swarmNode . Remotes ( ) {
2016-06-14 02:52:49 +00:00
if r . NodeID != nodeID {
return r . Addr
}
}
return ""
}
// ListenClusterEvents returns a channel that receives messages on cluster
// participation changes. The returned channel is the shared configEvent
// channel (buffered with capacity 10, see New).
// todo: make cancelable and accessible to multiple callers
func (c *Cluster) ListenClusterEvents() <-chan struct{} {
	return c.configEvent
}
// Info returns information about the current cluster state. Errors from the
// manager queries are reported through info.Error rather than failing the
// call.
func (c *Cluster) Info() types.Info {
	// NodeAddr is fetched before taking the read lock because
	// GetAdvertiseAddress acquires c.mu.RLock itself.
	info := types.Info{
		NodeAddr: c.GetAdvertiseAddress(),
	}
	c.mu.RLock()
	defer c.mu.RUnlock()

	state := c.currentNodeState()
	info.LocalNodeState = state.status
	if state.err != nil {
		info.Error = state.err.Error()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	if state.IsActiveManager() {
		info.ControlAvailable = true
		// NOTE(review): c.Inspect() takes c.mu.RLock() again while this
		// goroutine already holds it; an RWMutex read lock is not
		// re-entrant when a writer is queued in between — confirm this
		// cannot deadlock under a concurrent c.mu.Lock().
		swarm, err := c.Inspect()
		if err != nil {
			info.Error = err.Error()
		}

		// Strip JoinTokens
		info.Cluster = swarm.ClusterInfo

		if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil {
			info.Error = err.Error()
		} else {
			info.Nodes = len(r.Nodes)
			for _, n := range r.Nodes {
				if n.ManagerStatus != nil {
					info.Managers = info.Managers + 1
				}
			}
		}
	}

	if state.swarmNode != nil {
		for _, r := range state.swarmNode.Remotes() {
			info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
		}
		info.NodeID = state.swarmNode.NodeID()
	}

	return info
}
2016-11-16 22:17:18 +00:00
// currentNodeState should not be called without a read lock
// NOTE(review): this relies on nodeRunner.State() tolerating a nil receiver
// when c.nr is nil (no swarm node running) — confirm in nodeRunner.
func (c *Cluster) currentNodeState() nodeState {
	return c.nr.State()
}
2016-06-23 20:52:41 +00:00
// errNoManager returns error describing why manager commands can't be used.
// Call with read lock.
2016-11-16 22:17:18 +00:00
func ( c * Cluster ) errNoManager ( st nodeState ) error {
if st . swarmNode == nil {
2016-12-02 09:14:32 +00:00
if errors . Cause ( st . err ) == errSwarmLocked {
return errSwarmLocked
2016-10-22 01:07:55 +00:00
}
2016-12-02 09:14:32 +00:00
if st . err == errSwarmCertificatesExpired {
return errSwarmCertificatesExpired
2016-11-09 02:03:47 +00:00
}
2016-12-25 06:37:31 +00:00
return errors . New ( "This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again." )
2016-06-23 20:52:41 +00:00
}
2016-11-16 22:17:18 +00:00
if st . swarmNode . Manager ( ) != nil {
2016-12-25 06:37:31 +00:00
return errors . New ( "This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster." )
2016-06-23 20:52:41 +00:00
}
2016-12-25 06:37:31 +00:00
return errors . New ( "This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager." )
2016-06-23 20:52:41 +00:00
}
2016-06-14 02:52:49 +00:00
// Cleanup stops active swarm node. This is run before daemon shutdown.
func (c *Cluster) Cleanup() {
	c.controlMutex.Lock()
	defer c.controlMutex.Unlock()

	c.mu.Lock()
	node := c.nr
	if node == nil {
		c.mu.Unlock()
		return
	}
	defer c.mu.Unlock()

	// Log loudly if shutting down this manager will cost the cluster its
	// Raft quorum; shutdown proceeds regardless (this is daemon shutdown,
	// not a leave).
	state := c.currentNodeState()
	if state.IsActiveManager() {
		active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID())
		if err == nil {
			singlenode := active && isLastManager(reachable, unreachable)
			if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) {
				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
			}
		}
	}
	if err := node.Stop(); err != nil {
		logrus.Errorf("failed to shut down cluster node: %v", err)
		signal.DumpStacks("")
	}
	c.nr = nil
}
2016-11-16 22:17:18 +00:00
func managerStats ( client swarmapi . ControlClient , currentNodeID string ) ( current bool , reachable int , unreachable int , err error ) {
2016-07-15 17:58:21 +00:00
ctx , cancel := context . WithTimeout ( context . Background ( ) , 5 * time . Second )
defer cancel ( )
2016-11-16 22:17:18 +00:00
nodes , err := client . ListNodes ( ctx , & swarmapi . ListNodesRequest { } )
2016-06-14 02:52:49 +00:00
if err != nil {
return false , 0 , 0 , err
}
for _ , n := range nodes . Nodes {
if n . ManagerStatus != nil {
2016-06-15 00:23:01 +00:00
if n . ManagerStatus . Reachability == swarmapi . RaftMemberStatus_REACHABLE {
2016-06-14 02:52:49 +00:00
reachable ++
2016-11-16 22:17:18 +00:00
if n . ID == currentNodeID {
2016-06-14 02:52:49 +00:00
current = true
}
}
2016-06-15 00:23:01 +00:00
if n . ManagerStatus . Reachability == swarmapi . RaftMemberStatus_UNREACHABLE {
2016-06-14 02:52:49 +00:00
unreachable ++
}
}
}
return
}
2016-06-21 21:27:04 +00:00
func validateAndSanitizeInitRequest ( req * types . InitRequest ) error {
var err error
req . ListenAddr , err = validateAddr ( req . ListenAddr )
if err != nil {
return fmt . Errorf ( "invalid ListenAddr %q: %v" , req . ListenAddr , err )
}
2016-11-29 01:19:29 +00:00
if req . Spec . Annotations . Name == "" {
req . Spec . Annotations . Name = "default"
} else if req . Spec . Annotations . Name != "default" {
return errors . New ( ` swarm spec must be named "default" ` )
}
2016-06-21 21:27:04 +00:00
return nil
}
func validateAndSanitizeJoinRequest ( req * types . JoinRequest ) error {
var err error
req . ListenAddr , err = validateAddr ( req . ListenAddr )
if err != nil {
return fmt . Errorf ( "invalid ListenAddr %q: %v" , req . ListenAddr , err )
}
if len ( req . RemoteAddrs ) == 0 {
2016-12-25 06:37:31 +00:00
return errors . New ( "at least 1 RemoteAddr is required to join" )
2016-06-21 21:27:04 +00:00
}
for i := range req . RemoteAddrs {
req . RemoteAddrs [ i ] , err = validateAddr ( req . RemoteAddrs [ i ] )
if err != nil {
return fmt . Errorf ( "invalid remoteAddr %q: %v" , req . RemoteAddrs [ i ] , err )
}
}
return nil
}
// validateAddr normalizes a swarm address: an empty address is an error;
// otherwise it is parsed as a TCP address (defaults filled in from
// defaultAddr) and returned with the "tcp://" scheme prefix stripped.
func validateAddr(addr string) (string, error) {
	if addr == "" {
		return addr, errors.New("invalid empty address")
	}
	newaddr, err := opts.ParseTCPAddr(addr, defaultAddr)
	if err != nil {
		// NOTE(review): a parse failure is swallowed here and the
		// original address is returned with a nil error — presumably
		// deliberate leniency so non-TCP-shaped addresses pass through,
		// but confirm this isn't meant to be `return addr, err`.
		return addr, nil
	}
	return strings.TrimPrefix(newaddr, "tcp://"), nil
}
2016-11-16 22:17:18 +00:00
func initClusterSpec ( node * swarmnode . Node , spec types . Spec ) error {
2016-06-14 02:52:49 +00:00
ctx , _ := context . WithTimeout ( context . Background ( ) , 5 * time . Second )
for conn := range node . ListenControlSocket ( ctx ) {
if ctx . Err ( ) != nil {
return ctx . Err ( )
}
if conn != nil {
client := swarmapi . NewControlClient ( conn )
var cluster * swarmapi . Cluster
for i := 0 ; ; i ++ {
lcr , err := client . ListClusters ( ctx , & swarmapi . ListClustersRequest { } )
if err != nil {
return fmt . Errorf ( "error on listing clusters: %v" , err )
}
if len ( lcr . Clusters ) == 0 {
if i < 10 {
time . Sleep ( 200 * time . Millisecond )
continue
}
2016-12-25 06:37:31 +00:00
return errors . New ( "empty list of clusters was returned" )
2016-06-14 02:52:49 +00:00
}
cluster = lcr . Clusters [ 0 ]
break
}
2016-08-26 04:08:53 +00:00
// In init, we take the initial default values from swarmkit, and merge
// any non nil or 0 value from spec to GRPC spec. This will leave the
// default value alone.
// Note that this is different from Update(), as in Update() we expect
// user to specify the complete spec of the cluster (as they already know
// the existing one and knows which field to update)
clusterSpec , err := convert . MergeSwarmSpecToGRPC ( spec , cluster . Spec )
2016-06-21 21:27:04 +00:00
if err != nil {
2016-06-14 02:52:49 +00:00
return fmt . Errorf ( "error updating cluster settings: %v" , err )
}
2016-06-21 21:27:04 +00:00
_ , err = client . UpdateCluster ( ctx , & swarmapi . UpdateClusterRequest {
2016-06-14 02:52:49 +00:00
ClusterID : cluster . ID ,
ClusterVersion : & cluster . Meta . Version ,
2016-08-26 04:08:53 +00:00
Spec : & clusterSpec ,
2016-06-14 02:52:49 +00:00
} )
if err != nil {
return fmt . Errorf ( "error updating cluster settings: %v" , err )
}
return nil
}
}
return ctx . Err ( )
}
2016-10-22 01:07:55 +00:00
func detectLockedError ( err error ) error {
2016-10-28 01:50:49 +00:00
if err == swarmnode . ErrInvalidUnlockKey {
2016-12-02 09:14:32 +00:00
return errors . WithStack ( errSwarmLocked )
2016-10-22 01:07:55 +00:00
}
return err
}