package cluster

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	apierrors "github.com/docker/docker/api/errors"
	apitypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
	types "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/docker/daemon/cluster/executor/container"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/runconfig"
	swarmapi "github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/encryption"
	swarmnode "github.com/docker/swarmkit/node"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

const swarmDirName = "swarm"
const controlSocket = "control.sock"
const swarmConnectTimeout = 20 * time.Second
const swarmRequestTimeout = 20 * time.Second
const stateFile = "docker-state.json"
const defaultAddr = "0.0.0.0:2377"

const (
	initialReconnectDelay = 100 * time.Millisecond
	maxReconnectDelay     = 30 * time.Second
)

// ErrNoSwarm is returned on leaving a cluster that was never initialized
var ErrNoSwarm = fmt.Errorf("This node is not part of a swarm")

// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.")

// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet.
var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.")

// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.")

// ErrSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it.
var ErrSwarmLocked = fmt.Errorf("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.")

// NetworkSubnetsProvider exposes functions for retrieving the subnets
// of networks managed by Docker, so they can be filtered.
type NetworkSubnetsProvider interface {
	V4Subnets() []net.IPNet
	V6Subnets() []net.IPNet
}

// Config provides values for Cluster.
type Config struct {
	Root                   string
	Name                   string
	Backend                executorpkg.Backend
	NetworkSubnetsProvider NetworkSubnetsProvider

	// DefaultAdvertiseAddr is the default host/IP or network interface to use
	// if no AdvertiseAddr value is specified.
	DefaultAdvertiseAddr string

	// path to store runtime state, such as the swarm control socket
	RuntimeRoot string
}

// Cluster provides capabilities to participate in a cluster as a worker or a
// manager.
type Cluster struct {
	sync.RWMutex
	*node
	root            string
	runtimeRoot     string
	config          Config
	configEvent     chan struct{} // todo: make this array and goroutine safe
	actualLocalAddr string        // after resolution, not persisted
	stop            bool
	err             error
	cancelDelay     func()
	attachers       map[string]*attacher
	locked          bool
	lastNodeConfig  *nodeStartConfig
}

// attacher manages the in-memory attachment state of a container
// attachment to a global scope network managed by swarm manager. It
// helps in identifying the attachment ID via the taskID and the
// corresponding attachment configuration obtained from the manager.
type attacher struct {
	taskID           string
	config           *network.NetworkingConfig
	attachWaitCh     chan *network.NetworkingConfig
	attachCompleteCh chan struct{}
	detachWaitCh     chan struct{}
}

type node struct {
	*swarmnode.Node
	done           chan struct{}
	ready          bool
	conn           *grpc.ClientConn
	client         swarmapi.ControlClient
	reconnectDelay time.Duration
	config         nodeStartConfig
}

// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
	// LocalAddr is this machine's local IP or hostname, if specified.
	LocalAddr string
	// RemoteAddr is the address that was given to "swarm join". It is used
	// to find LocalAddr if necessary.
	RemoteAddr string
	// ListenAddr is the address we bind to, including a port.
	ListenAddr string
	// AdvertiseAddr is the address other nodes should connect to,
	// including a port.
	AdvertiseAddr string

	joinAddr        string
	forceNewCluster bool
	joinToken       string
	lockKey         []byte
	autolock        bool
}

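// NOTE (illustrative, not part of the upstream file): only the exported fields
// of nodeStartConfig are serialized, so the state that saveState writes to
// <root>/docker-state.json looks roughly like the JSON below; the values are
// hypothetical.
//
//	{
//		"LocalAddr":     "192.168.1.10",
//		"RemoteAddr":    "192.168.1.20:2377",
//		"ListenAddr":    "0.0.0.0:2377",
//		"AdvertiseAddr": "192.168.1.10:2377"
//	}
//
// joinAddr, joinToken, lockKey and autolock never reach disk; in particular the
// unlock key of an auto-locked manager has to be supplied again through
// UnlockSwarm after a daemon restart.
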
// New creates a new Cluster instance using provided config.
func New(config Config) (*Cluster, error) {
	root := filepath.Join(config.Root, swarmDirName)
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	if config.RuntimeRoot == "" {
		config.RuntimeRoot = root
	}
	if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil {
		return nil, err
	}
	c := &Cluster{
		root:        root,
		config:      config,
		configEvent: make(chan struct{}, 10),
		runtimeRoot: config.RuntimeRoot,
		attachers:   make(map[string]*attacher),
	}

	nodeConfig, err := c.loadState()
	if err != nil {
		if os.IsNotExist(err) {
			return c, nil
		}
		return nil, err
	}

	n, err := c.startNewNode(*nodeConfig)
	if err != nil {
		return nil, err
	}

	select {
	case <-time.After(swarmConnectTimeout):
		logrus.Error("swarm component could not be started before timeout was reached")
	case <-n.Ready():
	case <-n.done:
		if errors.Cause(c.err) == ErrSwarmLocked {
			return c, nil
		}
		return nil, fmt.Errorf("swarm component could not be started: %v", c.err)
	}
	go c.reconnectOnFailure(n)
	return c, nil
}

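// Example (hedged sketch, not part of the original file): how a caller such as
// the daemon might wire up the cluster component. The Backend value and the
// paths below are assumptions for illustration only.
//
//	c, err := cluster.New(cluster.Config{
//		Root:        "/var/lib/docker", // swarm state is kept under <Root>/swarm
//		Name:        "node-hostname",
//		Backend:     daemonBackend, // implements executorpkg.Backend
//		RuntimeRoot: "/var/run/docker", // control.sock is created here on Linux
//	})
//	if err != nil {
//		logrus.Fatalf("error creating cluster component: %v", err)
//	}
//	defer c.Cleanup()
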
func (c *Cluster) loadState() (*nodeStartConfig, error) {
	dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile))
	if err != nil {
		return nil, err
	}
	// missing certificate means no actual state to restore from
	if _, err := os.Stat(filepath.Join(c.root, "certificates/swarm-node.crt")); err != nil {
		if os.IsNotExist(err) {
			c.clearState()
		}
		return nil, err
	}
	var st nodeStartConfig
	if err := json.Unmarshal(dt, &st); err != nil {
		return nil, err
	}
	return &st, nil
}

func (c *Cluster) saveState(config nodeStartConfig) error {
	dt, err := json.Marshal(config)
	if err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600)
}

func (c *Cluster) reconnectOnFailure(n *node) {
	for {
		<-n.done

		c.Lock()
		if c.stop || c.node != nil {
			c.Unlock()
			return
		}
		n.reconnectDelay *= 2
		if n.reconnectDelay > maxReconnectDelay {
			n.reconnectDelay = maxReconnectDelay
		}
		logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
		delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
		c.cancelDelay = cancel
		c.Unlock()
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			return
		}
		c.Lock()
		if c.node != nil {
			c.Unlock()
			return
		}
		var err error
		config := n.config
		config.RemoteAddr = c.getRemoteAddress()
		config.joinAddr = config.RemoteAddr
		n, err = c.startNewNode(config)
		if err != nil {
			c.err = err
			close(n.done)
		}
		c.Unlock()
	}
}

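// NOTE (illustrative, not part of the upstream file): the reconnect delay is
// doubled every time the node dies, starting from initialReconnectDelay (so the
// first wait is 200ms) and capped at maxReconnectDelay:
//
//	200ms -> 400ms -> 800ms -> ... -> 25.6s -> 30s -> 30s -> ...
//
// c.cancelDelay, stored just before the wait, cancels the pending delay; since
// the context error is then context.Canceled rather than DeadlineExceeded, the
// loop returns instead of retrying, which is how stopNode shuts it down.
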
func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) {
	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
		return nil, err
	}

	actualLocalAddr := conf.LocalAddr
	if actualLocalAddr == "" {
		// If localAddr was not specified, resolve it automatically
		// based on the route to joinAddr. localAddr can only be left
		// empty on "join".
		listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
		if err != nil {
			return nil, fmt.Errorf("could not parse listen address: %v", err)
		}

		listenAddrIP := net.ParseIP(listenHost)
		if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
			actualLocalAddr = listenHost
		} else {
			if conf.RemoteAddr == "" {
				// Should never happen except using swarms created by
				// old versions that didn't save remoteAddr.
				conf.RemoteAddr = "8.8.8.8:53"
			}
			conn, err := net.Dial("udp", conf.RemoteAddr)
			if err != nil {
				return nil, fmt.Errorf("could not find local IP address: %v", err)
			}
			localHostPort := conn.LocalAddr().String()
			actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
			conn.Close()
		}
	}

	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(c.runtimeRoot, controlSocket)
	}

	c.node = nil
	c.cancelDelay = nil
	c.stop = false
	n, err := swarmnode.New(&swarmnode.Config{
		Hostname:           c.config.Name,
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           c.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(c.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
	})
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	if err := n.Start(ctx); err != nil {
		return nil, err
	}
	node := &node{
		Node:           n,
		done:           make(chan struct{}),
		reconnectDelay: initialReconnectDelay,
		config:         conf,
	}
	c.node = node
	c.actualLocalAddr = actualLocalAddr // not saved
	c.saveState(conf)
	c.config.Backend.SetClusterProvider(c)
	go func() {
		err := detectLockedError(n.Err(ctx))
		if err != nil {
			logrus.Errorf("cluster exited with error: %v", err)
		}
		c.Lock()
		c.node = nil
		c.err = err
		if errors.Cause(err) == ErrSwarmLocked {
			c.locked = true
			confClone := conf
			c.lastNodeConfig = &confClone
		}
		c.Unlock()
		close(node.done)
	}()

	go func() {
		select {
		case <-n.Ready():
			c.Lock()
			node.ready = true
			c.err = nil
			c.Unlock()
		case <-ctx.Done():
		}
		c.configEvent <- struct{}{}
	}()

	go func() {
		for conn := range n.ListenControlSocket(ctx) {
			c.Lock()
			if node.conn != conn {
				if conn == nil {
					node.client = nil
				} else {
					node.client = swarmapi.NewControlClient(conn)
				}
			}
			node.conn = conn
			c.Unlock()
			c.configEvent <- struct{}{}
		}
	}()

	return node, nil
}

// Init initializes new cluster from user provided request.
func (c *Cluster) Init(req types.InitRequest) (string, error) {
	c.Lock()
	if node := c.node; node != nil || c.locked {
		if !req.ForceNewCluster {
			c.Unlock()
			return "", ErrSwarmExists
		}
		if err := c.stopNode(); err != nil {
			c.Unlock()
			return "", err
		}
	}

	if err := validateAndSanitizeInitRequest(&req); err != nil {
		c.Unlock()
		return "", err
	}

	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
	if err != nil {
		c.Unlock()
		return "", err
	}

	advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
	if err != nil {
		c.Unlock()
		return "", err
	}

	localAddr := listenHost

	// If the advertise address is not one of the system's
	// addresses, we also require a listen address.
	listenAddrIP := net.ParseIP(listenHost)
	if listenAddrIP != nil && listenAddrIP.IsUnspecified() {
		advertiseIP := net.ParseIP(advertiseHost)
		if advertiseIP == nil {
			// not an IP
			c.Unlock()
			return "", errMustSpecifyListenAddr
		}

		systemIPs := listSystemIPs()

		found := false
		for _, systemIP := range systemIPs {
			if systemIP.Equal(advertiseIP) {
				found = true
				break
			}
		}
		if !found {
			c.Unlock()
			return "", errMustSpecifyListenAddr
		}
		localAddr = advertiseIP.String()
	}

	// todo: check current state existing
	n, err := c.startNewNode(nodeStartConfig{
		forceNewCluster: req.ForceNewCluster,
		autolock:        req.AutoLockManagers,
		LocalAddr:       localAddr,
		ListenAddr:      net.JoinHostPort(listenHost, listenPort),
		AdvertiseAddr:   net.JoinHostPort(advertiseHost, advertisePort),
	})
	if err != nil {
		c.Unlock()
		return "", err
	}
	c.Unlock()

	select {
	case <-n.Ready():
		if err := initClusterSpec(n, req.Spec); err != nil {
			return "", err
		}
		go c.reconnectOnFailure(n)
		return n.NodeID(), nil
	case <-n.done:
		c.RLock()
		defer c.RUnlock()
		if !req.ForceNewCluster { // if failure on first attempt don't keep state
			if err := c.clearState(); err != nil {
				return "", err
			}
		}
		return "", c.err
	}
}

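// Example (hedged sketch, not part of the original file): initializing a
// single-node swarm boils down to a call like the one below; the addresses are
// illustrative.
//
//	nodeID, err := c.Init(types.InitRequest{
//		ListenAddr:    "0.0.0.0:2377",
//		AdvertiseAddr: "192.168.1.10:2377",
//		Spec:          types.Spec{},
//	})
//	if err != nil {
//		return err
//	}
//	logrus.Infof("swarm initialized: current node (%s) is now a manager", nodeID)
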
// Join makes current Cluster part of an existing swarm cluster.
func (c *Cluster) Join(req types.JoinRequest) error {
	c.Lock()
	if node := c.node; node != nil || c.locked {
		c.Unlock()
		return ErrSwarmExists
	}
	if err := validateAndSanitizeJoinRequest(&req); err != nil {
		c.Unlock()
		return err
	}

	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
	if err != nil {
		c.Unlock()
		return err
	}

	var advertiseAddr string
	if req.AdvertiseAddr != "" {
		advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
		// For joining, we don't need to provide an advertise address,
		// since the remote side can detect it.
		if err == nil {
			advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
		}
	}

	// todo: check current state existing
	n, err := c.startNewNode(nodeStartConfig{
		RemoteAddr:    req.RemoteAddrs[0],
		ListenAddr:    net.JoinHostPort(listenHost, listenPort),
		AdvertiseAddr: advertiseAddr,
		joinAddr:      req.RemoteAddrs[0],
		joinToken:     req.JoinToken,
	})
	if err != nil {
		c.Unlock()
		return err
	}
	c.Unlock()

	select {
	case <-time.After(swarmConnectTimeout):
		// attempt to connect will continue in background, but reconnect only if it didn't fail
		go func() {
			select {
			case <-n.Ready():
				c.reconnectOnFailure(n)
			case <-n.done:
				logrus.Errorf("failed to join the cluster: %+v", c.err)
			}
		}()
		return ErrSwarmJoinTimeoutReached
	case <-n.Ready():
		go c.reconnectOnFailure(n)
		return nil
	case <-n.done:
		c.RLock()
		defer c.RUnlock()
		return c.err
	}
}

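// Example (hedged sketch, not part of the original file): joining an existing
// swarm as a worker; the remote address and token below are hypothetical.
//
//	err := c.Join(types.JoinRequest{
//		ListenAddr:  "0.0.0.0:2377",
//		RemoteAddrs: []string{"192.168.1.10:2377"},
//		JoinToken:   "SWMTKN-1-<worker-token>",
//	})
//	if err == ErrSwarmJoinTimeoutReached {
//		// the join keeps running in the background; "docker info" shows progress
//	}
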
// GetUnlockKey returns the unlock key for the swarm.
func (c *Cluster) GetUnlockKey() (string, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return "", c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	client := swarmapi.NewCAClient(c.conn)

	r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
	if err != nil {
		return "", err
	}

	if len(r.UnlockKey) == 0 {
		// no key
		return "", nil
	}

	return encryption.HumanReadableKey(r.UnlockKey), nil
}

// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
	key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
	if err != nil {
		return err
	}

	c.Lock()
	if c.node != nil || !c.locked {
		c.Unlock()
		return errors.New("swarm is not locked")
	}

	config := *c.lastNodeConfig
	config.lockKey = key
	n, err := c.startNewNode(config)
	if err != nil {
		c.Unlock()
		return err
	}
	c.Unlock()
	select {
	case <-n.Ready():
	case <-n.done:
		if errors.Cause(c.err) == ErrSwarmLocked {
			return errors.New("swarm could not be unlocked: invalid key provided")
		}
		return fmt.Errorf("swarm component could not be started: %v", c.err)
	}
	go c.reconnectOnFailure(n)
	return nil
}

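// Example (hedged sketch, not part of the original file): the unlock round
// trip. GetUnlockKey is called on a running manager to display the key, and
// UnlockSwarm feeds it back after a restart of an auto-locked manager.
//
//	key, err := c.GetUnlockKey() // e.g. "SWMKEY-1-..." (illustrative)
//	if err != nil {
//		return err
//	}
//	// ... daemon restarts; Info() now reports LocalNodeStateLocked ...
//	if err := c.UnlockSwarm(types.UnlockRequest{UnlockKey: key}); err != nil {
//		return err
//	}
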
// stopNode is a helper that stops the active c.node and waits until it has
// shut down. Call while keeping the cluster lock.
func (c *Cluster) stopNode() error {
	if c.node == nil {
		return nil
	}
	c.stop = true
	if c.cancelDelay != nil {
		c.cancelDelay()
		c.cancelDelay = nil
	}
	node := c.node
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	// TODO: can't hold lock on stop because it calls back to network
	c.Unlock()
	defer c.Lock()
	if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
		return err
	}
	<-node.done
	return nil
}

func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool {
	return reachable-2 <= unreachable
}

func isLastManager(reachable, unreachable int) bool {
	return reachable == 1 && unreachable == 0
}

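// NOTE (illustrative, not part of the upstream file): removal is flagged as
// quorum-threatening whenever the reachable managers do not outnumber the
// unreachable ones by at least three. A few sample values:
//
//	removingManagerCausesLossOfQuorum(3, 0) == false // three healthy managers, one may leave
//	removingManagerCausesLossOfQuorum(2, 0) == true  // leaving would drop to a single manager
//	removingManagerCausesLossOfQuorum(3, 1) == true  // one manager is already unreachable
//	isLastManager(1, 0) == true                      // leaving erases the swarm state entirely
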
// Leave shuts down Cluster and removes current state.
func (c *Cluster) Leave(force bool) error {
	c.Lock()
	node := c.node
	if node == nil {
		if c.locked {
			c.locked = false
			c.lastNodeConfig = nil
			c.Unlock()
		} else {
			c.Unlock()
			return ErrNoSwarm
		}
	} else {
		if node.Manager() != nil && !force {
			msg := "You are attempting to leave the swarm on a node that is participating as a manager. "
			if c.isActiveManager() {
				active, reachable, unreachable, err := c.managerStats()
				if err == nil {
					if active && removingManagerCausesLossOfQuorum(reachable, unreachable) {
						if isLastManager(reachable, unreachable) {
							msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. "
							c.Unlock()
							return fmt.Errorf(msg)
						}
						msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable)
					}
				}
			} else {
				msg += "Doing so may lose the consensus of your cluster. "
			}

			msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message."
			c.Unlock()
			return fmt.Errorf(msg)
		}
		if err := c.stopNode(); err != nil {
			logrus.Errorf("failed to shut down cluster node: %v", err)
			signal.DumpStacks("")
			c.Unlock()
			return err
		}
		c.Unlock()
		if nodeID := node.NodeID(); nodeID != "" {
			nodeContainers, err := c.listContainerForNode(nodeID)
			if err != nil {
				return err
			}
			for _, id := range nodeContainers {
				if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
					logrus.Errorf("error removing %v: %v", id, err)
				}
			}
		}
	}
	c.configEvent <- struct{}{}
	// todo: cleanup optional?
	if err := c.clearState(); err != nil {
		return err
	}

	return nil
}

func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) {
	var ids []string
	filters := filters.NewArgs()
	filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID))
	containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{
		Filters: filters,
	})
	if err != nil {
		return []string{}, err
	}
	for _, c := range containers {
		ids = append(ids, c.ID)
	}
	return ids, nil
}

func (c *Cluster) clearState() error {
	// todo: backup this data instead of removing?
	if err := os.RemoveAll(c.root); err != nil {
		return err
	}
	if err := os.MkdirAll(c.root, 0700); err != nil {
		return err
	}
	c.config.Backend.SetClusterProvider(nil)
	return nil
}

func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on quorum lost
	return context.WithTimeout(context.Background(), swarmRequestTimeout)
}

// Inspect retrieves the configuration properties of a managed swarm cluster.
func (c *Cluster) Inspect() (types.Swarm, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return types.Swarm{}, c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	swarm, err := getSwarm(ctx, c.client)
	if err != nil {
		return types.Swarm{}, err
	}

	return convert.SwarmFromGRPC(*swarm), nil
}

// Update updates configuration of a managed swarm cluster.
func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	swarm, err := getSwarm(ctx, c.client)
	if err != nil {
		return err
	}
	// In update, the client should provide the complete spec of the swarm,
	// including Name and Labels. If a field is specified as 0 or nil, then
	// swarmkit's default value will be used.
	clusterSpec, err := convert.SwarmSpecToGRPC(spec)
	if err != nil {
		return err
	}

	_, err = c.client.UpdateCluster(
		ctx,
		&swarmapi.UpdateClusterRequest{
			ClusterID: swarm.ID,
			Spec:      &clusterSpec,
			ClusterVersion: &swarmapi.Version{
				Index: version,
			},
			Rotation: swarmapi.KeyRotation{
				WorkerJoinToken:  flags.RotateWorkerToken,
				ManagerJoinToken: flags.RotateManagerToken,
				ManagerUnlockKey: flags.RotateManagerUnlockKey,
			},
		},
	)
	return err
}

// IsManager returns true if Cluster is participating as a manager.
func (c *Cluster) IsManager() bool {
	c.RLock()
	defer c.RUnlock()
	return c.isActiveManager()
}

// IsAgent returns true if Cluster is participating as a worker/agent.
func (c *Cluster) IsAgent() bool {
	c.RLock()
	defer c.RUnlock()
	return c.node != nil && c.ready
}

// GetLocalAddress returns the local address.
func (c *Cluster) GetLocalAddress() string {
	c.RLock()
	defer c.RUnlock()
	return c.actualLocalAddr
}

// GetListenAddress returns the listen address.
func (c *Cluster) GetListenAddress() string {
	c.RLock()
	defer c.RUnlock()
	if c.node != nil {
		return c.node.config.ListenAddr
	}
	return ""
}

// GetAdvertiseAddress returns the remotely reachable address of this node.
func (c *Cluster) GetAdvertiseAddress() string {
	c.RLock()
	defer c.RUnlock()
	if c.node != nil && c.node.config.AdvertiseAddr != "" {
		advertiseHost, _, _ := net.SplitHostPort(c.node.config.AdvertiseAddr)
		return advertiseHost
	}
	return c.actualLocalAddr
}

// GetRemoteAddress returns a known advertise address of a remote manager if
// available.
// todo: change to array/connect with info
func (c *Cluster) GetRemoteAddress() string {
	c.RLock()
	defer c.RUnlock()
	return c.getRemoteAddress()
}

func (c *Cluster) getRemoteAddress() string {
	if c.node == nil {
		return ""
	}
	nodeID := c.node.NodeID()
	for _, r := range c.node.Remotes() {
		if r.NodeID != nodeID {
			return r.Addr
		}
	}
	return ""
}

// ListenClusterEvents returns a channel that receives messages on cluster
// participation changes.
// todo: make cancelable and accessible to multiple callers
func (c *Cluster) ListenClusterEvents() <-chan struct{} {
	return c.configEvent
}

// Info returns information about the current cluster state.
func (c *Cluster) Info() types.Info {
	info := types.Info{
		NodeAddr: c.GetAdvertiseAddress(),
	}

	c.RLock()
	defer c.RUnlock()

	if c.node == nil {
		info.LocalNodeState = types.LocalNodeStateInactive
		if c.cancelDelay != nil {
			info.LocalNodeState = types.LocalNodeStateError
		}
		if c.locked {
			info.LocalNodeState = types.LocalNodeStateLocked
		}
	} else {
		info.LocalNodeState = types.LocalNodeStatePending
		if c.ready {
			info.LocalNodeState = types.LocalNodeStateActive
		} else if c.locked {
			info.LocalNodeState = types.LocalNodeStateLocked
		}
	}
	if c.err != nil {
		info.Error = c.err.Error()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	if c.isActiveManager() {
		info.ControlAvailable = true
		swarm, err := c.Inspect()
		if err != nil {
			info.Error = err.Error()
		}

		// Strip JoinTokens
		info.Cluster = swarm.ClusterInfo

		if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil {
			info.Nodes = len(r.Nodes)
			for _, n := range r.Nodes {
				if n.ManagerStatus != nil {
					info.Managers = info.Managers + 1
				}
			}
		}
	}

	if c.node != nil {
		for _, r := range c.node.Remotes() {
			info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
		}
		info.NodeID = c.node.NodeID()
	}

	return info
}

// isActiveManager should not be called without a read lock
func (c *Cluster) isActiveManager() bool {
	return c.node != nil && c.conn != nil
}

// errNoManager returns error describing why manager commands can't be used.
// Call with read lock.
func (c *Cluster) errNoManager() error {
	if c.node == nil {
		if c.locked {
			return ErrSwarmLocked
		}
		return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
	}
	if c.node.Manager() != nil {
		return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
	}
	return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
}

// GetServices returns all services of a managed swarm cluster.
func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return nil, c.errNoManager()
	}

	filters, err := newListServicesFilters(options.Filters)
	if err != nil {
		return nil, err
	}
	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := c.client.ListServices(
		ctx,
		&swarmapi.ListServicesRequest{Filters: filters})
	if err != nil {
		return nil, err
	}

	services := []types.Service{}

	for _, service := range r.Services {
		services = append(services, convert.ServiceFromGRPC(*service))
	}

	return services, nil
}

// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (string, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return "", c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	err := c.populateNetworkID(ctx, c.client, &s)
	if err != nil {
		return "", err
	}

	serviceSpec, err := convert.ServiceSpecToGRPC(s)
	if err != nil {
		return "", err
	}

	if encodedAuth != "" {
		ctnr := serviceSpec.Task.GetContainer()
		if ctnr == nil {
			return "", fmt.Errorf("service does not use container tasks")
		}
		ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
	}

	r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
	if err != nil {
		return "", err
	}

	return r.Service.ID, nil
}

// GetService returns a service based on an ID or name.
func (c *Cluster) GetService(input string) (types.Service, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return types.Service{}, c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	service, err := getService(ctx, c.client, input)
	if err != nil {
		return types.Service{}, err
	}

	return convert.ServiceFromGRPC(*service), nil
}

// UpdateService updates existing service to match new properties.
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) error {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	err := c.populateNetworkID(ctx, c.client, &spec)
	if err != nil {
		return err
	}

	serviceSpec, err := convert.ServiceSpecToGRPC(spec)
	if err != nil {
		return err
	}

	currentService, err := getService(ctx, c.client, serviceIDOrName)
	if err != nil {
		return err
	}

	if encodedAuth != "" {
		ctnr := serviceSpec.Task.GetContainer()
		if ctnr == nil {
			return fmt.Errorf("service does not use container tasks")
		}
		ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
	} else {
		// this is needed because if the encodedAuth isn't being updated then we
		// shouldn't lose it, and continue to use the one that was already present
		var ctnr *swarmapi.ContainerSpec
		switch registryAuthFrom {
		case apitypes.RegistryAuthFromSpec, "":
			ctnr = currentService.Spec.Task.GetContainer()
		case apitypes.RegistryAuthFromPreviousSpec:
			if currentService.PreviousSpec == nil {
				return fmt.Errorf("service does not have a previous spec")
			}
			ctnr = currentService.PreviousSpec.Task.GetContainer()
		default:
			return fmt.Errorf("unsupported registryAuthFrom value")
		}
		if ctnr == nil {
			return fmt.Errorf("service does not use container tasks")
		}
		serviceSpec.Task.GetContainer().PullOptions = ctnr.PullOptions
	}

	_, err = c.client.UpdateService(
		ctx,
		&swarmapi.UpdateServiceRequest{
			ServiceID: currentService.ID,
			Spec:      &serviceSpec,
			ServiceVersion: &swarmapi.Version{
				Index: version,
			},
		},
	)
	return err
}

// RemoveService removes a service from a managed swarm cluster.
func (c *Cluster) RemoveService(input string) error {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	service, err := getService(ctx, c.client, input)
	if err != nil {
		return err
	}

	if _, err := c.client.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil {
		return err
	}
	return nil
}

// GetNodes returns a list of all nodes known to a cluster.
func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return nil, c.errNoManager()
	}

	filters, err := newListNodesFilters(options.Filters)
	if err != nil {
		return nil, err
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := c.client.ListNodes(
		ctx,
		&swarmapi.ListNodesRequest{Filters: filters})
	if err != nil {
		return nil, err
	}

	nodes := []types.Node{}

	for _, node := range r.Nodes {
		nodes = append(nodes, convert.NodeFromGRPC(*node))
	}
	return nodes, nil
}

// GetNode returns a node based on an ID or name.
func (c *Cluster) GetNode(input string) (types.Node, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return types.Node{}, c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	node, err := getNode(ctx, c.client, input)
	if err != nil {
		return types.Node{}, err
	}
	return convert.NodeFromGRPC(*node), nil
}

// UpdateNode updates existing nodes properties.
func (c *Cluster) UpdateNode(nodeID string, version uint64, spec types.NodeSpec) error {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return c.errNoManager()
	}

	nodeSpec, err := convert.NodeSpecToGRPC(spec)
	if err != nil {
		return err
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	_, err = c.client.UpdateNode(
		ctx,
		&swarmapi.UpdateNodeRequest{
			NodeID: nodeID,
			Spec:   &nodeSpec,
			NodeVersion: &swarmapi.Version{
				Index: version,
			},
		},
	)
	return err
}

// RemoveNode removes a node from a cluster
func (c *Cluster) RemoveNode(input string, force bool) error {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	node, err := getNode(ctx, c.client, input)
	if err != nil {
		return err
	}

	if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}); err != nil {
		return err
	}
	return nil
}

// GetTasks returns a list of tasks matching the filter options.
func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return nil, c.errNoManager()
	}

	byName := func(filter filters.Args) error {
		if filter.Include("service") {
			serviceFilters := filter.Get("service")
			for _, serviceFilter := range serviceFilters {
				service, err := c.GetService(serviceFilter)
				if err != nil {
					return err
				}
				filter.Del("service", serviceFilter)
				filter.Add("service", service.ID)
			}
		}
		if filter.Include("node") {
			nodeFilters := filter.Get("node")
			for _, nodeFilter := range nodeFilters {
				node, err := c.GetNode(nodeFilter)
				if err != nil {
					return err
				}
				filter.Del("node", nodeFilter)
				filter.Add("node", node.ID)
			}
		}
		return nil
	}

	filters, err := newListTasksFilters(options.Filters, byName)
	if err != nil {
		return nil, err
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := c.client.ListTasks(
		ctx,
		&swarmapi.ListTasksRequest{Filters: filters})
	if err != nil {
		return nil, err
	}

	tasks := []types.Task{}

	for _, task := range r.Tasks {
		if task.Spec.GetContainer() != nil {
			tasks = append(tasks, convert.TaskFromGRPC(*task))
		}
	}
	return tasks, nil
}

// GetTask returns a task by an ID.
func (c *Cluster) GetTask(input string) (types.Task, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return types.Task{}, c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	task, err := getTask(ctx, c.client, input)
	if err != nil {
		return types.Task{}, err
	}
	return convert.TaskFromGRPC(*task), nil
}

// GetNetwork returns a cluster network by an ID.
func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return apitypes.NetworkResource{}, c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	network, err := getNetwork(ctx, c.client, input)
	if err != nil {
		return apitypes.NetworkResource{}, err
	}
	return convert.BasicNetworkFromGRPC(*network), nil
}

// GetNetworks returns all current cluster managed networks.
func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return nil, c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{})
	if err != nil {
		return nil, err
	}

	var networks []apitypes.NetworkResource

	for _, network := range r.Networks {
		networks = append(networks, convert.BasicNetworkFromGRPC(*network))
	}

	return networks, nil
}

func attacherKey(target, containerID string) string {
	return containerID + ":" + target
}

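// NOTE (illustrative, not part of the upstream file): the attachers map is
// keyed by "<containerID>:<network>", so the same container attached to two
// networks gets two independent attacher entries, e.g.
//
//	attacherKey("ingress", "8f1c4b2a") == "8f1c4b2a:ingress"
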
// UpdateAttachment signals the attachment config to the attachment
// waiter who is trying to start or attach the container to the
// network.
func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error {
	c.RLock()
	attacher, ok := c.attachers[attacherKey(target, containerID)]
	c.RUnlock()
	if !ok || attacher == nil {
		return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target)
	}

	attacher.attachWaitCh <- config
	close(attacher.attachWaitCh)
	return nil
}

// WaitForDetachment waits for the container to stop or detach from
// the network.
func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
	c.RLock()
	attacher, ok := c.attachers[attacherKey(networkName, containerID)]
	if !ok {
		attacher, ok = c.attachers[attacherKey(networkID, containerID)]
	}
	if c.node == nil || c.node.Agent() == nil {
		c.RUnlock()
		return fmt.Errorf("invalid cluster node while waiting for detachment")
	}

	agent := c.node.Agent()
	c.RUnlock()

	if ok && attacher != nil &&
		attacher.detachWaitCh != nil &&
		attacher.attachCompleteCh != nil {
		// Attachment may be in progress still so wait for
		// attachment to complete.
		select {
		case <-attacher.attachCompleteCh:
		case <-ctx.Done():
			return ctx.Err()
		}

		if attacher.taskID == taskID {
			select {
			case <-attacher.detachWaitCh:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}

	return agent.ResourceAllocator().DetachNetwork(ctx, taskID)
}

// AttachNetwork generates an attachment request towards the manager.
func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) {
	aKey := attacherKey(target, containerID)
	c.Lock()
	if c.node == nil || c.node.Agent() == nil {
		c.Unlock()
		return nil, fmt.Errorf("invalid cluster node while attaching to network")
	}
	if attacher, ok := c.attachers[aKey]; ok {
		c.Unlock()
		return attacher.config, nil
	}

	agent := c.node.Agent()
	attachWaitCh := make(chan *network.NetworkingConfig)
	detachWaitCh := make(chan struct{})
	attachCompleteCh := make(chan struct{})
	c.attachers[aKey] = &attacher{
		attachWaitCh:     attachWaitCh,
		attachCompleteCh: attachCompleteCh,
		detachWaitCh:     detachWaitCh,
	}
	c.Unlock()

	ctx, cancel := c.getRequestContext()
	defer cancel()

	taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses)
	if err != nil {
		c.Lock()
		delete(c.attachers, aKey)
		c.Unlock()
		return nil, fmt.Errorf("Could not attach to network %s: %v", target, err)
	}

	c.Lock()
	c.attachers[aKey].taskID = taskID
	close(attachCompleteCh)
	c.Unlock()

	logrus.Debugf("Successfully attached to network %s with tid %s", target, taskID)

	var config *network.NetworkingConfig
	select {
	case config = <-attachWaitCh:
	case <-ctx.Done():
		return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err())
	}

	c.Lock()
	c.attachers[aKey].config = config
	c.Unlock()
	return config, nil
}

// DetachNetwork unblocks the waiters waiting on WaitForDetachment so
// that a request to detach can be generated towards the manager.
func (c *Cluster) DetachNetwork(target string, containerID string) error {
	aKey := attacherKey(target, containerID)

	c.Lock()
	attacher, ok := c.attachers[aKey]
	delete(c.attachers, aKey)
	c.Unlock()

	if !ok {
		return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target)
	}

	close(attacher.detachWaitCh)
	return nil
}

// CreateNetwork creates a new cluster managed network.
func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return "", c.errNoManager()
	}

	if runconfig.IsPreDefinedNetwork(s.Name) {
		err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name)
		return "", apierrors.NewRequestForbiddenError(err)
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	networkSpec := convert.BasicNetworkCreateToGRPC(s)
	r, err := c.client.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
	if err != nil {
		return "", err
	}

	return r.Network.ID, nil
}

// RemoveNetwork removes a cluster network.
func (c *Cluster) RemoveNetwork(input string) error {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	network, err := getNetwork(ctx, c.client, input)
	if err != nil {
		return err
	}

	if _, err := c.client.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil {
		return err
	}
	return nil
}

func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error {
	// Always prefer NetworkAttachmentConfigs from TaskTemplate
	// but fallback to service spec for backward compatibility
	networks := s.TaskTemplate.Networks
	if len(networks) == 0 {
		networks = s.Networks
	}

	for i, n := range networks {
		apiNetwork, err := getNetwork(ctx, client, n.Target)
		if err != nil {
			if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() {
				err = fmt.Errorf("network %s is not eligible for docker services", ln.Name())
				return apierrors.NewRequestForbiddenError(err)
			}
			return err
		}
		networks[i].Target = apiNetwork.ID
	}
	return nil
}

func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) {
	// GetNetwork to match via full ID.
	rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input})
	if err != nil {
		// If any error (including NotFound), ListNetworks to match via ID prefix and full name.
		rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}})
		if err != nil || len(rl.Networks) == 0 {
			rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}})
		}

		if err != nil {
			return nil, err
		}

		if len(rl.Networks) == 0 {
			return nil, fmt.Errorf("network %s not found", input)
		}

		if l := len(rl.Networks); l > 1 {
			return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l)
		}

		return rl.Networks[0], nil
	}
	return rg.Network, nil
}

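// NOTE (illustrative, not part of the upstream file): getNetwork resolves the
// user-supplied string in three steps: exact ID match first, then exact name,
// then ID prefix. With a network named "backend" whose ID starts with
// "q1w2e3r4" (hypothetical), "backend", the full ID and the prefix "q1w2" all
// resolve to it, while a prefix shared by several networks is rejected as
// ambiguous. getService, getNode and getTask (called above, defined elsewhere
// in this package) are the analogous lookup helpers for their object types.
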
// Cleanup stops active swarm node. This is run before daemon shutdown.
func (c *Cluster) Cleanup() {
	c.Lock()
	node := c.node
	if node == nil {
		c.Unlock()
		return
	}
	defer c.Unlock()
	if c.isActiveManager() {
		active, reachable, unreachable, err := c.managerStats()
		if err == nil {
			singlenode := active && isLastManager(reachable, unreachable)
			if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) {
				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
			}
		}
	}
	c.stopNode()
}

func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
	if err != nil {
		return false, 0, 0, err
	}
	for _, n := range nodes.Nodes {
		if n.ManagerStatus != nil {
			if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE {
				reachable++
				if n.ID == c.node.NodeID() {
					current = true
				}
			}
			if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE {
				unreachable++
			}
		}
	}
	return
}

func validateAndSanitizeInitRequest(req *types.InitRequest) error {
	var err error
	req.ListenAddr, err = validateAddr(req.ListenAddr)
	if err != nil {
		return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
	}

	return nil
}

func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
	var err error
	req.ListenAddr, err = validateAddr(req.ListenAddr)
	if err != nil {
		return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
	}
	if len(req.RemoteAddrs) == 0 {
		return fmt.Errorf("at least 1 RemoteAddr is required to join")
	}
	for i := range req.RemoteAddrs {
		req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i])
		if err != nil {
			return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err)
		}
	}
	return nil
}

func validateAddr(addr string) (string, error) {
	if addr == "" {
		return addr, fmt.Errorf("invalid empty address")
	}
	newaddr, err := opts.ParseTCPAddr(addr, defaultAddr)
	if err != nil {
		return addr, nil
	}
	return strings.TrimPrefix(newaddr, "tcp://"), nil
}

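// NOTE (illustrative, not part of the upstream file): validateAddr normalizes
// whatever opts.ParseTCPAddr understands, stripping the "tcp://" prefix and
// taking the default port from defaultAddr, while inputs that fail to parse are
// passed through unchanged with a nil error rather than rejected here. Roughly:
//
//	validateAddr("192.168.1.10")      // -> "192.168.1.10:2377"
//	validateAddr("192.168.1.10:4242") // -> "192.168.1.10:4242"
//	validateAddr("")                  // -> error: invalid empty address
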
func initClusterSpec(node *node, spec types.Spec) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for conn := range node.ListenControlSocket(ctx) {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if conn != nil {
			client := swarmapi.NewControlClient(conn)
			var cluster *swarmapi.Cluster
			for i := 0; ; i++ {
				lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
				if err != nil {
					return fmt.Errorf("error on listing clusters: %v", err)
				}
				if len(lcr.Clusters) == 0 {
					if i < 10 {
						time.Sleep(200 * time.Millisecond)
						continue
					}
					return fmt.Errorf("empty list of clusters was returned")
				}
				cluster = lcr.Clusters[0]
				break
			}
			// In init, we take the initial default values from swarmkit, and merge
			// any non nil or 0 value from spec to GRPC spec. This will leave the
			// default value alone.
			// Note that this is different from Update(), as in Update() we expect
			// the user to specify the complete spec of the cluster (they already
			// know the existing one and know which fields to update).
			clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec)
			if err != nil {
				return fmt.Errorf("error updating cluster settings: %v", err)
			}
			_, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
				ClusterID:      cluster.ID,
				ClusterVersion: &cluster.Meta.Version,
				Spec:           &clusterSpec,
			})
			if err != nil {
				return fmt.Errorf("error updating cluster settings: %v", err)
			}
			return nil
		}
	}
	return ctx.Err()
}

func detectLockedError(err error) error {
	if err == swarmnode.ErrInvalidUnlockKey {
		return errors.WithStack(ErrSwarmLocked)
	}

	return err
}