noderunner.go

package cluster

import (
	"fmt"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	types "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/executor/container"
	swarmapi "github.com/docker/swarmkit/api"
	swarmnode "github.com/docker/swarmkit/node"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)
// nodeRunner implements a manager for a continuously running swarmkit node,
// restarting it with backoff delays if needed.
type nodeRunner struct {
	nodeState
	mu              sync.RWMutex
	done            chan struct{} // closed when swarmNode exits
	ready           chan struct{} // closed when swarmNode becomes active
	reconnectDelay  time.Duration
	config          nodeStartConfig

	repeatedRun     bool
	cancelReconnect func()
	stopping        bool
	cluster         *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct
}
// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk as JSON. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
	// LocalAddr is this machine's local IP or hostname, if specified.
	LocalAddr string
	// RemoteAddr is the address that was given to "swarm join". It is used
	// to find LocalAddr if necessary.
	RemoteAddr string
	// ListenAddr is the address we bind to, including a port.
	ListenAddr string
	// AdvertiseAddr is the address other nodes should connect to,
	// including a port.
	AdvertiseAddr string

	joinAddr        string
	forceNewCluster bool
	joinToken       string
	lockKey         []byte
	autolock        bool
	availability    types.NodeAvailability
}
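// Ready returns a channel that is closed once the node has either become
// active or exited. If the node exited without becoming ready, its error
// is sent on the channel before it is closed.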
func (n *nodeRunner) Ready() chan error {
	c := make(chan error, 1)
	n.mu.RLock()
	ready, done := n.ready, n.done
	n.mu.RUnlock()
	go func() {
		select {
		case <-ready:
		case <-done:
		}
		select {
		case <-ready:
		default:
			n.mu.RLock()
			c <- n.err
			n.mu.RUnlock()
		}
		close(c)
	}()
	return c
}
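// Start starts a new swarmkit node with the given configuration, resetting
// the reconnect backoff delay to its initial value.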
func (n *nodeRunner) Start(conf nodeStartConfig) error {
	n.mu.Lock()
	defer n.mu.Unlock()

	n.reconnectDelay = initialReconnectDelay

	return n.start(conf)
}
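// start creates and starts the underlying swarmkit node, persists the start
// configuration, and launches the goroutines that watch for readiness,
// control socket changes, and node exit. Callers must hold n.mu.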
func (n *nodeRunner) start(conf nodeStartConfig) error {
	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
	}

	swarmnodeConfig := swarmnode.Config{
		Hostname:           n.cluster.config.Name,
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           n.cluster.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(n.cluster.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
	}

	if conf.availability != "" {
		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
		if !ok {
			return fmt.Errorf("invalid Availability: %q", conf.availability)
		}
		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
	}

	node, err := swarmnode.New(&swarmnodeConfig)
	if err != nil {
		return err
	}

	if err := node.Start(context.Background()); err != nil {
		return err
	}

	n.done = make(chan struct{})
	n.ready = make(chan struct{})
	n.swarmNode = node
	n.config = conf
	savePersistentState(n.cluster.root, conf)

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		n.handleNodeExit(node)
		cancel()
	}()

	go n.handleReadyEvent(ctx, node, n.ready)
	go n.handleControlSocketChange(ctx, node)

	return nil
}
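// handleControlSocketChange refreshes the cached grpc clients whenever the
// node's control socket connection changes, and notifies the cluster of the
// configuration change.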
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
	for conn := range node.ListenControlSocket(ctx) {
		n.mu.Lock()
		if n.grpcConn != conn {
			if conn == nil {
				n.controlClient = nil
				n.logsClient = nil
			} else {
				n.controlClient = swarmapi.NewControlClient(conn)
				n.logsClient = swarmapi.NewLogsClient(conn)
			}
		}
		n.grpcConn = conn
		n.mu.Unlock()
		n.cluster.configEvent <- struct{}{}
	}
}
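// handleReadyEvent waits for the node to report readiness, then clears any
// stored error and closes the ready channel. A configuration change is
// signaled in either case.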
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
	select {
	case <-node.Ready():
		n.mu.Lock()
		n.err = nil
		n.mu.Unlock()
		close(ready)
	case <-ctx.Done():
	}
	n.cluster.configEvent <- struct{}{}
}
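// handleNodeExit blocks until the swarmkit node exits, records its exit
// error, closes the done channel, and schedules a reconnect with backoff if
// the node had become ready or had run before.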
func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
	err := detectLockedError(node.Err(context.Background()))
	if err != nil {
		logrus.Errorf("cluster exited with error: %v", err)
	}
	n.mu.Lock()
	n.swarmNode = nil
	n.err = err
	close(n.done)
	select {
	case <-n.ready:
		n.enableReconnectWatcher()
	default:
		if n.repeatedRun {
			n.enableReconnectWatcher()
		}
	}
	n.repeatedRun = true
	n.mu.Unlock()
}
// Stop stops the current swarm node if it is running.
func (n *nodeRunner) Stop() error {
	n.mu.Lock()
	if n.cancelReconnect != nil { // between restarts
		n.cancelReconnect()
		n.cancelReconnect = nil
	}
	if n.swarmNode == nil {
		n.mu.Unlock()
		return nil
	}
	n.stopping = true
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
		n.mu.Unlock()
		return err
	}
	n.mu.Unlock()
	<-n.done
	return nil
}
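// State returns a snapshot of the current node state, deriving the local node
// status from the stored error, any pending reconnect, and readiness. It is
// safe to call on a nil receiver, which reports an inactive state.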
func (n *nodeRunner) State() nodeState {
	if n == nil {
		return nodeState{status: types.LocalNodeStateInactive}
	}
	n.mu.RLock()
	defer n.mu.RUnlock()

	ns := n.nodeState

	if ns.err != nil || n.cancelReconnect != nil {
		if errors.Cause(ns.err) == errSwarmLocked {
			ns.status = types.LocalNodeStateLocked
		} else {
			ns.status = types.LocalNodeStateError
		}
	} else {
		select {
		case <-n.ready:
			ns.status = types.LocalNodeStateActive
		default:
			ns.status = types.LocalNodeStatePending
		}
	}

	return ns
}
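// enableReconnectWatcher doubles the reconnect delay (capped at
// maxReconnectDelay) and schedules a restart of the node with the saved
// configuration once the delay elapses, unless the runner is stopping.
// Callers must hold n.mu.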
func (n *nodeRunner) enableReconnectWatcher() {
	if n.stopping {
		return
	}
	n.reconnectDelay *= 2
	if n.reconnectDelay > maxReconnectDelay {
		n.reconnectDelay = maxReconnectDelay
	}
	logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
	delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
	n.cancelReconnect = cancel

	config := n.config
	go func() {
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			return
		}
		n.mu.Lock()
		defer n.mu.Unlock()
		if n.stopping {
			return
		}
		config.RemoteAddr = n.cluster.getRemoteAddress()
		config.joinAddr = config.RemoteAddr
		if err := n.start(config); err != nil {
			n.err = err
		}
	}()
}
// nodeState represents information about the current state of the cluster and
// provides access to the grpc clients.
type nodeState struct {
	swarmNode       *swarmnode.Node
	grpcConn        *grpc.ClientConn
	controlClient   swarmapi.ControlClient
	logsClient      swarmapi.LogsClient
	status          types.LocalNodeState
	actualLocalAddr string
	err             error
}
// IsActiveManager returns true if the node is a manager ready to accept
// control requests. It is safe to access the client properties if this
// returns true.
func (ns nodeState) IsActiveManager() bool {
	return ns.controlClient != nil
}

// IsManager returns true if the node is a manager.
func (ns nodeState) IsManager() bool {
	return ns.swarmNode != nil && ns.swarmNode.Manager() != nil
}

// NodeID returns the node's ID, or an empty string if the node is inactive.
func (ns nodeState) NodeID() string {
	if ns.swarmNode != nil {
		return ns.swarmNode.NodeID()
	}
	return ""
}