noderunner.go

package cluster

import (
	"fmt"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	types "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/executor/container"
	swarmapi "github.com/docker/swarmkit/api"
	swarmnode "github.com/docker/swarmkit/node"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// nodeRunner implements a manager for a continuously running swarmkit node,
// restarting it with backoff delays if needed.
type nodeRunner struct {
	nodeState
	mu              sync.RWMutex
	done            chan struct{} // closed when swarmNode exits
	ready           chan struct{} // closed when swarmNode becomes active
	reconnectDelay  time.Duration
	config          nodeStartConfig
	repeatedRun     bool
	cancelReconnect func()
	stopping        bool
	cluster         *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct
}

// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
	// LocalAddr is this machine's local IP or hostname, if specified.
	LocalAddr string
	// RemoteAddr is the address that was given to "swarm join". It is used
	// to find LocalAddr if necessary.
	RemoteAddr string
	// ListenAddr is the address we bind to, including a port.
	ListenAddr string
	// AdvertiseAddr is the address other nodes should connect to,
	// including a port.
	AdvertiseAddr string

	joinAddr        string
	forceNewCluster bool
	joinToken       string
	lockKey         []byte
	autolock        bool
	availability    types.NodeAvailability
}

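// Ready returns a channel that is closed once the node has either become
// active or exited before becoming active. If the node exited first, the
// recorded startup error is sent on the channel before it is closed.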
func (n *nodeRunner) Ready() chan error {
	c := make(chan error, 1)
	n.mu.RLock()
	ready, done := n.ready, n.done
	n.mu.RUnlock()
	go func() {
		select {
		case <-ready:
		case <-done:
		}
		select {
		case <-ready:
		default:
			n.mu.RLock()
			c <- n.err
			n.mu.RUnlock()
		}
		close(c)
	}()
	return c
}

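// Start launches a swarmkit node with the given configuration, resetting
// the reconnect backoff to its initial delay.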
func (n *nodeRunner) Start(conf nodeStartConfig) error {
	n.mu.Lock()
	defer n.mu.Unlock()

	n.reconnectDelay = initialReconnectDelay

	return n.start(conf)
}

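// start builds the swarmkit node configuration, creates and starts the node,
// persists the start configuration, and spawns the goroutines that watch for
// readiness, control-socket changes, and node exit. Callers hold n.mu.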
func (n *nodeRunner) start(conf nodeStartConfig) error {
	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
	}

	// Hostname is not set here. Instead, it is obtained from
	// the node description that is reported periodically
	swarmnodeConfig := swarmnode.Config{
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           n.cluster.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(n.cluster.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
		PluginGetter:       n.cluster.config.Backend.PluginGetter(),
	}
	if conf.availability != "" {
		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
		if !ok {
			return fmt.Errorf("invalid Availability: %q", conf.availability)
		}
		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
	}
	node, err := swarmnode.New(&swarmnodeConfig)
	if err != nil {
		return err
	}
	if err := node.Start(context.Background()); err != nil {
		return err
	}

	n.done = make(chan struct{})
	n.ready = make(chan struct{})
	n.swarmNode = node
	n.config = conf
	savePersistentState(n.cluster.root, conf)

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		n.handleNodeExit(node)
		cancel()
	}()

	go n.handleReadyEvent(ctx, node, n.ready)
	go n.handleControlSocketChange(ctx, node)

	return nil
}

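// handleControlSocketChange updates the cached grpc connection and the
// control/logs API clients whenever the control socket reports a new
// connection, then signals a cluster configuration change.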
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
	for conn := range node.ListenControlSocket(ctx) {
		n.mu.Lock()
		if n.grpcConn != conn {
			if conn == nil {
				n.controlClient = nil
				n.logsClient = nil
			} else {
				n.controlClient = swarmapi.NewControlClient(conn)
				n.logsClient = swarmapi.NewLogsClient(conn)
			}
		}
		n.grpcConn = conn
		n.mu.Unlock()
		n.cluster.configEvent <- struct{}{}
	}
}

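// handleReadyEvent waits for the node to report ready or for the context to
// be cancelled. On readiness it clears the stored error and closes the ready
// channel; in either case it signals a cluster configuration change.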
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
	select {
	case <-node.Ready():
		n.mu.Lock()
		n.err = nil
		n.mu.Unlock()
		close(ready)
	case <-ctx.Done():
	}
	n.cluster.configEvent <- struct{}{}
}

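// handleNodeExit waits for the node to exit, records the resulting error
// (detecting the swarm-locked case), closes the done channel, and arms the
// reconnect watcher if the node had become ready or this is a repeated run.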
func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
	err := detectLockedError(node.Err(context.Background()))
	if err != nil {
		logrus.Errorf("cluster exited with error: %v", err)
	}
	n.mu.Lock()
	n.swarmNode = nil
	n.err = err
	close(n.done)
	select {
	case <-n.ready:
		n.enableReconnectWatcher()
	default:
		if n.repeatedRun {
			n.enableReconnectWatcher()
		}
	}
	n.repeatedRun = true
	n.mu.Unlock()
}

// Stop stops the current swarm node if it is running.
func (n *nodeRunner) Stop() error {
	n.mu.Lock()
	if n.cancelReconnect != nil { // between restarts
		n.cancelReconnect()
		n.cancelReconnect = nil
	}
	if n.swarmNode == nil {
		n.mu.Unlock()
		return nil
	}
	n.stopping = true
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
		n.mu.Unlock()
		return err
	}
	n.mu.Unlock()
	<-n.done
	return nil
}

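// State returns a snapshot of the current node state. It is safe to call on
// a nil runner, in which case the node is reported as inactive.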
func (n *nodeRunner) State() nodeState {
	if n == nil {
		return nodeState{status: types.LocalNodeStateInactive}
	}
	n.mu.RLock()
	defer n.mu.RUnlock()

	ns := n.nodeState

	if ns.err != nil || n.cancelReconnect != nil {
		if errors.Cause(ns.err) == errSwarmLocked {
			ns.status = types.LocalNodeStateLocked
		} else {
			ns.status = types.LocalNodeStateError
		}
	} else {
		select {
		case <-n.ready:
			ns.status = types.LocalNodeStateActive
		default:
			ns.status = types.LocalNodeStatePending
		}
	}

	return ns
}

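// enableReconnectWatcher doubles the reconnect delay (capped at
// maxReconnectDelay) and schedules a restart of the node with the last known
// configuration after that delay. It is a no-op while the runner is stopping
// and is invoked with n.mu held.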
func (n *nodeRunner) enableReconnectWatcher() {
	if n.stopping {
		return
	}
	n.reconnectDelay *= 2
	if n.reconnectDelay > maxReconnectDelay {
		n.reconnectDelay = maxReconnectDelay
	}
	logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
	delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
	n.cancelReconnect = cancel

	config := n.config
	go func() {
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			return
		}
		n.mu.Lock()
		defer n.mu.Unlock()
		if n.stopping {
			return
		}
		config.RemoteAddr = n.cluster.getRemoteAddress()
		config.joinAddr = config.RemoteAddr
		if err := n.start(config); err != nil {
			n.err = err
		}
	}()
}

// nodeState represents information about the current state of the cluster and
// provides access to the grpc clients.
type nodeState struct {
	swarmNode       *swarmnode.Node
	grpcConn        *grpc.ClientConn
	controlClient   swarmapi.ControlClient
	logsClient      swarmapi.LogsClient
	status          types.LocalNodeState
	actualLocalAddr string
	err             error
}

// IsActiveManager returns true if node is a manager ready to accept control
// requests. It is safe to access the client properties if this returns true.
func (ns nodeState) IsActiveManager() bool {
	return ns.controlClient != nil
}

// IsManager returns true if node is a manager.
func (ns nodeState) IsManager() bool {
	return ns.swarmNode != nil && ns.swarmNode.Manager() != nil
}

// NodeID returns node's ID or empty string if node is inactive.
func (ns nodeState) NodeID() string {
	if ns.swarmNode != nil {
		return ns.swarmNode.NodeID()
	}
	return ""
}