noderunner.go

package cluster

import (
	"fmt"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	types "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/executor/container"
	swarmapi "github.com/docker/swarmkit/api"
	swarmnode "github.com/docker/swarmkit/node"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// nodeRunner implements a manager for a continuously running swarmkit node,
// restarting it with backoff delays if needed.
type nodeRunner struct {
	nodeState
	mu              sync.RWMutex
	done            chan struct{} // closed when swarmNode exits
	ready           chan struct{} // closed when swarmNode becomes active
	reconnectDelay  time.Duration
	config          nodeStartConfig
	repeatedRun     bool
	cancelReconnect func()
	stopping        bool
	cluster         *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct
}
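
// A rough sketch of the lifecycle (the concrete caller lives elsewhere in
// this package; the variable names below are illustrative only):
//
//	nr := &nodeRunner{cluster: c} // c is the owning *Cluster
//	if err := nr.Start(conf); err != nil {
//		// handle startup error
//	}
//	if err := <-nr.Ready(); err != nil {
//		// node exited before becoming active
//	}
//	// ... later ...
//	_ = nr.Stop()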

// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
	// LocalAddr is this machine's local IP or hostname, if specified.
	LocalAddr string
	// RemoteAddr is the address that was given to "swarm join". It is used
	// to find LocalAddr if necessary.
	RemoteAddr string
	// ListenAddr is the address we bind to, including a port.
	ListenAddr string
	// AdvertiseAddr is the address other nodes should connect to,
	// including a port.
	AdvertiseAddr string

	joinAddr        string
	forceNewCluster bool
	joinToken       string
	lockKey         []byte
	autolock        bool
	availability    types.NodeAvailability
}
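
// Ready returns a channel that reports the startup result: it yields nil once
// the swarmkit node becomes active, or the error that caused the node to exit
// before becoming ready. The channel is closed afterwards.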
func (n *nodeRunner) Ready() chan error {
	c := make(chan error, 1)
	n.mu.RLock()
	ready, done := n.ready, n.done
	n.mu.RUnlock()
	go func() {
		select {
		case <-ready:
		case <-done:
		}
		select {
		case <-ready:
		default:
			n.mu.RLock()
			c <- n.err
			n.mu.RUnlock()
		}
		close(c)
	}()
	return c
}
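
// Start starts a new swarmkit node with the given configuration, resetting
// the reconnect backoff to its initial delay.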
func (n *nodeRunner) Start(conf nodeStartConfig) error {
	n.mu.Lock()
	defer n.mu.Unlock()

	n.reconnectDelay = initialReconnectDelay

	return n.start(conf)
}
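
// start builds the swarmkit node configuration, starts the node, and spawns
// the goroutines that watch for its exit, readiness, and control-socket
// changes. Callers must hold n.mu.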
func (n *nodeRunner) start(conf nodeStartConfig) error {
	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
	}

	// Hostname is not set here. Instead, it is obtained from
	// the node description that is reported periodically
	swarmnodeConfig := swarmnode.Config{
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           n.cluster.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(n.cluster.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
		PluginGetter:       n.cluster.config.Backend.PluginGetter(),
	}

	if conf.availability != "" {
		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
		if !ok {
			return fmt.Errorf("invalid Availability: %q", conf.availability)
		}
		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
	}

	node, err := swarmnode.New(&swarmnodeConfig)
	if err != nil {
		return err
	}

	if err := node.Start(context.Background()); err != nil {
		return err
	}

	n.done = make(chan struct{})
	n.ready = make(chan struct{})
	n.swarmNode = node
	n.config = conf
	savePersistentState(n.cluster.root, conf)

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		n.handleNodeExit(node)
		cancel()
	}()

	go n.handleReadyEvent(ctx, node, n.ready)
	go n.handleControlSocketChange(ctx, node)

	return nil
}
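
// handleControlSocketChange updates the grpc connection and the control and
// logs clients whenever the node's control socket changes, and notifies the
// cluster through configEvent.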
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
	for conn := range node.ListenControlSocket(ctx) {
		n.mu.Lock()
		if n.grpcConn != conn {
			if conn == nil {
				n.controlClient = nil
				n.logsClient = nil
			} else {
				n.controlClient = swarmapi.NewControlClient(conn)
				n.logsClient = swarmapi.NewLogsClient(conn)
			}
		}
		n.grpcConn = conn
		n.mu.Unlock()
		n.cluster.configEvent <- struct{}{}
	}
}
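
// handleReadyEvent clears any stored error and closes the ready channel once
// the node reports itself ready, then notifies the cluster through
// configEvent.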
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
	select {
	case <-node.Ready():
		n.mu.Lock()
		n.err = nil
		n.mu.Unlock()
		close(ready)
	case <-ctx.Done():
	}
	n.cluster.configEvent <- struct{}{}
}
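
// handleNodeExit blocks until the swarmkit node exits, records the exit
// error, closes the done channel, and schedules a reconnect attempt if the
// node had become ready before or has already been restarted once.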
func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
	err := detectLockedError(node.Err(context.Background()))
	if err != nil {
		logrus.Errorf("cluster exited with error: %v", err)
	}
	n.mu.Lock()
	n.swarmNode = nil
	n.err = err
	close(n.done)
	select {
	case <-n.ready:
		n.enableReconnectWatcher()
	default:
		if n.repeatedRun {
			n.enableReconnectWatcher()
		}
	}
	n.repeatedRun = true
	n.mu.Unlock()
}

// Stop stops the current swarm node if it is running.
func (n *nodeRunner) Stop() error {
	n.mu.Lock()
	if n.cancelReconnect != nil { // between restarts
		n.cancelReconnect()
		n.cancelReconnect = nil
	}
	if n.swarmNode == nil {
		n.mu.Unlock()
		return nil
	}
	n.stopping = true
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	n.mu.Unlock()
	if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
		return err
	}
	<-n.done
	return nil
}
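
// State returns a snapshot of the current node state. It is safe to call on
// a nil receiver, in which case an inactive state is reported.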
func (n *nodeRunner) State() nodeState {
	if n == nil {
		return nodeState{status: types.LocalNodeStateInactive}
	}
	n.mu.RLock()
	defer n.mu.RUnlock()

	ns := n.nodeState

	if ns.err != nil || n.cancelReconnect != nil {
		if errors.Cause(ns.err) == errSwarmLocked {
			ns.status = types.LocalNodeStateLocked
		} else {
			ns.status = types.LocalNodeStateError
		}
	} else {
		select {
		case <-n.ready:
			ns.status = types.LocalNodeStateActive
		default:
			ns.status = types.LocalNodeStatePending
		}
	}

	return ns
}
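
// enableReconnectWatcher doubles the reconnect delay (capped at
// maxReconnectDelay) and schedules a restart of the node with the stored
// configuration once the delay elapses. Callers must hold n.mu.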
func (n *nodeRunner) enableReconnectWatcher() {
	if n.stopping {
		return
	}
	n.reconnectDelay *= 2
	if n.reconnectDelay > maxReconnectDelay {
		n.reconnectDelay = maxReconnectDelay
	}
	logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
	delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
	n.cancelReconnect = cancel

	config := n.config
	go func() {
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			return
		}
		n.mu.Lock()
		defer n.mu.Unlock()
		if n.stopping {
			return
		}
		config.RemoteAddr = n.cluster.getRemoteAddress()
		config.joinAddr = config.RemoteAddr
		if err := n.start(config); err != nil {
			n.err = err
		}
	}()
}

// nodeState represents information about the current state of the cluster and
// provides access to the grpc clients.
type nodeState struct {
	swarmNode       *swarmnode.Node
	grpcConn        *grpc.ClientConn
	controlClient   swarmapi.ControlClient
	logsClient      swarmapi.LogsClient
	status          types.LocalNodeState
	actualLocalAddr string
	err             error
}

// IsActiveManager returns true if node is a manager ready to accept control
// requests. It is safe to access the client properties if this returns true.
func (ns nodeState) IsActiveManager() bool {
	return ns.controlClient != nil
}

// IsManager returns true if node is a manager.
func (ns nodeState) IsManager() bool {
	return ns.swarmNode != nil && ns.swarmNode.Manager() != nil
}

// NodeID returns node's ID or empty string if node is inactive.
func (ns nodeState) NodeID() string {
	if ns.swarmNode != nil {
		return ns.swarmNode.NodeID()
	}
	return ""
}