noderunner.go

package cluster

import (
	"fmt"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	types "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/executor/container"
	lncluster "github.com/docker/libnetwork/cluster"
	swarmapi "github.com/docker/swarmkit/api"
	swarmnode "github.com/docker/swarmkit/node"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// nodeRunner implements a manager for a continuously running swarmkit node,
// restarting it with backoff delays if needed.
type nodeRunner struct {
	nodeState
	mu              sync.RWMutex
	done            chan struct{} // closed when swarmNode exits
	ready           chan struct{} // closed when swarmNode becomes active
	reconnectDelay  time.Duration
	config          nodeStartConfig
	repeatedRun     bool
	cancelReconnect func()
	stopping        bool
	cluster         *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct
}
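
// A minimal usage sketch (hypothetical caller, not part of this file): the
// surrounding cluster code would construct a nodeRunner, start it with a
// nodeStartConfig, and block on Ready before serving control requests.
//
//	nr := &nodeRunner{cluster: c} // c is an existing *Cluster
//	if err := nr.Start(conf); err != nil {
//		return err
//	}
//	if err := <-nr.Ready(); err != nil { // nil once the node is active
//		return err
//	}
//	defer nr.Stop()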

// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
	// LocalAddr is this machine's local IP or hostname, if specified.
	LocalAddr string
	// RemoteAddr is the address that was given to "swarm join". It is used
	// to find LocalAddr if necessary.
	RemoteAddr string
	// ListenAddr is the address we bind to, including a port.
	ListenAddr string
	// AdvertiseAddr is the address other nodes should connect to,
	// including a port.
	AdvertiseAddr string
	// DataPathAddr is the address to be used for the data path.
	DataPathAddr string
	// JoinInProgress is set to true if a join operation has started, but
	// not completed yet.
	JoinInProgress bool

	joinAddr        string
	forceNewCluster bool
	joinToken       string
	lockKey         []byte
	autolock        bool
	availability    types.NodeAvailability
}
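
// Persistence sketch (an assumption about savePersistentState, which lives
// elsewhere in this package): marshalling the struct with encoding/json keeps
// only the exported fields above, which is why JoinInProgress survives a
// daemon restart while joinAddr, joinToken, and lockKey do not.
//
//	dt, err := json.Marshal(conf) // unexported fields are silently skipped
//	if err != nil {
//		return err
//	}
//	return ioutil.WriteFile(filepath.Join(root, "docker-state.json"), dt, 0600) // file name illustrative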

// Ready returns a channel that is closed once the node has either become
// active or exited; if it exited first, the node's error is sent on the
// channel before it is closed.
func (n *nodeRunner) Ready() chan error {
	c := make(chan error, 1)
	n.mu.RLock()
	ready, done := n.ready, n.done
	n.mu.RUnlock()
	go func() {
		// Wait until the node either becomes active or exits.
		select {
		case <-ready:
		case <-done:
		}
		// If the node exited before becoming ready, report its error.
		select {
		case <-ready:
		default:
			n.mu.RLock()
			c <- n.err
			n.mu.RUnlock()
		}
		close(c)
	}()
	return c
}

// Start starts the swarmkit node with the given config, resetting the
// reconnect backoff delay.
func (n *nodeRunner) Start(conf nodeStartConfig) error {
	n.mu.Lock()
	defer n.mu.Unlock()

	n.reconnectDelay = initialReconnectDelay

	return n.start(conf)
}

func (n *nodeRunner) start(conf nodeStartConfig) error {
	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
	}

	joinAddr := conf.joinAddr
	if joinAddr == "" && conf.JoinInProgress {
		// We must have been restarted while trying to join a cluster.
		// Continue trying to join instead of forming our own cluster.
		joinAddr = conf.RemoteAddr
	}

	// Hostname is not set here. Instead, it is obtained from
	// the node description that is reported periodically.
	swarmnodeConfig := swarmnode.Config{
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           joinAddr,
		StateDir:           n.cluster.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(n.cluster.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
		PluginGetter:       n.cluster.config.Backend.PluginGetter(),
	}
	if conf.availability != "" {
		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
		if !ok {
			return fmt.Errorf("invalid Availability: %q", conf.availability)
		}
		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
	}
	node, err := swarmnode.New(&swarmnodeConfig)
	if err != nil {
		return err
	}
	if err := node.Start(context.Background()); err != nil {
		return err
	}

	n.done = make(chan struct{})
	n.ready = make(chan struct{})
	n.swarmNode = node
	if conf.joinAddr != "" {
		conf.JoinInProgress = true
	}
	n.config = conf
	savePersistentState(n.cluster.root, conf)

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		n.handleNodeExit(node)
		cancel()
	}()

	go n.handleReadyEvent(ctx, node, n.ready)
	go n.handleControlSocketChange(ctx, node)

	return nil
}
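
// Availability mapping note: types.NodeAvailability values are lower-case
// strings ("active", "pause", "drain"), while the generated swarmapi enum
// names are upper-case, hence the strings.ToUpper lookup above. For example
// (a sketch, not code from this package):
//
//	avail := swarmapi.NodeSpec_Availability_value["DRAIN"]
//	cfg.Availability = swarmapi.NodeSpec_Availability(avail)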

// handleControlSocketChange updates the grpc clients whenever the node's
// control socket connection changes, and notifies the cluster of the change.
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
	for conn := range node.ListenControlSocket(ctx) {
		n.mu.Lock()
		if n.grpcConn != conn {
			if conn == nil {
				n.controlClient = nil
				n.logsClient = nil
			} else {
				n.controlClient = swarmapi.NewControlClient(conn)
				n.logsClient = swarmapi.NewLogsClient(conn)
				// push store changes to daemon
				go n.watchClusterEvents(ctx, conn)
			}
		}
		n.grpcConn = conn
		n.mu.Unlock()
		n.cluster.SendClusterEvent(lncluster.EventSocketChange)
	}
}

func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) {
	client := swarmapi.NewWatchClient(conn)
	watch, err := client.Watch(ctx, &swarmapi.WatchRequest{
		Entries: []*swarmapi.WatchRequest_WatchEntry{
			{
				Kind:   "node",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
			{
				Kind:   "service",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
			{
				Kind:   "network",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
			{
				Kind:   "secret",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
		},
		IncludeOldObject: true,
	})
	if err != nil {
		logrus.WithError(err).Error("failed to watch cluster store")
		return
	}
	for {
		msg, err := watch.Recv()
		if err != nil {
			// store watch is broken
			logrus.WithError(err).Error("failed to receive changes from store watch API")
			return
		}
		select {
		case <-ctx.Done():
			return
		case n.cluster.watchStream <- msg:
		}
	}
}
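
// Event flow note: every WatchMessage received above is forwarded to
// n.cluster.watchStream, from which the daemon turns store changes into API
// events. The goroutine exits when the watch stream breaks or when ctx is
// cancelled, which start arranges once the node exits.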

// handleReadyEvent waits for the node to become active, records that any
// in-progress join completed, and closes the ready channel.
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
	select {
	case <-node.Ready():
		n.mu.Lock()
		n.err = nil
		if n.config.JoinInProgress {
			n.config.JoinInProgress = false
			savePersistentState(n.cluster.root, n.config)
		}
		n.mu.Unlock()
		close(ready)
	case <-ctx.Done():
	}
	n.cluster.SendClusterEvent(lncluster.EventNodeReady)
}

func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
	err := detectLockedError(node.Err(context.Background()))
	if err != nil {
		logrus.Errorf("cluster exited with error: %v", err)
	}
	n.mu.Lock()
	n.swarmNode = nil
	n.err = err
	close(n.done)
	select {
	case <-n.ready:
		// The node had become active before exiting; schedule a restart.
		n.enableReconnectWatcher()
	default:
		// The node never became ready; only retry if this was not the
		// first run.
		if n.repeatedRun {
			n.enableReconnectWatcher()
		}
	}
	n.repeatedRun = true
	n.mu.Unlock()
}

// Stop stops the current swarm node if it is running.
func (n *nodeRunner) Stop() error {
	n.mu.Lock()
	if n.cancelReconnect != nil { // between restarts
		n.cancelReconnect()
		n.cancelReconnect = nil
	}
	if n.swarmNode == nil {
		n.mu.Unlock()
		return nil
	}
	n.stopping = true
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	n.mu.Unlock()
	if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
		return err
	}
	n.cluster.SendClusterEvent(lncluster.EventNodeLeave)
	<-n.done
	return nil
}

// State returns a snapshot of the node's state, deriving the local status
// from the runner's error, pending reconnect, and readiness.
func (n *nodeRunner) State() nodeState {
	if n == nil {
		return nodeState{status: types.LocalNodeStateInactive}
	}
	n.mu.RLock()
	defer n.mu.RUnlock()

	ns := n.nodeState

	if ns.err != nil || n.cancelReconnect != nil {
		if errors.Cause(ns.err) == errSwarmLocked {
			ns.status = types.LocalNodeStateLocked
		} else {
			ns.status = types.LocalNodeStateError
		}
	} else {
		select {
		case <-n.ready:
			ns.status = types.LocalNodeStateActive
		default:
			ns.status = types.LocalNodeStatePending
		}
	}

	return ns
}

// enableReconnectWatcher schedules a restart of the node after the current
// backoff delay, doubling the delay up to maxReconnectDelay. It must be
// called with n.mu held.
func (n *nodeRunner) enableReconnectWatcher() {
	if n.stopping {
		return
	}
	n.reconnectDelay *= 2
	if n.reconnectDelay > maxReconnectDelay {
		n.reconnectDelay = maxReconnectDelay
	}
	logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
	delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
	n.cancelReconnect = cancel

	go func() {
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			// The timer was cancelled (e.g. by Stop) rather than firing.
			return
		}
		n.mu.Lock()
		defer n.mu.Unlock()
		if n.stopping {
			return
		}
		if err := n.start(n.config); err != nil {
			n.err = err
		}
	}()
}
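
// Backoff note: Start resets reconnectDelay to initialReconnectDelay (a
// package constant defined elsewhere), and each node exit doubles it before
// the restart is scheduled, so with an initial delay d the restarts fire
// after 2d, 4d, 8d, ... capped at maxReconnectDelay. Restarts through
// n.start above do not reset the delay; only an explicit Start does.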

// nodeState represents information about the current state of the cluster and
// provides access to the grpc clients.
type nodeState struct {
	swarmNode       *swarmnode.Node
	grpcConn        *grpc.ClientConn
	controlClient   swarmapi.ControlClient
	logsClient      swarmapi.LogsClient
	status          types.LocalNodeState
	actualLocalAddr string
	err             error
}

// IsActiveManager returns true if the node is a manager ready to accept
// control requests. It is safe to access the client properties if this
// returns true.
func (ns nodeState) IsActiveManager() bool {
	return ns.controlClient != nil
}

// IsManager returns true if the node is a manager.
func (ns nodeState) IsManager() bool {
	return ns.swarmNode != nil && ns.swarmNode.Manager() != nil
}

// NodeID returns the node's ID, or an empty string if the node is inactive.
func (ns nodeState) NodeID() string {
	if ns.swarmNode != nil {
		return ns.swarmNode.NodeID()
	}
	return ""
}