// dispatcher.go

package dispatcher

import (
	"fmt"
	"net"
	"strconv"
	"sync"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/transport"

	"github.com/Sirupsen/logrus"
	"github.com/docker/go-events"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/equality"
	"github.com/docker/swarmkit/ca"
	"github.com/docker/swarmkit/log"
	"github.com/docker/swarmkit/manager/state/store"
	"github.com/docker/swarmkit/remotes"
	"github.com/docker/swarmkit/watch"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

const (
	// DefaultHeartBeatPeriod is used for setting the default value in the
	// cluster config, and as a fallback in case the cluster config is missing.
	DefaultHeartBeatPeriod = 5 * time.Second

	defaultHeartBeatEpsilon = 500 * time.Millisecond
	defaultGracePeriodMultiplier = 3
	defaultRateLimitPeriod = 8 * time.Second

	// maxBatchItems is the threshold of queued writes that should
	// trigger an actual transaction to commit them to the shared store.
	maxBatchItems = 10000

	// maxBatchInterval needs to strike a balance between keeping
	// latency low, and realizing opportunities to combine many writes
	// into a single transaction. A fraction of a second feels about
	// right.
	maxBatchInterval = 100 * time.Millisecond

	modificationBatchLimit = 100
	batchingWaitTime = 100 * time.Millisecond

	// defaultNodeDownPeriod specifies the default time period we
	// wait before moving tasks assigned to down nodes to ORPHANED
	// state.
	defaultNodeDownPeriod = 24 * time.Hour
)

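// To illustrate how these limits interact (explanatory comment): queued
// task/node updates are normally flushed to the store every maxBatchInterval
// by the batch timer in Run; if maxBatchItems or more updates pile up within
// a single interval, the writers signal processUpdatesTrigger so the flush
// happens immediately instead of waiting for the timer.
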
var (
	// ErrNodeAlreadyRegistered is returned if a node with the same ID was
	// already registered with this dispatcher.
	ErrNodeAlreadyRegistered = errors.New("node already registered")
	// ErrNodeNotRegistered is returned if a node with such an ID wasn't
	// registered with this dispatcher.
	ErrNodeNotRegistered = errors.New("node not registered")
	// ErrSessionInvalid is returned when the session in use is no longer valid.
	// The node should re-register and start a new session.
	ErrSessionInvalid = errors.New("session invalid")
	// ErrNodeNotFound is returned when the node doesn't exist in raft.
	ErrNodeNotFound = errors.New("node not found")
)

// Config is the configuration for the Dispatcher. For defaults, use
// DefaultConfig.
type Config struct {
	HeartbeatPeriod time.Duration
	HeartbeatEpsilon time.Duration
	// RateLimitPeriod specifies how often a node with the same ID can try to
	// register a new session.
	RateLimitPeriod time.Duration
	GracePeriodMultiplier int
}

// DefaultConfig returns the default config for the Dispatcher.
func DefaultConfig() *Config {
	return &Config{
		HeartbeatPeriod: DefaultHeartBeatPeriod,
		HeartbeatEpsilon: defaultHeartBeatEpsilon,
		RateLimitPeriod: defaultRateLimitPeriod,
		GracePeriodMultiplier: defaultGracePeriodMultiplier,
	}
}

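// Usage sketch (illustrative only; "cluster" stands for any value satisfying
// the Cluster interface, typically a *raft.Node):
//
//	d := New(cluster, DefaultConfig())
//	// Run blocks until ctx is cancelled or Stop is called from another
//	// goroutine.
//	if err := d.Run(ctx); err != nil {
//		log.G(ctx).WithError(err).Error("dispatcher stopped")
//	}
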
// Cluster is an interface that represents the raft cluster;
// manager/state/raft.Node implements it. This interface exists only to make
// unit testing easier.
type Cluster interface {
	GetMemberlist() map[uint64]*api.RaftMember
	SubscribePeers() (chan events.Event, func())
	MemoryStore() *store.MemoryStore
}

// nodeUpdate provides a new status and/or description to apply to a node
// object.
type nodeUpdate struct {
	status *api.NodeStatus
	description *api.NodeDescription
}

// clusterUpdate is an object that stores an update to the cluster that should
// trigger a new session message. These are pointers to indicate the difference
// between "there is no update" and "update this to nil".
type clusterUpdate struct {
	managerUpdate *[]*api.WeightedPeer
	bootstrapKeyUpdate *[]*api.EncryptionKey
	rootCAUpdate *[]byte
}

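// For example (illustrative only), publishing
//
//	clusterUpdate{rootCAUpdate: &newCACert}
//
// signals "the root CA changed" while leaving managers and bootstrap keys
// untouched, whereas leaving rootCAUpdate nil means "no update to the root CA".
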
// Dispatcher is responsible for dispatching tasks and tracking agent health.
type Dispatcher struct {
	mu sync.Mutex
	wg sync.WaitGroup
	nodes *nodeStore
	store *store.MemoryStore
	lastSeenManagers []*api.WeightedPeer
	networkBootstrapKeys []*api.EncryptionKey
	lastSeenRootCert []byte
	config *Config
	cluster Cluster
	ctx context.Context
	cancel context.CancelFunc
	clusterUpdateQueue *watch.Queue

	taskUpdates map[string]*api.TaskStatus // indexed by task ID
	taskUpdatesLock sync.Mutex

	nodeUpdates map[string]nodeUpdate // indexed by node ID
	nodeUpdatesLock sync.Mutex

	downNodes *nodeStore

	processUpdatesTrigger chan struct{}

	// for waiting for the next task/node batch update
	processUpdatesLock sync.Mutex
	processUpdatesCond *sync.Cond
}

// New returns a Dispatcher using the given cluster interface (usually raft.Node).
func New(cluster Cluster, c *Config) *Dispatcher {
	d := &Dispatcher{
		nodes: newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod),
		downNodes: newNodeStore(defaultNodeDownPeriod, 0, 1, 0),
		store: cluster.MemoryStore(),
		cluster: cluster,
		processUpdatesTrigger: make(chan struct{}, 1),
		config: c,
	}

	d.processUpdatesCond = sync.NewCond(&d.processUpdatesLock)

	return d
}

func getWeightedPeers(cluster Cluster) []*api.WeightedPeer {
	members := cluster.GetMemberlist()
	var mgrs []*api.WeightedPeer
	for _, m := range members {
		mgrs = append(mgrs, &api.WeightedPeer{
			Peer: &api.Peer{
				NodeID: m.NodeID,
				Addr: m.Addr,
			},

			// TODO(stevvooe): Calculate weight of manager selection based on
			// cluster-level observations, such as number of connections and
			// load.
			Weight: remotes.DefaultObservationWeight,
		})
	}
	return mgrs
}

// Run runs the dispatcher tasks which should be run on the leader dispatcher.
// The dispatcher can be stopped by cancelling ctx or by calling Stop().
func (d *Dispatcher) Run(ctx context.Context) error {
	d.taskUpdatesLock.Lock()
	d.taskUpdates = make(map[string]*api.TaskStatus)
	d.taskUpdatesLock.Unlock()

	d.nodeUpdatesLock.Lock()
	d.nodeUpdates = make(map[string]nodeUpdate)
	d.nodeUpdatesLock.Unlock()

	d.mu.Lock()
	if d.isRunning() {
		d.mu.Unlock()
		return errors.New("dispatcher is already running")
	}
	ctx = log.WithModule(ctx, "dispatcher")
	if err := d.markNodesUnknown(ctx); err != nil {
		log.G(ctx).Errorf(`failed to move all nodes to "unknown" state: %v`, err)
	}
	configWatcher, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
			if err != nil {
				return err
			}
			if err == nil && len(clusters) == 1 {
				heartbeatPeriod, err := gogotypes.DurationFromProto(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
				if err == nil && heartbeatPeriod > 0 {
					d.config.HeartbeatPeriod = heartbeatPeriod
				}
				if clusters[0].NetworkBootstrapKeys != nil {
					d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys
				}
				d.lastSeenRootCert = clusters[0].RootCA.CACert
			}
			return nil
		},
		api.EventUpdateCluster{},
	)
	if err != nil {
		d.mu.Unlock()
		return err
	}
	// set queue here to guarantee that Close will close it
	d.clusterUpdateQueue = watch.NewQueue()

	peerWatcher, peerCancel := d.cluster.SubscribePeers()
	defer peerCancel()
	d.lastSeenManagers = getWeightedPeers(d.cluster)

	defer cancel()
	d.ctx, d.cancel = context.WithCancel(ctx)
	ctx = d.ctx
	d.wg.Add(1)
	defer d.wg.Done()
	d.mu.Unlock()

	publishManagers := func(peers []*api.Peer) {
		var mgrs []*api.WeightedPeer
		for _, p := range peers {
			mgrs = append(mgrs, &api.WeightedPeer{
				Peer: p,
				Weight: remotes.DefaultObservationWeight,
			})
		}
		d.mu.Lock()
		d.lastSeenManagers = mgrs
		d.mu.Unlock()
		d.clusterUpdateQueue.Publish(clusterUpdate{managerUpdate: &mgrs})
	}

	batchTimer := time.NewTimer(maxBatchInterval)
	defer batchTimer.Stop()

	for {
		select {
		case ev := <-peerWatcher:
			publishManagers(ev.([]*api.Peer))
		case <-d.processUpdatesTrigger:
			d.processUpdates(ctx)
			batchTimer.Reset(maxBatchInterval)
		case <-batchTimer.C:
			d.processUpdates(ctx)
			batchTimer.Reset(maxBatchInterval)
		case v := <-configWatcher:
			cluster := v.(api.EventUpdateCluster)
			d.mu.Lock()
			if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
				// ignore error, since Spec has passed validation before
				heartbeatPeriod, _ := gogotypes.DurationFromProto(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
				if heartbeatPeriod != d.config.HeartbeatPeriod {
					// only call d.nodes.updatePeriod when heartbeatPeriod changes
					d.config.HeartbeatPeriod = heartbeatPeriod
					d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier)
				}
			}
			d.lastSeenRootCert = cluster.Cluster.RootCA.CACert
			d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys
			d.mu.Unlock()
			d.clusterUpdateQueue.Publish(clusterUpdate{
				bootstrapKeyUpdate: &cluster.Cluster.NetworkBootstrapKeys,
				rootCAUpdate: &cluster.Cluster.RootCA.CACert,
			})
		case <-ctx.Done():
			return nil
		}
	}
}

// Stop stops the dispatcher and closes all grpc streams.
func (d *Dispatcher) Stop() error {
	d.mu.Lock()
	if !d.isRunning() {
		d.mu.Unlock()
		return errors.New("dispatcher is already stopped")
	}
	d.cancel()
	d.mu.Unlock()
	d.nodes.Clean()

	d.processUpdatesLock.Lock()
	// In case there are any waiters. There is no chance of any starting
	// after this point, because they check if the context is canceled
	// before waiting.
	d.processUpdatesCond.Broadcast()
	d.processUpdatesLock.Unlock()

	d.clusterUpdateQueue.Close()

	d.wg.Wait()

	return nil
}

func (d *Dispatcher) isRunningLocked() (context.Context, error) {
	d.mu.Lock()
	if !d.isRunning() {
		d.mu.Unlock()
		return nil, grpc.Errorf(codes.Aborted, "dispatcher is stopped")
	}
	ctx := d.ctx
	d.mu.Unlock()
	return ctx, nil
}

func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
	var nodes []*api.Node
	var err error
	d.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get list of nodes")
	}
	_, err = d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			err := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, n.ID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					nodeCopy := node
					expireFunc := func() {
						if err := d.moveTasksToOrphaned(nodeCopy.ID); err != nil {
							log.WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
						}

						d.downNodes.Delete(nodeCopy.ID)
					}

					d.downNodes.Add(nodeCopy, expireFunc)
					return nil
				}

				node.Status.State = api.NodeStatus_UNKNOWN
				node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster`

				nodeID := node.ID

				expireFunc := func() {
					log := log.WithField("node", nodeID)
					log.Debug("heartbeat expiration for unknown node")
					if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
						log.WithError(err).Error(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return errors.Wrap(err, `adding node in "unknown" state to node store failed`)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return errors.Wrap(err, "update failed")
				}
				return nil
			})
			if err != nil {
				log.WithField("node", n.ID).WithError(err).Error(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return err
}

func (d *Dispatcher) isRunning() bool {
	if d.ctx == nil {
		return false
	}
	select {
	case <-d.ctx.Done():
		return false
	default:
	}
	return true
}

// markNodeReady updates the description of a node, updates its address, and
// sets its status to READY. This is used during registration when a new node
// description is provided, and during node updates when the node description
// changes.
func (d *Dispatcher) markNodeReady(ctx context.Context, nodeID string, description *api.NodeDescription, addr string) error {
	d.nodeUpdatesLock.Lock()
	d.nodeUpdates[nodeID] = nodeUpdate{
		status: &api.NodeStatus{
			State: api.NodeStatus_READY,
			Addr: addr,
		},
		description: description,
	}
	numUpdates := len(d.nodeUpdates)
	d.nodeUpdatesLock.Unlock()

	// Node is marked ready. Remove the node from down nodes if it
	// is there.
	d.downNodes.Delete(nodeID)

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	// Wait until the node update batch happens before unblocking register.
	d.processUpdatesLock.Lock()
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	d.processUpdatesCond.Wait()
	d.processUpdatesLock.Unlock()

	return nil
}

// nodeIPFromContext gets the node IP from the context of a grpc call.
func nodeIPFromContext(ctx context.Context) (string, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return "", err
	}
	addr, _, err := net.SplitHostPort(nodeInfo.RemoteAddr)
	if err != nil {
		return "", errors.Wrap(err, "unable to get ip from addr:port")
	}
	return addr, nil
}

// register is used for registration of a node with a particular dispatcher.
func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) {
	// prevent register until we're ready to accept it
	dctx, err := d.isRunningLocked()
	if err != nil {
		return "", err
	}

	if err := d.nodes.CheckRateLimit(nodeID); err != nil {
		return "", err
	}

	// TODO(stevvooe): Validate node specification.
	var node *api.Node
	d.store.View(func(tx store.ReadTx) {
		node = store.GetNode(tx, nodeID)
	})
	if node == nil {
		return "", ErrNodeNotFound
	}

	addr, err := nodeIPFromContext(ctx)
	if err != nil {
		log.G(ctx).Debug(err.Error())
	}

	if err := d.markNodeReady(dctx, nodeID, description, addr); err != nil {
		return "", err
	}

	expireFunc := func() {
		log.G(ctx).Debugf("heartbeat expiration")
		if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, "heartbeat failure"); err != nil {
			log.G(ctx).WithError(err).Errorf("failed deregistering node after heartbeat expiration")
		}
	}

	rn := d.nodes.Add(node, expireFunc)

	// NOTE(stevvooe): We need to be a little careful with re-registration. The
	// current implementation just matches the node id and then gives away the
	// sessionID. If we ever want to use sessionID as a secret, which we may
	// want to, this is giving away the keys to the kitchen.
	//
	// The right behavior is going to be informed by identity. Basically, each
	// time a node registers, we invalidate the session and issue a new
	// session, once identity is proven. This will cause misbehaved agents to
	// be kicked when multiple connections are made.
	return rn.SessionID, nil
}

// UpdateTaskStatus updates the status of a task. Nodes should send such
// updates on every status change of their tasks.
func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}
	nodeID := nodeInfo.NodeID
	fields := logrus.Fields{
		"node.id": nodeID,
		"node.session": r.SessionID,
		"method": "(*Dispatcher).UpdateTaskStatus",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)

	dctx, err := d.isRunningLocked()
	if err != nil {
		return nil, err
	}

	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return nil, err
	}

	// Validate task updates
	for _, u := range r.Updates {
		if u.Status == nil {
			log.WithField("task.id", u.TaskID).Warn("task report has nil status")
			continue
		}

		var t *api.Task
		d.store.View(func(tx store.ReadTx) {
			t = store.GetTask(tx, u.TaskID)
		})
		if t == nil {
			log.WithField("task.id", u.TaskID).Warn("cannot find target task in store")
			continue
		}

		if t.NodeID != nodeID {
			err := grpc.Errorf(codes.PermissionDenied, "cannot update a task not assigned this node")
			log.WithField("task.id", u.TaskID).Error(err)
			return nil, err
		}
	}

	d.taskUpdatesLock.Lock()
	// Enqueue task updates
	for _, u := range r.Updates {
		if u.Status == nil {
			continue
		}
		d.taskUpdates[u.TaskID] = u.Status
	}

	numUpdates := len(d.taskUpdates)
	d.taskUpdatesLock.Unlock()

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-dctx.Done():
		}
	}
	return nil, nil
}

func (d *Dispatcher) processUpdates(ctx context.Context) {
	var (
		taskUpdates map[string]*api.TaskStatus
		nodeUpdates map[string]nodeUpdate
	)
	d.taskUpdatesLock.Lock()
	if len(d.taskUpdates) != 0 {
		taskUpdates = d.taskUpdates
		d.taskUpdates = make(map[string]*api.TaskStatus)
	}
	d.taskUpdatesLock.Unlock()

	d.nodeUpdatesLock.Lock()
	if len(d.nodeUpdates) != 0 {
		nodeUpdates = d.nodeUpdates
		d.nodeUpdates = make(map[string]nodeUpdate)
	}
	d.nodeUpdatesLock.Unlock()

	if len(taskUpdates) == 0 && len(nodeUpdates) == 0 {
		return
	}

	log := log.G(ctx).WithFields(logrus.Fields{
		"method": "(*Dispatcher).processUpdates",
	})

	_, err := d.store.Batch(func(batch *store.Batch) error {
		for taskID, status := range taskUpdates {
			err := batch.Update(func(tx store.Tx) error {
				logger := log.WithField("task.id", taskID)
				task := store.GetTask(tx, taskID)
				if task == nil {
					logger.Errorf("task unavailable")
					return nil
				}

				logger = logger.WithField("state.transition", fmt.Sprintf("%v->%v", task.Status.State, status.State))

				if task.Status == *status {
					logger.Debug("task status identical, ignoring")
					return nil
				}

				if task.Status.State > status.State {
					logger.Debug("task status invalid transition")
					return nil
				}

				task.Status = *status
				if err := store.UpdateTask(tx, task); err != nil {
					logger.WithError(err).Error("failed to update task status")
					return nil
				}
				logger.Debug("task status updated")
				return nil
			})
			if err != nil {
				log.WithError(err).Error("dispatcher task update transaction failed")
			}
		}

		for nodeID, nodeUpdate := range nodeUpdates {
			err := batch.Update(func(tx store.Tx) error {
				logger := log.WithField("node.id", nodeID)
				node := store.GetNode(tx, nodeID)
				if node == nil {
					logger.Errorf("node unavailable")
					return nil
				}

				if nodeUpdate.status != nil {
					node.Status.State = nodeUpdate.status.State
					node.Status.Message = nodeUpdate.status.Message
					if nodeUpdate.status.Addr != "" {
						node.Status.Addr = nodeUpdate.status.Addr
					}
				}
				if nodeUpdate.description != nil {
					node.Description = nodeUpdate.description
				}

				if err := store.UpdateNode(tx, node); err != nil {
					logger.WithError(err).Error("failed to update node status")
					return nil
				}
				logger.Debug("node status updated")
				return nil
			})
			if err != nil {
				log.WithError(err).Error("dispatcher node update transaction failed")
			}
		}

		return nil
	})
	if err != nil {
		log.WithError(err).Error("dispatcher batch failed")
	}

	d.processUpdatesCond.Broadcast()
}

// Tasks is a stream of task states for a node. Each message contains the full
// list of tasks which should be run on the node; if a task is not present in
// that list, it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id": nodeID,
		"node.session": r.SessionID,
		"method": "(*Dispatcher).Tasks",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log.G(stream.Context()).WithFields(fields).Debugf("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	tasksMap := make(map[string]*api.Task)
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}
			for _, t := range tasks {
				tasksMap[t.ID] = t
			}
			return nil
		},
		api.EventCreateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
	)
	if err != nil {
		return err
	}
	defer cancel()

	for {
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		var tasks []*api.Task
		for _, t := range tasksMap {
			// dispatcher only sends tasks that have been assigned to a node
			if t != nil && t.Status.State >= api.TaskStateAssigned {
				tasks = append(tasks, t)
			}
		}

		if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out as a snapshot
		var (
			modificationCnt int
			batchingTimer *time.Timer
			batchingTimeout <-chan time.Time
		)

	batchingLoop:
		for modificationCnt < modificationBatchLimit {
			select {
			case event := <-nodeTasks:
				switch v := event.(type) {
				case api.EventCreateTask:
					tasksMap[v.Task.ID] = v.Task
					modificationCnt++
				case api.EventUpdateTask:
					if oldTask, exists := tasksMap[v.Task.ID]; exists {
						// States ASSIGNED and below are set by the orchestrator/scheduler,
						// not the agent, so tasks in these states need to be sent to the
						// agent even if nothing else has changed.
						if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
							// this update should not trigger action at agent
							tasksMap[v.Task.ID] = v.Task
							continue
						}
					}
					tasksMap[v.Task.ID] = v.Task
					modificationCnt++
				case api.EventDeleteTask:
					delete(tasksMap, v.Task.ID)
					modificationCnt++
				}
				if batchingTimer != nil {
					batchingTimer.Reset(batchingWaitTime)
				} else {
					batchingTimer = time.NewTimer(batchingWaitTime)
					batchingTimeout = batchingTimer.C
				}
			case <-batchingTimeout:
				break batchingLoop
			case <-stream.Context().Done():
				return stream.Context().Err()
			case <-dctx.Done():
				return dctx.Err()
			}
		}

		if batchingTimer != nil {
			batchingTimer.Stop()
		}
	}
}

// Assignments is a stream of assignments for a node. Each message contains
// either the full list of tasks and secrets for the node, or an incremental
// update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id": nodeID,
		"node.session": r.SessionID,
		"method": "(*Dispatcher).Assignments",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(stream.Context()).WithFields(fields)
	log.Debugf("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	var (
		sequence int64
		appliesTo string
		assignments = newAssignmentSet(log)
	)

	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
		sequence++
		msg.AppliesTo = appliesTo
		msg.ResultsIn = strconv.FormatInt(sequence, 10)
		appliesTo = msg.ResultsIn
		msg.Type = assignmentType

		if err := stream.Send(&msg); err != nil {
			return err
		}
		return nil
	}

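	// How the sequencing above plays out (explanatory comment): the initial
	// COMPLETE message is sent with AppliesTo "" and ResultsIn "1"; the next
	// INCREMENTAL message carries AppliesTo "1" and ResultsIn "2", and so on,
	// so the agent can check that an incremental update applies to the
	// assignment set it currently holds.
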
	// TODO(aaronl): Also send node secrets that should be exposed to
	// this node.
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}

			for _, t := range tasks {
				assignments.addOrUpdateTask(readTx, t)
			}

			return nil
		},
		api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
	)
	if err != nil {
		return err
	}
	defer cancel()

	if err := sendMessage(assignments.message(), api.AssignmentsMessage_COMPLETE); err != nil {
		return err
	}

	for {
		// Check for session expiration
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out together
		var (
			modificationCnt int
			batchingTimer *time.Timer
			batchingTimeout <-chan time.Time
		)

		oneModification := func() {
			modificationCnt++

			if batchingTimer != nil {
				batchingTimer.Reset(batchingWaitTime)
			} else {
				batchingTimer = time.NewTimer(batchingWaitTime)
				batchingTimeout = batchingTimer.C
			}
		}

		// The batching loop waits for batchingWaitTime (100 ms) after the most
		// recent change, or until modificationBatchLimit is reached. The
		// worst case latency is modificationBatchLimit * batchingWaitTime,
		// which is 10 seconds.
	batchingLoop:
		for modificationCnt < modificationBatchLimit {
			select {
			case event := <-nodeTasks:
				switch v := event.(type) {
				// We don't monitor EventCreateTask because tasks are
				// never created in the ASSIGNED state. First tasks are
				// created by the orchestrator, then the scheduler moves
				// them to ASSIGNED. If this ever changes, we will need
				// to monitor task creations as well.
				case api.EventUpdateTask:
					d.store.View(func(readTx store.ReadTx) {
						if assignments.addOrUpdateTask(readTx, v.Task) {
							oneModification()
						}
					})
				case api.EventDeleteTask:
					if assignments.removeTask(v.Task) {
						oneModification()
					}
					// TODO(aaronl): For node secrets, we'll need to handle
					// EventCreateSecret.
				}
			case <-batchingTimeout:
				break batchingLoop
			case <-stream.Context().Done():
				return stream.Context().Err()
			case <-dctx.Done():
				return dctx.Err()
			}
		}

		if batchingTimer != nil {
			batchingTimer.Stop()
		}

		if modificationCnt > 0 {
			if err := sendMessage(assignments.message(), api.AssignmentsMessage_INCREMENTAL); err != nil {
				return err
			}
		}
	}
}

func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
	_, err := d.store.Batch(func(batch *store.Batch) error {
		var (
			tasks []*api.Task
			err error
		)

		d.store.View(func(tx store.ReadTx) {
			tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
		})
		if err != nil {
			return err
		}

		for _, task := range tasks {
			// Tasks running on an unreachable node need to be marked as
			// orphaned since we have no idea whether the task is still running
			// or not.
			//
			// This only applies for tasks that could have made progress since
			// the agent became unreachable (assigned<->running)
			//
			// Tasks in a final state (e.g. rejected) *cannot* have made
			// progress, therefore there's no point in marking them as orphaned
			if task.Status.State >= api.TaskStateAssigned && task.Status.State <= api.TaskStateRunning {
				task.Status.State = api.TaskStateOrphaned
			}

			if err := batch.Update(func(tx store.Tx) error {
				err := store.UpdateTask(tx, task)
				if err != nil {
					return err
				}

				return nil
			}); err != nil {
				return err
			}
		}

		return nil
	})

	return err
}

// markNodeNotReady sets the node state to some state other than READY
func (d *Dispatcher) markNodeNotReady(id string, state api.NodeStatus_State, message string) error {
	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	// Node is down. Add it to down nodes so that we can keep
	// track of tasks assigned to the node.
	var node *api.Node
	d.store.View(func(readTx store.ReadTx) {
		node = store.GetNode(readTx, id)
		if node == nil {
			err = fmt.Errorf("could not find node %s while trying to add to down nodes store", id)
		}
	})
	if err != nil {
		return err
	}

	expireFunc := func() {
		if err := d.moveTasksToOrphaned(id); err != nil {
			log.G(dctx).WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
		}

		d.downNodes.Delete(id)
	}

	d.downNodes.Add(node, expireFunc)

	status := &api.NodeStatus{
		State: state,
		Message: message,
	}

	d.nodeUpdatesLock.Lock()
	// pluck the description out of nodeUpdates. this protects against a case
	// where a node is marked ready and a description is added, but then the
	// node is immediately marked not ready. this preserves that description
	d.nodeUpdates[id] = nodeUpdate{status: status, description: d.nodeUpdates[id].description}
	numUpdates := len(d.nodeUpdates)
	d.nodeUpdatesLock.Unlock()

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-dctx.Done():
		}
	}

	if rn := d.nodes.Delete(id); rn == nil {
		return errors.Errorf("node %s is not found in local storage", id)
	}

	return nil
}

// Heartbeat is the heartbeat method for nodes. It returns the new TTL in the
// response. A node should send its next heartbeat earlier than now + TTL;
// otherwise it will be deregistered from the dispatcher and its status will be
// updated to NodeStatus_DOWN.
func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*api.HeartbeatResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}

	period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID)
	return &api.HeartbeatResponse{Period: period}, err
}

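// A corresponding agent-side loop might look like this (sketch, not part of
// this file; "client" is an assumed connected api.DispatcherClient and
// "sessionID" an assumed current session):
//
//	for {
//		resp, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: sessionID})
//		if err != nil {
//			return err
//		}
//		// Send the next heartbeat before the returned period elapses.
//		time.Sleep(resp.Period)
//	}
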
func (d *Dispatcher) getManagers() []*api.WeightedPeer {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.lastSeenManagers
}

func (d *Dispatcher) getNetworkBootstrapKeys() []*api.EncryptionKey {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.networkBootstrapKeys
}

func (d *Dispatcher) getRootCACert() []byte {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.lastSeenRootCert
}

// Session is a stream which controls the agent's connection.
// Each message contains a list of backup Managers with weights. There is also
// a special boolean field, Disconnect, which if true indicates that the node
// should reconnect to another Manager immediately.
func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error {
	ctx := stream.Context()
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	var sessionID string
	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		// register the node.
		sessionID, err = d.register(ctx, nodeID, r.Description)
		if err != nil {
			return err
		}
	} else {
		sessionID = r.SessionID
		// get the node IP addr
		addr, err := nodeIPFromContext(stream.Context())
		if err != nil {
			log.G(ctx).Debugf(err.Error())
		}
		// update the node description
		if err := d.markNodeReady(dctx, nodeID, r.Description, addr); err != nil {
			return err
		}
	}

	fields := logrus.Fields{
		"node.id": nodeID,
		"node.session": sessionID,
		"method": "(*Dispatcher).Session",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)

	var nodeObj *api.Node
	nodeUpdates, cancel, err := store.ViewAndWatch(d.store, func(readTx store.ReadTx) error {
		nodeObj = store.GetNode(readTx, nodeID)
		return nil
	}, api.EventUpdateNode{Node: &api.Node{ID: nodeID},
		Checks: []api.NodeCheckFunc{api.NodeCheckID}},
	)
	if cancel != nil {
		defer cancel()
	}

	if err != nil {
		log.WithError(err).Error("ViewAndWatch Node failed")
	}

	if _, err = d.nodes.GetWithSession(nodeID, sessionID); err != nil {
		return err
	}

	if err := stream.Send(&api.SessionMessage{
		SessionID: sessionID,
		Node: nodeObj,
		Managers: d.getManagers(),
		NetworkBootstrapKeys: d.getNetworkBootstrapKeys(),
		RootCA: d.getRootCACert(),
	}); err != nil {
		return err
	}

	clusterUpdatesCh, clusterCancel := d.clusterUpdateQueue.Watch()
	defer clusterCancel()

	// disconnectNode is a helper that forcibly shuts down the connection.
	disconnectNode := func() error {
		// force disconnect by shutting down the stream.
		transportStream, ok := transport.StreamFromContext(stream.Context())
		if ok {
			// if we have the transport stream, we can signal a disconnect
			// in the client.
			if err := transportStream.ServerTransport().Close(); err != nil {
				log.WithError(err).Error("session end")
			}
		}

		if err := d.markNodeNotReady(nodeID, api.NodeStatus_DISCONNECTED, "node is currently trying to find new manager"); err != nil {
			log.WithError(err).Error("failed to remove node")
		}
		// still return an abort if the transport closure was ineffective.
		return grpc.Errorf(codes.Aborted, "node must disconnect")
	}

	for {
		// After each message send, we need to check that the node's sessionID
		// hasn't changed. If it has, we will shut down the stream and make the
		// node re-register.
		node, err := d.nodes.GetWithSession(nodeID, sessionID)
		if err != nil {
			return err
		}

		var (
			disconnect bool
			mgrs []*api.WeightedPeer
			netKeys []*api.EncryptionKey
			rootCert []byte
		)

		select {
		case ev := <-clusterUpdatesCh:
			update := ev.(clusterUpdate)
			if update.managerUpdate != nil {
				mgrs = *update.managerUpdate
			}
			if update.bootstrapKeyUpdate != nil {
				netKeys = *update.bootstrapKeyUpdate
			}
			if update.rootCAUpdate != nil {
				rootCert = *update.rootCAUpdate
			}
		case ev := <-nodeUpdates:
			nodeObj = ev.(api.EventUpdateNode).Node
		case <-stream.Context().Done():
			return stream.Context().Err()
		case <-node.Disconnect:
			disconnect = true
		case <-dctx.Done():
			disconnect = true
		}
		if mgrs == nil {
			mgrs = d.getManagers()
		}
		if netKeys == nil {
			netKeys = d.getNetworkBootstrapKeys()
		}
		if rootCert == nil {
			rootCert = d.getRootCACert()
		}

		if err := stream.Send(&api.SessionMessage{
			SessionID: sessionID,
			Node: nodeObj,
			Managers: mgrs,
			NetworkBootstrapKeys: netKeys,
			RootCA: rootCert,
		}); err != nil {
			return err
		}
		if disconnect {
			return disconnectNode()
		}
	}
}