dispatcher.go

package dispatcher

import (
	"fmt"
	"net"
	"strconv"
	"sync"
	"time"

	"github.com/docker/go-events"
	"github.com/docker/go-metrics"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/equality"
	"github.com/docker/swarmkit/ca"
	"github.com/docker/swarmkit/log"
	"github.com/docker/swarmkit/manager/drivers"
	"github.com/docker/swarmkit/manager/state/store"
	"github.com/docker/swarmkit/protobuf/ptypes"
	"github.com/docker/swarmkit/remotes"
	"github.com/docker/swarmkit/watch"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/transport"
)

const (
	// DefaultHeartBeatPeriod is used for setting the default value in the
	// cluster config, and in case the cluster config is missing.
	DefaultHeartBeatPeriod       = 5 * time.Second
	defaultHeartBeatEpsilon      = 500 * time.Millisecond
	defaultGracePeriodMultiplier = 3
	defaultRateLimitPeriod       = 8 * time.Second

	// maxBatchItems is the threshold of queued writes that should
	// trigger an actual transaction to commit them to the shared store.
	maxBatchItems = 10000

	// maxBatchInterval needs to strike a balance between keeping
	// latency low, and realizing opportunities to combine many writes
	// into a single transaction. A fraction of a second feels about
	// right.
	maxBatchInterval = 100 * time.Millisecond

	modificationBatchLimit = 100
	batchingWaitTime       = 100 * time.Millisecond

	// defaultNodeDownPeriod specifies the default time period we
	// wait before moving tasks assigned to down nodes to ORPHANED
	// state.
	defaultNodeDownPeriod = 24 * time.Hour
)

var (
	// ErrNodeAlreadyRegistered is returned if a node with the same ID was
	// already registered with this dispatcher.
	ErrNodeAlreadyRegistered = errors.New("node already registered")
	// ErrNodeNotRegistered is returned if a node with such an ID wasn't
	// registered with this dispatcher.
	ErrNodeNotRegistered = errors.New("node not registered")
	// ErrSessionInvalid is returned when the session in use is no longer valid.
	// The node should re-register and start a new session.
	ErrSessionInvalid = errors.New("session invalid")
	// ErrNodeNotFound is returned when the Node doesn't exist in raft.
	ErrNodeNotFound = errors.New("node not found")

	// Scheduling delay timer.
	schedulingDelayTimer metrics.Timer
)

func init() {
	ns := metrics.NewNamespace("swarm", "dispatcher", nil)
	schedulingDelayTimer = ns.NewTimer("scheduling_delay",
		"Scheduling delay is the time a task takes to go from NEW to RUNNING state.")
	metrics.Register(ns)
}
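
// Note: with docker/go-metrics, a Timer registered under the
// "swarm"/"dispatcher" namespace is typically exported to Prometheus as
// swarm_dispatcher_scheduling_delay_seconds (the library appends the unit
// suffix); the exact metric name depends on the go-metrics version in use.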

// Config is configuration for the Dispatcher. For defaults, use
// DefaultConfig.
type Config struct {
	HeartbeatPeriod  time.Duration
	HeartbeatEpsilon time.Duration
	// RateLimitPeriod specifies how often a node with the same ID can try
	// to register a new session.
	RateLimitPeriod       time.Duration
	GracePeriodMultiplier int
}

// DefaultConfig returns the default config for the Dispatcher.
func DefaultConfig() *Config {
	return &Config{
		HeartbeatPeriod:       DefaultHeartBeatPeriod,
		HeartbeatEpsilon:      defaultHeartBeatEpsilon,
		RateLimitPeriod:       defaultRateLimitPeriod,
		GracePeriodMultiplier: defaultGracePeriodMultiplier,
	}
}

// Cluster is an interface that represents a raft cluster;
// manager/state/raft.Node implements it. This interface is needed only for
// easier unit testing.
type Cluster interface {
	GetMemberlist() map[uint64]*api.RaftMember
	SubscribePeers() (chan events.Event, func())
	MemoryStore() *store.MemoryStore
}

// nodeUpdate provides a new status and/or description to apply to a node
// object.
type nodeUpdate struct {
	status      *api.NodeStatus
	description *api.NodeDescription
}

// clusterUpdate is an object that stores an update to the cluster that should trigger
// a new session message. These are pointers to indicate the difference between
// "there is no update" and "update this to nil".
type clusterUpdate struct {
	managerUpdate      *[]*api.WeightedPeer
	bootstrapKeyUpdate *[]*api.EncryptionKey
	rootCAUpdate       *[]byte
}

// Dispatcher is responsible for dispatching tasks and tracking agent health.
type Dispatcher struct {
	mu                   sync.Mutex
	wg                   sync.WaitGroup
	nodes                *nodeStore
	store                *store.MemoryStore
	lastSeenManagers     []*api.WeightedPeer
	networkBootstrapKeys []*api.EncryptionKey
	lastSeenRootCert     []byte
	config               *Config
	cluster              Cluster
	ctx                  context.Context
	cancel               context.CancelFunc
	clusterUpdateQueue   *watch.Queue
	dp                   *drivers.DriverProvider
	securityConfig       *ca.SecurityConfig

	taskUpdates     map[string]*api.TaskStatus // indexed by task ID
	taskUpdatesLock sync.Mutex

	nodeUpdates     map[string]nodeUpdate // indexed by node ID
	nodeUpdatesLock sync.Mutex

	downNodes *nodeStore

	processUpdatesTrigger chan struct{}

	// for waiting for the next task/node batch update
	processUpdatesLock sync.Mutex
	processUpdatesCond *sync.Cond
}

// New returns a Dispatcher with a cluster interface (usually raft.Node).
func New(cluster Cluster, c *Config, dp *drivers.DriverProvider, securityConfig *ca.SecurityConfig) *Dispatcher {
	d := &Dispatcher{
		dp:                    dp,
		nodes:                 newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod),
		downNodes:             newNodeStore(defaultNodeDownPeriod, 0, 1, 0),
		store:                 cluster.MemoryStore(),
		cluster:               cluster,
		processUpdatesTrigger: make(chan struct{}, 1),
		config:                c,
		securityConfig:        securityConfig,
	}

	d.processUpdatesCond = sync.NewCond(&d.processUpdatesLock)

	return d
}
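
// A minimal lifecycle sketch (hypothetical wiring; cluster, driverProvider,
// and secConfig are assumed to come from the surrounding manager code):
//
//	d := dispatcher.New(cluster, dispatcher.DefaultConfig(), driverProvider, secConfig)
//	go func() {
//		if err := d.Run(ctx); err != nil {
//			log.G(ctx).WithError(err).Error("dispatcher exited")
//		}
//	}()
//	// ... on leadership loss or shutdown:
//	d.Stop()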

func getWeightedPeers(cluster Cluster) []*api.WeightedPeer {
	members := cluster.GetMemberlist()
	var mgrs []*api.WeightedPeer
	for _, m := range members {
		mgrs = append(mgrs, &api.WeightedPeer{
			Peer: &api.Peer{
				NodeID: m.NodeID,
				Addr:   m.Addr,
			},

			// TODO(stevvooe): Calculate weight of manager selection based on
			// cluster-level observations, such as number of connections and
			// load.
			Weight: remotes.DefaultObservationWeight,
		})
	}
	return mgrs
}

// Run runs dispatcher tasks which should be run on the leader dispatcher.
// The dispatcher can be stopped by cancelling ctx or calling Stop().
func (d *Dispatcher) Run(ctx context.Context) error {
	ctx = log.WithModule(ctx, "dispatcher")
	log.G(ctx).Info("dispatcher starting")

	d.taskUpdatesLock.Lock()
	d.taskUpdates = make(map[string]*api.TaskStatus)
	d.taskUpdatesLock.Unlock()

	d.nodeUpdatesLock.Lock()
	d.nodeUpdates = make(map[string]nodeUpdate)
	d.nodeUpdatesLock.Unlock()

	d.mu.Lock()
	if d.isRunning() {
		d.mu.Unlock()
		return errors.New("dispatcher is already running")
	}
	if err := d.markNodesUnknown(ctx); err != nil {
		log.G(ctx).Errorf(`failed to move all nodes to "unknown" state: %v`, err)
	}
	configWatcher, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
			if err != nil {
				return err
			}
			if len(clusters) == 1 {
				heartbeatPeriod, err := gogotypes.DurationFromProto(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
				if err == nil && heartbeatPeriod > 0 {
					d.config.HeartbeatPeriod = heartbeatPeriod
				}
				if clusters[0].NetworkBootstrapKeys != nil {
					d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys
				}
				d.lastSeenRootCert = clusters[0].RootCA.CACert
			}
			return nil
		},
		api.EventUpdateCluster{},
	)
	if err != nil {
		d.mu.Unlock()
		return err
	}
	// set queue here to guarantee that Close will close it
	d.clusterUpdateQueue = watch.NewQueue()

	peerWatcher, peerCancel := d.cluster.SubscribePeers()
	defer peerCancel()
	d.lastSeenManagers = getWeightedPeers(d.cluster)

	defer cancel()
	d.ctx, d.cancel = context.WithCancel(ctx)
	ctx = d.ctx
	d.wg.Add(1)
	defer d.wg.Done()
	d.mu.Unlock()

	publishManagers := func(peers []*api.Peer) {
		var mgrs []*api.WeightedPeer
		for _, p := range peers {
			mgrs = append(mgrs, &api.WeightedPeer{
				Peer:   p,
				Weight: remotes.DefaultObservationWeight,
			})
		}
		d.mu.Lock()
		d.lastSeenManagers = mgrs
		d.mu.Unlock()
		d.clusterUpdateQueue.Publish(clusterUpdate{managerUpdate: &mgrs})
	}

	batchTimer := time.NewTimer(maxBatchInterval)
	defer batchTimer.Stop()

	for {
		select {
		case ev := <-peerWatcher:
			publishManagers(ev.([]*api.Peer))
		case <-d.processUpdatesTrigger:
			d.processUpdates(ctx)
			batchTimer.Reset(maxBatchInterval)
		case <-batchTimer.C:
			d.processUpdates(ctx)
			batchTimer.Reset(maxBatchInterval)
		case v := <-configWatcher:
			cluster := v.(api.EventUpdateCluster)
			d.mu.Lock()
			if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
				// ignore error, since Spec has passed validation before
				heartbeatPeriod, _ := gogotypes.DurationFromProto(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
				if heartbeatPeriod != d.config.HeartbeatPeriod {
					// only call d.nodes.updatePeriod when heartbeatPeriod changes
					d.config.HeartbeatPeriod = heartbeatPeriod
					d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier)
				}
			}
			d.lastSeenRootCert = cluster.Cluster.RootCA.CACert
			d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys
			d.mu.Unlock()
			d.clusterUpdateQueue.Publish(clusterUpdate{
				bootstrapKeyUpdate: &cluster.Cluster.NetworkBootstrapKeys,
				rootCAUpdate:       &cluster.Cluster.RootCA.CACert,
			})
		case <-ctx.Done():
			return nil
		}
	}
}

// Stop stops the dispatcher and closes all grpc streams.
func (d *Dispatcher) Stop() error {
	d.mu.Lock()
	if !d.isRunning() {
		d.mu.Unlock()
		return errors.New("dispatcher is already stopped")
	}

	log := log.G(d.ctx).WithField("method", "(*Dispatcher).Stop")
	log.Info("dispatcher stopping")
	d.cancel()
	d.mu.Unlock()

	d.nodes.Clean()

	d.processUpdatesLock.Lock()
	// In case there are any waiters. There is no chance of any starting
	// after this point, because they check if the context is canceled
	// before waiting.
	d.processUpdatesCond.Broadcast()
	d.processUpdatesLock.Unlock()

	d.clusterUpdateQueue.Close()

	d.wg.Wait()

	return nil
}

func (d *Dispatcher) isRunningLocked() (context.Context, error) {
	d.mu.Lock()
	if !d.isRunning() {
		d.mu.Unlock()
		return nil, status.Errorf(codes.Aborted, "dispatcher is stopped")
	}
	ctx := d.ctx
	d.mu.Unlock()
	return ctx, nil
}

func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
	var nodes []*api.Node
	var err error
	d.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get list of nodes")
	}
	err = d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			err := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, n.ID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					nodeCopy := node
					expireFunc := func() {
						log.Infof("moving tasks to orphaned state for node: %s", nodeCopy.ID)
						if err := d.moveTasksToOrphaned(nodeCopy.ID); err != nil {
							log.WithError(err).Errorf(`failed to move all tasks for node %s to "ORPHANED" state`, node.ID)
						}

						d.downNodes.Delete(nodeCopy.ID)
					}

					log.Infof(`node %s was found to be down when marking unknown on dispatcher start`, node.ID)
					d.downNodes.Add(nodeCopy, expireFunc)
					return nil
				}

				node.Status.State = api.NodeStatus_UNKNOWN
				node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster`

				nodeID := node.ID

				expireFunc := func() {
					log := log.WithField("node", nodeID)
					log.Infof(`heartbeat expiration for node %s in state "unknown"`, nodeID)
					if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
						log.WithError(err).Error(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return errors.Wrapf(err, `adding node %s in "unknown" state to node store failed`, nodeID)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return errors.Wrapf(err, "update for node %s failed", nodeID)
				}
				return nil
			})
			if err != nil {
				log.WithField("node", n.ID).WithError(err).Error(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return err
}

func (d *Dispatcher) isRunning() bool {
	if d.ctx == nil {
		return false
	}
	select {
	case <-d.ctx.Done():
		return false
	default:
	}
	return true
}

// markNodeReady updates the description of a node, updates its address, and
// sets its status to READY. This is used during registration when a new node
// description is provided, and during node updates when the node description
// changes.
func (d *Dispatcher) markNodeReady(ctx context.Context, nodeID string, description *api.NodeDescription, addr string) error {
	d.nodeUpdatesLock.Lock()
	d.nodeUpdates[nodeID] = nodeUpdate{
		status: &api.NodeStatus{
			State: api.NodeStatus_READY,
			Addr:  addr,
		},
		description: description,
	}
	numUpdates := len(d.nodeUpdates)
	d.nodeUpdatesLock.Unlock()

	// Node is marked ready. Remove the node from down nodes if it
	// is there.
	d.downNodes.Delete(nodeID)

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	// Wait until the node update batch happens before unblocking register.
	d.processUpdatesLock.Lock()
	defer d.processUpdatesLock.Unlock()

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
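
	// Block until processUpdates commits the pending batch and broadcasts on
	// processUpdatesCond; this keeps registration from returning before the
	// node's READY status has been written to the store.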
	d.processUpdatesCond.Wait()

	return nil
}

// nodeIPFromContext gets the node IP from the context of a grpc call.
func nodeIPFromContext(ctx context.Context) (string, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return "", err
	}
	addr, _, err := net.SplitHostPort(nodeInfo.RemoteAddr)
	if err != nil {
		return "", errors.Wrap(err, "unable to get ip from addr:port")
	}
	return addr, nil
}
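
// For example, a RemoteAddr of "10.0.0.5:43215" yields "10.0.0.5". If the
// address has no port (or is otherwise unparseable), SplitHostPort fails;
// register then logs the error and proceeds with an empty address, which
// never overwrites a previously stored address in processUpdates.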

// register is used for registration of a node with a particular dispatcher.
func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) {
	logLocal := log.G(ctx).WithField("method", "(*Dispatcher).register")
	// prevent register until we're ready to accept it
	dctx, err := d.isRunningLocked()
	if err != nil {
		return "", err
	}

	if err := d.nodes.CheckRateLimit(nodeID); err != nil {
		return "", err
	}

	// TODO(stevvooe): Validate node specification.
	var node *api.Node
	d.store.View(func(tx store.ReadTx) {
		node = store.GetNode(tx, nodeID)
	})
	if node == nil {
		return "", ErrNodeNotFound
	}

	addr, err := nodeIPFromContext(ctx)
	if err != nil {
		logLocal.WithError(err).Debug("failed to get remote node IP")
	}

	if err := d.markNodeReady(dctx, nodeID, description, addr); err != nil {
		return "", err
	}

	expireFunc := func() {
		log.G(ctx).Debugf("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN", nodeID)
		if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, "heartbeat failure"); err != nil {
			log.G(ctx).WithError(err).Error("failed deregistering node after heartbeat expiration")
		}
	}

	rn := d.nodes.Add(node, expireFunc)
	logLocal.Infof("worker %s was successfully registered", nodeID)

	// NOTE(stevvooe): We need to be a little careful with re-registration. The
	// current implementation just matches the node id and then gives away the
	// sessionID. If we ever want to use sessionID as a secret, which we may
	// want to, this is giving away the keys to the kitchen.
	//
	// The right behavior is going to be informed by identity. Basically, each
	// time a node registers, we invalidate the session and issue a new
	// session, once identity is proven. This will cause misbehaved agents to
	// be kicked when multiple connections are made.
	return rn.SessionID, nil
}

// UpdateTaskStatus updates the status of a task. A node should send such
// updates on every status change of its tasks.
func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}
	nodeID := nodeInfo.NodeID
	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).UpdateTaskStatus",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)

	dctx, err := d.isRunningLocked()
	if err != nil {
		return nil, err
	}

	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return nil, err
	}

	validTaskUpdates := make([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, 0, len(r.Updates))

	// Validate task updates
	for _, u := range r.Updates {
		if u.Status == nil {
			log.WithField("task.id", u.TaskID).Warn("task report has nil status")
			continue
		}

		var t *api.Task
		d.store.View(func(tx store.ReadTx) {
			t = store.GetTask(tx, u.TaskID)
		})
		if t == nil {
			// Task may have been deleted
			log.WithField("task.id", u.TaskID).Debug("cannot find target task in store")
			continue
		}

		if t.NodeID != nodeID {
			err := status.Errorf(codes.PermissionDenied, "cannot update a task not assigned to this node")
			log.WithField("task.id", u.TaskID).Error(err)
			return nil, err
		}

		validTaskUpdates = append(validTaskUpdates, u)
	}

	d.taskUpdatesLock.Lock()
	// Enqueue task updates
	for _, u := range validTaskUpdates {
		d.taskUpdates[u.TaskID] = u.Status
	}

	numUpdates := len(d.taskUpdates)
	d.taskUpdatesLock.Unlock()

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-dctx.Done():
		}
	}
	return nil, nil
}

func (d *Dispatcher) processUpdates(ctx context.Context) {
	var (
		taskUpdates map[string]*api.TaskStatus
		nodeUpdates map[string]nodeUpdate
	)

	d.taskUpdatesLock.Lock()
	if len(d.taskUpdates) != 0 {
		taskUpdates = d.taskUpdates
		d.taskUpdates = make(map[string]*api.TaskStatus)
	}
	d.taskUpdatesLock.Unlock()

	d.nodeUpdatesLock.Lock()
	if len(d.nodeUpdates) != 0 {
		nodeUpdates = d.nodeUpdates
		d.nodeUpdates = make(map[string]nodeUpdate)
	}
	d.nodeUpdatesLock.Unlock()

	if len(taskUpdates) == 0 && len(nodeUpdates) == 0 {
		return
	}

	log := log.G(ctx).WithFields(logrus.Fields{
		"method": "(*Dispatcher).processUpdates",
	})

	err := d.store.Batch(func(batch *store.Batch) error {
		for taskID, status := range taskUpdates {
			err := batch.Update(func(tx store.Tx) error {
				logger := log.WithField("task.id", taskID)
				task := store.GetTask(tx, taskID)
				if task == nil {
					// Task may have been deleted
					logger.Debug("cannot find target task in store")
					return nil
				}

				logger = logger.WithField("state.transition", fmt.Sprintf("%v->%v", task.Status.State, status.State))

				if task.Status == *status {
					logger.Debug("task status identical, ignoring")
					return nil
				}

				if task.Status.State > status.State {
					logger.Debug("task status invalid transition")
					return nil
				}

				// Update the scheduling delay metric for running tasks.
				// We use the status update time on the leader to calculate the scheduling delay.
				// Because of this, the recorded scheduling delay will be an overestimate and include
				// the network delay between the worker and the leader.
				// This is not ideal, but it's a known overestimation, rather than using the status update time
				// from the worker node, which may cause unknown incorrect results due to possible clock skew.
				if status.State == api.TaskStateRunning {
					start := time.Unix(status.AppliedAt.GetSeconds(), int64(status.AppliedAt.GetNanos()))
					schedulingDelayTimer.UpdateSince(start)
				}

				task.Status = *status
				task.Status.AppliedBy = d.securityConfig.ClientTLSCreds.NodeID()
				task.Status.AppliedAt = ptypes.MustTimestampProto(time.Now())
				if err := store.UpdateTask(tx, task); err != nil {
					logger.WithError(err).Error("failed to update task status")
					return nil
				}
				logger.Debug("dispatcher committed status update to store")
				return nil
			})
			if err != nil {
				log.WithError(err).Error("dispatcher task update transaction failed")
			}
		}

		for nodeID, nodeUpdate := range nodeUpdates {
			err := batch.Update(func(tx store.Tx) error {
				logger := log.WithField("node.id", nodeID)
				node := store.GetNode(tx, nodeID)
				if node == nil {
					logger.Error("node unavailable")
					return nil
				}

				if nodeUpdate.status != nil {
					node.Status.State = nodeUpdate.status.State
					node.Status.Message = nodeUpdate.status.Message
					if nodeUpdate.status.Addr != "" {
						node.Status.Addr = nodeUpdate.status.Addr
					}
				}
				if nodeUpdate.description != nil {
					node.Description = nodeUpdate.description
				}

				if err := store.UpdateNode(tx, node); err != nil {
					logger.WithError(err).Error("failed to update node status")
					return nil
				}
				logger.Debug("node status updated")
				return nil
			})
			if err != nil {
				log.WithError(err).Error("dispatcher node update transaction failed")
			}
		}
		return nil
	})
	if err != nil {
		log.WithError(err).Error("dispatcher batch failed")
	}
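
	// Wake up any callers blocked in markNodeReady waiting for this batch
	// to be committed.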
	d.processUpdatesCond.Broadcast()
}

// Tasks is a stream of task states for a node. Each message contains the full
// list of tasks which should be run on the node; if a task is not present in
// that list, it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).Tasks",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log.G(stream.Context()).WithFields(fields).Debug("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	tasksMap := make(map[string]*api.Task)
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}
			for _, t := range tasks {
				tasksMap[t.ID] = t
			}
			return nil
		},
		api.EventCreateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
	)
	if err != nil {
		return err
	}
	defer cancel()

	for {
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		var tasks []*api.Task
		for _, t := range tasksMap {
			// dispatcher only sends tasks that have been assigned to a node
			if t != nil && t.Status.State >= api.TaskStateAssigned {
				tasks = append(tasks, t)
			}
		}

		if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out as a
		// single snapshot
		var (
			modificationCnt int
			batchingTimer   *time.Timer
			batchingTimeout <-chan time.Time
		)

	batchingLoop:
		for modificationCnt < modificationBatchLimit {
			select {
			case event := <-nodeTasks:
				switch v := event.(type) {
				case api.EventCreateTask:
					tasksMap[v.Task.ID] = v.Task
					modificationCnt++
				case api.EventUpdateTask:
					if oldTask, exists := tasksMap[v.Task.ID]; exists {
						// States ASSIGNED and below are set by the orchestrator/scheduler,
						// not the agent, so tasks in these states need to be sent to the
						// agent even if nothing else has changed.
						if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
							// this update should not trigger action at agent
							tasksMap[v.Task.ID] = v.Task
							continue
						}
					}
					tasksMap[v.Task.ID] = v.Task
					modificationCnt++
				case api.EventDeleteTask:
					delete(tasksMap, v.Task.ID)
					modificationCnt++
				}
				if batchingTimer != nil {
					batchingTimer.Reset(batchingWaitTime)
				} else {
					batchingTimer = time.NewTimer(batchingWaitTime)
					batchingTimeout = batchingTimer.C
				}
			case <-batchingTimeout:
				break batchingLoop
			case <-stream.Context().Done():
				return stream.Context().Err()
			case <-dctx.Done():
				return dctx.Err()
			}
		}

		if batchingTimer != nil {
			batchingTimer.Stop()
		}
	}
}

// Assignments is a stream of assignments for a node. Each message contains
// either the full list of tasks and secrets for the node, or an incremental
// update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).Assignments",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(stream.Context()).WithFields(fields)
	log.Debug("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	var (
		sequence    int64
		appliesTo   string
		assignments = newAssignmentSet(log, d.dp)
	)
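
	// sendMessage stamps each outgoing message with a sequence number:
	// ResultsIn names the state this message produces, and AppliesTo names
	// the state it must be applied on top of, so the agent can detect a
	// missed incremental update.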
	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
		sequence++
		msg.AppliesTo = appliesTo
		msg.ResultsIn = strconv.FormatInt(sequence, 10)
		appliesTo = msg.ResultsIn
		msg.Type = assignmentType

		return stream.Send(&msg)
	}

	// TODO(aaronl): Also send node secrets that should be exposed to
	// this node.
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}
			for _, t := range tasks {
				assignments.addOrUpdateTask(readTx, t)
			}
			return nil
		},
		api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
	)
	if err != nil {
		return err
	}
	defer cancel()

	if err := sendMessage(assignments.message(), api.AssignmentsMessage_COMPLETE); err != nil {
		return err
	}

	for {
		// Check for session expiration
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out together
		var (
			modificationCnt int
			batchingTimer   *time.Timer
			batchingTimeout <-chan time.Time
		)

		oneModification := func() {
			modificationCnt++

			if batchingTimer != nil {
				batchingTimer.Reset(batchingWaitTime)
			} else {
				batchingTimer = time.NewTimer(batchingWaitTime)
				batchingTimeout = batchingTimer.C
			}
		}

		// The batching loop waits for batchingWaitTime (100 ms) after the
		// most recent change, or until modificationBatchLimit is reached.
		// The worst case latency is modificationBatchLimit * batchingWaitTime,
		// which is 10 seconds.
	batchingLoop:
		for modificationCnt < modificationBatchLimit {
			select {
			case event := <-nodeTasks:
				switch v := event.(type) {
				// We don't monitor EventCreateTask because tasks are
				// never created in the ASSIGNED state. First tasks are
				// created by the orchestrator, then the scheduler moves
				// them to ASSIGNED. If this ever changes, we will need
				// to monitor task creations as well.
				case api.EventUpdateTask:
					d.store.View(func(readTx store.ReadTx) {
						if assignments.addOrUpdateTask(readTx, v.Task) {
							oneModification()
						}
					})
				case api.EventDeleteTask:
					if assignments.removeTask(v.Task) {
						oneModification()
					}
					// TODO(aaronl): For node secrets, we'll need to handle
					// EventCreateSecret.
				}
			case <-batchingTimeout:
				break batchingLoop
			case <-stream.Context().Done():
				return stream.Context().Err()
			case <-dctx.Done():
				return dctx.Err()
			}
		}

		if batchingTimer != nil {
			batchingTimer.Stop()
		}

		if modificationCnt > 0 {
			if err := sendMessage(assignments.message(), api.AssignmentsMessage_INCREMENTAL); err != nil {
				return err
			}
		}
	}
}

func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
	err := d.store.Batch(func(batch *store.Batch) error {
		var (
			tasks []*api.Task
			err   error
		)

		d.store.View(func(tx store.ReadTx) {
			tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
		})
		if err != nil {
			return err
		}

		for _, task := range tasks {
			// Tasks running on an unreachable node need to be marked as
			// orphaned since we have no idea whether the task is still running
			// or not.
			//
			// This only applies for tasks that could have made progress since
			// the agent became unreachable (assigned<->running)
			//
			// Tasks in a final state (e.g. rejected) *cannot* have made
			// progress, therefore there's no point in marking them as orphaned
			if task.Status.State >= api.TaskStateAssigned && task.Status.State <= api.TaskStateRunning {
				task.Status.State = api.TaskStateOrphaned
			}

			if err := batch.Update(func(tx store.Tx) error {
				return store.UpdateTask(tx, task)
			}); err != nil {
				return err
			}
		}

		return nil
	})

	return err
}

// markNodeNotReady sets the node state to some state other than READY.
func (d *Dispatcher) markNodeNotReady(id string, state api.NodeStatus_State, message string) error {
	logLocal := log.G(d.ctx).WithField("method", "(*Dispatcher).markNodeNotReady")

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	// Node is down. Add it to down nodes so that we can keep
	// track of tasks assigned to the node.
	var node *api.Node
	d.store.View(func(readTx store.ReadTx) {
		node = store.GetNode(readTx, id)
		if node == nil {
			err = fmt.Errorf("could not find node %s while trying to add to down nodes store", id)
		}
	})
	if err != nil {
		return err
	}

	expireFunc := func() {
		log.G(dctx).Debugf(`worker %s timed out in "down" state, moving all tasks to "ORPHANED" state`, id)
		if err := d.moveTasksToOrphaned(id); err != nil {
			log.G(dctx).WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
		}

		d.downNodes.Delete(id)
	}

	d.downNodes.Add(node, expireFunc)
	logLocal.Debugf("added node %s to down nodes list", node.ID)

	status := &api.NodeStatus{
		State:   state,
		Message: message,
	}

	d.nodeUpdatesLock.Lock()
	// pluck the description out of nodeUpdates. this protects against a case
	// where a node is marked ready and a description is added, but then the
	// node is immediately marked not ready. this preserves that description
	d.nodeUpdates[id] = nodeUpdate{status: status, description: d.nodeUpdates[id].description}
	numUpdates := len(d.nodeUpdates)
	d.nodeUpdatesLock.Unlock()

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-dctx.Done():
		}
	}

	if rn := d.nodes.Delete(id); rn == nil {
		return errors.Errorf("node %s is not found in local storage", id)
	}
	logLocal.Debugf("deleted node %s from node store", node.ID)

	return nil
}

// Heartbeat is the heartbeat method for nodes. It returns a new TTL in the
// response. A node should send its next heartbeat earlier than now + TTL,
// otherwise it will be deregistered from the dispatcher and its status will
// be updated to NodeStatus_DOWN.
func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*api.HeartbeatResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}

	period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID)

	log.G(ctx).WithField("method", "(*Dispatcher).Heartbeat").Debugf("received heartbeat from worker %v, expect next heartbeat in %v", nodeInfo, period)
	return &api.HeartbeatResponse{Period: period}, err
}

func (d *Dispatcher) getManagers() []*api.WeightedPeer {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.lastSeenManagers
}

func (d *Dispatcher) getNetworkBootstrapKeys() []*api.EncryptionKey {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.networkBootstrapKeys
}

func (d *Dispatcher) getRootCACert() []byte {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.lastSeenRootCert
}

// Session is a stream which controls the agent connection.
// Each message contains a list of backup Managers with weights. There is also
// a special boolean field Disconnect which, if true, indicates that the node
// should reconnect to another Manager immediately.
func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error {
	ctx := stream.Context()
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	var sessionID string
	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		// register the node.
		sessionID, err = d.register(ctx, nodeID, r.Description)
		if err != nil {
			return err
		}
	} else {
		sessionID = r.SessionID
		// get the node IP addr
		addr, err := nodeIPFromContext(stream.Context())
		if err != nil {
			log.G(ctx).WithError(err).Debug("failed to get remote node IP")
		}
		// update the node description
		if err := d.markNodeReady(dctx, nodeID, r.Description, addr); err != nil {
			return err
		}
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": sessionID,
		"method":       "(*Dispatcher).Session",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)

	var nodeObj *api.Node
	nodeUpdates, cancel, err := store.ViewAndWatch(d.store, func(readTx store.ReadTx) error {
		nodeObj = store.GetNode(readTx, nodeID)
		return nil
	}, api.EventUpdateNode{Node: &api.Node{ID: nodeID},
		Checks: []api.NodeCheckFunc{api.NodeCheckID}},
	)
	if cancel != nil {
		defer cancel()
	}

	if err != nil {
		log.WithError(err).Error("ViewAndWatch Node failed")
	}

	if _, err = d.nodes.GetWithSession(nodeID, sessionID); err != nil {
		return err
	}

	clusterUpdatesCh, clusterCancel := d.clusterUpdateQueue.Watch()
	defer clusterCancel()

	if err := stream.Send(&api.SessionMessage{
		SessionID:            sessionID,
		Node:                 nodeObj,
		Managers:             d.getManagers(),
		NetworkBootstrapKeys: d.getNetworkBootstrapKeys(),
		RootCA:               d.getRootCACert(),
	}); err != nil {
		return err
	}

	// disconnectNode is a helper that forcibly shuts down the connection.
	disconnectNode := func() error {
		// force disconnect by shutting down the stream.
		transportStream, ok := transport.StreamFromContext(stream.Context())
		if ok {
			// if we have the transport stream, we can signal a disconnect
			// in the client.
			if err := transportStream.ServerTransport().Close(); err != nil {
				log.WithError(err).Error("session end")
			}
		}

		log.Infof("dispatcher session dropped, marking node %s down", nodeID)
		if err := d.markNodeNotReady(nodeID, api.NodeStatus_DISCONNECTED, "node is currently trying to find new manager"); err != nil {
			log.WithError(err).Error("failed to remove node")
		}
		// still return an abort if the transport closure was ineffective.
		return status.Errorf(codes.Aborted, "node must disconnect")
	}

	for {
		// After each message send, we need to check that the node's sessionID
		// hasn't changed. If it has, we will shut down the stream and make
		// the node re-register.
		node, err := d.nodes.GetWithSession(nodeID, sessionID)
		if err != nil {
			return err
		}

		var (
			disconnect bool
			mgrs       []*api.WeightedPeer
			netKeys    []*api.EncryptionKey
			rootCert   []byte
		)

		select {
		case ev := <-clusterUpdatesCh:
			update := ev.(clusterUpdate)
			if update.managerUpdate != nil {
				mgrs = *update.managerUpdate
			}
			if update.bootstrapKeyUpdate != nil {
				netKeys = *update.bootstrapKeyUpdate
			}
			if update.rootCAUpdate != nil {
				rootCert = *update.rootCAUpdate
			}
		case ev := <-nodeUpdates:
			nodeObj = ev.(api.EventUpdateNode).Node
		case <-stream.Context().Done():
			return stream.Context().Err()
		case <-node.Disconnect:
			disconnect = true
		case <-dctx.Done():
			disconnect = true
		}
		if mgrs == nil {
			mgrs = d.getManagers()
		}
		if netKeys == nil {
			netKeys = d.getNetworkBootstrapKeys()
		}
		if rootCert == nil {
			rootCert = d.getRootCACert()
		}

		if err := stream.Send(&api.SessionMessage{
			SessionID:            sessionID,
			Node:                 nodeObj,
			Managers:             mgrs,
			NetworkBootstrapKeys: netKeys,
			RootCA:               rootCert,
		}); err != nil {
			return err
		}
		if disconnect {
			return disconnectNode()
		}
	}
}