dispatcher.go

package dispatcher

import (
	"fmt"
	"net"
	"strconv"
	"sync"
	"time"

	"github.com/docker/go-events"
	"github.com/docker/go-metrics"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/equality"
	"github.com/docker/swarmkit/ca"
	"github.com/docker/swarmkit/log"
	"github.com/docker/swarmkit/manager/drivers"
	"github.com/docker/swarmkit/manager/state/store"
	"github.com/docker/swarmkit/protobuf/ptypes"
	"github.com/docker/swarmkit/remotes"
	"github.com/docker/swarmkit/watch"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/transport"
)

const (
	// DefaultHeartBeatPeriod is used as the default heartbeat period in the
	// cluster config, and as a fallback when the cluster config is missing.
	DefaultHeartBeatPeriod       = 5 * time.Second
	defaultHeartBeatEpsilon      = 500 * time.Millisecond
	defaultGracePeriodMultiplier = 3
	defaultRateLimitPeriod       = 8 * time.Second

	// maxBatchItems is the threshold of queued writes that should
	// trigger an actual transaction to commit them to the shared store.
	maxBatchItems = 10000

	// maxBatchInterval needs to strike a balance between keeping
	// latency low, and realizing opportunities to combine many writes
	// into a single transaction. A fraction of a second feels about
	// right.
	maxBatchInterval = 100 * time.Millisecond

	modificationBatchLimit = 100
	batchingWaitTime       = 100 * time.Millisecond

	// defaultNodeDownPeriod specifies the default time period we
	// wait before moving tasks assigned to down nodes to ORPHANED
	// state.
	defaultNodeDownPeriod = 24 * time.Hour
)

var (
	// ErrNodeAlreadyRegistered is returned if a node with the same ID was
	// already registered with this dispatcher.
	ErrNodeAlreadyRegistered = errors.New("node already registered")
	// ErrNodeNotRegistered is returned if a node with such an ID wasn't
	// registered with this dispatcher.
	ErrNodeNotRegistered = errors.New("node not registered")
	// ErrSessionInvalid is returned when the session in use is no longer valid.
	// The node should re-register and start a new session.
	ErrSessionInvalid = errors.New("session invalid")
	// ErrNodeNotFound is returned when the Node doesn't exist in raft.
	ErrNodeNotFound = errors.New("node not found")

	// Scheduling delay timer.
	schedulingDelayTimer metrics.Timer
)

func init() {
	ns := metrics.NewNamespace("swarm", "dispatcher", nil)
	schedulingDelayTimer = ns.NewTimer("scheduling_delay",
		"Scheduling delay is the time a task takes to go from NEW to RUNNING state.")
	metrics.Register(ns)
}

// Config is the configuration for the Dispatcher. For defaults, use
// DefaultConfig.
type Config struct {
	HeartbeatPeriod  time.Duration
	HeartbeatEpsilon time.Duration
	// RateLimitPeriod specifies how often a node with the same ID can try to
	// register a new session.
	RateLimitPeriod       time.Duration
	GracePeriodMultiplier int
}

// DefaultConfig returns the default config for the Dispatcher.
func DefaultConfig() *Config {
	return &Config{
		HeartbeatPeriod:       DefaultHeartBeatPeriod,
		HeartbeatEpsilon:      defaultHeartBeatEpsilon,
		RateLimitPeriod:       defaultRateLimitPeriod,
		GracePeriodMultiplier: defaultGracePeriodMultiplier,
	}
}

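// As a minimal sketch (not part of the upstream code), a caller wanting a
// longer heartbeat window could start from DefaultConfig and override fields
// before handing the config to New:
//
//	cfg := DefaultConfig()
//	cfg.HeartbeatPeriod = 10 * time.Second
//	cfg.GracePeriodMultiplier = 4
//	d := New(cluster, cfg, driverProvider, securityConfig)
//
// Here cluster, driverProvider, and securityConfig are assumed to be supplied
// by the surrounding manager setup; they are not defined in this file.
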
// Cluster is an interface that represents a raft cluster. manager/state/raft.Node
// implements it. The interface exists only to make unit testing easier.
type Cluster interface {
	GetMemberlist() map[uint64]*api.RaftMember
	SubscribePeers() (chan events.Event, func())
	MemoryStore() *store.MemoryStore
}

// nodeUpdate provides a new status and/or description to apply to a node
// object.
type nodeUpdate struct {
	status      *api.NodeStatus
	description *api.NodeDescription
}

// clusterUpdate is an object that stores an update to the cluster that should trigger
// a new session message. These are pointers to indicate the difference between
// "there is no update" and "update this to nil".
type clusterUpdate struct {
	managerUpdate      *[]*api.WeightedPeer
	bootstrapKeyUpdate *[]*api.EncryptionKey
	rootCAUpdate       *[]byte
}

// Dispatcher is responsible for dispatching tasks and tracking agent health.
type Dispatcher struct {
	mu                   sync.Mutex
	wg                   sync.WaitGroup
	nodes                *nodeStore
	store                *store.MemoryStore
	lastSeenManagers     []*api.WeightedPeer
	networkBootstrapKeys []*api.EncryptionKey
	lastSeenRootCert     []byte
	config               *Config
	cluster              Cluster
	ctx                  context.Context
	cancel               context.CancelFunc
	clusterUpdateQueue   *watch.Queue
	dp                   *drivers.DriverProvider
	securityConfig       *ca.SecurityConfig

	taskUpdates     map[string]*api.TaskStatus // indexed by task ID
	taskUpdatesLock sync.Mutex

	nodeUpdates     map[string]nodeUpdate // indexed by node ID
	nodeUpdatesLock sync.Mutex

	downNodes *nodeStore

	processUpdatesTrigger chan struct{}

	// for waiting for the next task/node batch update
	processUpdatesLock sync.Mutex
	processUpdatesCond *sync.Cond
}

// New returns a Dispatcher using the given Cluster interface (usually a raft.Node).
func New(cluster Cluster, c *Config, dp *drivers.DriverProvider, securityConfig *ca.SecurityConfig) *Dispatcher {
	d := &Dispatcher{
		dp:                    dp,
		nodes:                 newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod),
		downNodes:             newNodeStore(defaultNodeDownPeriod, 0, 1, 0),
		store:                 cluster.MemoryStore(),
		cluster:               cluster,
		processUpdatesTrigger: make(chan struct{}, 1),
		config:                c,
		securityConfig:        securityConfig,
	}

	d.processUpdatesCond = sync.NewCond(&d.processUpdatesLock)

	return d
}

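// A minimal usage sketch (an assumption about the caller, not code from this
// package): the manager that wins leadership would typically construct the
// dispatcher and run it until leadership is lost, for example:
//
//	d := New(cluster, DefaultConfig(), driverProvider, securityConfig)
//	go func() {
//		if err := d.Run(ctx); err != nil {
//			log.G(ctx).WithError(err).Error("dispatcher stopped")
//		}
//	}()
//	// ... on leadership loss or shutdown:
//	d.Stop()
//
// cluster, driverProvider, securityConfig, and ctx are hypothetical values
// supplied by the surrounding manager code.
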
func getWeightedPeers(cluster Cluster) []*api.WeightedPeer {
	members := cluster.GetMemberlist()
	var mgrs []*api.WeightedPeer
	for _, m := range members {
		mgrs = append(mgrs, &api.WeightedPeer{
			Peer: &api.Peer{
				NodeID: m.NodeID,
				Addr:   m.Addr,
			},

			// TODO(stevvooe): Calculate weight of manager selection based on
			// cluster-level observations, such as number of connections and
			// load.
			Weight: remotes.DefaultObservationWeight,
		})
	}
	return mgrs
}

// Run runs the dispatcher tasks which should be run on the leader dispatcher.
// The dispatcher can be stopped by cancelling ctx or by calling Stop().
func (d *Dispatcher) Run(ctx context.Context) error {
	d.taskUpdatesLock.Lock()
	d.taskUpdates = make(map[string]*api.TaskStatus)
	d.taskUpdatesLock.Unlock()

	d.nodeUpdatesLock.Lock()
	d.nodeUpdates = make(map[string]nodeUpdate)
	d.nodeUpdatesLock.Unlock()

	d.mu.Lock()
	if d.isRunning() {
		d.mu.Unlock()
		return errors.New("dispatcher is already running")
	}
	ctx = log.WithModule(ctx, "dispatcher")
	if err := d.markNodesUnknown(ctx); err != nil {
		log.G(ctx).Errorf(`failed to move all nodes to "unknown" state: %v`, err)
	}
	configWatcher, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
			if err != nil {
				return err
			}
			if err == nil && len(clusters) == 1 {
				heartbeatPeriod, err := gogotypes.DurationFromProto(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
				if err == nil && heartbeatPeriod > 0 {
					d.config.HeartbeatPeriod = heartbeatPeriod
				}
				if clusters[0].NetworkBootstrapKeys != nil {
					d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys
				}
				d.lastSeenRootCert = clusters[0].RootCA.CACert
			}
			return nil
		},
		api.EventUpdateCluster{},
	)
	if err != nil {
		d.mu.Unlock()
		return err
	}
	// set queue here to guarantee that Close will close it
	d.clusterUpdateQueue = watch.NewQueue()

	peerWatcher, peerCancel := d.cluster.SubscribePeers()
	defer peerCancel()
	d.lastSeenManagers = getWeightedPeers(d.cluster)

	defer cancel()
	d.ctx, d.cancel = context.WithCancel(ctx)
	ctx = d.ctx
	d.wg.Add(1)
	defer d.wg.Done()
	d.mu.Unlock()

	publishManagers := func(peers []*api.Peer) {
		var mgrs []*api.WeightedPeer
		for _, p := range peers {
			mgrs = append(mgrs, &api.WeightedPeer{
				Peer:   p,
				Weight: remotes.DefaultObservationWeight,
			})
		}
		d.mu.Lock()
		d.lastSeenManagers = mgrs
		d.mu.Unlock()
		d.clusterUpdateQueue.Publish(clusterUpdate{managerUpdate: &mgrs})
	}

	batchTimer := time.NewTimer(maxBatchInterval)
	defer batchTimer.Stop()

	for {
		select {
		case ev := <-peerWatcher:
			publishManagers(ev.([]*api.Peer))
		case <-d.processUpdatesTrigger:
			d.processUpdates(ctx)
			batchTimer.Reset(maxBatchInterval)
		case <-batchTimer.C:
			d.processUpdates(ctx)
			batchTimer.Reset(maxBatchInterval)
		case v := <-configWatcher:
			cluster := v.(api.EventUpdateCluster)
			d.mu.Lock()
			if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
				// ignore error, since Spec has passed validation before
				heartbeatPeriod, _ := gogotypes.DurationFromProto(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
				if heartbeatPeriod != d.config.HeartbeatPeriod {
					// only call d.nodes.updatePeriod when heartbeatPeriod changes
					d.config.HeartbeatPeriod = heartbeatPeriod
					d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier)
				}
			}
			d.lastSeenRootCert = cluster.Cluster.RootCA.CACert
			d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys
			d.mu.Unlock()
			d.clusterUpdateQueue.Publish(clusterUpdate{
				bootstrapKeyUpdate: &cluster.Cluster.NetworkBootstrapKeys,
				rootCAUpdate:       &cluster.Cluster.RootCA.CACert,
			})
		case <-ctx.Done():
			return nil
		}
	}
}

// Stop stops dispatcher and closes all grpc streams.
func (d *Dispatcher) Stop() error {
	d.mu.Lock()
	if !d.isRunning() {
		d.mu.Unlock()
		return errors.New("dispatcher is already stopped")
	}
	d.cancel()
	d.mu.Unlock()
	d.nodes.Clean()

	d.processUpdatesLock.Lock()
	// In case there are any waiters. There is no chance of any starting
	// after this point, because they check if the context is canceled
	// before waiting.
	d.processUpdatesCond.Broadcast()
	d.processUpdatesLock.Unlock()

	d.clusterUpdateQueue.Close()

	d.wg.Wait()

	return nil
}

func (d *Dispatcher) isRunningLocked() (context.Context, error) {
	d.mu.Lock()
	if !d.isRunning() {
		d.mu.Unlock()
		return nil, status.Errorf(codes.Aborted, "dispatcher is stopped")
	}
	ctx := d.ctx
	d.mu.Unlock()
	return ctx, nil
}

func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
	var nodes []*api.Node
	var err error
	d.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get list of nodes")
	}
	err = d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			err := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, n.ID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					nodeCopy := node
					expireFunc := func() {
						if err := d.moveTasksToOrphaned(nodeCopy.ID); err != nil {
							log.WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
						}

						d.downNodes.Delete(nodeCopy.ID)
					}

					d.downNodes.Add(nodeCopy, expireFunc)
					return nil
				}

				node.Status.State = api.NodeStatus_UNKNOWN
				node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster`

				nodeID := node.ID

				expireFunc := func() {
					log := log.WithField("node", nodeID)
					log.Debug("heartbeat expiration for unknown node")
					if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
						log.WithError(err).Error(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return errors.Wrap(err, `adding node in "unknown" state to node store failed`)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return errors.Wrap(err, "update failed")
				}
				return nil
			})
			if err != nil {
				log.WithField("node", n.ID).WithError(err).Error(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return err
}

func (d *Dispatcher) isRunning() bool {
	if d.ctx == nil {
		return false
	}
	select {
	case <-d.ctx.Done():
		return false
	default:
	}
	return true
}

// markNodeReady updates the description of a node, updates its address, and sets its status to READY.
// This is used during registration when a new node description is provided,
// and during node updates when the node description changes.
func (d *Dispatcher) markNodeReady(ctx context.Context, nodeID string, description *api.NodeDescription, addr string) error {
	d.nodeUpdatesLock.Lock()
	d.nodeUpdates[nodeID] = nodeUpdate{
		status: &api.NodeStatus{
			State: api.NodeStatus_READY,
			Addr:  addr,
		},
		description: description,
	}
	numUpdates := len(d.nodeUpdates)
	d.nodeUpdatesLock.Unlock()

	// Node is marked ready. Remove the node from down nodes if it
	// is there.
	d.downNodes.Delete(nodeID)

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	// Wait until the node update batch happens before unblocking register.
	d.processUpdatesLock.Lock()
	defer d.processUpdatesLock.Unlock()

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	d.processUpdatesCond.Wait()

	return nil
}

// gets the node IP from the context of a grpc call
func nodeIPFromContext(ctx context.Context) (string, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return "", err
	}
	addr, _, err := net.SplitHostPort(nodeInfo.RemoteAddr)
	if err != nil {
		return "", errors.Wrap(err, "unable to get ip from addr:port")
	}
	return addr, nil
}

// register is used for registration of a node with a particular dispatcher.
func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) {
	// prevent register until we're ready to accept it
	dctx, err := d.isRunningLocked()
	if err != nil {
		return "", err
	}

	if err := d.nodes.CheckRateLimit(nodeID); err != nil {
		return "", err
	}

	// TODO(stevvooe): Validate node specification.
	var node *api.Node
	d.store.View(func(tx store.ReadTx) {
		node = store.GetNode(tx, nodeID)
	})
	if node == nil {
		return "", ErrNodeNotFound
	}

	addr, err := nodeIPFromContext(ctx)
	if err != nil {
		log.G(ctx).WithError(err).Debug("failed to get remote node IP")
	}

	if err := d.markNodeReady(dctx, nodeID, description, addr); err != nil {
		return "", err
	}

	expireFunc := func() {
		log.G(ctx).Debug("heartbeat expiration")
		if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, "heartbeat failure"); err != nil {
			log.G(ctx).WithError(err).Errorf("failed deregistering node after heartbeat expiration")
		}
	}

	rn := d.nodes.Add(node, expireFunc)

	// NOTE(stevvooe): We need be a little careful with re-registration. The
	// current implementation just matches the node id and then gives away the
	// sessionID. If we ever want to use sessionID as a secret, which we may
	// want to, this is giving away the keys to the kitchen.
	//
	// The right behavior is going to be informed by identity. Basically, each
	// time a node registers, we invalidate the session and issue a new
	// session, once identity is proven. This will cause misbehaved agents to
	// be kicked when multiple connections are made.
	return rn.SessionID, nil
}

// UpdateTaskStatus updates the status of a task. A node should send such updates
// on every status change of its tasks.
func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}
	nodeID := nodeInfo.NodeID
	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).UpdateTaskStatus",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)

	dctx, err := d.isRunningLocked()
	if err != nil {
		return nil, err
	}

	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return nil, err
	}

	validTaskUpdates := make([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, 0, len(r.Updates))

	// Validate task updates
	for _, u := range r.Updates {
		if u.Status == nil {
			log.WithField("task.id", u.TaskID).Warn("task report has nil status")
			continue
		}

		var t *api.Task
		d.store.View(func(tx store.ReadTx) {
			t = store.GetTask(tx, u.TaskID)
		})

		if t == nil {
			// Task may have been deleted
			log.WithField("task.id", u.TaskID).Debug("cannot find target task in store")
			continue
		}

		if t.NodeID != nodeID {
			err := status.Errorf(codes.PermissionDenied, "cannot update a task not assigned this node")
			log.WithField("task.id", u.TaskID).Error(err)
			return nil, err
		}

		validTaskUpdates = append(validTaskUpdates, u)
	}

	d.taskUpdatesLock.Lock()
	// Enqueue task updates
	for _, u := range validTaskUpdates {
		d.taskUpdates[u.TaskID] = u.Status
	}

	numUpdates := len(d.taskUpdates)
	d.taskUpdatesLock.Unlock()

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-dctx.Done():
		}
	}

	return nil, nil
}

func (d *Dispatcher) processUpdates(ctx context.Context) {
	var (
		taskUpdates map[string]*api.TaskStatus
		nodeUpdates map[string]nodeUpdate
	)

	d.taskUpdatesLock.Lock()
	if len(d.taskUpdates) != 0 {
		taskUpdates = d.taskUpdates
		d.taskUpdates = make(map[string]*api.TaskStatus)
	}
	d.taskUpdatesLock.Unlock()

	d.nodeUpdatesLock.Lock()
	if len(d.nodeUpdates) != 0 {
		nodeUpdates = d.nodeUpdates
		d.nodeUpdates = make(map[string]nodeUpdate)
	}
	d.nodeUpdatesLock.Unlock()

	if len(taskUpdates) == 0 && len(nodeUpdates) == 0 {
		return
	}

	log := log.G(ctx).WithFields(logrus.Fields{
		"method": "(*Dispatcher).processUpdates",
	})

	err := d.store.Batch(func(batch *store.Batch) error {
		for taskID, status := range taskUpdates {
			err := batch.Update(func(tx store.Tx) error {
				logger := log.WithField("task.id", taskID)
				task := store.GetTask(tx, taskID)
				if task == nil {
					// Task may have been deleted
					logger.Debug("cannot find target task in store")
					return nil
				}

				logger = logger.WithField("state.transition", fmt.Sprintf("%v->%v", task.Status.State, status.State))

				if task.Status == *status {
					logger.Debug("task status identical, ignoring")
					return nil
				}

				if task.Status.State > status.State {
					logger.Debug("task status invalid transition")
					return nil
				}

				// Update scheduling delay metric for running tasks.
				// We use the status update time on the leader to calculate the scheduling delay.
				// Because of this, the recorded scheduling delay will be an overestimate and include
				// the network delay between the worker and the leader.
				// This is not ideal, but it's a known overestimation, rather than using the status update time
				// from the worker node, which may cause unknown incorrect results due to possible clock skew.
				if status.State == api.TaskStateRunning {
					start := time.Unix(status.AppliedAt.GetSeconds(), int64(status.AppliedAt.GetNanos()))
					schedulingDelayTimer.UpdateSince(start)
				}

				task.Status = *status
				task.Status.AppliedBy = d.securityConfig.ClientTLSCreds.NodeID()
				task.Status.AppliedAt = ptypes.MustTimestampProto(time.Now())
				if err := store.UpdateTask(tx, task); err != nil {
					logger.WithError(err).Error("failed to update task status")
					return nil
				}
				logger.Debug("dispatcher committed status update to store")
				return nil
			})
			if err != nil {
				log.WithError(err).Error("dispatcher task update transaction failed")
			}
		}

		for nodeID, nodeUpdate := range nodeUpdates {
			err := batch.Update(func(tx store.Tx) error {
				logger := log.WithField("node.id", nodeID)
				node := store.GetNode(tx, nodeID)
				if node == nil {
					logger.Errorf("node unavailable")
					return nil
				}

				if nodeUpdate.status != nil {
					node.Status.State = nodeUpdate.status.State
					node.Status.Message = nodeUpdate.status.Message
					if nodeUpdate.status.Addr != "" {
						node.Status.Addr = nodeUpdate.status.Addr
					}
				}
				if nodeUpdate.description != nil {
					node.Description = nodeUpdate.description
				}

				if err := store.UpdateNode(tx, node); err != nil {
					logger.WithError(err).Error("failed to update node status")
					return nil
				}
				logger.Debug("node status updated")
				return nil
			})
			if err != nil {
				log.WithError(err).Error("dispatcher node update transaction failed")
			}
		}

		return nil
	})
	if err != nil {
		log.WithError(err).Error("dispatcher batch failed")
	}

	d.processUpdatesCond.Broadcast()
}

// Tasks is a stream of task states for a node. Each message contains the full list
// of tasks that should be run on the node; if a task is not present in that list,
// it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).Tasks",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log.G(stream.Context()).WithFields(fields).Debug("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	tasksMap := make(map[string]*api.Task)
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}
			for _, t := range tasks {
				tasksMap[t.ID] = t
			}
			return nil
		},
		api.EventCreateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
	)
	if err != nil {
		return err
	}
	defer cancel()

	for {
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		var tasks []*api.Task
		for _, t := range tasksMap {
			// dispatcher only sends tasks that have been assigned to a node
			if t != nil && t.Status.State >= api.TaskStateAssigned {
				tasks = append(tasks, t)
			}
		}

		if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out as a snapshot
		var (
			modificationCnt int
			batchingTimer   *time.Timer
			batchingTimeout <-chan time.Time
		)

	batchingLoop:
		for modificationCnt < modificationBatchLimit {
			select {
			case event := <-nodeTasks:
				switch v := event.(type) {
				case api.EventCreateTask:
					tasksMap[v.Task.ID] = v.Task
					modificationCnt++
				case api.EventUpdateTask:
					if oldTask, exists := tasksMap[v.Task.ID]; exists {
						// States ASSIGNED and below are set by the orchestrator/scheduler,
						// not the agent, so tasks in these states need to be sent to the
						// agent even if nothing else has changed.
						if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
							// this update should not trigger action at agent
							tasksMap[v.Task.ID] = v.Task
							continue
						}
					}
					tasksMap[v.Task.ID] = v.Task
					modificationCnt++
				case api.EventDeleteTask:
					delete(tasksMap, v.Task.ID)
					modificationCnt++
				}
				if batchingTimer != nil {
					batchingTimer.Reset(batchingWaitTime)
				} else {
					batchingTimer = time.NewTimer(batchingWaitTime)
					batchingTimeout = batchingTimer.C
				}
			case <-batchingTimeout:
				break batchingLoop
			case <-stream.Context().Done():
				return stream.Context().Err()
			case <-dctx.Done():
				return dctx.Err()
			}
		}

		if batchingTimer != nil {
			batchingTimer.Stop()
		}
	}
}

// Assignments is a stream of assignments for a node. Each message contains
// either the full list of tasks and secrets for the node, or an incremental update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).Assignments",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(stream.Context()).WithFields(fields)
	log.Debug("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	var (
		sequence    int64
		appliesTo   string
		assignments = newAssignmentSet(log, d.dp)
	)

	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
		sequence++
		msg.AppliesTo = appliesTo
		msg.ResultsIn = strconv.FormatInt(sequence, 10)
		appliesTo = msg.ResultsIn
		msg.Type = assignmentType

		return stream.Send(&msg)
	}

	// TODO(aaronl): Also send node secrets that should be exposed to
	// this node.
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}

			for _, t := range tasks {
				assignments.addOrUpdateTask(readTx, t)
			}

			return nil
		},
		api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
		api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
	)
	if err != nil {
		return err
	}
	defer cancel()

	if err := sendMessage(assignments.message(), api.AssignmentsMessage_COMPLETE); err != nil {
		return err
	}

	for {
		// Check for session expiration
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out together
		var (
			modificationCnt int
			batchingTimer   *time.Timer
			batchingTimeout <-chan time.Time
		)

		oneModification := func() {
			modificationCnt++

			if batchingTimer != nil {
				batchingTimer.Reset(batchingWaitTime)
			} else {
				batchingTimer = time.NewTimer(batchingWaitTime)
				batchingTimeout = batchingTimer.C
			}
		}

		// The batching loop waits for batchingWaitTime (100 ms) after the most
		// recent change, or until modificationBatchLimit is reached. The
		// worst case latency is modificationBatchLimit * batchingWaitTime,
		// which is 10 seconds.
	batchingLoop:
		for modificationCnt < modificationBatchLimit {
			select {
			case event := <-nodeTasks:
				switch v := event.(type) {
				// We don't monitor EventCreateTask because tasks are
				// never created in the ASSIGNED state. First tasks are
				// created by the orchestrator, then the scheduler moves
				// them to ASSIGNED. If this ever changes, we will need
				// to monitor task creations as well.
				case api.EventUpdateTask:
					d.store.View(func(readTx store.ReadTx) {
						if assignments.addOrUpdateTask(readTx, v.Task) {
							oneModification()
						}
					})
				case api.EventDeleteTask:
					if assignments.removeTask(v.Task) {
						oneModification()
					}
					// TODO(aaronl): For node secrets, we'll need to handle
					// EventCreateSecret.
				}
			case <-batchingTimeout:
				break batchingLoop
			case <-stream.Context().Done():
				return stream.Context().Err()
			case <-dctx.Done():
				return dctx.Err()
			}
		}

		if batchingTimer != nil {
			batchingTimer.Stop()
		}

		if modificationCnt > 0 {
			if err := sendMessage(assignments.message(), api.AssignmentsMessage_INCREMENTAL); err != nil {
				return err
			}
		}
	}
}

func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
	err := d.store.Batch(func(batch *store.Batch) error {
		var (
			tasks []*api.Task
			err   error
		)

		d.store.View(func(tx store.ReadTx) {
			tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
		})
		if err != nil {
			return err
		}

		for _, task := range tasks {
			// Tasks running on an unreachable node need to be marked as
			// orphaned since we have no idea whether the task is still running
			// or not.
			//
			// This only applies for tasks that could have made progress since
			// the agent became unreachable (assigned<->running)
			//
			// Tasks in a final state (e.g. rejected) *cannot* have made
			// progress, therefore there's no point in marking them as orphaned
			if task.Status.State >= api.TaskStateAssigned && task.Status.State <= api.TaskStateRunning {
				task.Status.State = api.TaskStateOrphaned
			}

			if err := batch.Update(func(tx store.Tx) error {
				err := store.UpdateTask(tx, task)
				if err != nil {
					return err
				}

				return nil
			}); err != nil {
				return err
			}
		}

		return nil
	})

	return err
}

// markNodeNotReady sets the node state to some state other than READY
func (d *Dispatcher) markNodeNotReady(id string, state api.NodeStatus_State, message string) error {
	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	// Node is down. Add it to down nodes so that we can keep
	// track of tasks assigned to the node.
	var node *api.Node
	d.store.View(func(readTx store.ReadTx) {
		node = store.GetNode(readTx, id)
		if node == nil {
			err = fmt.Errorf("could not find node %s while trying to add to down nodes store", id)
		}
	})
	if err != nil {
		return err
	}

	expireFunc := func() {
		if err := d.moveTasksToOrphaned(id); err != nil {
			log.G(dctx).WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
		}

		d.downNodes.Delete(id)
	}

	d.downNodes.Add(node, expireFunc)

	status := &api.NodeStatus{
		State:   state,
		Message: message,
	}

	d.nodeUpdatesLock.Lock()
	// pluck the description out of nodeUpdates. this protects against a case
	// where a node is marked ready and a description is added, but then the
	// node is immediately marked not ready. this preserves that description
	d.nodeUpdates[id] = nodeUpdate{status: status, description: d.nodeUpdates[id].description}
	numUpdates := len(d.nodeUpdates)
	d.nodeUpdatesLock.Unlock()

	if numUpdates >= maxBatchItems {
		select {
		case d.processUpdatesTrigger <- struct{}{}:
		case <-dctx.Done():
		}
	}

	if rn := d.nodes.Delete(id); rn == nil {
		return errors.Errorf("node %s is not found in local storage", id)
	}

	return nil
}

// Heartbeat is the heartbeat method for nodes. It returns the new TTL in the response.
// A node should send its next heartbeat earlier than now + TTL, otherwise it will
// be deregistered from the dispatcher and its status will be updated to NodeStatus_DOWN.
func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*api.HeartbeatResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}

	period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID)
	return &api.HeartbeatResponse{Period: period}, err
}

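// A rough agent-side sketch (an assumption about the caller, not part of this
// package, and assuming Period is usable as a time.Duration as the assignment
// above suggests): a worker would loop on Heartbeat and wait roughly one
// returned period between calls, for example:
//
//	for {
//		resp, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: sessionID})
//		if err != nil {
//			return err // session is likely invalid; the node should re-register
//		}
//		select {
//		case <-time.After(resp.Period):
//		case <-ctx.Done():
//			return ctx.Err()
//		}
//	}
//
// client and sessionID are hypothetical values obtained from an established
// Session stream.
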
func (d *Dispatcher) getManagers() []*api.WeightedPeer {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.lastSeenManagers
}

func (d *Dispatcher) getNetworkBootstrapKeys() []*api.EncryptionKey {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.networkBootstrapKeys
}

func (d *Dispatcher) getRootCACert() []byte {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.lastSeenRootCert
}

// Session is a stream which controls the agent connection.
// Each message contains a list of backup Managers with weights. There is also
// a special boolean field Disconnect which, if true, indicates that the node
// should reconnect to another Manager immediately.
func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error {
	ctx := stream.Context()
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	var sessionID string
	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		// register the node.
		sessionID, err = d.register(ctx, nodeID, r.Description)
		if err != nil {
			return err
		}
	} else {
		sessionID = r.SessionID
		// get the node IP addr
		addr, err := nodeIPFromContext(stream.Context())
		if err != nil {
			log.G(ctx).WithError(err).Debug("failed to get remote node IP")
		}
		// update the node description
		if err := d.markNodeReady(dctx, nodeID, r.Description, addr); err != nil {
			return err
		}
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": sessionID,
		"method":       "(*Dispatcher).Session",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)

	var nodeObj *api.Node
	nodeUpdates, cancel, err := store.ViewAndWatch(d.store, func(readTx store.ReadTx) error {
		nodeObj = store.GetNode(readTx, nodeID)
		return nil
	}, api.EventUpdateNode{Node: &api.Node{ID: nodeID},
		Checks: []api.NodeCheckFunc{api.NodeCheckID}},
	)
	if cancel != nil {
		defer cancel()
	}

	if err != nil {
		log.WithError(err).Error("ViewAndWatch Node failed")
	}

	if _, err = d.nodes.GetWithSession(nodeID, sessionID); err != nil {
		return err
	}

	clusterUpdatesCh, clusterCancel := d.clusterUpdateQueue.Watch()
	defer clusterCancel()

	if err := stream.Send(&api.SessionMessage{
		SessionID:            sessionID,
		Node:                 nodeObj,
		Managers:             d.getManagers(),
		NetworkBootstrapKeys: d.getNetworkBootstrapKeys(),
		RootCA:               d.getRootCACert(),
	}); err != nil {
		return err
	}

	// disconnectNode is a helper that forcibly shuts down the connection
	disconnectNode := func() error {
		// force disconnect by shutting down the stream.
		transportStream, ok := transport.StreamFromContext(stream.Context())
		if ok {
			// if we have the transport stream, we can signal a disconnect
			// in the client.
			if err := transportStream.ServerTransport().Close(); err != nil {
				log.WithError(err).Error("session end")
			}
		}

		if err := d.markNodeNotReady(nodeID, api.NodeStatus_DISCONNECTED, "node is currently trying to find new manager"); err != nil {
			log.WithError(err).Error("failed to remove node")
		}
		// still return an abort if the transport closure was ineffective.
		return status.Errorf(codes.Aborted, "node must disconnect")
	}

	for {
		// After each message send, we need to check that the node's sessionID hasn't
		// changed. If it has, we will shut down the stream and make the node
		// re-register.
		node, err := d.nodes.GetWithSession(nodeID, sessionID)
		if err != nil {
			return err
		}

		var (
			disconnect bool
			mgrs       []*api.WeightedPeer
			netKeys    []*api.EncryptionKey
			rootCert   []byte
		)

		select {
		case ev := <-clusterUpdatesCh:
			update := ev.(clusterUpdate)
			if update.managerUpdate != nil {
				mgrs = *update.managerUpdate
			}
			if update.bootstrapKeyUpdate != nil {
				netKeys = *update.bootstrapKeyUpdate
			}
			if update.rootCAUpdate != nil {
				rootCert = *update.rootCAUpdate
			}
		case ev := <-nodeUpdates:
			nodeObj = ev.(api.EventUpdateNode).Node
		case <-stream.Context().Done():
			return stream.Context().Err()
		case <-node.Disconnect:
			disconnect = true
		case <-dctx.Done():
			disconnect = true
		}

		if mgrs == nil {
			mgrs = d.getManagers()
		}
		if netKeys == nil {
			netKeys = d.getNetworkBootstrapKeys()
		}
		if rootCert == nil {
			rootCert = d.getRootCACert()
		}

		if err := stream.Send(&api.SessionMessage{
			SessionID:            sessionID,
			Node:                 nodeObj,
			Managers:             mgrs,
			NetworkBootstrapKeys: netKeys,
			RootCA:               rootCert,
		}); err != nil {
			return err
		}
		if disconnect {
			return disconnectNode()
		}
	}
}