node.go

package node

import (
    "bytes"
    "crypto/tls"
    "encoding/json"
    "io/ioutil"
    "net"
    "os"
    "path/filepath"
    "reflect"
    "sort"
    "strings"
    "sync"
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/boltdb/bolt"
    "github.com/docker/docker/pkg/plugingetter"
    metrics "github.com/docker/go-metrics"
    "github.com/docker/swarmkit/agent"
    "github.com/docker/swarmkit/agent/exec"
    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/ca"
    "github.com/docker/swarmkit/connectionbroker"
    "github.com/docker/swarmkit/ioutils"
    "github.com/docker/swarmkit/log"
    "github.com/docker/swarmkit/manager"
    "github.com/docker/swarmkit/manager/encryption"
    "github.com/docker/swarmkit/remotes"
    "github.com/docker/swarmkit/watch"
    "github.com/docker/swarmkit/xnet"
    grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
    "github.com/pkg/errors"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/credentials"
)
const (
    stateFilename     = "state.json"
    roleChangeTimeout = 16 * time.Second
)

var (
    nodeInfo    metrics.LabeledGauge
    nodeManager metrics.Gauge

    errNodeStarted    = errors.New("node: already started")
    errNodeNotStarted = errors.New("node: not started")
    certDirectory     = "certificates"

    // ErrInvalidUnlockKey is returned when we can't decrypt the TLS certificate
    ErrInvalidUnlockKey = errors.New("node is locked, and needs a valid unlock key")
)

func init() {
    ns := metrics.NewNamespace("swarm", "node", nil)
    nodeInfo = ns.NewLabeledGauge("info", "Information related to the swarm", "",
        "swarm_id",
        "node_id",
    )
    nodeManager = ns.NewGauge("manager", "Whether this node is a manager or not", "")
    metrics.Register(ns)
}
// Config provides values for a Node.
type Config struct {
    // Hostname is the hostname used by the agent instance.
    Hostname string

    // JoinAddr specifies the address of a node to use for the initial
    // connection to another manager in the cluster. It is optional and
    // holds at most one address; the actual remotes come from the stored
    // state.
    JoinAddr string

    // StateDir specifies the directory the node uses to keep the state of the
    // remote managers and certificates.
    StateDir string

    // JoinToken is the token to be used on the first certificate request.
    JoinToken string

    // ExternalCAs is a list of CAs to which a manager node
    // will make certificate signing requests for node certificates.
    ExternalCAs []*api.ExternalCA

    // ForceNewCluster creates a new cluster from current raft state.
    ForceNewCluster bool

    // ListenControlAPI specifies the address the control API should listen on.
    ListenControlAPI string

    // ListenRemoteAPI specifies the address for the remote API that agents
    // and raft members connect to.
    ListenRemoteAPI string

    // AdvertiseRemoteAPI specifies the address that should be advertised
    // for connections to the remote API (including the raft service).
    AdvertiseRemoteAPI string

    // Executor specifies the executor to use for the agent.
    Executor exec.Executor

    // ElectionTick defines the number of ticks without a leader required to
    // trigger a new election.
    ElectionTick uint32

    // HeartbeatTick defines the number of ticks between heartbeats sent to
    // other members for health-check purposes.
    HeartbeatTick uint32

    // AutoLockManagers determines whether or not an unlock key will be generated
    // when bootstrapping a new cluster for the first time.
    AutoLockManagers bool

    // UnlockKey is the key to unlock a node, used for decrypting data at rest.
    // This only applies to nodes that have already joined a cluster.
    UnlockKey []byte

    // Availability allows a user to control the current scheduling status of a node.
    Availability api.NodeSpec_Availability

    // PluginGetter provides access to docker's plugin inventory.
    PluginGetter plugingetter.PluginGetter
}
// Node implements the primary node functionality for a member of a swarm
// cluster. Node handles workloads and may also run as a manager.
type Node struct {
    sync.RWMutex
    config           *Config
    remotes          *persistentRemotes
    connBroker       *connectionbroker.Broker
    role             string
    roleCond         *sync.Cond
    conn             *grpc.ClientConn
    connCond         *sync.Cond
    nodeID           string
    started          chan struct{}
    startOnce        sync.Once
    stopped          chan struct{}
    stopOnce         sync.Once
    ready            chan struct{} // closed when agent has completed registration and manager (if enabled) is ready to receive control requests
    closed           chan struct{}
    err              error
    agent            *agent.Agent
    manager          *manager.Manager
    notifyNodeChange chan *agent.NodeChanges // used by the agent to relay node updates from the dispatcher Session stream to (*Node).run
    unlockKey        []byte
}
type lastSeenRole struct {
    role api.NodeRole
}

// observe notes the latest value of this node role, and returns true if it
// is the first seen value, or is different from the most recently seen value.
func (l *lastSeenRole) observe(newRole api.NodeRole) bool {
    changed := l.role != newRole
    l.role = newRole
    return changed
}
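
// A minimal sketch of the edge-triggered pattern above (values chosen for
// illustration): only transitions report true, repeated values are suppressed.
//
//    last := lastSeenRole{role: api.NodeRoleWorker}
//    last.observe(api.NodeRoleWorker)  // false: same as the seeded value
//    last.observe(api.NodeRoleManager) // true: worker -> manager transition
//    last.observe(api.NodeRoleManager) // false: repeated value is suppressed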
// RemoteAPIAddr returns the address on which the remote manager API listens.
// Returns an error if the node is not a manager.
func (n *Node) RemoteAPIAddr() (string, error) {
    n.RLock()
    defer n.RUnlock()
    if n.manager == nil {
        return "", errors.New("manager is not running")
    }
    addr := n.manager.Addr()
    if addr == "" {
        return "", errors.New("manager addr is not set")
    }
    return addr, nil
}
// New returns a new Node instance.
func New(c *Config) (*Node, error) {
    if err := os.MkdirAll(c.StateDir, 0700); err != nil {
        return nil, err
    }
    stateFile := filepath.Join(c.StateDir, stateFilename)
    dt, err := ioutil.ReadFile(stateFile)
    var p []api.Peer
    if err != nil && !os.IsNotExist(err) {
        return nil, err
    }
    if err == nil {
        if err := json.Unmarshal(dt, &p); err != nil {
            return nil, err
        }
    }

    n := &Node{
        remotes:          newPersistentRemotes(stateFile, p...),
        role:             ca.WorkerRole,
        config:           c,
        started:          make(chan struct{}),
        stopped:          make(chan struct{}),
        closed:           make(chan struct{}),
        ready:            make(chan struct{}),
        notifyNodeChange: make(chan *agent.NodeChanges, 1),
        unlockKey:        c.UnlockKey,
    }

    if n.config.JoinAddr != "" || n.config.ForceNewCluster {
        n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename))
        if n.config.JoinAddr != "" {
            n.remotes.Observe(api.Peer{Addr: n.config.JoinAddr}, remotes.DefaultObservationWeight)
        }
    }

    n.connBroker = connectionbroker.New(n.remotes)

    n.roleCond = sync.NewCond(n.RLocker())
    n.connCond = sync.NewCond(n.RLocker())
    return n, nil
}
// BindRemote starts a listener that exposes the remote API.
func (n *Node) BindRemote(ctx context.Context, listenAddr string, advertiseAddr string) error {
    n.RLock()
    defer n.RUnlock()

    if n.manager == nil {
        return errors.New("manager is not running")
    }

    return n.manager.BindRemote(ctx, manager.RemoteAddrs{
        ListenAddr:    listenAddr,
        AdvertiseAddr: advertiseAddr,
    })
}
// Start starts a node instance.
func (n *Node) Start(ctx context.Context) error {
    err := errNodeStarted

    n.startOnce.Do(func() {
        close(n.started)
        go n.run(ctx)
        err = nil // clear error above, only once.
    })

    return err
}
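
// A minimal lifecycle sketch for callers, assuming a caller-supplied
// exec.Executor named executor and a parent ctx (both hypothetical):
//
//    n, err := New(&Config{
//        StateDir: "/var/lib/example-node", // hypothetical path
//        Executor: executor,
//    })
//    if err != nil {
//        return err
//    }
//    if err := n.Start(ctx); err != nil {
//        return err
//    }
//    <-n.Ready()       // closed once the agent (and manager, if any) is up
//    defer n.Stop(ctx) // asks the agent to clean up assignments, then shuts down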
func (n *Node) currentRole() api.NodeRole {
    n.Lock()
    currentRole := api.NodeRoleWorker
    if n.role == ca.ManagerRole {
        currentRole = api.NodeRoleManager
    }
    n.Unlock()
    return currentRole
}
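
// run is the node's main loop: it loads or obtains TLS credentials, opens the
// worker task store, relays dispatcher-driven role and root CA changes to the
// certificate renewer, and supervises the agent and (if the node holds the
// manager role) the manager until the context is cancelled or Stop is called.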
func (n *Node) run(ctx context.Context) (err error) {
    defer func() {
        n.err = err
        close(n.closed)
    }()
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    ctx = log.WithModule(ctx, "node")

    go func(ctx context.Context) {
        select {
        case <-ctx.Done():
        case <-n.stopped:
            cancel()
        }
    }(ctx)

    paths := ca.NewConfigPaths(filepath.Join(n.config.StateDir, certDirectory))
    securityConfig, err := n.loadSecurityConfig(ctx, paths)
    if err != nil {
        return err
    }

    renewer := ca.NewTLSRenewer(securityConfig, n.connBroker, paths.RootCA)

    ctx = log.WithLogger(ctx, log.G(ctx).WithField("node.id", n.NodeID()))

    taskDBPath := filepath.Join(n.config.StateDir, "worker", "tasks.db")
    if err := os.MkdirAll(filepath.Dir(taskDBPath), 0777); err != nil {
        return err
    }

    db, err := bolt.Open(taskDBPath, 0666, nil)
    if err != nil {
        return err
    }
    defer db.Close()

    agentDone := make(chan struct{})

    go func() {
        // lastNodeDesiredRole is the last-seen value of Node.Spec.DesiredRole,
        // used to make role changes "edge triggered" and avoid renewal loops.
        lastNodeDesiredRole := lastSeenRole{role: n.currentRole()}

        for {
            select {
            case <-agentDone:
                return
            case nodeChanges := <-n.notifyNodeChange:
                currentRole := n.currentRole()

                if nodeChanges.Node != nil {
                    // This is a bit complex to be backward compatible with older CAs that
                    // don't support the Node.Role field. They only use what's presently
                    // called DesiredRole.
                    // 1) If DesiredRole changes, kick off a certificate renewal. The renewal
                    //    is delayed slightly to give Role time to change as well if this is
                    //    a newer CA. If the certificate we get back doesn't have the expected
                    //    role, we continue renewing with exponential backoff.
                    // 2) If the server is sending us IssuanceStateRotate, renew the cert as
                    //    requested by the CA.
                    desiredRoleChanged := lastNodeDesiredRole.observe(nodeChanges.Node.Spec.DesiredRole)
                    if desiredRoleChanged {
                        switch nodeChanges.Node.Spec.DesiredRole {
                        case api.NodeRoleManager:
                            renewer.SetExpectedRole(ca.ManagerRole)
                        case api.NodeRoleWorker:
                            renewer.SetExpectedRole(ca.WorkerRole)
                        }
                    }
                    if desiredRoleChanged || nodeChanges.Node.Certificate.Status.State == api.IssuanceStateRotate {
                        renewer.Renew()
                    }
                }

                if nodeChanges.RootCert != nil {
                    // We only want to update the root CA if this is a worker node. Manager nodes directly watch the raft
                    // store and update the root CA, with the necessary signer, from the raft store (since the managers
                    // need the CA key as well to potentially issue new TLS certificates).
                    if currentRole == api.NodeRoleManager || bytes.Equal(nodeChanges.RootCert, securityConfig.RootCA().Certs) {
                        continue
                    }
                    newRootCA, err := ca.NewRootCA(nodeChanges.RootCert, nil, nil, ca.DefaultNodeCertExpiration, nil)
                    if err != nil {
                        log.G(ctx).WithError(err).Error("invalid new root certificate from the dispatcher")
                        continue
                    }
                    if err := securityConfig.UpdateRootCA(&newRootCA, newRootCA.Pool); err != nil {
                        log.G(ctx).WithError(err).Error("could not use new root CA from dispatcher")
                        continue
                    }
                    if err := ca.SaveRootCA(newRootCA, paths.RootCA); err != nil {
                        log.G(ctx).WithError(err).Error("could not save new root certificate from the dispatcher")
                        continue
                    }
                }
            }
        }
    }()

    var wg sync.WaitGroup
    wg.Add(3)

    nodeInfo.WithValues(
        securityConfig.ClientTLSCreds.Organization(),
        securityConfig.ClientTLSCreds.NodeID(),
    ).Set(1)

    if n.currentRole() == api.NodeRoleManager {
        nodeManager.Set(1)
    } else {
        nodeManager.Set(0)
    }

    updates := renewer.Start(ctx)
    go func() {
        for certUpdate := range updates {
            if certUpdate.Err != nil {
                logrus.Warnf("error renewing TLS certificate: %v", certUpdate.Err)
                continue
            }
            n.Lock()
            n.role = certUpdate.Role
            n.roleCond.Broadcast()
            n.Unlock()

            // Export the new role.
            if n.currentRole() == api.NodeRoleManager {
                nodeManager.Set(1)
            } else {
                nodeManager.Set(0)
            }
        }

        wg.Done()
    }()

    role := n.role

    managerReady := make(chan struct{})
    agentReady := make(chan struct{})
    var managerErr error
    var agentErr error
    go func() {
        managerErr = n.superviseManager(ctx, securityConfig, paths.RootCA, managerReady, renewer) // store err and loop
        wg.Done()
        cancel()
    }()
    go func() {
        agentErr = n.runAgent(ctx, db, securityConfig, agentReady)
        wg.Done()
        cancel()
        close(agentDone)
    }()

    go func() {
        <-agentReady
        if role == ca.ManagerRole {
            workerRole := make(chan struct{})
            waitRoleCtx, waitRoleCancel := context.WithCancel(ctx)
            go func() {
                if n.waitRole(waitRoleCtx, ca.WorkerRole) == nil {
                    close(workerRole)
                }
            }()
            select {
            case <-managerReady:
            case <-workerRole:
            }
            waitRoleCancel()
        }
        close(n.ready)
    }()

    wg.Wait()
    if managerErr != nil && errors.Cause(managerErr) != context.Canceled {
        return managerErr
    }
    if agentErr != nil && errors.Cause(agentErr) != context.Canceled {
        return agentErr
    }
    return err
}
// Stop stops node execution
func (n *Node) Stop(ctx context.Context) error {
    select {
    case <-n.started:
    default:
        return errNodeNotStarted
    }
    // ask agent to clean up assignments
    n.Lock()
    if n.agent != nil {
        if err := n.agent.Leave(ctx); err != nil {
            log.G(ctx).WithError(err).Error("agent failed to clean up assignments")
        }
    }
    n.Unlock()

    n.stopOnce.Do(func() {
        close(n.stopped)
    })

    select {
    case <-n.closed:
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}
// Err returns the error that caused the node to shut down, or nil. Err blocks
// until the node has fully shut down.
func (n *Node) Err(ctx context.Context) error {
    select {
    case <-n.closed:
        return n.err
    case <-ctx.Done():
        return ctx.Err()
    }
}
func (n *Node) runAgent(ctx context.Context, db *bolt.DB, securityConfig *ca.SecurityConfig, ready chan<- struct{}) error {
    waitCtx, waitCancel := context.WithCancel(ctx)
    remotesCh := n.remotes.WaitSelect(ctx)
    controlCh := n.ListenControlSocket(waitCtx)

waitPeer:
    for {
        select {
        case <-ctx.Done():
            break waitPeer
        case <-remotesCh:
            break waitPeer
        case conn := <-controlCh:
            if conn != nil {
                break waitPeer
            }
        }
    }

    waitCancel()

    select {
    case <-ctx.Done():
        return ctx.Err()
    default:
    }

    secChangeQueue := watch.NewQueue()
    defer secChangeQueue.Close()
    secChangesCh, secChangesCancel := secChangeQueue.Watch()
    defer secChangesCancel()
    securityConfig.SetWatch(secChangeQueue)

    rootCA := securityConfig.RootCA()
    issuer := securityConfig.IssuerInfo()

    agentConfig := &agent.Config{
        Hostname:         n.config.Hostname,
        ConnBroker:       n.connBroker,
        Executor:         n.config.Executor,
        DB:               db,
        NotifyNodeChange: n.notifyNodeChange,
        NotifyTLSChange:  secChangesCh,
        Credentials:      securityConfig.ClientTLSCreds,
        NodeTLSInfo: &api.NodeTLSInfo{
            TrustRoot:           rootCA.Certs,
            CertIssuerPublicKey: issuer.PublicKey,
            CertIssuerSubject:   issuer.Subject,
        },
    }
    // if a join address has been specified, then if the agent fails to connect due to a TLS error, fail fast - don't
    // keep re-trying to join
    if n.config.JoinAddr != "" {
        agentConfig.SessionTracker = &firstSessionErrorTracker{}
    }

    a, err := agent.New(agentConfig)
    if err != nil {
        return err
    }
    if err := a.Start(ctx); err != nil {
        return err
    }

    n.Lock()
    n.agent = a
    n.Unlock()

    defer func() {
        n.Lock()
        n.agent = nil
        n.Unlock()
    }()

    go func() {
        <-a.Ready()
        close(ready)
    }()

    // todo: manually call stop on context cancellation?

    return a.Err(context.Background())
}
// Ready returns a channel that is closed after the node's initialization
// completes for the first time.
func (n *Node) Ready() <-chan struct{} {
    return n.ready
}
func (n *Node) setControlSocket(conn *grpc.ClientConn) {
    n.Lock()
    if n.conn != nil {
        n.conn.Close()
    }
    n.conn = conn
    n.connBroker.SetLocalConn(conn)
    n.connCond.Broadcast()
    n.Unlock()
}
// ListenControlSocket listens for changes of the connection used for
// managing the cluster control API, sending the current connection and
// each subsequent replacement on the returned channel.
func (n *Node) ListenControlSocket(ctx context.Context) <-chan *grpc.ClientConn {
    c := make(chan *grpc.ClientConn, 1)
    n.RLock()
    conn := n.conn
    c <- conn
    done := make(chan struct{})
    go func() {
        select {
        case <-ctx.Done():
            n.connCond.Broadcast()
        case <-done:
        }
    }()
    go func() {
        defer close(c)
        defer close(done)
        defer n.RUnlock()
        for {
            select {
            case <-ctx.Done():
                return
            default:
            }
            if conn == n.conn {
                n.connCond.Wait()
                continue
            }
            conn = n.conn
            select {
            case c <- conn:
            case <-ctx.Done():
                return
            }
        }
    }()
    return c
}
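
// A minimal consumption sketch, assuming a cancellable ctx: the first value
// received may be nil when no local manager connection exists yet, and the
// channel is closed once ctx is cancelled.
//
//    for conn := range n.ListenControlSocket(ctx) {
//        if conn != nil {
//            // conn is the current control API client connection
//        }
//    }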
// NodeID returns the current node's ID. May be empty if not set.
func (n *Node) NodeID() string {
    n.RLock()
    defer n.RUnlock()
    return n.nodeID
}

// Manager returns the manager instance started by the node. May be nil.
func (n *Node) Manager() *manager.Manager {
    n.RLock()
    defer n.RUnlock()
    return n.manager
}

// Agent returns the agent instance started by the node. May be nil.
func (n *Node) Agent() *agent.Agent {
    n.RLock()
    defer n.RUnlock()
    return n.agent
}

// IsStateDirty returns true if any objects have been added to raft which make
// the state "dirty". Currently, the existence of any object other than the
// default cluster or the local node implies a dirty state.
func (n *Node) IsStateDirty() (bool, error) {
    n.RLock()
    defer n.RUnlock()

    if n.manager == nil {
        return false, errors.New("node is not a manager")
    }

    return n.manager.IsStateDirty()
}

// Remotes returns a list of the peers known to the node.
func (n *Node) Remotes() []api.Peer {
    weights := n.remotes.Weights()
    remotes := make([]api.Peer, 0, len(weights))
    for p := range weights {
        remotes = append(remotes, p)
    }
    return remotes
}
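
// loadSecurityConfig resolves node credentials along three paths, in order:
// valid certificates already on disk are loaded (expired ones are tolerated
// when forcing a new cluster); with no JoinAddr set, a fresh root CA is
// created (and an unlock key generated if AutoLockManagers is set); with a
// JoinAddr and no local root CA, the root CA is downloaded using the join
// token. In the latter two cases a node certificate is then loaded from disk
// or freshly requested from the CA.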
func (n *Node) loadSecurityConfig(ctx context.Context, paths *ca.SecurityConfigPaths) (*ca.SecurityConfig, error) {
    var securityConfig *ca.SecurityConfig

    krw := ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{})
    if err := krw.Migrate(); err != nil {
        return nil, err
    }

    // Check if we already have valid certificates on disk.
    rootCA, err := ca.GetLocalRootCA(paths.RootCA)
    if err != nil && err != ca.ErrNoLocalRootCA {
        return nil, err
    }
    if err == nil {
        // if forcing a new cluster, we allow the certificates to be expired - a new set will be generated
        securityConfig, err = ca.LoadSecurityConfig(ctx, rootCA, krw, n.config.ForceNewCluster)
        if err != nil {
            _, isInvalidKEK := errors.Cause(err).(ca.ErrInvalidKEK)
            if isInvalidKEK {
                return nil, ErrInvalidUnlockKey
            } else if !os.IsNotExist(err) {
                return nil, errors.Wrapf(err, "error while loading TLS certificate in %s", paths.Node.Cert)
            }
        }
    }

    if securityConfig == nil {
        if n.config.JoinAddr == "" {
            // if we're not joining a cluster, bootstrap a new one - and we have to set the unlock key
            n.unlockKey = nil
            if n.config.AutoLockManagers {
                n.unlockKey = encryption.GenerateSecretKey()
            }
            krw = ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{})
            rootCA, err = ca.CreateRootCA(ca.DefaultRootCN)
            if err != nil {
                return nil, err
            }
            if err := ca.SaveRootCA(rootCA, paths.RootCA); err != nil {
                return nil, err
            }
            log.G(ctx).Debug("generated CA key and certificate")
        } else if err == ca.ErrNoLocalRootCA { // from previous error loading the root CA from disk
            rootCA, err = ca.DownloadRootCA(ctx, paths.RootCA, n.config.JoinToken, n.connBroker)
            if err != nil {
                return nil, err
            }
            log.G(ctx).Debug("downloaded CA certificate")
        }

        // Obtain new certs and setup TLS certificates renewal for this node:
        // - If certificates weren't present on disk, we call CreateSecurityConfig, which blocks
        //   until a valid certificate has been issued.
        // - We wait for CreateSecurityConfig to finish since we need a certificate to operate.

        // Attempt to load certificate from disk
        securityConfig, err = ca.LoadSecurityConfig(ctx, rootCA, krw, n.config.ForceNewCluster)
        if err == nil {
            log.G(ctx).WithFields(logrus.Fields{
                "node.id": securityConfig.ClientTLSCreds.NodeID(),
            }).Debugf("loaded TLS certificate")
        } else {
            if _, ok := errors.Cause(err).(ca.ErrInvalidKEK); ok {
                return nil, ErrInvalidUnlockKey
            }
            log.G(ctx).WithError(err).Debugf("no node credentials found in: %s", krw.Target())

            securityConfig, err = rootCA.CreateSecurityConfig(ctx, krw, ca.CertificateRequestConfig{
                Token:        n.config.JoinToken,
                Availability: n.config.Availability,
                ConnBroker:   n.connBroker,
            })
            if err != nil {
                return nil, err
            }
        }
    }

    n.Lock()
    n.role = securityConfig.ClientTLSCreds.Role()
    n.nodeID = securityConfig.ClientTLSCreds.NodeID()
    n.roleCond.Broadcast()
    n.Unlock()

    return securityConfig, nil
}
func (n *Node) initManagerConnection(ctx context.Context, ready chan<- struct{}) error {
    opts := []grpc.DialOption{
        grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
        grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
    }
    insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
    opts = append(opts, grpc.WithTransportCredentials(insecureCreds))
    addr := n.config.ListenControlAPI
    opts = append(opts, grpc.WithDialer(
        func(addr string, timeout time.Duration) (net.Conn, error) {
            return xnet.DialTimeoutLocal(addr, timeout)
        }))
    conn, err := grpc.Dial(addr, opts...)
    if err != nil {
        return err
    }
    client := api.NewHealthClient(conn)
    for {
        resp, err := client.Check(ctx, &api.HealthCheckRequest{Service: "ControlAPI"})
        if err != nil {
            return err
        }
        if resp.Status == api.HealthCheckResponse_SERVING {
            break
        }
        time.Sleep(500 * time.Millisecond)
    }
    n.setControlSocket(conn)
    if ready != nil {
        close(ready)
    }
    return nil
}
func (n *Node) waitRole(ctx context.Context, role string) error {
    n.roleCond.L.Lock()
    if role == n.role {
        n.roleCond.L.Unlock()
        return nil
    }
    finishCh := make(chan struct{})
    defer close(finishCh)
    go func() {
        select {
        case <-finishCh:
        case <-ctx.Done():
            // call broadcast to shutdown this function
            n.roleCond.Broadcast()
        }
    }()
    defer n.roleCond.L.Unlock()
    for role != n.role {
        n.roleCond.Wait()
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
        }
    }

    return nil
}
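
// The Broadcast-on-cancellation idiom above is needed because sync.Cond has
// no context support: a goroutine parked in Wait only rechecks its condition
// after a Broadcast, so a watcher goroutine wakes all waiters when ctx is
// cancelled and each waiter then notices ctx.Err(). A minimal sketch of the
// same pattern, with hypothetical names (cond, ready):
//
//    go func() {
//        <-ctx.Done()
//        cond.Broadcast() // wake waiters so they can observe cancellation
//    }()
//    cond.L.Lock()
//    defer cond.L.Unlock()
//    for !ready && ctx.Err() == nil {
//        cond.Wait()
//    }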
func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, rootPaths ca.CertPaths, ready chan struct{}, workerRole <-chan struct{}) (bool, error) {
    var remoteAPI *manager.RemoteAddrs
    if n.config.ListenRemoteAPI != "" {
        remoteAPI = &manager.RemoteAddrs{
            ListenAddr:    n.config.ListenRemoteAPI,
            AdvertiseAddr: n.config.AdvertiseRemoteAPI,
        }
    }

    remoteAddr, _ := n.remotes.Select(n.NodeID())
    m, err := manager.New(&manager.Config{
        ForceNewCluster:  n.config.ForceNewCluster,
        RemoteAPI:        remoteAPI,
        ControlAPI:       n.config.ListenControlAPI,
        SecurityConfig:   securityConfig,
        ExternalCAs:      n.config.ExternalCAs,
        JoinRaft:         remoteAddr.Addr,
        StateDir:         n.config.StateDir,
        HeartbeatTick:    n.config.HeartbeatTick,
        ElectionTick:     n.config.ElectionTick,
        AutoLockManagers: n.config.AutoLockManagers,
        UnlockKey:        n.unlockKey,
        Availability:     n.config.Availability,
        PluginGetter:     n.config.PluginGetter,
        RootCAPaths:      rootPaths,
    })
    if err != nil {
        return false, err
    }
    done := make(chan struct{})
    var runErr error
    go func(logger *logrus.Entry) {
        if err := m.Run(log.WithLogger(context.Background(), logger)); err != nil {
            runErr = err
        }
        close(done)
    }(log.G(ctx))

    var clearData bool
    defer func() {
        n.Lock()
        n.manager = nil
        n.Unlock()
        m.Stop(ctx, clearData)
        <-done
        n.setControlSocket(nil)
    }()

    n.Lock()
    n.manager = m
    n.Unlock()

    connCtx, connCancel := context.WithCancel(ctx)
    defer connCancel()
    go n.initManagerConnection(connCtx, ready)

    // wait for manager stop or for role change
    select {
    case <-done:
        return false, runErr
    case <-workerRole:
        log.G(ctx).Info("role changed to worker, stopping manager")
        clearData = true
    case <-m.RemovedFromRaft():
        log.G(ctx).Info("manager removed from raft cluster, stopping manager")
        clearData = true
    case <-ctx.Done():
        return false, ctx.Err()
    }
    return clearData, nil
}
func (n *Node) superviseManager(ctx context.Context, securityConfig *ca.SecurityConfig, rootPaths ca.CertPaths, ready chan struct{}, renewer *ca.TLSRenewer) error {
    for {
        if err := n.waitRole(ctx, ca.ManagerRole); err != nil {
            return err
        }

        workerRole := make(chan struct{})
        waitRoleCtx, waitRoleCancel := context.WithCancel(ctx)
        go func() {
            if n.waitRole(waitRoleCtx, ca.WorkerRole) == nil {
                close(workerRole)
            }
        }()

        wasRemoved, err := n.runManager(ctx, securityConfig, rootPaths, ready, workerRole)
        if err != nil {
            waitRoleCancel()
            return errors.Wrap(err, "manager stopped")
        }

        // If the manager stopped running and our role is still
        // "manager", it's possible that the manager was demoted and
        // the agent hasn't realized this yet. We should wait for the
        // role to change instead of restarting the manager immediately.
        err = func() error {
            timer := time.NewTimer(roleChangeTimeout)
            defer timer.Stop()
            defer waitRoleCancel()

            select {
            case <-timer.C:
            case <-workerRole:
                return nil
            case <-ctx.Done():
                return ctx.Err()
            }

            if !wasRemoved {
                log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager")
                return nil
            }

            // We need to be extra careful about restarting the
            // manager. It may cause the node to wrongly join under
            // a new Raft ID. Since we didn't see a role change
            // yet, force a certificate renewal. If the certificate
            // comes back with a worker role, we know we shouldn't
            // restart the manager. However, if we don't see
            // workerRole get closed, it means we didn't switch to
            // a worker certificate, either because we couldn't
            // contact a working CA, or because we've been
            // re-promoted. In this case, we must assume we were
            // re-promoted, and restart the manager.
            log.G(ctx).Warn("failed to get worker role after manager stop, forcing certificate renewal")
            timer.Reset(roleChangeTimeout)
            renewer.Renew()

            // Now that the renewal request has been sent to the
            // renewal goroutine, wait for a change in role.
            select {
            case <-timer.C:
                log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager")
            case <-workerRole:
            case <-ctx.Done():
                return ctx.Err()
            }

            return nil
        }()
        if err != nil {
            return err
        }

        ready = nil
    }
}
type persistentRemotes struct {
    sync.RWMutex
    c *sync.Cond
    remotes.Remotes
    storePath      string
    lastSavedState []api.Peer
}

func newPersistentRemotes(f string, peers ...api.Peer) *persistentRemotes {
    pr := &persistentRemotes{
        storePath: f,
        Remotes:   remotes.NewRemotes(peers...),
    }
    pr.c = sync.NewCond(pr.RLocker())
    return pr
}
func (s *persistentRemotes) Observe(peer api.Peer, weight int) {
    s.Lock()
    defer s.Unlock()
    s.Remotes.Observe(peer, weight)
    s.c.Broadcast()
    if err := s.save(); err != nil {
        logrus.Errorf("error writing cluster state file: %v", err)
    }
}

func (s *persistentRemotes) Remove(peers ...api.Peer) {
    s.Lock()
    defer s.Unlock()
    s.Remotes.Remove(peers...)
    if err := s.save(); err != nil {
        logrus.Errorf("error writing cluster state file: %v", err)
    }
}
func (s *persistentRemotes) save() error {
    weights := s.Weights()
    remotes := make([]api.Peer, 0, len(weights))
    for r := range weights {
        remotes = append(remotes, r)
    }
    sort.Sort(sortablePeers(remotes))
    if reflect.DeepEqual(remotes, s.lastSavedState) {
        return nil
    }
    dt, err := json.Marshal(remotes)
    if err != nil {
        return err
    }
    s.lastSavedState = remotes
    return ioutils.AtomicWriteFile(s.storePath, dt, 0600)
}
// WaitSelect waits until at least one remote becomes available and then selects one.
func (s *persistentRemotes) WaitSelect(ctx context.Context) <-chan api.Peer {
    c := make(chan api.Peer, 1)
    s.RLock()
    done := make(chan struct{})
    go func() {
        select {
        case <-ctx.Done():
            s.c.Broadcast()
        case <-done:
        }
    }()
    go func() {
        defer s.RUnlock()
        defer close(c)
        defer close(done)
        for {
            if ctx.Err() != nil {
                return
            }
            p, err := s.Select()
            if err == nil {
                c <- p
                return
            }
            s.c.Wait()
        }
    }()
    return c
}
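
// A minimal usage sketch, assuming pr is a *persistentRemotes (hypothetical
// name): the returned channel delivers at most one peer and is then closed,
// or is closed without a value if ctx is cancelled first.
//
//    if peer, ok := <-pr.WaitSelect(ctx); ok {
//        // peer.Addr is the selected remote manager address
//    }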
// sortablePeers is a sort wrapper for []api.Peer
type sortablePeers []api.Peer

func (sp sortablePeers) Less(i, j int) bool { return sp[i].NodeID < sp[j].NodeID }

func (sp sortablePeers) Len() int { return len(sp) }

func (sp sortablePeers) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
// firstSessionErrorTracker is a utility that helps determine whether the agent should exit after
// a TLS failure on establishing the first session. This should only happen if a join address
// is specified. If establishing the first session succeeds, but later on some session fails
// because of a TLS error, we don't want to exit the agent because a previously successful
// session indicates that the TLS error may be a transient issue.
type firstSessionErrorTracker struct {
    mu               sync.Mutex
    pastFirstSession bool
    err              error
}
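
// The tracker is wired up in runAgent above: when Config.JoinAddr is set,
// agentConfig.SessionTracker is assigned a *firstSessionErrorTracker, so a
// TLS failure on the very first session aborts the join attempt instead of
// retrying indefinitely.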
func (fs *firstSessionErrorTracker) SessionEstablished() {
    fs.mu.Lock()
    fs.pastFirstSession = true
    fs.mu.Unlock()
}

func (fs *firstSessionErrorTracker) SessionError(err error) {
    fs.mu.Lock()
    fs.err = err
    fs.mu.Unlock()
}

func (fs *firstSessionErrorTracker) SessionClosed() error {
    fs.mu.Lock()
    defer fs.mu.Unlock()
    // unfortunately grpc connection errors are type grpc.rpcError, which are not exposed, and we can't get at the underlying error type
    if !fs.pastFirstSession && grpc.Code(fs.err) == codes.Internal &&
        strings.HasPrefix(grpc.ErrorDesc(fs.err), "connection error") && strings.Contains(grpc.ErrorDesc(fs.err), "transport: x509:") {
        return fs.err
    }
    return nil
}