controller.go

package container // import "github.com/docker/docker/daemon/cluster/executor/container"

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/docker/libnetwork"
	"github.com/docker/go-connections/nat"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/moby/swarmkit/v2/agent/exec"
	"github.com/moby/swarmkit/v2/api"
	"github.com/moby/swarmkit/v2/log"
	"github.com/pkg/errors"
	"golang.org/x/time/rate"
)

const defaultGossipConvergeDelay = 2 * time.Second

// waitNodeAttachmentsTimeout defines the total period of time we should wait
// for node attachments to be ready before giving up on starting a task
const waitNodeAttachmentsTimeout = 30 * time.Second

// controller implements agent.Controller against docker's API.
//
// Most operations against docker's API are done through the container name,
// which is unique to the task.
type controller struct {
	task       *api.Task
	adapter    *containerAdapter
	closed     chan struct{}
	err        error
	pulled     chan struct{} // closed after pull
	cancelPull func()        // cancels pull context if not nil
	pullErr    error         // pull error, only read after pulled closed
}

var _ exec.Controller = &controller{}

// newController returns a docker exec runner for the provided task.
func newController(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) {
	adapter, err := newContainerAdapter(b, i, v, task, node, dependencies)
	if err != nil {
		return nil, err
	}

	return &controller{
		task:    task,
		adapter: adapter,
		closed:  make(chan struct{}),
	}, nil
}
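
// Task returns the task object this controller was created for.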
func (r *controller) Task() (*api.Task, error) {
	return r.task, nil
}

// ContainerStatus returns the container-specific status for the task.
func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		if isUnknownContainer(err) {
			return nil, nil
		}
		return nil, err
	}
	return parseContainerStatus(ctnr)
}
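
// PortStatus returns the port status (published host ports) for the task's container.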
func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) {
	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		if isUnknownContainer(err) {
			return nil, nil
		}
		return nil, err
	}

	return parsePortStatus(ctnr)
}

// Update takes a recent task update and applies it to the container.
func (r *controller) Update(ctx context.Context, t *api.Task) error {
	// TODO(stevvooe): While assignment of tasks is idempotent, we do allow
	// updates of metadata, such as labelling, as well as any other properties
	// that make sense.
	return nil
}

// Prepare creates a container and ensures the image is pulled.
//
// If the container has already been created, exec.ErrTaskPrepared is returned.
func (r *controller) Prepare(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	// Before we create networks, we need to make sure that the node has all of
	// the network attachments that the task needs. This will block until that
	// is the case or the context has expired.
	// NOTE(dperny): Prepare doesn't time out on its own (that is, the context
	// passed in does not expire after any period of time), which means if the
	// node attachment never arrives (for example, if the network's IP address
	// space is exhausted), then the tasks on the node will park in PREPARING
	// forever (or until the node dies). To avoid this case, we create a new
	// context with a fixed deadline, and give up. In normal operation, a node
	// update with the node IP address should come in hot on the tail of the
	// task being assigned to the node, and this should exit on the order of
	// milliseconds, but to be extra conservative we'll give it 30 seconds to
	// time out before giving up.
	waitNodeAttachmentsContext, waitCancel := context.WithTimeout(ctx, waitNodeAttachmentsTimeout)
	defer waitCancel()
	if err := r.adapter.waitNodeAttachments(waitNodeAttachmentsContext); err != nil {
		return err
	}

	// Cluster volumes could take a while to become available, so wait for up
	// to 5 minutes before giving up.
	// TODO(dperny): do this more intelligently. return a better error.
	waitClusterVolumesCtx, wcvcancel := context.WithTimeout(ctx, 5*time.Minute)
	defer wcvcancel()
	if err := r.adapter.waitClusterVolumes(waitClusterVolumesCtx); err != nil {
		return err
	}

	// Make sure all the networks that the task needs are created.
	if err := r.adapter.createNetworks(ctx); err != nil {
		return err
	}

	// Make sure all the volumes that the task needs are created.
	if err := r.adapter.createVolumes(ctx); err != nil {
		return err
	}

	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
		if r.pulled == nil {
			// Fork the pull to a different context to allow pull to continue
			// on re-entrant calls to Prepare. This ensures that Prepare can be
			// idempotent and not incur the extra cost of pulling when
			// cancelled on updates.
			var pctx context.Context

			r.pulled = make(chan struct{})
			pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller.

			go func() {
				defer close(r.pulled)
				r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled
			}()
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-r.pulled:
			if r.pullErr != nil {
				// NOTE(stevvooe): We always try to pull the image to make sure we have
				// the most up to date version. This will return an error, but we only
				// log it. If the image truly doesn't exist, the create below will
				// error out.
				//
				// This gives us some nice behavior where we use up to date versions of
				// mutable tags, but will still run if the old image is available but a
				// registry is down.
				//
				// If you don't want this behavior, lock down your image to an
				// immutable tag or digest.
				log.G(ctx).WithError(r.pullErr).Error("pulling image failed")
			}
		}
	}

	if err := r.adapter.create(ctx); err != nil {
		if isContainerCreateNameConflict(err) {
			if _, err := r.adapter.inspect(ctx); err != nil {
				return err
			}

			// container is already created. success!
			return exec.ErrTaskPrepared
		}

		return err
	}

	return nil
}

// Start the container. An error will be returned if the container is already started.
func (r *controller) Start(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		return err
	}

	// Detect whether the container has *ever* been started. If so, we don't
	// issue the start.
	//
	// TODO(stevvooe): This is very racy. While reading inspect, another could
	// start the process and we could end up starting it twice.
	if ctnr.State.Status != "created" {
		return exec.ErrTaskStarted
	}

	var lnErr libnetwork.ErrNoSuchNetwork
	for {
		if err := r.adapter.start(ctx); err != nil {
			if errors.As(err, &lnErr) {
				// Retry network creation again if we
				// failed because some of the networks
				// were not found.
				if err := r.adapter.createNetworks(ctx); err != nil {
					return err
				}
				continue
			}
			return errors.Wrap(err, "starting container failed")
		}
		break
	}

	// no health check
	if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" {
		if err := r.adapter.activateServiceBinding(); err != nil {
			log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name())
			return err
		}
		return nil
	}

	// wait for container to be healthy
	eventq := r.adapter.events(ctx)

	var healthErr error
	for {
		select {
		case event := <-eventq:
			if !r.matchevent(event) {
				continue
			}

			switch event.Action {
			case events.ActionDie: // exit on terminal events
				ctnr, err := r.adapter.inspect(ctx)
				if err != nil {
					return errors.Wrap(err, "die event received")
				} else if ctnr.State.ExitCode != 0 {
					return &exitError{code: ctnr.State.ExitCode, cause: healthErr}
				}

				return nil
			case events.ActionDestroy:
				// If we get here, something has gone wrong but we want to exit
				// and report anyway.
				return ErrContainerDestroyed
			case events.ActionHealthStatusUnhealthy:
				// in this case, we stop the container and report unhealthy status
				if err := r.Shutdown(ctx); err != nil {
					return errors.Wrap(err, "unhealthy container shutdown failed")
				}
				// set health check error, and wait for container to fully exit ("die" event)
				healthErr = ErrContainerUnhealthy
			case events.ActionHealthStatusHealthy:
				if err := r.adapter.activateServiceBinding(); err != nil {
					log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name())
					return err
				}
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		case <-r.closed:
			return r.err
		}
	}
}

// Wait on the container to exit.
func (r *controller) Wait(pctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(pctx)
	defer cancel()

	healthErr := make(chan error, 1)
	go func() {
		ectx, cancel := context.WithCancel(ctx) // cancel event context on first event
		defer cancel()
		if err := r.checkHealth(ectx); err == ErrContainerUnhealthy {
			healthErr <- ErrContainerUnhealthy
			if err := r.Shutdown(ectx); err != nil {
				log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy")
			}
		}
	}()

	waitC, err := r.adapter.wait(ctx)
	if err != nil {
		return err
	}

	if status := <-waitC; status.ExitCode() != 0 {
		exitErr := &exitError{
			code: status.ExitCode(),
		}

		// Set the cause if it is knowable.
		select {
		case e := <-healthErr:
			exitErr.cause = e
		default:
			if status.Err() != nil {
				exitErr.cause = status.Err()
			}
		}

		return exitErr
	}

	return nil
}
func (r *controller) hasServiceBinding() bool {
	if r.task == nil {
		return false
	}

	// service is attached to a network besides the default bridge
	for _, na := range r.task.Networks {
		if na.Network == nil ||
			na.Network.DriverState == nil ||
			(na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge") {
			continue
		}
		return true
	}

	return false
}

// Shutdown the container cleanly.
func (r *controller) Shutdown(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if r.cancelPull != nil {
		r.cancelPull()
	}

	if r.hasServiceBinding() {
		// remove container from service binding
		if err := r.adapter.deactivateServiceBinding(); err != nil {
			log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name())
			// Don't return an error here, because failure to deactivate
			// the service binding is expected if the container was never
			// started.
		}

		// add a delay for gossip converge
		// TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay.
		time.Sleep(defaultGossipConvergeDelay)
	}

	if err := r.adapter.shutdown(ctx); err != nil {
		if !(isUnknownContainer(err) || isStoppedContainer(err)) {
			return err
		}
	}

	// Try removing networks referenced in this task in case this
	// task is the last one referencing it
	if err := r.adapter.removeNetworks(ctx); err != nil {
		if !isUnknownContainer(err) {
			return err
		}
	}

	return nil
}

// Terminate the container, with force.
func (r *controller) Terminate(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if r.cancelPull != nil {
		r.cancelPull()
	}

	if err := r.adapter.terminate(ctx); err != nil {
		if isUnknownContainer(err) {
			return nil
		}
		return err
	}

	return nil
}

// Remove the container and its resources.
func (r *controller) Remove(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if r.cancelPull != nil {
		r.cancelPull()
	}

	// It may be necessary to shut down the task before removing it.
	if err := r.Shutdown(ctx); err != nil {
		if isUnknownContainer(err) {
			return nil
		}
		// This may fail if the task was already shut down.
		log.G(ctx).WithError(err).Debug("shutdown failed on removal")
	}

	if err := r.adapter.remove(ctx); err != nil {
		if isUnknownContainer(err) {
			return nil
		}
		return err
	}
	return nil
}

// waitReady waits for a container to be "ready".
// Ready means it's past the started state.
func (r *controller) waitReady(pctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(pctx)
	defer cancel()

	eventq := r.adapter.events(ctx)

	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		if !isUnknownContainer(err) {
			return errors.Wrap(err, "inspect container failed")
		}
	} else {
		switch ctnr.State.Status {
		case "running", "exited", "dead":
			return nil
		}
	}

	for {
		select {
		case event := <-eventq:
			if !r.matchevent(event) {
				continue
			}

			switch event.Action {
			case "start":
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		case <-r.closed:
			return r.err
		}
	}
}
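
// Logs streams the container's log messages to the provided publisher until
// the log stream ends or the context is canceled.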
func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	// if we're following, wait for this container to be ready. there is a
	// problem here: if the container will never be ready (for example, it has
	// been totally deleted) then this will wait forever. however, this doesn't
	// actually cause any UI issues, and shouldn't be a problem. the stuck wait
	// will go away when the follow (context) is canceled.
	if options.Follow {
		if err := r.waitReady(ctx); err != nil {
			return errors.Wrap(err, "container not ready for logs")
		}
	}
	// if we're not following, we're not gonna wait for the container to be
	// ready. just call logs. if the container isn't ready, the call will fail
	// and return an error. no big deal, we don't care, we only want the logs
	// we can get RIGHT NOW with no follow
	logsContext, cancel := context.WithCancel(ctx)
	msgs, err := r.adapter.logs(logsContext, options)
	defer cancel()
	if err != nil {
		return errors.Wrap(err, "failed getting container logs")
	}

	var (
		// use a rate limiter to keep things under control but also to provide
		// some ability to coalesce messages.
		// this implements a "token bucket" of size 10 MB, initially full and
		// refilled at a rate of 10 MB per second.
		limiter = rate.NewLimiter(10<<20, 10<<20) // 10 MB/s
		msgctx  = api.LogContext{
			NodeID:    r.task.NodeID,
			ServiceID: r.task.ServiceID,
			TaskID:    r.task.ID,
		}
	)

	for {
		msg, ok := <-msgs
		if !ok {
			// we're done here, no more messages
			return nil
		}

		if msg.Err != nil {
			// the deferred cancel closes the adapter's log stream
			return msg.Err
		}

		// wait here for the limiter to catch up
		if err := limiter.WaitN(ctx, len(msg.Line)); err != nil {
			return errors.Wrap(err, "failed rate limiter")
		}
		tsp, err := gogotypes.TimestampProto(msg.Timestamp)
		if err != nil {
			return errors.Wrap(err, "failed to convert timestamp")
		}
		var stream api.LogStream
		if msg.Source == "stdout" {
			stream = api.LogStreamStdout
		} else if msg.Source == "stderr" {
			stream = api.LogStreamStderr
		}

		// parse the details out of the Attrs map
		var attrs []api.LogAttr
		if len(msg.Attrs) != 0 {
			attrs = make([]api.LogAttr, 0, len(msg.Attrs))
			for _, attr := range msg.Attrs {
				attrs = append(attrs, api.LogAttr{Key: attr.Key, Value: attr.Value})
			}
		}

		if err := publisher.Publish(ctx, api.LogMessage{
			Context:   msgctx,
			Timestamp: tsp,
			Stream:    stream,
			Attrs:     attrs,
			Data:      msg.Line,
		}); err != nil {
			return errors.Wrap(err, "failed to publish log message")
		}
	}
}

// Close the runner and clean up any ephemeral resources.
func (r *controller) Close() error {
	select {
	case <-r.closed:
		return r.err
	default:
		if r.cancelPull != nil {
			r.cancelPull()
		}

		r.err = exec.ErrControllerClosed
		close(r.closed)
	}
	return nil
}
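
// matchevent reports whether the given event was emitted by this task's container.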
func (r *controller) matchevent(event events.Message) bool {
	if event.Type != events.ContainerEventType {
		return false
	}
	// we can't filter using the container id, since doing so is very likely
	// to introduce a deadlock; see #33377.
	return event.Actor.Attributes["name"] == r.adapter.container.name()
}
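
// checkClosed returns the controller's error if it has been closed, and nil otherwise.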
func (r *controller) checkClosed() error {
	select {
	case <-r.closed:
		return r.err
	default:
		return nil
	}
}
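
// parseContainerStatus builds an api.ContainerStatus from the inspected container state.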
func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) {
	status := &api.ContainerStatus{
		ContainerID: ctnr.ID,
		PID:         int32(ctnr.State.Pid),
		ExitCode:    int32(ctnr.State.ExitCode),
	}

	return status, nil
}
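
// parsePortStatus builds an api.PortStatus from the container's published port mappings.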
func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) {
	status := &api.PortStatus{}

	if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 {
		exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports)
		if err != nil {
			return nil, err
		}
		status.Ports = exposedPorts
	}

	return status, nil
}
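
// parsePortMap converts a nat.PortMap (keys of the form "80/tcp" mapped to
// host bindings) into a list of api.PortConfig entries published in host mode.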
func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) {
	exposedPorts := make([]*api.PortConfig, 0, len(portMap))

	for portProtocol, mapping := range portMap {
		p, proto, ok := strings.Cut(string(portProtocol), "/")
		if !ok {
			return nil, fmt.Errorf("invalid port mapping: %s", portProtocol)
		}

		port, err := strconv.ParseUint(p, 10, 16)
		if err != nil {
			return nil, err
		}

		var protocol api.PortConfig_Protocol
		switch strings.ToLower(proto) {
		case "tcp":
			protocol = api.ProtocolTCP
		case "udp":
			protocol = api.ProtocolUDP
		case "sctp":
			protocol = api.ProtocolSCTP
		default:
			return nil, fmt.Errorf("invalid protocol: %s", proto)
		}

		for _, binding := range mapping {
			hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
			if err != nil {
				return nil, err
			}

			// TODO(aluzzardi): We're losing the port `name` here since
			// there's no way to retrieve it back from the Engine.
			exposedPorts = append(exposedPorts, &api.PortConfig{
				PublishMode:   api.PublishModeHost,
				Protocol:      protocol,
				TargetPort:    uint32(port),
				PublishedPort: uint32(hostPort),
			})
		}
	}

	return exposedPorts, nil
}
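
// exitError wraps a non-zero container exit code together with the error that
// caused the failure, if known.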
type exitError struct {
	code  int
	cause error
}

func (e *exitError) Error() string {
	if e.cause != nil {
		return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause)
	}

	return fmt.Sprintf("task: non-zero exit (%v)", e.code)
}

func (e *exitError) ExitCode() int {
	return e.code
}

func (e *exitError) Cause() error {
	return e.cause
}

// checkHealth blocks until an unhealthy container is detected or ctx exits.
func (r *controller) checkHealth(ctx context.Context) error {
	eventq := r.adapter.events(ctx)

	for {
		select {
		case <-ctx.Done():
			return nil
		case <-r.closed:
			return nil
		case event := <-eventq:
			if !r.matchevent(event) {
				continue
			}

			switch event.Action {
			case events.ActionHealthStatusUnhealthy:
				return ErrContainerUnhealthy
			}
		}
	}
}