client.go 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783
  1. package remote // import "github.com/docker/docker/libcontainerd/remote"
  2. import (
  3. "context"
  4. "encoding/json"
  5. "io"
  6. "os"
  7. "path/filepath"
  8. "reflect"
  9. "runtime"
  10. "strings"
  11. "sync"
  12. "syscall"
  13. "time"
  14. "github.com/containerd/containerd"
  15. apievents "github.com/containerd/containerd/api/events"
  16. "github.com/containerd/containerd/api/types"
  17. "github.com/containerd/containerd/archive"
  18. "github.com/containerd/containerd/cio"
  19. "github.com/containerd/containerd/content"
  20. cerrdefs "github.com/containerd/containerd/errdefs"
  21. "github.com/containerd/containerd/events"
  22. "github.com/containerd/containerd/images"
  23. "github.com/containerd/containerd/log"
  24. v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
  25. "github.com/containerd/typeurl/v2"
  26. "github.com/docker/docker/errdefs"
  27. "github.com/docker/docker/libcontainerd/queue"
  28. libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
  29. "github.com/docker/docker/pkg/ioutils"
  30. "github.com/hashicorp/go-multierror"
  31. ocispec "github.com/opencontainers/image-spec/specs-go/v1"
  32. specs "github.com/opencontainers/runtime-spec/specs-go"
  33. "github.com/pkg/errors"
  34. "google.golang.org/grpc/codes"
  35. "google.golang.org/grpc/status"
  36. )
// DockerContainerBundlePath is the label key pointing to the container's bundle path,
// i.e. the on-disk state directory created for the container under the client's stateDir.
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
// client is a libcontainerd client backed by a remote containerd daemon.
type client struct {
	client   *containerd.Client
	stateDir string // base directory under which per-container bundle dirs are created
	logger   *log.Entry
	ns       string // containerd namespace used for event-stream filtering
	backend  libcontainerdtypes.Backend
	eventQ   queue.Queue // serializes event delivery per container ID
}
// container pairs a containerd.Container with the client that created it
// and, when applicable, the v2 runc options it was created with.
type container struct {
	client *client
	c8dCtr containerd.Container

	// v2runcoptions is non-nil only when the container was created with
	// *v2runcoptions.Options as its runtime options.
	v2runcoptions *v2runcoptions.Options
}
// task wraps a containerd.Task with a reference back to its owning container.
type task struct {
	containerd.Task
	ctr *container

	// Workaround for https://github.com/containerd/containerd/issues/8557.
	// See also https://github.com/moby/moby/issues/45595.
	serializeExecStartsWorkaround sync.Mutex
}
// process wraps a containerd.Process so its methods can translate errors
// through wrapError.
type process struct {
	containerd.Process
}
  62. // NewClient creates a new libcontainerd client from a containerd client
  63. func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
  64. c := &client{
  65. client: cli,
  66. stateDir: stateDir,
  67. logger: log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
  68. ns: ns,
  69. backend: b,
  70. }
  71. go c.processEventStream(ctx, ns)
  72. return c, nil
  73. }
// Version returns the version information of the remote containerd daemon.
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return c.client.Version(ctx)
}
// newTask wraps a containerd task together with its owning container.
func (c *container) newTask(t containerd.Task) *task {
	return &task{Task: t, ctr: c}
}
// AttachTask looks up the container's existing task in containerd and
// reattaches the daemon's stdio streams to it via attachStdio. The fifo
// IO is cancelled and closed if anything fails after it was created.
func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}

	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}
  103. func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
  104. bdir := c.bundleDir(id)
  105. c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
  106. newOpts := []containerd.NewContainerOpts{
  107. containerd.WithSpec(ociSpec),
  108. containerd.WithRuntime(shim, runtimeOptions),
  109. WithBundle(bdir, ociSpec),
  110. }
  111. opts = append(opts, newOpts...)
  112. ctr, err := c.client.NewContainer(ctx, id, opts...)
  113. if err != nil {
  114. if cerrdefs.IsAlreadyExists(err) {
  115. return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
  116. }
  117. return nil, wrapError(err)
  118. }
  119. created := container{
  120. client: c,
  121. c8dCtr: ctr,
  122. }
  123. if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
  124. created.v2runcoptions = x
  125. }
  126. return &created, nil
  127. }
// Start create and start a task for the specified containerd id
//
// When checkpointDir is non-empty, the checkpoint data is first uploaded
// to the containerd content store (and deleted again when Start returns).
// stdinCloseSync is handed to createIO so that closing the container's
// stdin can be deferred until the task actually exists.
func (c *container) Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	var (
		checkpoint     *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		var err error
		checkpoint, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if checkpoint != nil {
				err := c.client.client.ContentStore().Delete(ctx, checkpoint.Digest)
				if err != nil {
					c.client.logger.WithError(err).WithFields(log.Fields{
						"ref":    checkpointDir,
						"digest": checkpoint.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		// Close the tar stream before inspecting the writeContent error so
		// the pipe is always released.
		if err := tar.Close(); err != nil {
			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	// Optimization: assume the relevant metadata has not changed in the
	// moment since the container was created. Elide redundant RPC requests
	// to refresh the metadata separately for spec and labels.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve metadata")
	}
	bundle := md.Labels[DockerContainerBundlePath]

	var spec specs.Spec
	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
		return nil, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(&spec)

	taskOpts := []containerd.NewTaskOpts{
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = checkpoint
			return nil
		},
	}

	if runtime.GOOS != "windows" {
		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			// Pass the spec's uid/gid to the v2 runc shim so the IO fifos
			// are created with the right ownership; copy the options so the
			// container's stored options are not mutated.
			if c.v2runcoptions != nil {
				opts := *c.v2runcoptions
				opts.IoUid = uint32(uid)
				opts.IoGid = uint32(gid)
				info.Options = &opts
			}
			return nil
		})
	} else {
		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
	}

	t, err = c.c8dCtr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, id, withStdin, spec.Process.Terminal)
			rio, err = c.createIO(fifos, stdinCloseSync, attachStdio)
			return rio, err
		},
		taskOpts...,
	)
	if err != nil {
		// No process will ever arrive on stdinCloseSync; closing it lets
		// any stdin-close waiter in createIO return.
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
	}

	// Signal c.createIO that it can call CloseIO
	stdinCloseSync <- t

	if err := t.Start(ctx); err != nil {
		// Only Stopped tasks can be deleted. Created tasks have to be
		// killed first, to transition them to Stopped.
		if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil {
			c.client.logger.WithError(err).WithField("container", c.c8dCtr.ID()).
				Error("failed to delete task after fail start")
		}
		return nil, wrapError(err)
	}

	return c.newTask(t), nil
}
// Exec creates exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create stdin fifo if needs. But
// for the container main process, the stdin fifo will be created in Create not
// the Start call. stdinCloseSync channel should be closed after Start exec
// process.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	// Clean up the fifo IO on any failure below; rio is assigned inside
	// the Exec IO-attach callback.
	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = t.ctr.createIO(fifos, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		// No process was registered; closing the channel unblocks any
		// stdin-close waiter in createIO.
		close(stdinCloseSync)
		if cerrdefs.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	//
	// the stdin of exec process will be created after p.Start in containerd
	defer func() { stdinCloseSync <- p }()

	err = func() error {
		// Serialize concurrent exec starts; see the comment on the
		// serializeExecStartsWorkaround field.
		t.serializeExecStartsWorkaround.Lock()
		defer t.serializeExecStartsWorkaround.Unlock()
		return p.Start(ctx)
	}()
	if err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		// Best-effort cleanup; the delete error is discarded.
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}
  280. func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
  281. return wrapError(t.Task.Kill(ctx, signal))
  282. }
  283. func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
  284. return wrapError(p.Process.Kill(ctx, signal))
  285. }
  286. func (t *task) Pause(ctx context.Context) error {
  287. return wrapError(t.Task.Pause(ctx))
  288. }
  289. func (t *task) Resume(ctx context.Context) error {
  290. return wrapError(t.Task.Resume(ctx))
  291. }
  292. func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
  293. m, err := t.Metrics(ctx)
  294. if err != nil {
  295. return nil, err
  296. }
  297. v, err := typeurl.UnmarshalAny(m.Data)
  298. if err != nil {
  299. return nil, err
  300. }
  301. return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
  302. }
  303. func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
  304. pis, err := t.Pids(ctx)
  305. if err != nil {
  306. return nil, err
  307. }
  308. var infos []libcontainerdtypes.Summary
  309. for _, pi := range pis {
  310. i, err := typeurl.UnmarshalAny(pi.Info)
  311. if err != nil {
  312. return nil, errors.Wrap(err, "unable to decode process details")
  313. }
  314. s, err := summaryFromInterface(i)
  315. if err != nil {
  316. return nil, err
  317. }
  318. infos = append(infos, *s)
  319. }
  320. return infos, nil
  321. }
  322. func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  323. s, err := t.Task.Delete(ctx)
  324. return s, wrapError(err)
  325. }
  326. func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  327. s, err := p.Process.Delete(ctx)
  328. return s, wrapError(err)
  329. }
  330. func (c *container) Delete(ctx context.Context) error {
  331. // Optimization: assume the DockerContainerBundlePath label has not been
  332. // updated since the container metadata was last loaded/refreshed.
  333. md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
  334. if err != nil {
  335. return err
  336. }
  337. bundle := md.Labels[DockerContainerBundlePath]
  338. if err := c.c8dCtr.Delete(ctx); err != nil {
  339. return wrapError(err)
  340. }
  341. if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
  342. if err := os.RemoveAll(bundle); err != nil {
  343. c.client.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
  344. "container": c.c8dCtr.ID(),
  345. "bundle": bundle,
  346. }).Error("failed to remove state dir")
  347. }
  348. }
  349. return nil
  350. }
  351. func (t *task) ForceDelete(ctx context.Context) error {
  352. _, err := t.Task.Delete(ctx, containerd.WithProcessKill)
  353. return wrapError(err)
  354. }
  355. func (t *task) Status(ctx context.Context) (containerd.Status, error) {
  356. s, err := t.Task.Status(ctx)
  357. return s, wrapError(err)
  358. }
  359. func (p process) Status(ctx context.Context) (containerd.Status, error) {
  360. s, err := p.Process.Status(ctx)
  361. return s, wrapError(err)
  362. }
// getCheckpointOptions returns a CheckpointTaskOpts that sets the Exit flag
// on the task's runc checkpoint options, so the checkpointed task either
// keeps running (exit=false) or stops (exit=true).
func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
	return func(r *containerd.CheckpointTaskInfo) error {
		// Only initialize options for containers created with v2 runc
		// options; for other runtimes r.Options is left untouched, and the
		// type switch below becomes a no-op.
		if r.Options == nil && c.v2runcoptions != nil {
			r.Options = &v2runcoptions.CheckpointOptions{}
		}
		switch opts := r.Options.(type) {
		case *v2runcoptions.CheckpointOptions:
			opts.Exit = exit
		}
		return nil
	}
}
  375. func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
  376. img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
  377. if err != nil {
  378. return wrapError(err)
  379. }
  380. // Whatever happens, delete the checkpoint from containerd
  381. defer func() {
  382. err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
  383. if err != nil {
  384. t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
  385. Warnf("failed to delete checkpoint image")
  386. }
  387. }()
  388. b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
  389. if err != nil {
  390. return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
  391. }
  392. var index ocispec.Index
  393. if err := json.Unmarshal(b, &index); err != nil {
  394. return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
  395. }
  396. var cpDesc *ocispec.Descriptor
  397. for _, m := range index.Manifests {
  398. m := m
  399. if m.MediaType == images.MediaTypeContainerd1Checkpoint {
  400. cpDesc = &m //nolint:gosec
  401. break
  402. }
  403. }
  404. if cpDesc == nil {
  405. return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
  406. }
  407. rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
  408. if err != nil {
  409. return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
  410. }
  411. defer rat.Close()
  412. _, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
  413. if err != nil {
  414. return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
  415. }
  416. return err
  417. }
  418. // LoadContainer loads the containerd container.
  419. func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
  420. ctr, err := c.client.LoadContainer(ctx, id)
  421. if err != nil {
  422. if cerrdefs.IsNotFound(err) {
  423. return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  424. }
  425. return nil, wrapError(err)
  426. }
  427. return &container{client: c, c8dCtr: ctr}, nil
  428. }
  429. func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
  430. t, err := c.c8dCtr.Task(ctx, nil)
  431. if err != nil {
  432. return nil, wrapError(err)
  433. }
  434. return c.newTask(t), nil
  435. }
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
//
// stdinCloseSync delivers the containerd process once it exists; a closed
// channel means task/exec creation failed and there is no process whose
// stdin could be closed.
func (c *container) createIO(fifos *cio.FIFOSet, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			closeErr  error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		// Wrap stdin so that closing it also asks the process to close its
		// stdin stream, exactly once.
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				closeErr = pipe.Close()

				select {
				case p, ok := <-stdinCloseSync:
					if !ok {
						// Channel closed: creation failed, nothing to close.
						return
					}
					if err := closeStdin(context.Background(), p); err != nil {
						if closeErr != nil {
							closeErr = multierror.Append(closeErr, err)
						} else {
							// Avoid wrapping a single error in a multierror.
							closeErr = err
						}
					}
				default:
					// The process wasn't ready. Close its stdin asynchronously.
					go func() {
						p, ok := <-stdinCloseSync
						if !ok {
							return
						}
						if err := closeStdin(context.Background(), p); err != nil {
							c.client.logger.WithError(err).
								WithField("container", c.c8dCtr.ID()).
								Error("failed to close container stdin")
						}
					}()
				}
			})
			return closeErr
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		io.Cancel()
		io.Close()
	}
	return rio, err
}
  494. func closeStdin(ctx context.Context, p containerd.Process) error {
  495. err := p.CloseIO(ctx, containerd.WithStdinCloser)
  496. if err != nil && strings.Contains(err.Error(), "transport is closing") {
  497. err = nil
  498. }
  499. return err
  500. }
  501. func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
  502. c.eventQ.Append(ei.ContainerID, func() {
  503. err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
  504. if err != nil {
  505. c.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
  506. "container": ei.ContainerID,
  507. "event": et,
  508. "event-info": ei,
  509. }).Error("failed to process event")
  510. }
  511. })
  512. }
// waitServe polls until the containerd API reports it is serving, or until
// ctx ends. It returns true when containerd is ready, false when the
// context was cancelled or its deadline passed first.
func (c *client) waitServe(ctx context.Context) bool {
	t := 100 * time.Millisecond
	delay := time.NewTimer(t)
	// Stop and drain the fresh timer so the first Reset below starts a
	// clean interval (Reset on an un-drained timer would fire early).
	if !delay.Stop() {
		<-delay.C
	}
	defer delay.Stop()

	// `IsServing` will actually block until the service is ready.
	// However it can return early, so we'll loop with a delay to handle it.
	for {
		serving, err := c.client.IsServing(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return false
			}
			// Transient errors are logged and retried after the delay.
			log.G(ctx).WithError(err).Warn("Error while testing if containerd API is ready")
		}
		if serving {
			return true
		}

		delay.Reset(t)
		select {
		case <-ctx.Done():
			return false
		case <-delay.C:
		}
	}
}
// processEventStream subscribes to the containerd event stream for the
// given namespace and forwards task lifecycle events to the backend via
// processEvent. It runs until ctx is cancelled; on a (non-cancellation)
// stream error it waits for containerd to become ready again and respawns
// itself with the original context.
func (c *client) processEventStream(ctx context.Context, ns string) {
	var (
		err error
		ev  *events.Envelope
		et  libcontainerdtypes.EventType
		ei  libcontainerdtypes.EventInfo
	)

	// Create a new context specifically for this subscription.
	// The context must be cancelled to cancel the subscription.
	// In cases where we have to restart event stream processing,
	// we'll need the original context b/c this one will be cancelled
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string
	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		select {
		case err = <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					// A real stream failure (not our own cancellation):
					// wait for containerd to serve again, then restart
					// this loop in a fresh goroutine.
					c.logger.WithError(err).Error("Failed to get event")
					c.logger.Info("Waiting for containerd to be ready to restart event processing")
					if c.waitServe(ctx) {
						go c.processEventStream(ctx, ns)
						return
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev = <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			// Translate the containerd event payload into the
			// libcontainerd event type/info the backend understands.
			switch t := v.(type) {
			case *apievents.TaskCreate:
				et = libcontainerdtypes.EventCreate
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskStart:
				et = libcontainerdtypes.EventStart
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskExit:
				et = libcontainerdtypes.EventExit
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    t.ExitedAt,
				}
			case *apievents.TaskOOM:
				et = libcontainerdtypes.EventOOM
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskExecAdded:
				et = libcontainerdtypes.EventExecAdded
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				}
			case *apievents.TaskExecStarted:
				et = libcontainerdtypes.EventExecStarted
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				}
			case *apievents.TaskPaused:
				et = libcontainerdtypes.EventPaused
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskResumed:
				et = libcontainerdtypes.EventResumed
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskDelete:
				// Delete events are logged and dropped, not forwarded.
				c.logger.WithFields(log.Fields{
					"topic":     ev.Topic,
					"type":      reflect.TypeOf(t),
					"container": t.ContainerID,
				},
				).Info("ignoring event")
				continue
			default:
				c.logger.WithFields(log.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t),
				},
				).Info("ignoring event")
				continue
			}

			c.processEvent(ctx, et, ei)
		}
	}
}
  657. func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
  658. writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
  659. if err != nil {
  660. return nil, err
  661. }
  662. defer writer.Close()
  663. size, err := io.Copy(writer, r)
  664. if err != nil {
  665. return nil, err
  666. }
  667. labels := map[string]string{
  668. "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
  669. }
  670. if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
  671. return nil, err
  672. }
  673. return &types.Descriptor{
  674. MediaType: mediaType,
  675. Digest: writer.Digest(),
  676. Size_: size,
  677. }, nil
  678. }
// bundleDir returns the bundle directory path for the container with the
// given id, located under the client's state directory.
func (c *client) bundleDir(id string) string {
	return filepath.Join(c.stateDir, id)
}
  682. func wrapError(err error) error {
  683. switch {
  684. case err == nil:
  685. return nil
  686. case cerrdefs.IsNotFound(err):
  687. return errdefs.NotFound(err)
  688. }
  689. msg := err.Error()
  690. for _, s := range []string{"container does not exist", "not found", "no such container"} {
  691. if strings.Contains(msg, s) {
  692. return errdefs.NotFound(err)
  693. }
  694. }
  695. return err
  696. }