  1. package remote // import "github.com/docker/docker/libcontainerd/remote"
  2. import (
  3. "context"
  4. "encoding/json"
  5. "io"
  6. "os"
  7. "path/filepath"
  8. "reflect"
  9. "runtime"
  10. "strings"
  11. "sync"
  12. "syscall"
  13. "time"
  14. "github.com/containerd/containerd"
  15. apievents "github.com/containerd/containerd/api/events"
  16. "github.com/containerd/containerd/api/types"
  17. "github.com/containerd/containerd/archive"
  18. "github.com/containerd/containerd/cio"
  19. "github.com/containerd/containerd/content"
  20. cerrdefs "github.com/containerd/containerd/errdefs"
  21. "github.com/containerd/containerd/events"
  22. "github.com/containerd/containerd/images"
  23. "github.com/containerd/containerd/log"
  24. v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
  25. "github.com/containerd/typeurl/v2"
  26. "github.com/docker/docker/errdefs"
  27. "github.com/docker/docker/libcontainerd/queue"
  28. libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
  29. "github.com/docker/docker/pkg/ioutils"
  30. "github.com/hashicorp/go-multierror"
  31. ocispec "github.com/opencontainers/image-spec/specs-go/v1"
  32. specs "github.com/opencontainers/runtime-spec/specs-go"
  33. "github.com/pkg/errors"
  34. "github.com/sirupsen/logrus"
  35. "google.golang.org/grpc/codes"
  36. "google.golang.org/grpc/status"
  37. )
// DockerContainerBundlePath is the label key pointing to the container's bundle path.
// It is stored on the containerd container so the bundle directory can be
// recovered later (e.g. for FIFO paths and cleanup on delete).
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
// client is a libcontainerd client backed by a containerd gRPC client.
type client struct {
	client   *containerd.Client
	stateDir string // root directory under which per-container bundle dirs are created
	logger   *logrus.Entry
	ns       string // containerd namespace used for requests and event filtering
	backend  libcontainerdtypes.Backend
	eventQ   queue.Queue // serializes event delivery per container ID
}
// container pairs a containerd.Container with the client that created it
// and, for runc-based runtimes, the v2 runc options it was created with.
type container struct {
	client *client
	c8dCtr containerd.Container

	// v2runcoptions is non-nil only when the container was created with
	// *v2runcoptions.Options as its runtime options (see NewContainer).
	v2runcoptions *v2runcoptions.Options
}
// task wraps a containerd.Task together with its owning container.
type task struct {
	containerd.Task
	ctr *container

	// Workaround for https://github.com/containerd/containerd/issues/8557.
	// See also https://github.com/moby/moby/issues/45595.
	serializeExecStartsWorkaround sync.Mutex
}
// process wraps a containerd.Process so that method overrides in this file
// can layer Docker error mapping (wrapError) on top of the embedded methods.
type process struct {
	containerd.Process
}
  63. // NewClient creates a new libcontainerd client from a containerd client
  64. func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
  65. c := &client{
  66. client: cli,
  67. stateDir: stateDir,
  68. logger: log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
  69. ns: ns,
  70. backend: b,
  71. }
  72. go c.processEventStream(ctx, ns)
  73. return c, nil
  74. }
  75. func (c *client) Version(ctx context.Context) (containerd.Version, error) {
  76. return c.client.Version(ctx)
  77. }
  78. func (c *container) newTask(t containerd.Task) *task {
  79. return &task{Task: t, ctr: c}
  80. }
// AttachTask looks up the existing containerd task for this container and
// re-attaches its IO streams via attachStdio. It is used to reconnect to
// tasks that survived a daemon restart.
func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		// If anything fails after the FIFOs were opened, cancel and close
		// the direct IO so the fifo files and goroutines are not leaked.
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}
  104. func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
  105. bdir := c.bundleDir(id)
  106. c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
  107. newOpts := []containerd.NewContainerOpts{
  108. containerd.WithSpec(ociSpec),
  109. containerd.WithRuntime(shim, runtimeOptions),
  110. WithBundle(bdir, ociSpec),
  111. }
  112. opts = append(opts, newOpts...)
  113. ctr, err := c.client.NewContainer(ctx, id, opts...)
  114. if err != nil {
  115. if cerrdefs.IsAlreadyExists(err) {
  116. return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
  117. }
  118. return nil, wrapError(err)
  119. }
  120. created := container{
  121. client: c,
  122. c8dCtr: ctr,
  123. }
  124. if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
  125. created.v2runcoptions = x
  126. }
  127. return &created, nil
  128. }
// Start create and start a task for the specified containerd id.
// When checkpointDir is non-empty the directory contents are uploaded to the
// containerd content store and the task is created from that checkpoint; the
// uploaded blob is deleted again before Start returns. stdinCloseSync is how
// createIO learns which process to CloseIO on once the task exists.
func (c *container) Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	var (
		checkpoint     *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		var err error
		checkpoint, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if checkpoint != nil {
				err := c.client.client.ContentStore().Delete(ctx, checkpoint.Digest)
				if err != nil {
					c.client.logger.WithError(err).WithFields(logrus.Fields{
						"ref":    checkpointDir,
						"digest": checkpoint.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		// Close the tar stream before checking the writeContent error so the
		// underlying pipe is always released.
		if err := tar.Close(); err != nil {
			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	// Optimization: assume the relevant metadata has not changed in the
	// moment since the container was created. Elide redundant RPC requests
	// to refresh the metadata separately for spec and labels.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve metadata")
	}
	bundle := md.Labels[DockerContainerBundlePath]

	var spec specs.Spec
	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
		return nil, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(&spec)

	taskOpts := []containerd.NewTaskOpts{
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = checkpoint
			return nil
		},
	}

	if runtime.GOOS != "windows" {
		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			// Copy the runc options so the IO uid/gid for this task does not
			// mutate the options stored on the container.
			if c.v2runcoptions != nil {
				opts := *c.v2runcoptions
				opts.IoUid = uint32(uid)
				opts.IoGid = uint32(gid)
				info.Options = &opts
			}
			return nil
		})
	} else {
		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
	}

	t, err = c.c8dCtr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, id, withStdin, spec.Process.Terminal)
			rio, err = c.createIO(fifos, stdinCloseSync, attachStdio)
			return rio, err
		},
		taskOpts...,
	)
	if err != nil {
		// Closing stdinCloseSync tells the stdin wrapper in createIO that no
		// process will ever arrive, so it must not wait for one.
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
	}

	// Signal c.createIO that it can call CloseIO
	stdinCloseSync <- t

	if err := t.Start(ctx); err != nil {
		// Only Stopped tasks can be deleted. Created tasks have to be
		// killed first, to transition them to Stopped.
		if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil {
			c.client.logger.WithError(err).WithField("container", c.c8dCtr.ID()).
				Error("failed to delete task after fail start")
		}
		return nil, wrapError(err)
	}

	return c.newTask(t), nil
}
// Exec creates exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create stdin fifo if needs. But
// for the container main process, the stdin fifo will be created in Create not
// the Start call. stdinCloseSync channel should be closed after Start exec
// process.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	defer func() {
		// On any error path, cancel and close the IO so the fifos created
		// above are cleaned up.
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = t.ctr.createIO(fifos, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		// Tell createIO's stdin wrapper that no process will ever arrive.
		close(stdinCloseSync)
		if cerrdefs.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	//
	// the stdin of exec process will be created after p.Start in containerd
	defer func() { stdinCloseSync <- p }()

	err = func() error {
		// Serialize concurrent exec starts; see serializeExecStartsWorkaround.
		t.serializeExecStartsWorkaround.Lock()
		defer t.serializeExecStartsWorkaround.Unlock()
		return p.Start(ctx)
	}()
	if err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}
  281. func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
  282. return wrapError(t.Task.Kill(ctx, signal))
  283. }
  284. func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
  285. return wrapError(p.Process.Kill(ctx, signal))
  286. }
  287. func (t *task) Pause(ctx context.Context) error {
  288. return wrapError(t.Task.Pause(ctx))
  289. }
  290. func (t *task) Resume(ctx context.Context) error {
  291. return wrapError(t.Task.Resume(ctx))
  292. }
  293. func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
  294. m, err := t.Metrics(ctx)
  295. if err != nil {
  296. return nil, err
  297. }
  298. v, err := typeurl.UnmarshalAny(m.Data)
  299. if err != nil {
  300. return nil, err
  301. }
  302. return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
  303. }
  304. func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
  305. pis, err := t.Pids(ctx)
  306. if err != nil {
  307. return nil, err
  308. }
  309. var infos []libcontainerdtypes.Summary
  310. for _, pi := range pis {
  311. i, err := typeurl.UnmarshalAny(pi.Info)
  312. if err != nil {
  313. return nil, errors.Wrap(err, "unable to decode process details")
  314. }
  315. s, err := summaryFromInterface(i)
  316. if err != nil {
  317. return nil, err
  318. }
  319. infos = append(infos, *s)
  320. }
  321. return infos, nil
  322. }
  323. func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  324. s, err := t.Task.Delete(ctx)
  325. return s, wrapError(err)
  326. }
  327. func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  328. s, err := p.Process.Delete(ctx)
  329. return s, wrapError(err)
  330. }
  331. func (c *container) Delete(ctx context.Context) error {
  332. // Optimization: assume the DockerContainerBundlePath label has not been
  333. // updated since the container metadata was last loaded/refreshed.
  334. md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
  335. if err != nil {
  336. return err
  337. }
  338. bundle := md.Labels[DockerContainerBundlePath]
  339. if err := c.c8dCtr.Delete(ctx); err != nil {
  340. return wrapError(err)
  341. }
  342. if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
  343. if err := os.RemoveAll(bundle); err != nil {
  344. c.client.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
  345. "container": c.c8dCtr.ID(),
  346. "bundle": bundle,
  347. }).Error("failed to remove state dir")
  348. }
  349. }
  350. return nil
  351. }
  352. func (t *task) ForceDelete(ctx context.Context) error {
  353. _, err := t.Task.Delete(ctx, containerd.WithProcessKill)
  354. return wrapError(err)
  355. }
  356. func (t *task) Status(ctx context.Context) (containerd.Status, error) {
  357. s, err := t.Task.Status(ctx)
  358. return s, wrapError(err)
  359. }
  360. func (p process) Status(ctx context.Context) (containerd.Status, error) {
  361. s, err := p.Process.Status(ctx)
  362. return s, wrapError(err)
  363. }
  364. func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
  365. return func(r *containerd.CheckpointTaskInfo) error {
  366. if r.Options == nil && c.v2runcoptions != nil {
  367. r.Options = &v2runcoptions.CheckpointOptions{}
  368. }
  369. switch opts := r.Options.(type) {
  370. case *v2runcoptions.CheckpointOptions:
  371. opts.Exit = exit
  372. }
  373. return nil
  374. }
  375. }
  376. func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
  377. img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
  378. if err != nil {
  379. return wrapError(err)
  380. }
  381. // Whatever happens, delete the checkpoint from containerd
  382. defer func() {
  383. err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
  384. if err != nil {
  385. t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
  386. Warnf("failed to delete checkpoint image")
  387. }
  388. }()
  389. b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
  390. if err != nil {
  391. return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
  392. }
  393. var index ocispec.Index
  394. if err := json.Unmarshal(b, &index); err != nil {
  395. return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
  396. }
  397. var cpDesc *ocispec.Descriptor
  398. for _, m := range index.Manifests {
  399. m := m
  400. if m.MediaType == images.MediaTypeContainerd1Checkpoint {
  401. cpDesc = &m //nolint:gosec
  402. break
  403. }
  404. }
  405. if cpDesc == nil {
  406. return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
  407. }
  408. rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
  409. if err != nil {
  410. return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
  411. }
  412. defer rat.Close()
  413. _, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
  414. if err != nil {
  415. return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
  416. }
  417. return err
  418. }
  419. // LoadContainer loads the containerd container.
  420. func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
  421. ctr, err := c.client.LoadContainer(ctx, id)
  422. if err != nil {
  423. if cerrdefs.IsNotFound(err) {
  424. return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  425. }
  426. return nil, wrapError(err)
  427. }
  428. return &container{client: c, c8dCtr: ctr}, nil
  429. }
  430. func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
  431. t, err := c.c8dCtr.Task(ctx, nil)
  432. if err != nil {
  433. return nil, wrapError(err)
  434. }
  435. return c.newTask(t), nil
  436. }
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
func (c *container) createIO(fifos *cio.FIFOSet, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			closeErr  error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		// Wrap stdin so that closing it also asks the shim to close the
		// process's stdin (CloseIO) — exactly once, via stdinOnce.
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				closeErr = pipe.Close()

				select {
				case p, ok := <-stdinCloseSync:
					if !ok {
						// Channel was closed: task/exec creation failed and
						// no process will ever be delivered. Nothing to do.
						return
					}
					if err := closeStdin(context.Background(), p); err != nil {
						if closeErr != nil {
							closeErr = multierror.Append(closeErr, err)
						} else {
							// Avoid wrapping a single error in a multierror.
							closeErr = err
						}
					}
				default:
					// The process wasn't ready. Close its stdin asynchronously.
					go func() {
						p, ok := <-stdinCloseSync
						if !ok {
							return
						}
						if err := closeStdin(context.Background(), p); err != nil {
							c.client.logger.WithError(err).
								WithField("container", c.c8dCtr.ID()).
								Error("failed to close container stdin")
						}
					}()
				}
			})
			return closeErr
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		// Attaching failed: cancel and close the direct IO so the fifos do
		// not leak.
		io.Cancel()
		io.Close()
	}
	return rio, err
}
  495. func closeStdin(ctx context.Context, p containerd.Process) error {
  496. err := p.CloseIO(ctx, containerd.WithStdinCloser)
  497. if err != nil && strings.Contains(err.Error(), "transport is closing") {
  498. err = nil
  499. }
  500. return err
  501. }
  502. func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
  503. c.eventQ.Append(ei.ContainerID, func() {
  504. err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
  505. if err != nil {
  506. c.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
  507. "container": ei.ContainerID,
  508. "event": et,
  509. "event-info": ei,
  510. }).Error("failed to process event")
  511. }
  512. })
  513. }
// waitServe blocks until the containerd API reports itself as serving, or
// until ctx is done. It returns true when containerd is ready and false when
// the context was cancelled or its deadline exceeded.
func (c *client) waitServe(ctx context.Context) bool {
	t := 100 * time.Millisecond
	// Create the timer stopped and drained so it can be safely Reset inside
	// the loop; this reuses one timer instead of allocating per iteration.
	delay := time.NewTimer(t)
	if !delay.Stop() {
		<-delay.C
	}
	defer delay.Stop()

	// `IsServing` will actually block until the service is ready.
	// However it can return early, so we'll loop with a delay to handle it.
	for {
		serving, err := c.client.IsServing(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return false
			}
			log.G(ctx).WithError(err).Warn("Error while testing if containerd API is ready")
		}
		if serving {
			return true
		}

		delay.Reset(t)
		select {
		case <-ctx.Done():
			return false
		case <-delay.C:
		}
	}
}
// processEventStream subscribes to containerd's task events for the given
// namespace and translates them into libcontainerd events delivered via
// processEvent. On a non-cancellation stream error it waits for containerd
// to become ready again and restarts itself in a new goroutine; on context
// cancellation it returns.
func (c *client) processEventStream(ctx context.Context, ns string) {
	var (
		err error
		ev  *events.Envelope
		et  libcontainerdtypes.EventType
		ei  libcontainerdtypes.EventInfo
	)

	// Create a new context specifically for this subscription.
	// The context must be cancelled to cancel the subscription.
	// In cases where we have to restart event stream processing,
	// we'll need the original context b/c this one will be cancelled
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string
	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		select {
		case err = <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					c.logger.WithError(err).Error("Failed to get event")
					// rate limit
					c.logger.Info("Waiting for containerd to be ready to restart event processing")
					if c.waitServe(ctx) {
						// containerd came back: resubscribe in a fresh
						// goroutine and let this one exit.
						go c.processEventStream(ctx, ns)
						return
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev = <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			// Map each containerd event type onto the libcontainerd event
			// type and extract the fields the backend cares about.
			switch t := v.(type) {
			case *apievents.TaskCreate:
				et = libcontainerdtypes.EventCreate
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskStart:
				et = libcontainerdtypes.EventStart
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskExit:
				et = libcontainerdtypes.EventExit
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    t.ExitedAt,
				}
			case *apievents.TaskOOM:
				et = libcontainerdtypes.EventOOM
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskExecAdded:
				et = libcontainerdtypes.EventExecAdded
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				}
			case *apievents.TaskExecStarted:
				et = libcontainerdtypes.EventExecStarted
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				}
			case *apievents.TaskPaused:
				et = libcontainerdtypes.EventPaused
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskResumed:
				et = libcontainerdtypes.EventResumed
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskDelete:
				// Deletes are handled synchronously by the caller of
				// task.Delete, so the event is intentionally dropped here.
				c.logger.WithFields(logrus.Fields{
					"topic":     ev.Topic,
					"type":      reflect.TypeOf(t),
					"container": t.ContainerID,
				},
				).Info("ignoring event")
				continue
			default:
				c.logger.WithFields(logrus.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t),
				},
				).Info("ignoring event")
				continue
			}

			c.processEvent(ctx, et, ei)
		}
	}
}
  658. func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
  659. writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
  660. if err != nil {
  661. return nil, err
  662. }
  663. defer writer.Close()
  664. size, err := io.Copy(writer, r)
  665. if err != nil {
  666. return nil, err
  667. }
  668. labels := map[string]string{
  669. "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
  670. }
  671. if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
  672. return nil, err
  673. }
  674. return &types.Descriptor{
  675. MediaType: mediaType,
  676. Digest: writer.Digest(),
  677. Size_: size,
  678. }, nil
  679. }
// bundleDir returns the on-disk bundle directory for the given container ID,
// rooted under the client's state directory.
func (c *client) bundleDir(id string) string {
	return filepath.Join(c.stateDir, id)
}
  683. func wrapError(err error) error {
  684. switch {
  685. case err == nil:
  686. return nil
  687. case cerrdefs.IsNotFound(err):
  688. return errdefs.NotFound(err)
  689. }
  690. msg := err.Error()
  691. for _, s := range []string{"container does not exist", "not found", "no such container"} {
  692. if strings.Contains(msg, s) {
  693. return errdefs.NotFound(err)
  694. }
  695. }
  696. return err
  697. }