client.go 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750
  1. package remote // import "github.com/docker/docker/libcontainerd/remote"
  2. import (
  3. "context"
  4. "encoding/json"
  5. "io"
  6. "os"
  7. "path/filepath"
  8. "reflect"
  9. "runtime"
  10. "strings"
  11. "sync"
  12. "syscall"
  13. "time"
  14. "github.com/containerd/containerd"
  15. apievents "github.com/containerd/containerd/api/events"
  16. "github.com/containerd/containerd/api/types"
  17. "github.com/containerd/containerd/archive"
  18. "github.com/containerd/containerd/cio"
  19. "github.com/containerd/containerd/content"
  20. cerrdefs "github.com/containerd/containerd/errdefs"
  21. "github.com/containerd/containerd/images"
  22. "github.com/containerd/containerd/protobuf"
  23. v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
  24. "github.com/containerd/log"
  25. "github.com/containerd/typeurl/v2"
  26. "github.com/docker/docker/errdefs"
  27. "github.com/docker/docker/libcontainerd/queue"
  28. libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
  29. "github.com/docker/docker/pkg/ioutils"
  30. "github.com/hashicorp/go-multierror"
  31. "github.com/opencontainers/go-digest"
  32. ocispec "github.com/opencontainers/image-spec/specs-go/v1"
  33. specs "github.com/opencontainers/runtime-spec/specs-go"
  34. "github.com/pkg/errors"
  35. "google.golang.org/grpc/codes"
  36. "google.golang.org/grpc/status"
  37. "google.golang.org/protobuf/proto"
  38. )
// DockerContainerBundlePath is the label key pointing to the container's bundle path.
// It is stored on the containerd container at creation time (see WithBundle) and
// read back later (NewTask, Exec, Delete) to locate the FIFO/state directory.
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
// client implements libcontainerdtypes.Client on top of a containerd
// gRPC client for a single namespace.
type client struct {
	client   *containerd.Client       // underlying containerd API client
	stateDir string                   // root directory for per-container bundle dirs
	logger   *log.Entry               // logger pre-tagged with module and namespace
	ns       string                   // containerd namespace this client operates in
	backend  libcontainerdtypes.Backend // receiver of container lifecycle events
	eventQ   queue.Queue              // serializes event delivery per container ID
}
// container wraps a containerd.Container together with the client that
// created it and any runc-v2 options captured at creation time.
type container struct {
	client *client
	c8dCtr containerd.Container
	// v2runcoptions is non-nil only when the container was created with
	// *v2runcoptions.Options; used again when creating tasks/checkpoints.
	v2runcoptions *v2runcoptions.Options
}
// task embeds a containerd.Task and keeps a back-reference to its
// container so task methods can reach the client, logger, and options.
type task struct {
	containerd.Task
	ctr *container
}
// process embeds a containerd.Process; its methods wrap errors into
// docker errdefs via wrapError.
type process struct {
	containerd.Process
}
  61. // NewClient creates a new libcontainerd client from a containerd client
  62. func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
  63. c := &client{
  64. client: cli,
  65. stateDir: stateDir,
  66. logger: log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
  67. ns: ns,
  68. backend: b,
  69. }
  70. go c.processEventStream(ctx, ns)
  71. return c, nil
  72. }
  73. func (c *client) Version(ctx context.Context) (containerd.Version, error) {
  74. return c.client.Version(ctx)
  75. }
  76. func (c *container) newTask(t containerd.Task) *task {
  77. return &task{Task: t, ctr: c}
  78. }
// AttachTask reattaches to the container's existing task (if any), wiring
// its FIFO-based IO through attachStdio. On failure the direct IO is
// cancelled and closed by the deferred cleanup.
func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	// Task invokes attachIO with the task's recorded FIFO set.
	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}
  102. func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
  103. bdir := c.bundleDir(id)
  104. c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
  105. newOpts := []containerd.NewContainerOpts{
  106. containerd.WithSpec(ociSpec),
  107. containerd.WithRuntime(shim, runtimeOptions),
  108. WithBundle(bdir, ociSpec),
  109. }
  110. opts = append(opts, newOpts...)
  111. ctr, err := c.client.NewContainer(ctx, id, opts...)
  112. if err != nil {
  113. if cerrdefs.IsAlreadyExists(err) {
  114. return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
  115. }
  116. return nil, wrapError(err)
  117. }
  118. created := container{
  119. client: c,
  120. c8dCtr: ctr,
  121. }
  122. if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
  123. created.v2runcoptions = x
  124. }
  125. return &created, nil
  126. }
// NewTask creates a task for the specified containerd id.
//
// When checkpointDir is non-empty, its contents are uploaded into the
// content store as a Containerd1Checkpoint blob and the task is restored
// from it; the temporary blob is removed again by a deferred cleanup.
// stdinCloseSync is used by createIO to delay closing the task's stdin
// until the task handle exists (see createIO).
func (c *container) NewTask(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	var (
		checkpoint     *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		var err error
		checkpoint, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if checkpoint != nil {
				err := c.client.client.ContentStore().Delete(ctx, digest.Digest(checkpoint.Digest))
				if err != nil {
					c.client.logger.WithError(err).WithFields(log.Fields{
						"ref":    checkpointDir,
						"digest": checkpoint.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		// Close the tar stream before inspecting the upload error so the
		// pipe is always released, even when writeContent failed.
		if err := tar.Close(); err != nil {
			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	// Optimization: assume the relevant metadata has not changed in the
	// moment since the container was created. Elide redundant RPC requests
	// to refresh the metadata separately for spec and labels.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve metadata")
	}
	bundle := md.Labels[DockerContainerBundlePath]

	var spec specs.Spec
	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
		return nil, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(&spec)

	taskOpts := []containerd.NewTaskOpts{
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = checkpoint
			return nil
		},
	}

	if runtime.GOOS != "windows" {
		// On unix, pass the spec's uid/gid through to the shim so FIFO IO
		// is owned by the container user. Clone to avoid mutating the
		// options shared with the container.
		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			if c.v2runcoptions != nil {
				opts := proto.Clone(c.v2runcoptions).(*v2runcoptions.Options)
				opts.IoUid = uint32(uid)
				opts.IoGid = uint32(gid)
				info.Options = opts
			}
			return nil
		})
	} else {
		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
	}

	t, err = c.c8dCtr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, id, withStdin, spec.Process.Terminal)
			// rio is captured so the error path below can cancel/close it.
			rio, err = c.createIO(fifos, stdinCloseSync, attachStdio)
			return rio, err
		},
		taskOpts...,
	)
	if err != nil {
		// Closing the channel unblocks createIO's stdin-close machinery.
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
	}

	// Signal c.createIO that it can call CloseIO
	stdinCloseSync <- t

	return c.newTask(t), nil
}
  211. func (t *task) Start(ctx context.Context) error {
  212. return wrapError(t.Task.Start(ctx))
  213. }
// Exec creates exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create stdin fifo if needs. But
// for the container main process, the stdin fifo will be created in Create not
// the Start call. stdinCloseSync channel should be closed after Start exec
// process.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	// On any error below, tear down the IO that createIO may have set up.
	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = t.ctr.createIO(fifos, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		close(stdinCloseSync)
		if cerrdefs.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	//
	// the stdin of exec process will be created after p.Start in containerd
	defer func() { stdinCloseSync <- p }()

	if err = p.Start(ctx); err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		// Best-effort cleanup of the registered exec; error intentionally ignored.
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}
  268. func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
  269. return wrapError(t.Task.Kill(ctx, signal))
  270. }
  271. func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
  272. return wrapError(p.Process.Kill(ctx, signal))
  273. }
  274. func (t *task) Pause(ctx context.Context) error {
  275. return wrapError(t.Task.Pause(ctx))
  276. }
  277. func (t *task) Resume(ctx context.Context) error {
  278. return wrapError(t.Task.Resume(ctx))
  279. }
  280. func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
  281. m, err := t.Metrics(ctx)
  282. if err != nil {
  283. return nil, err
  284. }
  285. v, err := typeurl.UnmarshalAny(m.Data)
  286. if err != nil {
  287. return nil, err
  288. }
  289. return libcontainerdtypes.InterfaceToStats(protobuf.FromTimestamp(m.Timestamp), v), nil
  290. }
  291. func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
  292. pis, err := t.Pids(ctx)
  293. if err != nil {
  294. return nil, err
  295. }
  296. var infos []libcontainerdtypes.Summary
  297. for _, pi := range pis {
  298. i, err := typeurl.UnmarshalAny(pi.Info)
  299. if err != nil {
  300. return nil, errors.Wrap(err, "unable to decode process details")
  301. }
  302. s, err := summaryFromInterface(i)
  303. if err != nil {
  304. return nil, err
  305. }
  306. infos = append(infos, *s)
  307. }
  308. return infos, nil
  309. }
  310. func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  311. s, err := t.Task.Delete(ctx)
  312. return s, wrapError(err)
  313. }
  314. func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  315. s, err := p.Process.Delete(ctx)
  316. return s, wrapError(err)
  317. }
  318. func (c *container) Delete(ctx context.Context) error {
  319. // Optimization: assume the DockerContainerBundlePath label has not been
  320. // updated since the container metadata was last loaded/refreshed.
  321. md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
  322. if err != nil {
  323. return err
  324. }
  325. bundle := md.Labels[DockerContainerBundlePath]
  326. if err := c.c8dCtr.Delete(ctx); err != nil {
  327. return wrapError(err)
  328. }
  329. if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
  330. if err := os.RemoveAll(bundle); err != nil {
  331. c.client.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
  332. "container": c.c8dCtr.ID(),
  333. "bundle": bundle,
  334. }).Error("failed to remove state dir")
  335. }
  336. }
  337. return nil
  338. }
  339. func (t *task) ForceDelete(ctx context.Context) error {
  340. _, err := t.Task.Delete(ctx, containerd.WithProcessKill)
  341. return wrapError(err)
  342. }
  343. func (t *task) Status(ctx context.Context) (containerd.Status, error) {
  344. s, err := t.Task.Status(ctx)
  345. return s, wrapError(err)
  346. }
  347. func (p process) Status(ctx context.Context) (containerd.Status, error) {
  348. s, err := p.Process.Status(ctx)
  349. return s, wrapError(err)
  350. }
  351. func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
  352. return func(r *containerd.CheckpointTaskInfo) error {
  353. if r.Options == nil && c.v2runcoptions != nil {
  354. r.Options = &v2runcoptions.CheckpointOptions{}
  355. }
  356. switch opts := r.Options.(type) {
  357. case *v2runcoptions.CheckpointOptions:
  358. opts.Exit = exit
  359. }
  360. return nil
  361. }
  362. }
  363. func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
  364. img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
  365. if err != nil {
  366. return wrapError(err)
  367. }
  368. // Whatever happens, delete the checkpoint from containerd
  369. defer func() {
  370. err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
  371. if err != nil {
  372. t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
  373. Warnf("failed to delete checkpoint image")
  374. }
  375. }()
  376. b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
  377. if err != nil {
  378. return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
  379. }
  380. var index ocispec.Index
  381. if err := json.Unmarshal(b, &index); err != nil {
  382. return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
  383. }
  384. var cpDesc *ocispec.Descriptor
  385. for _, m := range index.Manifests {
  386. m := m
  387. if m.MediaType == images.MediaTypeContainerd1Checkpoint {
  388. cpDesc = &m //nolint:gosec
  389. break
  390. }
  391. }
  392. if cpDesc == nil {
  393. return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
  394. }
  395. rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
  396. if err != nil {
  397. return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
  398. }
  399. defer rat.Close()
  400. _, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
  401. if err != nil {
  402. return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
  403. }
  404. return err
  405. }
  406. // LoadContainer loads the containerd container.
  407. func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
  408. ctr, err := c.client.LoadContainer(ctx, id)
  409. if err != nil {
  410. if cerrdefs.IsNotFound(err) {
  411. return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  412. }
  413. return nil, wrapError(err)
  414. }
  415. return &container{client: c, c8dCtr: ctr}, nil
  416. }
  417. func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
  418. t, err := c.c8dCtr.Task(ctx, nil)
  419. if err != nil {
  420. return nil, wrapError(err)
  421. }
  422. return c.newTask(t), nil
  423. }
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
//
// Stdin close handling: closing the returned stdin WriteCloser must also
// call CloseIO on the containerd process, but the process handle only
// becomes available on stdinCloseSync after the task/exec is created.
// The select below handles both orders: process already available
// (synchronous CloseIO) or not yet (async goroutine). NewTask/Exec close
// the channel on failure so neither path blocks forever.
func (c *container) createIO(fifos *cio.FIFOSet, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			closeErr  error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		// Wrap stdin so only the first Close takes effect; later Closes
		// return the recorded closeErr.
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				closeErr = pipe.Close()

				select {
				case p, ok := <-stdinCloseSync:
					if !ok {
						// Channel closed: task creation failed; nothing to CloseIO.
						return
					}
					if err := closeStdin(context.Background(), p); err != nil {
						if closeErr != nil {
							closeErr = multierror.Append(closeErr, err)
						} else {
							// Avoid wrapping a single error in a multierror.
							closeErr = err
						}
					}
				default:
					// The process wasn't ready. Close its stdin asynchronously.
					go func() {
						p, ok := <-stdinCloseSync
						if !ok {
							return
						}

						if err := closeStdin(context.Background(), p); err != nil {
							c.client.logger.WithError(err).
								WithField("container", c.c8dCtr.ID()).
								Error("failed to close container stdin")
						}
					}()
				}
			})

			return closeErr
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		io.Cancel()
		io.Close()
	}
	return rio, err
}
  482. func closeStdin(ctx context.Context, p containerd.Process) error {
  483. err := p.CloseIO(ctx, containerd.WithStdinCloser)
  484. if err != nil && strings.Contains(err.Error(), "transport is closing") {
  485. err = nil
  486. }
  487. return err
  488. }
  489. func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
  490. c.eventQ.Append(ei.ContainerID, func() {
  491. err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
  492. if err != nil {
  493. c.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
  494. "container": ei.ContainerID,
  495. "event": et,
  496. "event-info": ei,
  497. }).Error("failed to process event")
  498. }
  499. })
  500. }
// waitServe polls containerd until its API reports serving, the context
// is done, or a terminal (deadline/cancel) error occurs. Returns true
// only when containerd is ready.
func (c *client) waitServe(ctx context.Context) bool {
	t := 100 * time.Millisecond
	// Create the timer stopped/drained so the first Reset below behaves
	// like a fresh timer; reuse it across iterations to avoid allocating
	// a timer per loop.
	delay := time.NewTimer(t)
	if !delay.Stop() {
		<-delay.C
	}
	defer delay.Stop()

	// `IsServing` will actually block until the service is ready.
	// However it can return early, so we'll loop with a delay to handle it.
	for {
		serving, err := c.client.IsServing(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return false
			}
			log.G(ctx).WithError(err).Warn("Error while testing if containerd API is ready")
		}

		if serving {
			return true
		}

		delay.Reset(t)
		select {
		case <-ctx.Done():
			return false
		case <-delay.C:
		}
	}
}
// processEventStream subscribes to containerd's task events for the given
// namespace and fans them out to the backend via processEvent. On a
// non-cancellation stream error it waits for containerd to come back and
// restarts itself in a new goroutine using the original (uncancelled)
// context.
func (c *client) processEventStream(ctx context.Context, ns string) {
	// Create a new context specifically for this subscription.
	// The context must be cancelled to cancel the subscription.
	// In cases where we have to restart event stream processing,
	// we'll need the original context b/c this one will be cancelled
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string
	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		select {
		case err := <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					// Unexpected stream failure: wait for containerd to
					// serve again, then restart with the original context.
					c.logger.WithError(err).Error("Failed to get event")
					c.logger.Info("Waiting for containerd to be ready to restart event processing")
					if c.waitServe(ctx) {
						go c.processEventStream(ctx, ns)
						return
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev := <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			// Map each containerd task event to a libcontainerd event.
			switch t := v.(type) {
			case *apievents.TaskCreate:
				c.processEvent(ctx, libcontainerdtypes.EventCreate, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				})
			case *apievents.TaskStart:
				c.processEvent(ctx, libcontainerdtypes.EventStart, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				})
			case *apievents.TaskExit:
				c.processEvent(ctx, libcontainerdtypes.EventExit, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    protobuf.FromTimestamp(t.ExitedAt),
				})
			case *apievents.TaskOOM:
				c.processEvent(ctx, libcontainerdtypes.EventOOM, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskExecAdded:
				c.processEvent(ctx, libcontainerdtypes.EventExecAdded, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				})
			case *apievents.TaskExecStarted:
				c.processEvent(ctx, libcontainerdtypes.EventExecStarted, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				})
			case *apievents.TaskPaused:
				c.processEvent(ctx, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskResumed:
				c.processEvent(ctx, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskDelete:
				// Deletes are handled synchronously by callers; log and drop.
				c.logger.WithFields(log.Fields{
					"topic":     ev.Topic,
					"type":      reflect.TypeOf(t),
					"container": t.ContainerID,
				}).Info("ignoring event")
			default:
				c.logger.WithFields(log.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t),
				}).Info("ignoring event")
			}
		}
	}
}
  626. func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
  627. writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
  628. if err != nil {
  629. return nil, err
  630. }
  631. defer writer.Close()
  632. size, err := io.Copy(writer, r)
  633. if err != nil {
  634. return nil, err
  635. }
  636. labels := map[string]string{
  637. "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
  638. }
  639. if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
  640. return nil, err
  641. }
  642. return &types.Descriptor{
  643. MediaType: mediaType,
  644. Digest: writer.Digest().Encoded(),
  645. Size: size,
  646. }, nil
  647. }
  648. func (c *client) bundleDir(id string) string {
  649. return filepath.Join(c.stateDir, id)
  650. }
  651. func wrapError(err error) error {
  652. switch {
  653. case err == nil:
  654. return nil
  655. case cerrdefs.IsNotFound(err):
  656. return errdefs.NotFound(err)
  657. }
  658. msg := err.Error()
  659. for _, s := range []string{"container does not exist", "not found", "no such container"} {
  660. if strings.Contains(msg, s) {
  661. return errdefs.NotFound(err)
  662. }
  663. }
  664. return err
  665. }