client.go 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747
  1. package remote // import "github.com/docker/docker/libcontainerd/remote"
  2. import (
  3. "context"
  4. "encoding/json"
  5. "io"
  6. "os"
  7. "path/filepath"
  8. "reflect"
  9. "runtime"
  10. "strings"
  11. "sync"
  12. "syscall"
  13. "time"
  14. "github.com/containerd/containerd"
  15. apievents "github.com/containerd/containerd/api/events"
  16. "github.com/containerd/containerd/api/types"
  17. "github.com/containerd/containerd/archive"
  18. "github.com/containerd/containerd/cio"
  19. "github.com/containerd/containerd/content"
  20. containerderrors "github.com/containerd/containerd/errdefs"
  21. "github.com/containerd/containerd/events"
  22. "github.com/containerd/containerd/images"
  23. v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
  24. "github.com/containerd/typeurl"
  25. "github.com/docker/docker/errdefs"
  26. "github.com/docker/docker/libcontainerd/queue"
  27. libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
  28. "github.com/docker/docker/pkg/ioutils"
  29. v1 "github.com/opencontainers/image-spec/specs-go/v1"
  30. specs "github.com/opencontainers/runtime-spec/specs-go"
  31. "github.com/pkg/errors"
  32. "github.com/sirupsen/logrus"
  33. "google.golang.org/grpc/codes"
  34. "google.golang.org/grpc/status"
  35. )
// DockerContainerBundlePath is the label key pointing to the container's bundle path.
// It is stored on the containerd container (see WithBundle) so that the bundle
// directory can be recovered later, e.g. for FIFO creation and state-dir cleanup.
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
// client is the libcontainerd client backed by a containerd gRPC client.
type client struct {
	client   *containerd.Client // underlying containerd API client
	stateDir string             // root directory for per-container bundle dirs
	logger   *logrus.Entry      // logger pre-tagged with module and namespace
	ns       string             // containerd namespace this client operates in

	backend libcontainerdtypes.Backend // receiver of processed task events
	eventQ  queue.Queue                // serializes event delivery per container ID
}
// container wraps a containerd.Container together with the owning client.
type container struct {
	client *client
	c8dCtr containerd.Container

	// v2runcoptions is set when the container was created with runc v2
	// runtime options; used to propagate IO uid/gid and checkpoint options.
	v2runcoptions *v2runcoptions.Options
}
// task embeds a containerd.Task and keeps a back-reference to its container.
type task struct {
	containerd.Task
	ctr *container
}
// process embeds a containerd.Process to adapt it to the libcontainerd
// Process interface (wrapping errors via wrapError in its methods).
type process struct {
	containerd.Process
}
  58. // NewClient creates a new libcontainerd client from a containerd client
  59. func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
  60. c := &client{
  61. client: cli,
  62. stateDir: stateDir,
  63. logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
  64. ns: ns,
  65. backend: b,
  66. }
  67. go c.processEventStream(ctx, ns)
  68. return c, nil
  69. }
  70. func (c *client) Version(ctx context.Context) (containerd.Version, error) {
  71. return c.client.Version(ctx)
  72. }
  73. func (c *container) newTask(t containerd.Task) *task {
  74. return &task{Task: t, ctr: c}
  75. }
// AttachTask looks up the existing containerd task for this container and
// attaches the given stdio callback to it. Returns the wrapped task, or an
// error if no task exists or attaching IO fails.
func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		// Cleanup is driven by the named return value err: if anything below
		// fails after dio was created, cancel and close it.
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}
  99. func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
  100. bdir := c.bundleDir(id)
  101. c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
  102. newOpts := []containerd.NewContainerOpts{
  103. containerd.WithSpec(ociSpec),
  104. containerd.WithRuntime(shim, runtimeOptions),
  105. WithBundle(bdir, ociSpec),
  106. }
  107. opts = append(opts, newOpts...)
  108. ctr, err := c.client.NewContainer(ctx, id, opts...)
  109. if err != nil {
  110. if containerderrors.IsAlreadyExists(err) {
  111. return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
  112. }
  113. return nil, wrapError(err)
  114. }
  115. created := container{
  116. client: c,
  117. c8dCtr: ctr,
  118. }
  119. if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
  120. created.v2runcoptions = x
  121. }
  122. return &created, nil
  123. }
// Start create and start a task for the specified containerd id.
// If checkpointDir is non-empty, the checkpoint is uploaded to the containerd
// content store first and the new task is restored from it. The task's FIFOs
// are created under the bundle directory recorded on the container's labels.
func (c *container) Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	var (
		cp             *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		var err error
		cp, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if cp != nil {
				err := c.client.client.ContentStore().Delete(ctx, cp.Digest)
				if err != nil {
					c.client.logger.WithError(err).WithFields(logrus.Fields{
						"ref":    checkpointDir,
						"digest": cp.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		// Close the tar stream first so the write above fully drains, then
		// surface any writeContent error.
		if err := tar.Close(); err != nil {
			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	// Optimization: assume the relevant metadata has not changed in the
	// moment since the container was created. Elide redundant RPC requests
	// to refresh the metadata separately for spec and labels.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve metadata")
	}
	bundle := md.Labels[DockerContainerBundlePath]

	var spec specs.Spec
	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
		return nil, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(&spec)

	taskOpts := []containerd.NewTaskOpts{
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			// cp is nil when no checkpoint dir was given; containerd then
			// starts from scratch rather than restoring.
			info.Checkpoint = cp
			return nil
		},
	}

	if runtime.GOOS != "windows" {
		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			// Copy the stored runc v2 options and stamp in the IO uid/gid
			// from the spec so the shim creates FIFOs with correct ownership.
			if c.v2runcoptions != nil {
				opts := *c.v2runcoptions
				opts.IoUid = uint32(uid)
				opts.IoGid = uint32(gid)
				info.Options = &opts
			}
			return nil
		})
	} else {
		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
	}

	t, err = c.c8dCtr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, id, withStdin, spec.Process.Terminal)
			rio, err = c.createIO(fifos, stdinCloseSync, attachStdio)
			return rio, err
		},
		taskOpts...,
	)
	if err != nil {
		// Closing stdinCloseSync unblocks the goroutine inside createIO so
		// it does not wait forever for a process that will never exist.
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
	}

	// Signal c.createIO that it can call CloseIO
	stdinCloseSync <- t

	if err := t.Start(ctx); err != nil {
		// Only Stopped tasks can be deleted. Created tasks have to be
		// killed first, to transition them to Stopped.
		if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil {
			c.client.logger.WithError(err).WithField("container", c.c8dCtr.ID()).
				Error("failed to delete task after fail start")
		}
		return nil, wrapError(err)
	}

	return c.newTask(t), nil
}
// Exec creates exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create stdin fifo if needs. But
// for the container main process, the stdin fifo will be created in Create not
// the Start call. stdinCloseSync channel should be closed after Start exec
// process.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	// If anything below fails (err non-nil at return), tear down the IO that
	// was created inside the Exec callback.
	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = t.ctr.createIO(fifos, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		// Unblock the createIO goroutine waiting on stdinCloseSync.
		close(stdinCloseSync)
		if containerderrors.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	//
	// the stdin of exec process will be created after p.Start in containerd
	defer func() { stdinCloseSync <- p }()

	if err = p.Start(ctx); err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}
  271. func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
  272. return wrapError(t.Task.Kill(ctx, signal))
  273. }
  274. func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
  275. return wrapError(p.Process.Kill(ctx, signal))
  276. }
  277. func (t *task) Pause(ctx context.Context) error {
  278. return wrapError(t.Task.Pause(ctx))
  279. }
  280. func (t *task) Resume(ctx context.Context) error {
  281. return wrapError(t.Task.Resume(ctx))
  282. }
  283. func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
  284. m, err := t.Metrics(ctx)
  285. if err != nil {
  286. return nil, err
  287. }
  288. v, err := typeurl.UnmarshalAny(m.Data)
  289. if err != nil {
  290. return nil, err
  291. }
  292. return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
  293. }
  294. func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
  295. pis, err := t.Pids(ctx)
  296. if err != nil {
  297. return nil, err
  298. }
  299. var infos []libcontainerdtypes.Summary
  300. for _, pi := range pis {
  301. i, err := typeurl.UnmarshalAny(pi.Info)
  302. if err != nil {
  303. return nil, errors.Wrap(err, "unable to decode process details")
  304. }
  305. s, err := summaryFromInterface(i)
  306. if err != nil {
  307. return nil, err
  308. }
  309. infos = append(infos, *s)
  310. }
  311. return infos, nil
  312. }
  313. func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  314. s, err := t.Task.Delete(ctx)
  315. return s, wrapError(err)
  316. }
  317. func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
  318. s, err := p.Process.Delete(ctx)
  319. return s, wrapError(err)
  320. }
  321. func (c *container) Delete(ctx context.Context) error {
  322. // Optimization: assume the DockerContainerBundlePath label has not been
  323. // updated since the container metadata was last loaded/refreshed.
  324. md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
  325. if err != nil {
  326. return err
  327. }
  328. bundle := md.Labels[DockerContainerBundlePath]
  329. if err := c.c8dCtr.Delete(ctx); err != nil {
  330. return wrapError(err)
  331. }
  332. if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
  333. if err := os.RemoveAll(bundle); err != nil {
  334. c.client.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
  335. "container": c.c8dCtr.ID(),
  336. "bundle": bundle,
  337. }).Error("failed to remove state dir")
  338. }
  339. }
  340. return nil
  341. }
  342. func (t *task) ForceDelete(ctx context.Context) error {
  343. _, err := t.Task.Delete(ctx, containerd.WithProcessKill)
  344. return wrapError(err)
  345. }
  346. func (t *task) Status(ctx context.Context) (containerd.Status, error) {
  347. s, err := t.Task.Status(ctx)
  348. return s, wrapError(err)
  349. }
  350. func (p process) Status(ctx context.Context) (containerd.Status, error) {
  351. s, err := p.Process.Status(ctx)
  352. return s, wrapError(err)
  353. }
  354. func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
  355. return func(r *containerd.CheckpointTaskInfo) error {
  356. if r.Options == nil && c.v2runcoptions != nil {
  357. r.Options = &v2runcoptions.CheckpointOptions{}
  358. }
  359. switch opts := r.Options.(type) {
  360. case *v2runcoptions.CheckpointOptions:
  361. opts.Exit = exit
  362. }
  363. return nil
  364. }
  365. }
  366. func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
  367. img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
  368. if err != nil {
  369. return wrapError(err)
  370. }
  371. // Whatever happens, delete the checkpoint from containerd
  372. defer func() {
  373. err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
  374. if err != nil {
  375. t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
  376. Warnf("failed to delete checkpoint image")
  377. }
  378. }()
  379. b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
  380. if err != nil {
  381. return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
  382. }
  383. var index v1.Index
  384. if err := json.Unmarshal(b, &index); err != nil {
  385. return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
  386. }
  387. var cpDesc *v1.Descriptor
  388. for _, m := range index.Manifests {
  389. m := m
  390. if m.MediaType == images.MediaTypeContainerd1Checkpoint {
  391. cpDesc = &m //nolint:gosec
  392. break
  393. }
  394. }
  395. if cpDesc == nil {
  396. return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
  397. }
  398. rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
  399. if err != nil {
  400. return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
  401. }
  402. defer rat.Close()
  403. _, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
  404. if err != nil {
  405. return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
  406. }
  407. return err
  408. }
  409. // LoadContainer loads the containerd container.
  410. func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
  411. ctr, err := c.client.LoadContainer(ctx, id)
  412. if err != nil {
  413. if containerderrors.IsNotFound(err) {
  414. return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  415. }
  416. return nil, wrapError(err)
  417. }
  418. return &container{client: c, c8dCtr: ctr}, nil
  419. }
  420. func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
  421. t, err := c.c8dCtr.Task(ctx, nil)
  422. if err != nil {
  423. return nil, wrapError(err)
  424. }
  425. return c.newTask(t), nil
  426. }
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
func (c *container) createIO(fifos *cio.FIFOSet, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			err       error
			stdinOnce sync.Once
		)
		// Wrap stdin so that closing it (exactly once) also asks the process
		// to close its IO, once the process is known via stdinCloseSync.
		pipe := io.Stdin
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				err = pipe.Close()
				// Do the rest in a new routine to avoid a deadlock if the
				// Exec/Start call failed.
				go func() {
					// A closed channel (ok == false) means the process was
					// never created; nothing to close.
					p, ok := <-stdinCloseSync
					if !ok {
						return
					}
					err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
					if err != nil && strings.Contains(err.Error(), "transport is closing") {
						// Connection teardown during close is benign.
						err = nil
					}
				}()
			})
			return err
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		// Attaching failed; release the FIFO resources we created above.
		io.Cancel()
		io.Close()
	}
	return rio, err
}
  470. func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
  471. c.eventQ.Append(ei.ContainerID, func() {
  472. err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
  473. if err != nil {
  474. c.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
  475. "container": ei.ContainerID,
  476. "event": et,
  477. "event-info": ei,
  478. }).Error("failed to process event")
  479. }
  480. })
  481. }
// waitServe polls containerd until its API reports serving, the context is
// done, or a context error is returned. Reports true only when containerd is
// ready to serve again.
func (c *client) waitServe(ctx context.Context) bool {
	t := 100 * time.Millisecond
	// Create the timer stopped and drained so each loop iteration can safely
	// Reset it without racing a pending value on the channel.
	delay := time.NewTimer(t)
	if !delay.Stop() {
		<-delay.C
	}
	defer delay.Stop()

	// `IsServing` will actually block until the service is ready.
	// However it can return early, so we'll loop with a delay to handle it.
	for {
		serving, err := c.client.IsServing(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return false
			}
			logrus.WithError(err).Warn("Error while testing if containerd API is ready")
		}

		if serving {
			return true
		}

		// Back off before re-checking, but bail out promptly on cancellation.
		delay.Reset(t)
		select {
		case <-ctx.Done():
			return false
		case <-delay.C:
		}
	}
}
// processEventStream subscribes to containerd task events for the given
// namespace and forwards them to the backend via processEvent. On stream
// errors other than cancellation it waits for containerd to come back and
// restarts itself in a new goroutine.
func (c *client) processEventStream(ctx context.Context, ns string) {
	var (
		err error
		ev  *events.Envelope
		et  libcontainerdtypes.EventType
		ei  libcontainerdtypes.EventInfo
	)

	// Create a new context specifically for this subscription.
	// The context must be cancelled to cancel the subscription.
	// In cases where we have to restart event stream processing,
	// we'll need the original context b/c this one will be cancelled
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string
	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		select {
		case err = <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					// Not a cancellation: wait until containerd serves again,
					// then restart the subscription in a fresh goroutine.
					c.logger.WithError(err).Error("Failed to get event")
					c.logger.Info("Waiting for containerd to be ready to restart event processing")
					if c.waitServe(ctx) {
						go c.processEventStream(ctx, ns)
						return
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev = <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			// Map the containerd event type to the libcontainerd event type
			// and extract the fields the backend cares about.
			switch t := v.(type) {
			case *apievents.TaskCreate:
				et = libcontainerdtypes.EventCreate
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskStart:
				et = libcontainerdtypes.EventStart
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskExit:
				et = libcontainerdtypes.EventExit
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    t.ExitedAt,
				}
			case *apievents.TaskOOM:
				et = libcontainerdtypes.EventOOM
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskExecAdded:
				et = libcontainerdtypes.EventExecAdded
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				}
			case *apievents.TaskExecStarted:
				et = libcontainerdtypes.EventExecStarted
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				}
			case *apievents.TaskPaused:
				et = libcontainerdtypes.EventPaused
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskResumed:
				et = libcontainerdtypes.EventResumed
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskDelete:
				// Deletes are handled elsewhere; log and skip.
				c.logger.WithFields(logrus.Fields{
					"topic":     ev.Topic,
					"type":      reflect.TypeOf(t),
					"container": t.ContainerID},
				).Info("ignoring event")
				continue
			default:
				c.logger.WithFields(logrus.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t)},
				).Info("ignoring event")
				continue
			}

			c.processEvent(ctx, et, ei)
		}
	}
}
  624. func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
  625. writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
  626. if err != nil {
  627. return nil, err
  628. }
  629. defer writer.Close()
  630. size, err := io.Copy(writer, r)
  631. if err != nil {
  632. return nil, err
  633. }
  634. labels := map[string]string{
  635. "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
  636. }
  637. if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
  638. return nil, err
  639. }
  640. return &types.Descriptor{
  641. MediaType: mediaType,
  642. Digest: writer.Digest(),
  643. Size_: size,
  644. }, nil
  645. }
// bundleDir returns the per-container bundle directory path under the
// client's state dir.
func (c *client) bundleDir(id string) string {
	return filepath.Join(c.stateDir, id)
}
  649. func wrapError(err error) error {
  650. switch {
  651. case err == nil:
  652. return nil
  653. case containerderrors.IsNotFound(err):
  654. return errdefs.NotFound(err)
  655. }
  656. msg := err.Error()
  657. for _, s := range []string{"container does not exist", "not found", "no such container"} {
  658. if strings.Contains(msg, s) {
  659. return errdefs.NotFound(err)
  660. }
  661. }
  662. return err
  663. }