package local // import "github.com/docker/docker/libcontainerd/local"

// This package contains the legacy in-proc calls in HCS using the v1 schema
// for Windows runtime purposes.

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Microsoft/hcsshim/osversion"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	containerderrdefs "github.com/containerd/containerd/errdefs"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libcontainerd/queue"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)

type process struct {
	id         string
	pid        int
	hcsProcess hcsshim.Process
}

type container struct {
	sync.Mutex

	// The ociSpec is required, as client.Create() needs a spec, but can
	// be called from the RestartManager context which does not otherwise
	// have access to the Spec
	ociSpec *specs.Spec

	hcsContainer hcsshim.Container

	id               string
	status           containerd.ProcessStatus
	exitedAt         time.Time
	exitCode         uint32
	waitCh           chan struct{}
	init             *process
	execs            map[string]*process
	terminateInvoked bool
}

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard-code "docker" here.
const defaultOwner = "docker"

type client struct {
	sync.Mutex

	stateDir   string
	backend    libcontainerdtypes.Backend
	logger     *logrus.Entry
	eventQ     queue.Queue
	containers map[string]*container
}

// NewClient creates a new local executor for Windows.
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		stateDir:   stateDir,
		backend:    b,
		logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
		containers: make(map[string]*container),
	}

	return c, nil
}
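
// An illustrative sketch of constructing the client (values below are
// hypothetical; the containerd client argument is unused by this in-process
// implementation, so nil is passed for it):
//
//	b := daemonBackend // some libcontainerdtypes.Backend implementation
//	cli, err := NewClient(context.Background(), nil, `C:\ProgramData\docker`, "moby", b)
//	if err != nil {
//		logrus.WithError(err).Fatal("failed to create libcontainerd client")
//	}
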
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return containerd.Version{}, errors.New("not implemented on Windows")
}

// Create is the entrypoint to create a container from a spec.
// The table below shows the fields required for HCS JSON calling parameters;
// fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
//	{
//		"SystemType": "Container",
//		"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Owner": "docker",
//		"VolumePath": "\\\\?\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//		"IgnoreFlushesDuringBoot": true,
//		"LayerFolderPath": "C:\\control\\windowsfilter\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\control\\windowsfilter\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "5e0055c814a6",
//		"MappedDirectories": [],
//		"HvPartition": false,
//		"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"]
//	}
//
// Isolation=Hyper-V example:
//
//	{
//		"SystemType": "Container",
//		"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//		"Owner": "docker",
//		"IgnoreFlushesDuringBoot": true,
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\control\\windowsfilter\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "475c2c58933b",
//		"MappedDirectories": [],
//		"HvPartition": true,
//		"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//		"DNSSearchList": "a.com,b.com,c.com",
//		"HvRuntime": {
//			"ImagePath": "C:\\control\\windowsfilter\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\UtilityVM"
//		}
//	}
func (c *client) Create(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
	if ctr := c.getContainer(id); ctr != nil {
		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
	}

	var err error
	if spec.Linux != nil {
		return errors.New("linux containers are not supported on this platform")
	}

	err = c.createWindows(id, spec, runtimeOptions)

	if err == nil {
		c.eventQ.Append(id, func() {
			ei := libcontainerdtypes.EventInfo{
				ContainerID: id,
			}
			c.logger.WithFields(logrus.Fields{
				"container": id,
				"event":     libcontainerdtypes.EventCreate,
			}).Info("sending event")
			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
			if err != nil {
				c.logger.WithError(err).WithFields(logrus.Fields{
					"container": id,
					"event":     libcontainerdtypes.EventCreate,
				}).Error("failed to process event")
			}
		})
	}
	return err
}

func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
	logger := c.logger.WithField("container", id)
	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    id,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	c.extractResourcesFromSpec(spec, configuration)

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HyperV != nil {
		configuration.HvPartition = true
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
		configuration.Credentials = cs
	}

	// We must have at least two layers in the spec, the bottom one being a
	// base image, the top one being the RW layer.
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
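
	// For example (illustrative paths): LayerFolders of
	//
	//	[`C:\cw\base`, `C:\cw\mid`, `C:\cw\rw`]
	//
	// yields LayerFolderPath = `C:\cw\rw` (the RW/scratch layer) and
	// layerFolders = [`C:\cw\base`, `C:\cw\mid`] (the read-only layers below it).
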
	if configuration.HvPartition {
		// We don't currently support setting the utility VM image explicitly.
		// TODO circa RS5, this may be re-locatable.
		if spec.Windows.HyperV.UtilityVMPath != "" {
			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
		}

		// Find the upper-most utility VM image.
		var uvmImagePath string
		for _, path := range layerFolders {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}

		if spec.Root.Path != "" {
			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
		}
	} else {
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
		}
		// HCS API requires the trailing backslash to be removed
		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
	}
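
	// For example (GUID taken from the doc comment above): a Root.Path of
	//
	//	\\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}\
	//
	// becomes a VolumePath of
	//
	//	\\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}
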
	if spec.Root.Readonly {
		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
	}

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	var mds []hcsshim.MappedDir
	var mps []hcsshim.MappedPipe
	for _, mount := range spec.Mounts {
		const pipePrefix = `\\.\pipe\`
		if mount.Type != "" {
			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
		}
		if strings.HasPrefix(mount.Destination, pipePrefix) {
			mp := hcsshim.MappedPipe{
				HostPath:          mount.Source,
				ContainerPipeName: mount.Destination[len(pipePrefix):],
			}
			mps = append(mps, mp)
		} else {
			md := hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					md.ReadOnly = true
				}
			}
			mds = append(mds, md)
		}
	}
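
	// For example (illustrative mounts): a mount with Destination
	// `\\.\pipe\docker_engine` becomes a MappedPipe with ContainerPipeName
	// "docker_engine", while a mount with Destination `C:\data` and
	// Options ["ro"] becomes a MappedDir with ReadOnly set to true.
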
	configuration.MappedDirectories = mds
	if len(mps) > 0 && osversion.Build() < osversion.RS3 {
		return errors.New("named pipe mounts are not supported on this version of Windows")
	}
	configuration.MappedPipes = mps

	if len(spec.Windows.Devices) > 0 {
		// Add any device assignments
		if configuration.HvPartition {
			return errors.New("device assignment is not supported for HyperV containers")
		}
		if osversion.Build() < osversion.RS5 {
			return errors.New("device assignment requires Windows builds RS5 (17763+) or later")
		}
		for _, d := range spec.Windows.Devices {
			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
		}
	}

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	ctr := &container{
		id:           id,
		execs:        make(map[string]*process),
		ociSpec:      spec,
		hcsContainer: hcsContainer,
		status:       containerd.Created,
		waitCh:       make(chan struct{}),
	}

	logger.Debug("starting container")
	if err = hcsContainer.Start(); err != nil {
		c.logger.WithError(err).Error("failed to start container")
		ctr.Lock()
		if err := c.terminateContainer(ctr); err != nil {
			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			c.logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		ctr.Unlock()
		return err
	}

	c.Lock()
	c.containers[id] = ctr
	c.Unlock()

	logger.Debug("createWindows() completed successfully")
	return nil
}

func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
	}
}
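
// An illustrative spec fragment exercising the mappings above (field values
// are hypothetical; note Memory.Limit is in bytes, so 512 MiB becomes
// MemoryMaximumInMB = 512):
//
//	count := uint64(2)
//	shares := uint16(512)
//	limit := uint64(512 * 1024 * 1024)
//	spec.Windows.Resources = &specs.WindowsResources{
//		CPU:    &specs.WindowsCPUResources{Count: &count, Shares: &shares},
//		Memory: &specs.WindowsMemoryResources{Limit: &limit},
//	}
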
func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
	ctr := c.getContainer(id)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	case ctr.init != nil:
		return -1, errors.WithStack(errdefs.NotModified(errors.New("container already started")))
	}

	logger := c.logger.WithField("container", id)

	// Note we always tell HCS to create stdout as it's required
	// regardless of '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if we're not using -t.
	var (
		emulateConsole   bool
		createStdErrPipe bool
	)
	if ctr.ociSpec.Process != nil {
		emulateConsole = ctr.ociSpec.Process.Terminal
		createStdErrPipe = !ctr.ociSpec.Process.Terminal
	}

	createProcessParms := &hcsshim.ProcessConfig{
		EmulateConsole:   emulateConsole,
		WorkingDirectory: ctr.ociSpec.Process.Cwd,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: createStdErrPipe,
	}

	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = ctr.ociSpec.Process.User.Username

	ctr.Lock()

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Error("CreateProcess() failed")
		// Fix for https://github.com/moby/moby/issues/38719.
		// If the init process failed to launch, we still need to reap the
		// container to avoid leaking it.
		//
		// Note we use the explicit exit code of 127 which is the
		// Linux shell equivalent of "command not found". Windows cannot
		// know ahead of time whether or not the command exists, especially
		// in the case of Hyper-V containers.
		ctr.Unlock()
		exitedAt := time.Now()
		p := &process{
			id:  libcontainerdtypes.InitProcessName,
			pid: 0,
		}
		c.reapContainer(ctr, p, 127, exitedAt, nil, logger)
		return -1, err
	}

	defer ctr.Unlock()

	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	p := &process{
		hcsProcess: newProcess,
		id:         libcontainerdtypes.InitProcessName,
		pid:        newProcess.Pid(),
	}
	logger.WithField("pid", p.pid).Debug("init process started")

	ctr.status = containerd.Running
	ctr.init = p

	// Spin up a go routine waiting for exit to handle cleanup
	go c.reapProcess(ctr, p)

	// Don't shadow err here due to our deferred clean-up.
	var dio *cio.DirectIO
	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return -1, err
	}
	_, err = attachStdio(dio)
	if err != nil {
		logger.WithError(err).Error("failed to attach stdio")
		return -1, err
	}

	// Generate the associated event
	c.eventQ.Append(id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: id,
			ProcessID:   libcontainerdtypes.InitProcessName,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventStart,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  id,
				"event":      libcontainerdtypes.EventStart,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	logger.Debug("start() completed")
	return p.pid, nil
}

// setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
	if process.CommandLine != "" {
		createProcessParms.CommandLine = process.CommandLine
	} else {
		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
	}
}
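
// For example (illustrative, assuming system.EscapeArgs quotes arguments
// containing spaces, as golang.org/x/sys/windows.EscapeArg does):
//
//	Args: []string{"cmd", "/C", "echo hello"}
//
// would produce a CommandLine of roughly:
//
//	cmd /C "echo hello"
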
func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
	stdin, stdout, stderr, err := newProcess.Stdio()
	if err != nil {
		return nil, err
	}

	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}
	return dio, nil
}

// Exec launches a process in a running container.
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	case ctr.hcsContainer == nil:
		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
	case ctr.execs != nil && ctr.execs[processID] != nil:
		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
	}

	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"exec":      processID,
	})

	// Note we always tell HCS to
	// create stdout as it's required regardless of '-i' or '-t' options, so that
	// docker can always grab the output through logs. We also tell HCS to always
	// create stdin, even if it's not used - it will be closed shortly. Stderr
	// is only created if we're not using -t.
	createProcessParms := &hcsshim.ProcessConfig{
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !spec.Terminal,
	}
	if spec.Terminal {
		createProcessParms.EmulateConsole = true
		if spec.ConsoleSize != nil {
			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
		}
	}

	// Take the working directory from the exec process if it is defined;
	// otherwise fall back to the container's init process.
	if spec.Cwd != "" {
		createProcessParms.WorkingDirectory = spec.Cwd
	} else {
		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(spec, createProcessParms)
	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = spec.User.Username

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Errorf("exec's CreateProcess() failed")
		return -1, err
	}
	pid := newProcess.Pid()
	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	dio, err := newIOFromProcess(newProcess, spec.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return -1, err
	}
	// Tell the engine to attach streams back to the client
	_, err = attachStdio(dio)
	if err != nil {
		return -1, err
	}

	p := &process{
		id:         processID,
		pid:        pid,
		hcsProcess: newProcess,
	}

	// Add the process to the container's list of processes
	ctr.Lock()
	ctr.execs[processID] = p
	ctr.Unlock()

	// Spin up a go routine waiting for exit to handle cleanup
	go c.reapProcess(ctr, p)

	c.eventQ.Append(ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventExecAdded,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecAdded, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      libcontainerdtypes.EventExecAdded,
				"event-info": ei,
			}).Error("failed to process event")
		}
		err = c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecStarted, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      libcontainerdtypes.EventExecStarted,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})

	return pid, nil
}

// SignalProcess handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
	ctr, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"pid":       p.pid,
		"signal":    signal,
	})
	logger.Debug("Signal()")

	if processID == libcontainerdtypes.InitProcessName {
		if syscall.Signal(signal) == syscall.SIGKILL {
			// Terminate the compute system
			ctr.Lock()
			ctr.terminateInvoked = true
			if err := ctr.hcsContainer.Terminate(); err != nil {
				if !hcsshim.IsPending(err) {
					logger.WithError(err).Error("failed to terminate hcsshim container")
				}
			}
			ctr.Unlock()
		} else {
			// Shut down the container
			if err := ctr.hcsContainer.Shutdown(); err != nil {
				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
					// Log, but otherwise ignore, shutdown failures.
					logger.WithError(err).Error("failed to shutdown hcsshim container")
				}
			}
		}
	} else {
		return p.hcsProcess.Kill()
	}
	return nil
}
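
// An illustrative sketch of how the daemon maps signals here (the container
// ID is hypothetical):
//
//	// `docker stop` - graceful shutdown of the compute system:
//	_ = c.SignalProcess(ctx, "mycontainer", libcontainerdtypes.InitProcessName, int(syscall.SIGTERM))
//	// `docker kill -9` - hard termination:
//	_ = c.SignalProcess(ctx, "mycontainer", libcontainerdtypes.InitProcessName, int(syscall.SIGKILL))
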
// ResizeTerminal handles a CLI event to resize an interactive docker run or docker
// exec window.
func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"height":    height,
		"width":     width,
		"pid":       p.pid,
	}).Debug("resizing")
	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
}

func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	return p.hcsProcess.CloseStdin()
}

// Pause handles pause requests for containers
func (c *client) Pause(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return err
	}

	// Pause is only supported for Hyper-V isolated containers.
	if ctr.ociSpec.Windows.HyperV == nil {
		return containerderrdefs.ErrNotImplemented
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Pause(); err != nil {
		return err
	}

	ctr.status = containerd.Paused

	c.eventQ.Append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
			ContainerID: containerID,
			ProcessID:   libcontainerdtypes.InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     libcontainerdtypes.EventPaused,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     libcontainerdtypes.EventPaused,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Resume handles resume requests for containers
func (c *client) Resume(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return err
	}

	if ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot resume Windows Server Containers")
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Resume(); err != nil {
		return err
	}

	ctr.status = containerd.Running

	c.eventQ.Append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
			ContainerID: containerID,
			ProcessID:   libcontainerdtypes.InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     libcontainerdtypes.EventResumed,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     libcontainerdtypes.EventResumed,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Stats handles stats requests for containers
func (c *client) Stats(_ context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return nil, err
	}

	readAt := time.Now()
	s, err := ctr.hcsContainer.Statistics()
	if err != nil {
		return nil, err
	}
	return &libcontainerdtypes.Stats{
		Read:     readAt,
		HCSStats: &s,
	}, nil
}

// Restore is the handler for restoring a container
func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (bool, int, libcontainerdtypes.Process, error) {
	c.logger.WithField("container", id).Debug("restore()")

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shoot it regardless of whether HCS
	// knows about it. We explicitly just log a warning if the terminate
	// fails. Then we tell the backend the container exited.
	if hc, err := hcsshim.OpenContainer(id); err == nil {
		const terminateTimeout = time.Minute * 2
		err := hc.Terminate()

		if hcsshim.IsPending(err) {
			err = hc.WaitTimeout(terminateTimeout)
		} else if hcsshim.IsAlreadyStopped(err) {
			err = nil
		}

		if err != nil {
			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
			return false, -1, nil, err
		}
	}
	return false, -1, &restoredProcess{
		c:  c,
		id: id,
	}, nil
}

// ListPids returns a list of process IDs running in a container. It is not
// implemented on Windows.
func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
	return nil, errors.New("not implemented on Windows")
}

// Summary returns a summary of the processes running in a container.
// This is present in Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (c *client) Summary(_ context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return nil, err
	}

	p, err := ctr.hcsContainer.ProcessList()
	if err != nil {
		return nil, err
	}

	pl := make([]libcontainerdtypes.Summary, len(p))
	for i := range p {
		pl[i] = libcontainerdtypes.Summary{
			ImageName:                    p[i].ImageName,
			CreatedAt:                    p[i].CreateTimestamp,
			KernelTime_100Ns:             p[i].KernelTime100ns,
			MemoryCommitBytes:            p[i].MemoryCommitBytes,
			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
			ProcessID:                    p[i].ProcessId,
			UserTime_100Ns:               p[i].UserTime100ns,
			ExecID:                       "",
		}
	}
	return pl, nil
}

type restoredProcess struct {
	id string
	c  *client
}

func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
	return p.c.DeleteTask(ctx, p.id)
}

func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
	ec := -1
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return uint32(ec), time.Now(), errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	}

	select {
	case <-ctx.Done():
		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
	case <-ctr.waitCh:
	default:
		return uint32(ec), time.Now(), errors.New("container is not stopped")
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.exitCode, ctr.exitedAt, nil
}

func (c *client) Delete(_ context.Context, containerID string) error {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	}

	ctr.Lock()
	defer ctr.Unlock()

	switch ctr.status {
	case containerd.Created:
		if err := c.shutdownContainer(ctr); err != nil {
			return err
		}
		fallthrough
	case containerd.Stopped:
		delete(c.containers, containerID)
		return nil
	}

	return errors.WithStack(errdefs.InvalidParameter(errors.New("container is not stopped")))
}

func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return containerd.Unknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.status, nil
}

func (c *client) UpdateResources(ctx context.Context, containerID string, resources *libcontainerdtypes.Resources) error {
	// Updating resources isn't supported on Windows, but we return nil
	// rather than an error so that container updates don't fail outright.
	return nil
}

func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (c *client) getContainer(id string) *container {
	c.Lock()
	ctr := c.containers[id]
	c.Unlock()

	return ctr
}

func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	case ctr.init == nil:
		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
	case processID == libcontainerdtypes.InitProcessName:
		return ctr, ctr.init, nil
	default:
		ctr.Lock()
		defer ctr.Unlock()
		if ctr.execs == nil {
			return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no execs")))
		}
	}

	p := ctr.execs[processID]
	if p == nil {
		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
	}

	return ctr, p, nil
}

// ctr mutex must be held when calling this function.
func (c *client) shutdownContainer(ctr *container) error {
	var err error
	const waitTimeout = time.Minute * 5

	if !ctr.terminateInvoked {
		err = ctr.hcsContainer.Shutdown()
	}

	if hcsshim.IsPending(err) || ctr.terminateInvoked {
		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to shutdown container, terminating it")
		terminateErr := c.terminateContainer(ctr)
		if terminateErr != nil {
			c.logger.WithError(terminateErr).WithField("container", ctr.id).
				Error("failed to shutdown container, and subsequent terminate also failed")
			return fmt.Errorf("%s: subsequent terminate failed: %s", err, terminateErr)
		}
		return err
	}

	return nil
}

// ctr mutex must be held when calling this function.
func (c *client) terminateContainer(ctr *container) error {
	const terminateTimeout = time.Minute * 5
	ctr.terminateInvoked = true
	err := ctr.hcsContainer.Terminate()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to terminate container")
		return err
	}

	return nil
}

func (c *client) reapProcess(ctr *container, p *process) int {
	logger := c.logger.WithFields(logrus.Fields{
		"container": ctr.id,
		"process":   p.id,
	})

	var eventErr error

	// Block indefinitely for the process to exit.
	if err := p.hcsProcess.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
		}
		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}
	exitedAt := time.Now()

	exitCode, err := p.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("unable to get exit code for process")
		}
		// Since we got an error retrieving the exit code, make sure that the
		// code we return doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}

	if err := p.hcsProcess.Close(); err != nil {
		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
		exitCode = -1
		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
	}

	if p.id == libcontainerdtypes.InitProcessName {
		exitCode, eventErr = c.reapContainer(ctr, p, exitCode, exitedAt, eventErr, logger)
	}

	c.eventQ.Append(ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(p.pid),
			ExitCode:    uint32(exitCode),
			ExitedAt:    exitedAt,
			Error:       eventErr,
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventExit,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExit, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      libcontainerdtypes.EventExit,
				"event-info": ei,
			}).Error("failed to process event")
		}
		if p.id != libcontainerdtypes.InitProcessName {
			ctr.Lock()
			delete(ctr.execs, p.id)
			ctr.Unlock()
		}
	})

	return exitCode
}

// reapContainer shuts down the container and releases associated resources. It returns
// the error to be logged in the eventInfo sent back to the monitor.
func (c *client) reapContainer(ctr *container, p *process, exitCode int, exitedAt time.Time, eventErr error, logger *logrus.Entry) (int, error) {
	// Update container status
	ctr.Lock()
	ctr.status = containerd.Stopped
	ctr.exitedAt = exitedAt
	ctr.exitCode = uint32(exitCode)
	close(ctr.waitCh)

	if err := c.shutdownContainer(ctr); err != nil {
		exitCode = -1
		logger.WithError(err).Warn("failed to shutdown container")
		thisErr := errors.Wrap(err, "failed to shutdown container")
		if eventErr != nil {
			eventErr = errors.Wrap(eventErr, thisErr.Error())
		} else {
			eventErr = thisErr
		}
	} else {
		logger.Debug("completed container shutdown")
	}
	ctr.Unlock()

	if err := ctr.hcsContainer.Close(); err != nil {
		exitCode = -1
		logger.WithError(err).Error("failed to clean hcs container resources")
		thisErr := errors.Wrap(err, "failed to clean hcs container resources")
		if eventErr != nil {
			eventErr = errors.Wrap(eventErr, thisErr.Error())
		} else {
			eventErr = thisErr
		}
	}
	return exitCode, eventErr
}