package local // import "github.com/docker/docker/libcontainerd/local"

// This package contains the legacy in-proc calls in HCS using the v1 schema
// for Windows runtime purposes.

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	containerderrdefs "github.com/containerd/containerd/errdefs"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libcontainerd/queue"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)
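
// process wraps an HCS process handle together with the identifiers the
// daemon uses to track it: the libcontainerd process ID and the Windows PID.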
type process struct {
	id         string
	pid        int
	hcsProcess hcsshim.Process
}

type container struct {
	sync.Mutex

	// The ociSpec is required, as client.Create() needs a spec, but can
	// be called from the RestartManager context which does not otherwise
	// have access to the Spec
	ociSpec *specs.Spec

	hcsContainer hcsshim.Container

	id               string
	status           containerd.ProcessStatus
	exitedAt         time.Time
	exitCode         uint32
	waitCh           chan struct{}
	init             *process
	execs            map[string]*process
	terminateInvoked bool
}

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"
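
// client is the HCS-backed implementation of libcontainerdtypes.Client. It
// tracks the containers it creates in an in-memory map guarded by its mutex.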
type client struct {
	sync.Mutex

	stateDir   string
	backend    libcontainerdtypes.Backend
	logger     *logrus.Entry
	eventQ     queue.Queue
	containers map[string]*container
}

// NewClient creates a new local executor for Windows.
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		stateDir:   stateDir,
		backend:    b,
		logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
		containers: make(map[string]*container),
	}

	return c, nil
}
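
// Illustrative usage sketch (not part of this package): b is assumed to be a
// libcontainerdtypes.Backend and spec an OCI *specs.Spec; the *containerd.Client
// argument is unused by this local implementation, so nil is passed here.
//
//	cli, err := NewClient(ctx, nil, stateDir, "moby", b)
//	if err == nil {
//		err = cli.Create(ctx, "my-container", spec, "", nil)
//	}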

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return containerd.Version{}, errors.New("not implemented on Windows")
}

// Create is the entrypoint to create a container from a spec.
// The table below shows the fields required for HCS JSON calling parameters;
// fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
// {
//	"SystemType": "Container",
//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Owner": "docker",
//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//	"IgnoreFlushesDuringBoot": true,
//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "5e0055c814a6",
//	"MappedDirectories": [],
//	"HvPartition": false,
//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
// }
//
// Isolation=Hyper-V example:
//
// {
//	"SystemType": "Container",
//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//	"Owner": "docker",
//	"IgnoreFlushesDuringBoot": true,
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "475c2c58933b",
//	"MappedDirectories": [],
//	"HvPartition": true,
//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//	"DNSSearchList": "a.com,b.com,c.com",
//	"HvRuntime": {
//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//	},
// }
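//
// The shim and runtimeOptions arguments are accepted to satisfy the client
// interface, but this legacy client does not use them (createWindows ignores
// runtimeOptions entirely).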
func (c *client) Create(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
	if ctr := c.getContainer(id); ctr != nil {
		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
	}

	var err error
	if spec.Linux != nil {
		return errors.New("linux containers are not supported on this platform")
	}

	err = c.createWindows(id, spec, runtimeOptions)

	if err == nil {
		c.eventQ.Append(id, func() {
			ei := libcontainerdtypes.EventInfo{
				ContainerID: id,
			}
			c.logger.WithFields(logrus.Fields{
				"container": id,
				"event":     libcontainerdtypes.EventCreate,
			}).Info("sending event")
			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
			if err != nil {
				c.logger.WithError(err).WithFields(logrus.Fields{
					"container": id,
					"event":     libcontainerdtypes.EventCreate,
				}).Error("failed to process event")
			}
		})
	}
	return err
}
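
// Events are delivered to the backend through c.eventQ. The queue package
// runs the functions appended under the same key one at a time, so events for
// a given container reach the backend in the order they were queued.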

func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
	logger := c.logger.WithField("container", id)
	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    id,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	c.extractResourcesFromSpec(spec, configuration)

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HyperV != nil {
		configuration.HvPartition = true
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
		configuration.Credentials = cs
	}

	// We must have at least two layers in the spec, the bottom one being a
	// base image, the top one being the RW layer.
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	if configuration.HvPartition {
		// We don't currently support setting the utility VM image explicitly.
		// TODO circa RS5, this may be re-locatable.
		if spec.Windows.HyperV.UtilityVMPath != "" {
			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
		}

		// Find the upper-most utility VM image.
		var uvmImagePath string
		for _, path := range layerFolders {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}

		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}

		if spec.Root.Path != "" {
			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
		}
	} else {
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
		// Check the match result as well as the error; the error alone only
		// reports an invalid pattern, not a non-matching path.
		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
		}
		// HCS API requires the trailing backslash to be removed
		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
	}

	if spec.Root.Readonly {
		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
	}

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	var mds []hcsshim.MappedDir
	var mps []hcsshim.MappedPipe
	for _, mount := range spec.Mounts {
		const pipePrefix = `\\.\pipe\`
		if mount.Type != "" {
			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
		}
		if strings.HasPrefix(mount.Destination, pipePrefix) {
			mp := hcsshim.MappedPipe{
				HostPath:          mount.Source,
				ContainerPipeName: mount.Destination[len(pipePrefix):],
			}
			mps = append(mps, mp)
		} else {
			md := hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					md.ReadOnly = true
				}
			}
			mds = append(mds, md)
		}
	}
	configuration.MappedDirectories = mds
	configuration.MappedPipes = mps

	if len(spec.Windows.Devices) > 0 {
		// Add any device assignments
		if configuration.HvPartition {
			return errors.New("device assignment is not supported for HyperV containers")
		}
		for _, d := range spec.Windows.Devices {
			// Per https://github.com/microsoft/hcsshim/blob/v0.9.2/internal/uvm/virtual_device.go#L17-L18,
			// this represents an Interface Class GUID.
			if d.IDType != "class" {
				return errors.Errorf("device assignment of type '%s' is not supported", d.IDType)
			}
			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
		}
	}

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	ctr := &container{
		id:           id,
		execs:        make(map[string]*process),
		ociSpec:      spec,
		hcsContainer: hcsContainer,
		status:       containerd.Created,
		waitCh:       make(chan struct{}),
	}

	logger.Debug("starting container")
	if err = hcsContainer.Start(); err != nil {
		c.logger.WithError(err).Error("failed to start container")
		ctr.Lock()
		if err := c.terminateContainer(ctr); err != nil {
			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			c.logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		ctr.Unlock()
		return err
	}

	c.Lock()
	c.containers[id] = ctr
	c.Unlock()

	logger.Debug("createWindows() completed successfully")
	return nil
}

func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
	}
}
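
// Worked example (illustrative): a spec with CPU.Count=8 and
// Memory.Limit=1073741824 (1 GiB) on a 4-CPU host yields ProcessorCount=4
// (clamped to the host CPU count, with a warning logged) and
// MemoryMaximumInMB=1024 (the byte limit divided down to megabytes).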

func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
	ctr := c.getContainer(id)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	case ctr.init != nil:
		return -1, errors.WithStack(errdefs.NotModified(errors.New("container already started")))
	}

	logger := c.logger.WithField("container", id)

	// Note we always tell HCS to create stdout as it's required
	// regardless of '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if we're not using '-t'.
	var (
		emulateConsole   bool
		createStdErrPipe bool
	)
	if ctr.ociSpec.Process != nil {
		emulateConsole = ctr.ociSpec.Process.Terminal
		createStdErrPipe = !ctr.ociSpec.Process.Terminal
	}

	createProcessParms := &hcsshim.ProcessConfig{
		EmulateConsole:   emulateConsole,
		WorkingDirectory: ctr.ociSpec.Process.Cwd,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: createStdErrPipe,
	}

	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = ctr.ociSpec.Process.User.Username

	ctr.Lock()

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Error("CreateProcess() failed")
		// Fix for https://github.com/moby/moby/issues/38719.
		// If the init process failed to launch, we still need to reap the
		// container to avoid leaking it.
		//
		// Note we use the explicit exit code of 127 which is the
		// Linux shell equivalent of "command not found". Windows cannot
		// know ahead of time whether or not the command exists, especially
		// in the case of Hyper-V containers.
		ctr.Unlock()
		exitedAt := time.Now()
		p := &process{
			id:  libcontainerdtypes.InitProcessName,
			pid: 0,
		}
		c.reapContainer(ctr, p, 127, exitedAt, nil, logger)
		return -1, err
	}

	defer ctr.Unlock()

	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	p := &process{
		hcsProcess: newProcess,
		id:         libcontainerdtypes.InitProcessName,
		pid:        newProcess.Pid(),
	}
	logger.WithField("pid", p.pid).Debug("init process started")

	ctr.status = containerd.Running
	ctr.init = p

	// Spin up a goroutine to wait for exit and handle cleanup.
	go c.reapProcess(ctr, p)

	// Don't shadow err here due to our deferred clean-up.
	var dio *cio.DirectIO
	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return -1, err
	}
	_, err = attachStdio(dio)
	if err != nil {
		logger.WithError(err).Error("failed to attach stdio")
		return -1, err
	}

	// Generate the associated event
	c.eventQ.Append(id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: id,
			ProcessID:   libcontainerdtypes.InitProcessName,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventStart,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  id,
				"event":      libcontainerdtypes.EventStart,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	logger.Debug("start() completed")
	return p.pid, nil
}

// setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
	if process.CommandLine != "" {
		createProcessParms.CommandLine = process.CommandLine
	} else {
		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
	}
}
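
// For example (illustrative): Args of []string{"cmd", "/S", "/C", "echo hello"}
// are escaped per Windows command-line quoting rules and joined into the
// single CommandLine string `cmd /S /C "echo hello"`. An explicit CommandLine
// in the spec always takes precedence over Args.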

func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
	stdin, stdout, stderr, err := newProcess.Stdio()
	if err != nil {
		return nil, err
	}

	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}
	return dio, nil
}

// Exec launches a process in a running container.
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	case ctr.hcsContainer == nil:
		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
	case ctr.execs != nil && ctr.execs[processID] != nil:
		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
	}
	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"exec":      processID,
	})

	// Note we always tell HCS to
	// create stdout as it's required regardless of '-i' or '-t' options, so that
	// docker can always grab the output through logs. We also tell HCS to always
	// create stdin, even if it's not used - it will be closed shortly. Stderr
	// is only created if we're not using '-t'.
	createProcessParms := &hcsshim.ProcessConfig{
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !spec.Terminal,
	}
	if spec.Terminal {
		createProcessParms.EmulateConsole = true
		if spec.ConsoleSize != nil {
			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
		}
	}

	// Use the working directory from the exec's process spec if set;
	// otherwise fall back to the container's init process.
	if spec.Cwd != "" {
		createProcessParms.WorkingDirectory = spec.Cwd
	} else {
		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(spec, createProcessParms)
	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = spec.User.Username

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Errorf("exec's CreateProcess() failed")
		return -1, err
	}
	pid := newProcess.Pid()
	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	dio, err := newIOFromProcess(newProcess, spec.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return -1, err
	}
	// Tell the engine to attach streams back to the client
	_, err = attachStdio(dio)
	if err != nil {
		return -1, err
	}

	p := &process{
		id:         processID,
		pid:        pid,
		hcsProcess: newProcess,
	}

	// Add the process to the container's list of processes
	ctr.Lock()
	ctr.execs[processID] = p
	ctr.Unlock()

	// Spin up a goroutine to wait for exit and handle cleanup.
	go c.reapProcess(ctr, p)

	c.eventQ.Append(ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventExecAdded,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecAdded, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      libcontainerdtypes.EventExecAdded,
				"event-info": ei,
			}).Error("failed to process event")
		}
		err = c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecStarted, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      libcontainerdtypes.EventExecStarted,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})

	return pid, nil
}
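
// A successful Exec queues EventExecAdded and EventExecStarted back to back
// from the same function: HCS's CreateProcess creates and starts the process
// in a single call, so there is no separate start step for this client to
// report.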

// SignalProcess handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
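// For the init process, SIGKILL terminates the compute system outright while
// any other signal requests a graceful HCS shutdown; a signal sent to an
// exec'd process always kills that process.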
func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
	ctr, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"pid":       p.pid,
		"signal":    signal,
	})
	logger.Debug("Signal()")

	if processID == libcontainerdtypes.InitProcessName {
		if syscall.Signal(signal) == syscall.SIGKILL {
			// Terminate the compute system
			ctr.Lock()
			ctr.terminateInvoked = true
			if err := ctr.hcsContainer.Terminate(); err != nil {
				if !hcsshim.IsPending(err) {
					logger.WithError(err).Error("failed to terminate hcsshim container")
				}
			}
			ctr.Unlock()
		} else {
			// Shut down the container
			if err := ctr.hcsContainer.Shutdown(); err != nil {
				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
					// Log, but otherwise ignore, shutdown errors
					logger.WithError(err).Error("failed to shutdown hcsshim container")
				}
			}
		}
	} else {
		return p.hcsProcess.Kill()
	}
	return nil
}

// ResizeTerminal handles a CLI event to resize an interactive docker run or docker
// exec window.
func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"height":    height,
		"width":     width,
		"pid":       p.pid,
	}).Debug("resizing")
	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
}

func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	return p.hcsProcess.CloseStdin()
}

// Pause handles pause requests for containers
func (c *client) Pause(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return err
	}

	if ctr.ociSpec.Windows.HyperV == nil {
		return containerderrdefs.ErrNotImplemented
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Pause(); err != nil {
		return err
	}

	ctr.status = containerd.Paused

	c.eventQ.Append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
			ContainerID: containerID,
			ProcessID:   libcontainerdtypes.InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     libcontainerdtypes.EventPaused,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     libcontainerdtypes.EventPaused,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Resume handles resume requests for containers
func (c *client) Resume(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return err
	}

	if ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot resume Windows Server Containers")
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Resume(); err != nil {
		return err
	}

	ctr.status = containerd.Running

	c.eventQ.Append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
			ContainerID: containerID,
			ProcessID:   libcontainerdtypes.InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     libcontainerdtypes.EventResumed,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     libcontainerdtypes.EventResumed,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Stats handles stats requests for containers
func (c *client) Stats(_ context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return nil, err
	}

	readAt := time.Now()
	s, err := ctr.hcsContainer.Statistics()
	if err != nil {
		return nil, err
	}
	return &libcontainerdtypes.Stats{
		Read:     readAt,
		HCSStats: &s,
	}, nil
}

// Restore is the handler for restoring a container
func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (bool, int, libcontainerdtypes.Process, error) {
	c.logger.WithField("container", id).Debug("restore()")

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shoot it regardless of whether HCS knows about it.
	// We explicitly just log a warning if the terminate fails.
	// Then we tell the backend the container exited.
	if hc, err := hcsshim.OpenContainer(id); err == nil {
		const terminateTimeout = time.Minute * 2
		err := hc.Terminate()

		if hcsshim.IsPending(err) {
			err = hc.WaitTimeout(terminateTimeout)
		} else if hcsshim.IsAlreadyStopped(err) {
			err = nil
		}

		if err != nil {
			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
			return false, -1, nil, err
		}
	}
	return false, -1, &restoredProcess{
		c:  c,
		id: id,
	}, nil
}

// ListPids returns a list of process IDs running in a container. It is not
// implemented on Windows.
func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
	return nil, errors.New("not implemented on Windows")
}

// Summary returns a summary of the processes running in a container.
// This is present on Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (c *client) Summary(_ context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return nil, err
	}

	p, err := ctr.hcsContainer.ProcessList()
	if err != nil {
		return nil, err
	}

	pl := make([]libcontainerdtypes.Summary, len(p))
	for i := range p {
		pl[i] = libcontainerdtypes.Summary{
			ImageName:                    p[i].ImageName,
			CreatedAt:                    p[i].CreateTimestamp,
			KernelTime_100Ns:             p[i].KernelTime100ns,
			MemoryCommitBytes:            p[i].MemoryCommitBytes,
			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
			ProcessID:                    p[i].ProcessId,
			UserTime_100Ns:               p[i].UserTime100ns,
			ExecID:                       "",
		}
	}
	return pl, nil
}

type restoredProcess struct {
	id string
	c  *client
}

func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
	return p.c.DeleteTask(ctx, p.id)
}

func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
	ec := -1
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return uint32(ec), time.Now(), errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	}

	select {
	case <-ctx.Done():
		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
	case <-ctr.waitCh:
	default:
		return uint32(ec), time.Now(), errors.New("container is not stopped")
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.exitCode, ctr.exitedAt, nil
}

func (c *client) Delete(_ context.Context, containerID string) error {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	}

	ctr.Lock()
	defer ctr.Unlock()

	switch ctr.status {
	case containerd.Created:
		if err := c.shutdownContainer(ctr); err != nil {
			return err
		}
		fallthrough
	case containerd.Stopped:
		delete(c.containers, containerID)
		return nil
	}

	return errors.WithStack(errdefs.InvalidParameter(errors.New("container is not stopped")))
}

func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return containerd.Unknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.status, nil
}

func (c *client) UpdateResources(ctx context.Context, containerID string, resources *libcontainerdtypes.Resources) error {
	// Updating resources isn't supported on Windows, but we return nil so
	// that the container-update code path can proceed.
	return nil
}

func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (c *client) getContainer(id string) *container {
	c.Lock()
	ctr := c.containers[id]
	c.Unlock()

	return ctr
}

func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
	case ctr.init == nil:
		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
	case processID == libcontainerdtypes.InitProcessName:
		return ctr, ctr.init, nil
	default:
		ctr.Lock()
		defer ctr.Unlock()
		if ctr.execs == nil {
			return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no execs")))
		}
	}

	p := ctr.execs[processID]
	if p == nil {
		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
	}

	return ctr, p, nil
}
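
// shutdownContainer prefers a graceful HCS Shutdown and escalates to
// terminateContainer when the shutdown fails; a Shutdown or Terminate that
// HCS reports as pending is waited on with a five-minute timeout.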
// ctr mutex must be held when calling this function.
func (c *client) shutdownContainer(ctr *container) error {
	var err error
	const waitTimeout = time.Minute * 5

	if !ctr.terminateInvoked {
		err = ctr.hcsContainer.Shutdown()
	}

	if hcsshim.IsPending(err) || ctr.terminateInvoked {
		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to shutdown container, terminating it")
		terminateErr := c.terminateContainer(ctr)
		if terminateErr != nil {
			c.logger.WithError(terminateErr).WithField("container", ctr.id).
				Error("failed to shutdown container, and subsequent terminate also failed")
			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
		}
		return err
	}

	return nil
}

// ctr mutex must be held when calling this function.
func (c *client) terminateContainer(ctr *container) error {
	const terminateTimeout = time.Minute * 5
	ctr.terminateInvoked = true
	err := ctr.hcsContainer.Terminate()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to terminate container")
		return err
	}

	return nil
}
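
// reapProcess runs as a goroutine started from Start and Exec. It blocks
// until the HCS process exits, collects the exit code, releases the process
// resources, and queues the exit event; for the init process it also reaps
// the container itself via reapContainer.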
func (c *client) reapProcess(ctr *container, p *process) int {
	logger := c.logger.WithFields(logrus.Fields{
		"container": ctr.id,
		"process":   p.id,
	})

	var eventErr error

	// Block indefinitely for the process to exit.
	if err := p.hcsProcess.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
		}
		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}
	exitedAt := time.Now()

	exitCode, err := p.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("unable to get exit code for process")
		}
		// Since we got an error retrieving the exit code, make sure that the
		// code we return doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}

	if err := p.hcsProcess.Close(); err != nil {
		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
		exitCode = -1
		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
	}

	if p.id == libcontainerdtypes.InitProcessName {
		exitCode, eventErr = c.reapContainer(ctr, p, exitCode, exitedAt, eventErr, logger)
	}

	c.eventQ.Append(ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(p.pid),
			ExitCode:    uint32(exitCode),
			ExitedAt:    exitedAt,
			Error:       eventErr,
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventExit,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExit, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      libcontainerdtypes.EventExit,
				"event-info": ei,
			}).Error("failed to process event")
		}
		if p.id != libcontainerdtypes.InitProcessName {
			ctr.Lock()
			delete(ctr.execs, p.id)
			ctr.Unlock()
		}
	})

	return exitCode
}

// reapContainer shuts down the container and releases associated resources. It returns
// the error to be logged in the eventInfo sent back to the monitor.
func (c *client) reapContainer(ctr *container, p *process, exitCode int, exitedAt time.Time, eventErr error, logger *logrus.Entry) (int, error) {
	// Update container status
	ctr.Lock()
	ctr.status = containerd.Stopped
	ctr.exitedAt = exitedAt
	ctr.exitCode = uint32(exitCode)
	close(ctr.waitCh)

	if err := c.shutdownContainer(ctr); err != nil {
		exitCode = -1
		logger.WithError(err).Warn("failed to shutdown container")
		thisErr := errors.Wrap(err, "failed to shutdown container")
		if eventErr != nil {
			eventErr = errors.Wrap(eventErr, thisErr.Error())
		} else {
			eventErr = thisErr
		}
	} else {
		logger.Debug("completed container shutdown")
	}
	ctr.Unlock()

	if err := ctr.hcsContainer.Close(); err != nil {
		exitCode = -1
		logger.WithError(err).Error("failed to clean hcs container resources")
		thisErr := errors.Wrap(err, "failed to terminate container")
		if eventErr != nil {
			eventErr = errors.Wrap(eventErr, thisErr.Error())
		} else {
			eventErr = thisErr
		}
	}
	return exitCode, eventErr
}