package local // import "github.com/docker/docker/libcontainerd/local"

// This package contains the legacy in-proc calls in HCS using the v1 schema
// for Windows runtime purposes.

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/log"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libcontainerd/queue"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"golang.org/x/sys/windows"
)

type process struct {
	// mu guards the mutable fields of this struct.
	//
	// Always lock mu before ctr's mutex to prevent deadlocks.
	mu         sync.Mutex
	id         string                 // Invariants: immutable
	ctr        *container             // Invariants: immutable, ctr != nil
	hcsProcess hcsshim.Process        // Is set to nil on process exit
	exited     *containerd.ExitStatus // Valid iff waitCh is closed
	waitCh     chan struct{}
}

type task struct {
	process
}

type container struct {
	mu sync.Mutex

	// The ociSpec is required, as client.Create() needs a spec, but can
	// be called from the RestartManager context which does not otherwise
	// have access to the Spec
	//
	// A container value with ociSpec == nil represents a container which
	// has been loaded with (*client).LoadContainer, and is ineligible to
	// be Start()ed.
	ociSpec *specs.Spec

	hcsContainer hcsshim.Container // Is set to nil on container delete
	isPaused     bool

	client           *client
	id               string
	terminateInvoked bool

	// task is a reference to the current task for the container. As a
	// corollary, when task == nil the container has no current task: the
	// container was never Start()ed or the task was Delete()d.
	task *task
}

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"

type client struct {
	stateDir string
	backend  libcontainerdtypes.Backend
	logger   *log.Entry
	eventQ   queue.Queue
}

// NewClient creates a new local executor for Windows.
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		stateDir: stateDir,
		backend:  b,
		logger:   log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
	}

	return c, nil
}

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return containerd.Version{}, errors.New("not implemented on Windows")
}

// NewContainer is the entrypoint to create a container from a spec.
// The table below shows the fields required for HCS JSON calling parameters;
// fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
//	{
//		"SystemType": "Container",
//		"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Owner": "docker",
//		"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//		"IgnoreFlushesDuringBoot": true,
//		"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "5e0055c814a6",
//		"MappedDirectories": [],
//		"HvPartition": false,
//		"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	}
//
// Isolation=Hyper-V example:
//
//	{
//		"SystemType": "Container",
//		"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//		"Owner": "docker",
//		"IgnoreFlushesDuringBoot": true,
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "475c2c58933b",
//		"MappedDirectories": [],
//		"HvPartition": true,
//		"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//		"DNSSearchList": "a.com,b.com,c.com",
//		"HvRuntime": {
//			"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//		},
//	}
func (c *client) NewContainer(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
	var err error
	if spec.Linux != nil {
		return nil, errors.New("linux containers are not supported on this platform")
	}

	ctr, err := c.createWindows(id, spec, runtimeOptions)
	if err == nil {
		c.eventQ.Append(id, func() {
			ei := libcontainerdtypes.EventInfo{
				ContainerID: id,
			}
			c.logger.WithFields(log.Fields{
				"container": id,
				"event":     libcontainerdtypes.EventCreate,
			}).Info("sending event")
			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
			if err != nil {
				c.logger.WithError(err).WithFields(log.Fields{
					"container": id,
					"event":     libcontainerdtypes.EventCreate,
				}).Error("failed to process event")
			}
		})
	}
	return ctr, err
}

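// A note on event delivery (an assumption based on how eventQ is used in
// this file, not something enforced here): queue.Queue is expected to run
// the callbacks appended for a given container ID serially and in order,
// while callbacks for different IDs may run concurrently. That property is
// what lets NewContainer, NewTask, Exec, and reap enqueue events without
// the backend ever observing, say, an exit event before the corresponding
// start event.
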
func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) (*container, error) {
	logger := c.logger.WithField("container", id)
	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    id,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	c.extractResourcesFromSpec(spec, configuration)

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HyperV != nil {
		configuration.HvPartition = true
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
		configuration.Credentials = cs
	}

	// We must have at least two layers in the spec, the bottom one being a
	// base image, the top one being the RW layer.
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
		return nil, fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	if configuration.HvPartition {
		// We don't currently support setting the utility VM image explicitly.
		// TODO circa RS5, this may be re-locatable.
		if spec.Windows.HyperV.UtilityVMPath != "" {
			return nil, errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
		}

		// Find the upper-most utility VM image.
		var uvmImagePath string
		for _, path := range layerFolders {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return nil, err
			}
		}
		if uvmImagePath == "" {
			return nil, errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}

		if spec.Root.Path != "" {
			return nil, errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
		}
	} else {
		// Root.Path must be a volume GUID path,
		// e.g. \\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}\
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
			return nil, fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
		}
		// HCS API requires the trailing backslash to be removed
		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
	}

	if spec.Root.Readonly {
		return nil, errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
	}

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return nil, err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	var mds []hcsshim.MappedDir
	var mps []hcsshim.MappedPipe
	for _, mount := range spec.Mounts {
		const pipePrefix = `\\.\pipe\`
		if mount.Type != "" {
			return nil, fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
		}
		if strings.HasPrefix(mount.Destination, pipePrefix) {
			mp := hcsshim.MappedPipe{
				HostPath:          mount.Source,
				ContainerPipeName: mount.Destination[len(pipePrefix):],
			}
			mps = append(mps, mp)
		} else {
			md := hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					md.ReadOnly = true
				}
			}
			mds = append(mds, md)
		}
	}
	configuration.MappedDirectories = mds
	configuration.MappedPipes = mps

	if len(spec.Windows.Devices) > 0 {
		// Add any device assignments
		if configuration.HvPartition {
			return nil, errors.New("device assignment is not supported for HyperV containers")
		}
		for _, d := range spec.Windows.Devices {
			// Per https://github.com/microsoft/hcsshim/blob/v0.9.2/internal/uvm/virtual_device.go#L17-L18,
			// these represent an Interface Class GUID.
			if d.IDType != "class" && d.IDType != "vpci-class-guid" {
				return nil, errors.Errorf("device assignment of type '%s' is not supported", d.IDType)
			}
			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
		}
	}

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return nil, err
	}

	// Construct a container object for calling start on it.
	ctr := &container{
		client:       c,
		id:           id,
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	logger.Debug("starting container")
	if err := ctr.hcsContainer.Start(); err != nil {
		logger.WithError(err).Error("failed to start container")
		ctr.mu.Lock()
		if err := ctr.terminateContainer(); err != nil {
			logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		ctr.mu.Unlock()
		return nil, err
	}

	logger.Debug("createWindows() completed successfully")
	return ctr, nil
}

func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
	}
}

func (ctr *container) NewTask(_ context.Context, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, retErr error) {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()

	switch {
	case ctr.ociSpec == nil:
		return nil, errors.WithStack(errdefs.NotImplemented(errors.New("a restored container cannot be started")))
	case ctr.task != nil:
		return nil, errors.WithStack(errdefs.NotModified(cerrdefs.ErrAlreadyExists))
	}

	logger := ctr.client.logger.WithField("container", ctr.id)

	// Note we always tell HCS to create stdout as it's required
	// regardless of '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if we're not using -t.
	var (
		emulateConsole   bool
		createStdErrPipe bool
	)
	if ctr.ociSpec.Process != nil {
		emulateConsole = ctr.ociSpec.Process.Terminal
		createStdErrPipe = !ctr.ociSpec.Process.Terminal
	}

	createProcessParms := &hcsshim.ProcessConfig{
		EmulateConsole:   emulateConsole,
		WorkingDirectory: ctr.ociSpec.Process.Cwd,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: createStdErrPipe,
	}

	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = ctr.ociSpec.Process.User.Username

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Error("CreateProcess() failed")
		return nil, err
	}

	defer func() {
		if retErr != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	pid := newProcess.Pid()
	logger.WithField("pid", pid).Debug("init process started")

	dio, err := newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return nil, err
	}
	_, err = attachStdio(dio)
	if err != nil {
		logger.WithError(err).Error("failed to attach stdio")
		return nil, err
	}

	t := &task{process{
		id:         ctr.id,
		ctr:        ctr,
		hcsProcess: newProcess,
		waitCh:     make(chan struct{}),
	}}

	// All fallible operations have succeeded so it is now safe to set the
	// container's current task.
	ctr.task = t

	// Spin up a goroutine to notify the backend and clean up resources when
	// the task exits. Defer until after the start event is sent so that the
	// exit event is not sent out-of-order.
	defer func() { go t.reap() }()

	// Generate the associated event
	ctr.client.eventQ.Append(ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: ctr.id,
			ProcessID:   t.id,
			Pid:         uint32(pid),
		}
		ctr.client.logger.WithFields(log.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventStart,
			"event-info": ei,
		}).Info("sending event")
		err := ctr.client.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
		if err != nil {
			ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  ei.ContainerID,
				"event":      libcontainerdtypes.EventStart,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	logger.Debug("start() completed")
	return t, nil
}

func (*task) Start(context.Context) error {
	// No-op on Windows.
	return nil
}

func (ctr *container) Task(context.Context) (libcontainerdtypes.Task, error) {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()
	if ctr.task == nil {
		return nil, errdefs.NotFound(cerrdefs.ErrNotFound)
	}
	return ctr.task, nil
}

// setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
	if process.CommandLine != "" {
		createProcessParms.CommandLine = process.CommandLine
	} else {
		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
	}
}

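// For illustration (hypothetical values, assuming system.EscapeArgs applies
// the usual Windows argv quoting rules): a spec with CommandLine == "" and
// Args == []string{"cmd", "/c", "echo hello"} would yield a CommandLine
// along the lines of `cmd /c "echo hello"`, whereas a non-empty
// CommandLine is handed to HCS verbatim.
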
func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
	stdin, stdout, stderr, err := newProcess.Stdio()
	if err != nil {
		return nil, err
	}

	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}
	return dio, nil
}

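// Note: createStdInCloser and autoClosingReader are helpers defined
// elsewhere in this package. The assumption encoded above is that
// autoClosingReader closes the underlying HCS pipe once a read fails
// (including io.EOF), which is why wrapping the streams in io.NopCloser
// does not leak pipe handles.
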
// Exec launches a process in a running container.
//
// The processID argument is entirely informational. As there is no mechanism
// (exposed through the libcontainerd interfaces) to enumerate or reference an
// exec'd process by ID, uniqueness is not currently enforced.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Process, retErr error) {
	hcsContainer, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}
	logger := t.ctr.client.logger.WithFields(log.Fields{
		"container": t.ctr.id,
		"exec":      processID,
	})

	// Note we always tell HCS to create stdout as it's required
	// regardless of '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if we're not using -t.
	createProcessParms := &hcsshim.ProcessConfig{
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !spec.Terminal,
	}
	if spec.Terminal {
		createProcessParms.EmulateConsole = true
		if spec.ConsoleSize != nil {
			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
		}
	}

	// Take the working directory from the process spec if it is defined,
	// otherwise take it from the container's init process.
	if spec.Cwd != "" {
		createProcessParms.WorkingDirectory = spec.Cwd
	} else {
		createProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(spec, createProcessParms)
	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = spec.User.Username

	// Start the command running in the container.
	newProcess, err := hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Errorf("exec's CreateProcess() failed")
		return nil, err
	}

	defer func() {
		if retErr != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	dio, err := newIOFromProcess(newProcess, spec.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return nil, err
	}
	// Tell the engine to attach streams back to the client
	_, err = attachStdio(dio)
	if err != nil {
		return nil, err
	}

	p := &process{
		id:         processID,
		ctr:        t.ctr,
		hcsProcess: newProcess,
		waitCh:     make(chan struct{}),
	}

	// Spin up a goroutine to notify the backend and clean up resources when
	// the process exits. Defer until after the start event is sent so that
	// the exit event is not sent out-of-order.
	defer func() { go p.reap() }()

	pid := newProcess.Pid()
	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(pid),
		}
		t.ctr.client.logger.WithFields(log.Fields{
			"container":  t.ctr.id,
			"event":      libcontainerdtypes.EventExecAdded,
			"event-info": ei,
		}).Info("sending event")
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  t.ctr.id,
				"event":      libcontainerdtypes.EventExecAdded,
				"event-info": ei,
			}).Error("failed to process event")
		}
		err = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  t.ctr.id,
				"event":      libcontainerdtypes.EventExecStarted,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	return p, nil
}

func (p *process) Pid() uint32 {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return 0
	}
	return uint32(hcsProcess.Pid())
}

// Kill terminates an exec'd process unconditionally; the signal argument is
// ignored, as HCS has no notion of Unix signals for individual processes.
func (p *process) Kill(_ context.Context, signal syscall.Signal) error {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
	}
	return hcsProcess.Kill()
}

// Kill handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (t *task) Kill(_ context.Context, signal syscall.Signal) error {
	hcsContainer, err := t.getHCSContainer()
	if err != nil {
		return err
	}

	logger := t.ctr.client.logger.WithFields(log.Fields{
		"container": t.ctr.id,
		"process":   t.id,
		"pid":       t.Pid(),
		"signal":    signal,
	})
	logger.Debug("Signal()")

	var op string
	if signal == syscall.SIGKILL {
		// Terminate the compute system
		t.ctr.mu.Lock()
		t.ctr.terminateInvoked = true
		t.ctr.mu.Unlock()
		op, err = "terminate", hcsContainer.Terminate()
	} else {
		// Shut down the container
		op, err = "shutdown", hcsContainer.Shutdown()
	}
	if err != nil {
		if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
			// ignore errors
			logger.WithError(err).Errorf("failed to %s hcsshim container", op)
		}
	}

	return nil
}

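// In practice this means a graceful `docker stop` (SIGTERM by default) maps
// to an HCS Shutdown of the compute system, while SIGKILL (`docker kill`,
// or the forced phase of `docker stop`) maps to a hard Terminate. Errors
// that merely mean "still stopping" or "already stopped" are swallowed
// here; the authoritative exit notification comes from the reaper
// goroutine instead.
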
// Resize handles a CLI event to resize an interactive docker run or docker
// exec window.
func (p *process) Resize(_ context.Context, width, height uint32) error {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
	}

	p.ctr.client.logger.WithFields(log.Fields{
		"container": p.ctr.id,
		"process":   p.id,
		"height":    height,
		"width":     width,
		"pid":       hcsProcess.Pid(),
	}).Debug("resizing")
	return hcsProcess.ResizeConsole(uint16(width), uint16(height))
}

func (p *process) CloseStdin(context.Context) error {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
	}

	return hcsProcess.CloseStdin()
}

// Pause handles pause requests for containers
func (t *task) Pause(_ context.Context) error {
	if t.ctr.ociSpec.Windows.HyperV == nil {
		return cerrdefs.ErrNotImplemented
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()

	if err := t.assertIsCurrentTask(); err != nil {
		return err
	}
	if t.ctr.hcsContainer == nil {
		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	if err := t.ctr.hcsContainer.Pause(); err != nil {
		return err
	}

	t.ctr.isPaused = true

	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   t.id,
		})
		t.ctr.client.logger.WithFields(log.Fields{
			"container": t.ctr.id,
			"event":     libcontainerdtypes.EventPaused,
		}).Info("sending event")
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container": t.ctr.id,
				"event":     libcontainerdtypes.EventPaused,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Resume handles resume requests for containers
func (t *task) Resume(ctx context.Context) error {
	if t.ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot resume Windows Server Containers")
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()

	if err := t.assertIsCurrentTask(); err != nil {
		return err
	}
	if t.ctr.hcsContainer == nil {
		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	if err := t.ctr.hcsContainer.Resume(); err != nil {
		return err
	}

	t.ctr.isPaused = false

	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   t.id,
		})
		t.ctr.client.logger.WithFields(log.Fields{
			"container": t.ctr.id,
			"event":     libcontainerdtypes.EventResumed,
		}).Info("sending event")
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container": t.ctr.id,
				"event":     libcontainerdtypes.EventResumed,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Stats handles stats requests for containers
func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {
	hc, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}

	readAt := time.Now()
	s, err := hc.Statistics()
	if err != nil {
		return nil, err
	}
	return &libcontainerdtypes.Stats{
		Read:     readAt,
		HCSStats: &s,
	}, nil
}

// LoadContainer is the handler for restoring a container
func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
	c.logger.WithField("container", id).Debug("LoadContainer()")

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shoot it regardless of whether HCS
	// knows about it. We explicitly just log a warning if the terminate
	// fails. Then we tell the backend the container exited.
	hc, err := hcsshim.OpenContainer(id)
	if err != nil {
		return nil, errdefs.NotFound(errors.New("container not found"))
	}
	const terminateTimeout = time.Minute * 2
	err = hc.Terminate()

	if hcsshim.IsPending(err) {
		err = hc.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
		return nil, err
	}
	return &container{
		client:       c,
		hcsContainer: hc,
		id:           id,
	}, nil
}

// AttachTask is only called by the daemon when restoring containers. As
// re-attach isn't possible (see LoadContainer), a NotFound error is
// unconditionally returned to allow restore to make progress.
func (*container) AttachTask(context.Context, libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	return nil, errdefs.NotFound(cerrdefs.ErrNotImplemented)
}

// Pids returns a list of process IDs running in a container. It is not
// implemented on Windows.
func (t *task) Pids(context.Context) ([]containerd.ProcessInfo, error) {
	return nil, errors.New("not implemented on Windows")
}

// Summary returns a summary of the processes running in a container.
// This is present in Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (t *task) Summary(_ context.Context) ([]libcontainerdtypes.Summary, error) {
	hc, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}

	p, err := hc.ProcessList()
	if err != nil {
		return nil, err
	}

	pl := make([]libcontainerdtypes.Summary, len(p))
	for i := range p {
		pl[i] = libcontainerdtypes.Summary{
			ImageName:                    p[i].ImageName,
			CreatedAt:                    p[i].CreateTimestamp,
			KernelTime_100Ns:             p[i].KernelTime100ns,
			MemoryCommitBytes:            p[i].MemoryCommitBytes,
			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
			ProcessID:                    p[i].ProcessId,
			UserTime_100Ns:               p[i].UserTime100ns,
			ExecID:                       "",
		}
	}
	return pl, nil
}

// Delete returns the exit status of a process that has already exited; it
// returns a Conflict error if the process is still running.
func (p *process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	select {
	case <-ctx.Done():
		return nil, errors.WithStack(ctx.Err())
	case <-p.waitCh:
	default:
		return nil, errdefs.Conflict(errors.New("process is running"))
	}
	return p.exited, nil
}

// Delete removes a stopped task from its container and returns the task's
// exit status; it returns a Conflict error if the task is still running.
func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	select {
	case <-ctx.Done():
		return nil, errors.WithStack(ctx.Err())
	case <-t.waitCh:
	default:
		return nil, errdefs.Conflict(errors.New("container is not stopped"))
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return nil, err
	}
	t.ctr.task = nil
	return t.exited, nil
}

// ForceDelete kills the task if it is still running, waits for it to exit,
// then deletes it.
func (t *task) ForceDelete(ctx context.Context) error {
	select {
	case <-t.waitCh: // Task is already stopped.
		_, err := t.Delete(ctx)
		return err
	default:
	}

	if err := t.Kill(ctx, syscall.SIGKILL); err != nil {
		return errors.Wrap(err, "could not force-kill task")
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-t.waitCh:
		_, err := t.Delete(ctx)
		return err
	}
}

func (t *task) Status(ctx context.Context) (containerd.Status, error) {
	select {
	case <-t.waitCh:
		return containerd.Status{
			Status:     containerd.Stopped,
			ExitStatus: t.exited.ExitCode(),
			ExitTime:   t.exited.ExitTime(),
		}, nil
	default:
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	s := containerd.Running
	if t.ctr.isPaused {
		s = containerd.Paused
	}
	return containerd.Status{Status: s}, nil
}

func (*task) UpdateResources(ctx context.Context, resources *libcontainerdtypes.Resources) error {
	// Updating resources isn't supported on Windows, but we return nil so
	// that the container-update path can still proceed.
	return nil
}

func (*task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

// assertIsCurrentTask returns a non-nil error if the task has been deleted.
func (t *task) assertIsCurrentTask() error {
	if t.ctr.task != t {
		return errors.WithStack(errdefs.NotFound(fmt.Errorf("task %q not found", t.id)))
	}
	return nil
}

// getHCSContainer returns a reference to the hcsshim Container for the task's
// container if neither the task nor container have been deleted.
//
// t.ctr.mu must not be locked by the calling goroutine when calling this
// function.
func (t *task) getHCSContainer() (hcsshim.Container, error) {
	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return nil, err
	}
	hc := t.ctr.hcsContainer
	if hc == nil {
		return nil, errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	return hc, nil
}

// ctr mutex must be held when calling this function.
func (ctr *container) shutdownContainer() error {
	var err error
	const waitTimeout = time.Minute * 5

	if !ctr.terminateInvoked {
		err = ctr.hcsContainer.Shutdown()
	}

	if hcsshim.IsPending(err) || ctr.terminateInvoked {
		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		ctr.client.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to shutdown container, terminating it")
		terminateErr := ctr.terminateContainer()
		if terminateErr != nil {
			ctr.client.logger.WithError(terminateErr).WithField("container", ctr.id).
				Error("failed to shutdown container, and subsequent terminate also failed")
			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
		}
		return err
	}

	return nil
}

// ctr mutex must be held when calling this function.
func (ctr *container) terminateContainer() error {
	const terminateTimeout = time.Minute * 5
	ctr.terminateInvoked = true
	err := ctr.hcsContainer.Terminate()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		ctr.client.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to terminate container")
		return err
	}

	return nil
}

// reap blocks until the process exits, records its exit status, closes
// waitCh, and notifies the backend of the exit.
func (p *process) reap() {
	logger := p.ctr.client.logger.WithFields(log.Fields{
		"container": p.ctr.id,
		"process":   p.id,
	})

	var eventErr error

	// Block indefinitely for the process to exit.
	if err := p.hcsProcess.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
		}
		// Fall through here, do not return. This ensures we tell the
		// docker engine that the process/container has exited to avoid
		// a container being dropped on the floor.
	}
	exitedAt := time.Now()

	exitCode, err := p.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("unable to get exit code for process")
		}
		// Since we got an error retrieving the exit code, make sure that the
		// code we return doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we tell the
		// docker engine that the process/container has exited to avoid
		// a container being dropped on the floor.
	}

	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.hcsProcess = nil
	p.mu.Unlock()

	if err := hcsProcess.Close(); err != nil {
		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
		exitCode = -1
		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
	}

	// Explicit locking is not required as reads from exited are
	// synchronized using waitCh.
	p.exited = containerd.NewExitStatus(uint32(exitCode), exitedAt, nil)
	close(p.waitCh)

	p.ctr.client.eventQ.Append(p.ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: p.ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(hcsProcess.Pid()),
			ExitCode:    uint32(exitCode),
			ExitedAt:    exitedAt,
			Error:       eventErr,
		}
		p.ctr.client.logger.WithFields(log.Fields{
			"container":  p.ctr.id,
			"event":      libcontainerdtypes.EventExit,
			"event-info": ei,
		}).Info("sending event")
		err := p.ctr.client.backend.ProcessEvent(p.ctr.id, libcontainerdtypes.EventExit, ei)
		if err != nil {
			p.ctr.client.logger.WithError(err).WithFields(log.Fields{
				"container":  p.ctr.id,
				"event":      libcontainerdtypes.EventExit,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
}

func (ctr *container) Delete(context.Context) error {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()

	if ctr.hcsContainer == nil {
		return errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", ctr.id)))
	}

	// Check that there is no task currently running.
	if ctr.task != nil {
		select {
		case <-ctr.task.waitCh:
		default:
			return errors.WithStack(errdefs.Conflict(errors.New("container is not stopped")))
		}
	}

	var (
		logger = ctr.client.logger.WithFields(log.Fields{
			"container": ctr.id,
		})
		thisErr error
	)

	if err := ctr.shutdownContainer(); err != nil {
		logger.WithError(err).Warn("failed to shutdown container")
		thisErr = errors.Wrap(err, "failed to shutdown container")
	} else {
		logger.Debug("completed container shutdown")
	}

	if err := ctr.hcsContainer.Close(); err != nil {
		logger.WithError(err).Error("failed to clean hcs container resources")
		thisErr = errors.Wrap(err, "failed to clean hcs container resources")
	}

	ctr.hcsContainer = nil
	return thisErr
}