package local // import "github.com/docker/docker/libcontainerd/local"

// This package contains the legacy in-proc calls in HCS using the v1 schema
// for Windows runtime purposes.

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libcontainerd/queue"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)
type process struct {
	// mu guards the mutable fields of this struct.
	//
	// Always lock mu before ctr's mutex to prevent deadlocks.
	mu         sync.Mutex
	id         string                 // Invariants: immutable
	ctr        *container             // Invariants: immutable, ctr != nil
	hcsProcess hcsshim.Process        // Is set to nil on process exit
	exited     *containerd.ExitStatus // Valid iff waitCh is closed
	waitCh     chan struct{}
}

type task struct {
	process
}
type container struct {
	mu sync.Mutex

	// The ociSpec is required, as client.Create() needs a spec, but can
	// be called from the RestartManager context which does not otherwise
	// have access to the Spec.
	//
	// A container value with ociSpec == nil represents a container which
	// has been loaded with (*client).LoadContainer, and is ineligible to
	// be Start()ed.
	ociSpec *specs.Spec

	hcsContainer     hcsshim.Container // Is set to nil on container delete
	isPaused         bool
	client           *client
	id               string
	terminateInvoked bool

	// task is a reference to the current task for the container. As a
	// corollary, when task == nil the container has no current task: the
	// container was never Start()ed or the task was Delete()d.
	task *task
}
// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"

type client struct {
	stateDir string
	backend  libcontainerdtypes.Backend
	logger   *logrus.Entry
	eventQ   queue.Queue
}
// NewClient creates a new local executor for Windows.
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		stateDir: stateDir,
		backend:  b,
		logger:   log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
	}

	return c, nil
}

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return containerd.Version{}, errors.New("not implemented on Windows")
}
// NewContainer is the entrypoint to create a container from a spec.
// The table below shows the fields required for the HCS JSON calling
// parameters; fields that are not populated are omitted.
//
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
//	{
//		"SystemType": "Container",
//		"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Owner": "docker",
//		"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//		"IgnoreFlushesDuringBoot": true,
//		"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "5e0055c814a6",
//		"MappedDirectories": [],
//		"HvPartition": false,
//		"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	}
//
// Isolation=Hyper-V example:
//
//	{
//		"SystemType": "Container",
//		"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//		"Owner": "docker",
//		"IgnoreFlushesDuringBoot": true,
//		"Layers": [{
//			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//		}],
//		"HostName": "475c2c58933b",
//		"MappedDirectories": [],
//		"HvPartition": true,
//		"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//		"DNSSearchList": "a.com,b.com,c.com",
//		"HvRuntime": {
//			"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//		},
//	}
func (c *client) NewContainer(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
	var err error
	if spec.Linux != nil {
		return nil, errors.New("linux containers are not supported on this platform")
	}

	ctr, err := c.createWindows(id, spec, runtimeOptions)
	if err == nil {
		c.eventQ.Append(id, func() {
			ei := libcontainerdtypes.EventInfo{
				ContainerID: id,
			}
			c.logger.WithFields(logrus.Fields{
				"container": id,
				"event":     libcontainerdtypes.EventCreate,
			}).Info("sending event")
			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
			if err != nil {
				c.logger.WithError(err).WithFields(logrus.Fields{
					"container": id,
					"event":     libcontainerdtypes.EventCreate,
				}).Error("failed to process event")
			}
		})
	}
	return ctr, err
}
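// createWindows translates the OCI spec into an hcsshim.ContainerConfig,
// creates the compute system, and starts it. If starting fails after the
// compute system was created, the compute system is terminated before
// returning the error.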
func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) (*container, error) {
	logger := c.logger.WithField("container", id)
	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    id,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	c.extractResourcesFromSpec(spec, configuration)

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HyperV != nil {
		configuration.HvPartition = true
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
		configuration.Credentials = cs
	}

	// We must have at least two layers in the spec, the bottom one being a
	// base image, the top one being the RW layer.
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
		return nil, fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	if configuration.HvPartition {
		// We don't currently support setting the utility VM image explicitly.
		// TODO circa RS5, this may be re-locatable.
		if spec.Windows.HyperV.UtilityVMPath != "" {
			return nil, errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
		}

		// Find the upper-most utility VM image.
		var uvmImagePath string
		for _, path := range layerFolders {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return nil, err
			}
		}
		if uvmImagePath == "" {
			return nil, errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}

		if spec.Root.Path != "" {
			return nil, errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
		}
	} else {
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
		// Reject paths which do not match the pattern. Note that
		// regexp.MatchString only returns a non-nil error when the
		// pattern itself is invalid, so the match result must be
		// checked as well.
		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
			return nil, fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
		}
		// HCS API requires the trailing backslash to be removed
		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
	}

	if spec.Root.Readonly {
		return nil, errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
	}

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return nil, err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	var mds []hcsshim.MappedDir
	var mps []hcsshim.MappedPipe
	for _, mount := range spec.Mounts {
		const pipePrefix = `\\.\pipe\`
		if mount.Type != "" {
			return nil, fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
		}
		if strings.HasPrefix(mount.Destination, pipePrefix) {
			mp := hcsshim.MappedPipe{
				HostPath:          mount.Source,
				ContainerPipeName: mount.Destination[len(pipePrefix):],
			}
			mps = append(mps, mp)
		} else {
			md := hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					md.ReadOnly = true
				}
			}
			mds = append(mds, md)
		}
	}
	configuration.MappedDirectories = mds
	configuration.MappedPipes = mps

	if len(spec.Windows.Devices) > 0 {
		// Add any device assignments
		if configuration.HvPartition {
			return nil, errors.New("device assignment is not supported for HyperV containers")
		}
		for _, d := range spec.Windows.Devices {
			// Per https://github.com/microsoft/hcsshim/blob/v0.9.2/internal/uvm/virtual_device.go#L17-L18,
			// these represent an Interface Class GUID.
			if d.IDType != "class" && d.IDType != "vpci-class-guid" {
				return nil, errors.Errorf("device assignment of type '%s' is not supported", d.IDType)
			}
			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
		}
	}

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return nil, err
	}

	// Construct a container object for calling start on it.
	ctr := &container{
		client:       c,
		id:           id,
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	logger.Debug("starting container")
	if err := ctr.hcsContainer.Start(); err != nil {
		logger.WithError(err).Error("failed to start container")
		ctr.mu.Lock()
		if err := ctr.terminateContainer(); err != nil {
			logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		ctr.mu.Unlock()
		return nil, err
	}

	logger.Debug("createWindows() completed successfully")
	return ctr, nil
}
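// extractResourcesFromSpec copies the CPU and memory limits from the OCI
// spec onto the HCS container configuration, capping the requested CPU
// count at the number of processors available on the host.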
func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
	}
}
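// Start creates the container's init process in the HCS compute system,
// wires up its stdio, and records it as the container's current task. It
// fails for containers restored via LoadContainer (ociSpec == nil) and for
// containers which already have a task.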
func (ctr *container) Start(_ context.Context, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()
	switch {
	case ctr.ociSpec == nil:
		return nil, errors.WithStack(errdefs.NotImplemented(errors.New("a restored container cannot be started")))
	case ctr.task != nil:
		return nil, errors.WithStack(errdefs.NotModified(cerrdefs.ErrAlreadyExists))
	}

	logger := ctr.client.logger.WithField("container", ctr.id)

	// Note we always tell HCS to create stdout as it's required regardless
	// of the '-i' or '-t' options, so that docker can always grab the
	// output through logs. We also tell HCS to always create stdin, even
	// if it's not used - it will be closed shortly. Stderr is only created
	// if we're not running with a TTY ('-t').
	var (
		emulateConsole   bool
		createStdErrPipe bool
	)
	if ctr.ociSpec.Process != nil {
		emulateConsole = ctr.ociSpec.Process.Terminal
		createStdErrPipe = !ctr.ociSpec.Process.Terminal
	}

	createProcessParms := &hcsshim.ProcessConfig{
		EmulateConsole:   emulateConsole,
		WorkingDirectory: ctr.ociSpec.Process.Cwd,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: createStdErrPipe,
	}

	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = ctr.ociSpec.Process.User.Username

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Error("CreateProcess() failed")
		return nil, err
	}

	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	t := &task{process: process{
		id:         ctr.id,
		ctr:        ctr,
		hcsProcess: newProcess,
		waitCh:     make(chan struct{}),
	}}
	pid := t.Pid()
	logger.WithField("pid", pid).Debug("init process started")

	// Spin up a goroutine to notify the backend and clean up resources when
	// the task exits. Defer until after the start event is sent so that the
	// exit event is not sent out-of-order.
	defer func() { go t.reap() }()

	// Don't shadow err here due to our deferred clean-up.
	var dio *cio.DirectIO
	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return nil, err
	}
	_, err = attachStdio(dio)
	if err != nil {
		logger.WithError(err).Error("failed to attach stdio")
		return nil, err
	}

	// All fallible operations have succeeded so it is now safe to set the
	// container's current task.
	ctr.task = t

	// Generate the associated event
	ctr.client.eventQ.Append(ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: ctr.id,
			ProcessID:   t.id,
			Pid:         pid,
		}
		ctr.client.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      libcontainerdtypes.EventStart,
			"event-info": ei,
		}).Info("sending event")
		err := ctr.client.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
		if err != nil {
			ctr.client.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ei.ContainerID,
				"event":      libcontainerdtypes.EventStart,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	logger.Debug("start() completed")
	return t, nil
}

func (ctr *container) Task(context.Context) (libcontainerdtypes.Task, error) {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()
	if ctr.task == nil {
		return nil, errdefs.NotFound(cerrdefs.ErrNotFound)
	}
	return ctr.task, nil
}

// setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
	if process.CommandLine != "" {
		createProcessParms.CommandLine = process.CommandLine
	} else {
		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
	}
}
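// newIOFromProcess wraps the stdio pipes of an HCS process in a
// cio.DirectIO. The stdout and stderr read-closers are wrapped in
// autoClosingReader (defined elsewhere in this package), which is
// responsible for closing the underlying pipes once the streams end.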
func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
	stdin, stdout, stderr, err := newProcess.Stdio()
	if err != nil {
		return nil, err
	}

	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}
	return dio, nil
}
// Exec launches a process in a running container.
//
// The processID argument is entirely informational. As there is no mechanism
// (exposed through the libcontainerd interfaces) to enumerate or reference an
// exec'd process by ID, uniqueness is not currently enforced.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	hcsContainer, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}
	logger := t.ctr.client.logger.WithFields(logrus.Fields{
		"container": t.ctr.id,
		"exec":      processID,
	})

	// Note we always tell HCS to create stdout as it's required regardless
	// of the '-i' or '-t' options, so that docker can always grab the
	// output through logs. We also tell HCS to always create stdin, even
	// if it's not used - it will be closed shortly. Stderr is only created
	// if we're not running with a TTY ('-t').
	createProcessParms := &hcsshim.ProcessConfig{
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !spec.Terminal,
	}
	if spec.Terminal {
		createProcessParms.EmulateConsole = true
		if spec.ConsoleSize != nil {
			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
		}
	}

	// Use the working directory from the exec's process spec if it is
	// defined; otherwise fall back to the container's init process.
	if spec.Cwd != "" {
		createProcessParms.WorkingDirectory = spec.Cwd
	} else {
		createProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)

	// Configure the CommandLine/CommandArgs
	setCommandLineAndArgs(spec, createProcessParms)
	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)

	createProcessParms.User = spec.User.Username

	// Start the command running in the container.
	newProcess, err := hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Errorf("exec's CreateProcess() failed")
		return nil, err
	}
	pid := newProcess.Pid()
	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	dio, err := newIOFromProcess(newProcess, spec.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return nil, err
	}
	// Tell the engine to attach streams back to the client
	_, err = attachStdio(dio)
	if err != nil {
		return nil, err
	}

	p := &process{
		id:         processID,
		ctr:        t.ctr,
		hcsProcess: newProcess,
		waitCh:     make(chan struct{}),
	}

	// Spin up a goroutine to notify the backend and clean up resources when
	// the process exits. Defer until after the start event is sent so that
	// the exit event is not sent out-of-order.
	defer func() { go p.reap() }()

	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(pid),
		}
		t.ctr.client.logger.WithFields(logrus.Fields{
			"container":  t.ctr.id,
			"event":      libcontainerdtypes.EventExecAdded,
			"event-info": ei,
		}).Info("sending event")
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
				"container":  t.ctr.id,
				"event":      libcontainerdtypes.EventExecAdded,
				"event-info": ei,
			}).Error("failed to process event")
		}
		err = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
				"container":  t.ctr.id,
				"event":      libcontainerdtypes.EventExecStarted,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	return p, nil
}
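// Pid returns the process identifier of the underlying HCS process, or 0
// if the process has already exited.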
func (p *process) Pid() uint32 {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return 0
	}
	return uint32(hcsProcess.Pid())
}
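// Kill terminates the HCS process. The signal argument is ignored, as
// arbitrary signals cannot be delivered on Windows; see (*task).Kill for
// how SIGKILL vs. other signals are approximated for init processes.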
func (p *process) Kill(_ context.Context, signal syscall.Signal) error {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
	}
	return hcsProcess.Kill()
}
// Kill handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (t *task) Kill(_ context.Context, signal syscall.Signal) error {
	hcsContainer, err := t.getHCSContainer()
	if err != nil {
		return err
	}

	logger := t.ctr.client.logger.WithFields(logrus.Fields{
		"container": t.ctr.id,
		"process":   t.id,
		"pid":       t.Pid(),
		"signal":    signal,
	})
	logger.Debug("Signal()")

	var op string
	if signal == syscall.SIGKILL {
		// Terminate the compute system
		t.ctr.mu.Lock()
		t.ctr.terminateInvoked = true
		t.ctr.mu.Unlock()
		op, err = "terminate", hcsContainer.Terminate()
	} else {
		// Shut down the container
		op, err = "shutdown", hcsContainer.Shutdown()
	}
	if err != nil {
		if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
			// Log the failure, but otherwise ignore it; the error is
			// not propagated to the caller.
			logger.WithError(err).Errorf("failed to %s hcsshim container", op)
		}
	}
	return nil
}
// Resize handles a CLI event to resize an interactive docker run or docker
// exec window.
func (p *process) Resize(_ context.Context, width, height uint32) error {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
	}

	p.ctr.client.logger.WithFields(logrus.Fields{
		"container": p.ctr.id,
		"process":   p.id,
		"height":    height,
		"width":     width,
		"pid":       hcsProcess.Pid(),
	}).Debug("resizing")
	return hcsProcess.ResizeConsole(uint16(width), uint16(height))
}

func (p *process) CloseStdin(context.Context) error {
	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.mu.Unlock()
	if hcsProcess == nil {
		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
	}

	return hcsProcess.CloseStdin()
}
// Pause handles pause requests for containers
func (t *task) Pause(_ context.Context) error {
	if t.ctr.ociSpec.Windows.HyperV == nil {
		return cerrdefs.ErrNotImplemented
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()

	if err := t.assertIsCurrentTask(); err != nil {
		return err
	}
	if t.ctr.hcsContainer == nil {
		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	if err := t.ctr.hcsContainer.Pause(); err != nil {
		return err
	}

	t.ctr.isPaused = true

	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   t.id,
		})
		t.ctr.client.logger.WithFields(logrus.Fields{
			"container": t.ctr.id,
			"event":     libcontainerdtypes.EventPaused,
		}).Info("sending event")
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
				"container": t.ctr.id,
				"event":     libcontainerdtypes.EventPaused,
			}).Error("failed to process event")
		}
	})
	return nil
}
// Resume handles resume requests for containers
func (t *task) Resume(ctx context.Context) error {
	if t.ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot resume Windows Server Containers")
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()

	if err := t.assertIsCurrentTask(); err != nil {
		return err
	}
	if t.ctr.hcsContainer == nil {
		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	if err := t.ctr.hcsContainer.Resume(); err != nil {
		return err
	}

	t.ctr.isPaused = false

	t.ctr.client.eventQ.Append(t.ctr.id, func() {
		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
			ContainerID: t.ctr.id,
			ProcessID:   t.id,
		})
		t.ctr.client.logger.WithFields(logrus.Fields{
			"container": t.ctr.id,
			"event":     libcontainerdtypes.EventResumed,
		}).Info("sending event")
		if err != nil {
			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
				"container": t.ctr.id,
				"event":     libcontainerdtypes.EventResumed,
			}).Error("failed to process event")
		}
	})
	return nil
}
// Stats handles stats requests for containers
func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {
	hc, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}

	readAt := time.Now()
	s, err := hc.Statistics()
	if err != nil {
		return nil, err
	}
	return &libcontainerdtypes.Stats{
		Read:     readAt,
		HCSStats: &s,
	}, nil
}
// LoadContainer is the handler for restoring a container
func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
	c.logger.WithField("container", id).Debug("LoadContainer()")

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shoot it regardless of whether HCS
	// knows about it. We explicitly just log a warning if the terminate
	// fails. Then we tell the backend the container exited.
	hc, err := hcsshim.OpenContainer(id)
	if err != nil {
		return nil, errdefs.NotFound(errors.New("container not found"))
	}
	const terminateTimeout = time.Minute * 2
	err = hc.Terminate()

	if hcsshim.IsPending(err) {
		err = hc.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
		return nil, err
	}
	return &container{
		client:       c,
		hcsContainer: hc,
		id:           id,
	}, nil
}
// AttachTask is only called by the daemon when restoring containers. As
// re-attach isn't possible (see LoadContainer), a NotFound error is
// unconditionally returned to allow restore to make progress.
func (*container) AttachTask(context.Context, libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	return nil, errdefs.NotFound(cerrdefs.ErrNotImplemented)
}

// Pids returns a list of process IDs running in a container. It is not
// implemented on Windows.
func (t *task) Pids(context.Context) ([]containerd.ProcessInfo, error) {
	return nil, errors.New("not implemented on Windows")
}

// Summary returns a summary of the processes running in a container.
// This is present in Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (t *task) Summary(_ context.Context) ([]libcontainerdtypes.Summary, error) {
	hc, err := t.getHCSContainer()
	if err != nil {
		return nil, err
	}

	p, err := hc.ProcessList()
	if err != nil {
		return nil, err
	}

	pl := make([]libcontainerdtypes.Summary, len(p))
	for i := range p {
		pl[i] = libcontainerdtypes.Summary{
			ImageName:                    p[i].ImageName,
			CreatedAt:                    p[i].CreateTimestamp,
			KernelTime_100Ns:             p[i].KernelTime100ns,
			MemoryCommitBytes:            p[i].MemoryCommitBytes,
			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
			ProcessID:                    p[i].ProcessId,
			UserTime_100Ns:               p[i].UserTime100ns,
			ExecID:                       "",
		}
	}
	return pl, nil
}
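// Delete returns the exit status of the process once it has exited. It
// fails with a Conflict error while the process is still running.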
func (p *process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	select {
	case <-ctx.Done():
		return nil, errors.WithStack(ctx.Err())
	case <-p.waitCh:
	default:
		return nil, errdefs.Conflict(errors.New("process is running"))
	}
	return p.exited, nil
}

func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	select {
	case <-ctx.Done():
		return nil, errors.WithStack(ctx.Err())
	case <-t.waitCh:
	default:
		return nil, errdefs.Conflict(errors.New("container is not stopped"))
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return nil, err
	}
	t.ctr.task = nil
	return t.exited, nil
}
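// ForceDelete deletes the task, killing it first if it is still running.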
func (t *task) ForceDelete(ctx context.Context) error {
	select {
	case <-t.waitCh: // Task is already stopped.
		_, err := t.Delete(ctx)
		return err
	default:
	}

	if err := t.Kill(ctx, syscall.SIGKILL); err != nil {
		return errors.Wrap(err, "could not force-kill task")
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-t.waitCh:
		_, err := t.Delete(ctx)
		return err
	}
}
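// Status reports whether the task is running, paused, or stopped, and
// includes the exit status once the task has exited.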
func (t *task) Status(ctx context.Context) (containerd.Status, error) {
	select {
	case <-t.waitCh:
		return containerd.Status{
			Status:     containerd.Stopped,
			ExitStatus: t.exited.ExitCode(),
			ExitTime:   t.exited.ExitTime(),
		}, nil
	default:
	}

	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	s := containerd.Running
	if t.ctr.isPaused {
		s = containerd.Paused
	}
	return containerd.Status{Status: s}, nil
}

// UpdateResources is a no-op: updating resources isn't supported on
// Windows, but nil is returned so that updating a container does not fail.
func (*task) UpdateResources(ctx context.Context, resources *libcontainerdtypes.Resources) error {
	return nil
}

func (*task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}
// assertIsCurrentTask returns a non-nil error if the task has been deleted.
func (t *task) assertIsCurrentTask() error {
	if t.ctr.task != t {
		return errors.WithStack(errdefs.NotFound(fmt.Errorf("task %q not found", t.id)))
	}
	return nil
}

// getHCSContainer returns a reference to the hcsshim Container for the task's
// container if neither the task nor container have been deleted.
//
// t.ctr.mu must not be locked by the calling goroutine when calling this
// function.
func (t *task) getHCSContainer() (hcsshim.Container, error) {
	t.ctr.mu.Lock()
	defer t.ctr.mu.Unlock()
	if err := t.assertIsCurrentTask(); err != nil {
		return nil, err
	}
	hc := t.ctr.hcsContainer
	if hc == nil {
		return nil, errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", t.ctr.id)))
	}
	return hc, nil
}
// ctr mutex must be held when calling this function.
func (ctr *container) shutdownContainer() error {
	var err error
	const waitTimeout = time.Minute * 5

	if !ctr.terminateInvoked {
		err = ctr.hcsContainer.Shutdown()
	}
	if hcsshim.IsPending(err) || ctr.terminateInvoked {
		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		ctr.client.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to shutdown container, terminating it")
		terminateErr := ctr.terminateContainer()
		if terminateErr != nil {
			ctr.client.logger.WithError(terminateErr).WithField("container", ctr.id).
				Error("failed to shutdown container, and subsequent terminate also failed")
			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
		}
		return err
	}
	return nil
}

// ctr mutex must be held when calling this function.
func (ctr *container) terminateContainer() error {
	const terminateTimeout = time.Minute * 5
	ctr.terminateInvoked = true
	err := ctr.hcsContainer.Terminate()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		ctr.client.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to terminate container")
		return err
	}
	return nil
}
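// reap blocks until the process exits, records its exit status, releases
// the HCS process resources, closes waitCh, and queues an EventExit
// notification to the backend. It runs in its own goroutine, spawned when
// the process is started.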
func (p *process) reap() {
	logger := p.ctr.client.logger.WithFields(logrus.Fields{
		"container": p.ctr.id,
		"process":   p.id,
	})

	var eventErr error

	// Block indefinitely for the process to exit.
	if err := p.hcsProcess.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
		}
		// Fall through here, do not return. This ensures we tell the
		// docker engine that the process/container has exited to avoid
		// a container being dropped on the floor.
	}
	exitedAt := time.Now()

	exitCode, err := p.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("unable to get exit code for process")
		}
		// Since we got an error retrieving the exit code, make sure that the
		// code we return doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we tell the
		// docker engine that the process/container has exited to avoid
		// a container being dropped on the floor.
	}

	p.mu.Lock()
	hcsProcess := p.hcsProcess
	p.hcsProcess = nil
	p.mu.Unlock()

	if err := hcsProcess.Close(); err != nil {
		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
		exitCode = -1
		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
	}

	// Explicit locking is not required as reads from exited are
	// synchronized using waitCh.
	p.exited = containerd.NewExitStatus(uint32(exitCode), exitedAt, nil)
	close(p.waitCh)

	p.ctr.client.eventQ.Append(p.ctr.id, func() {
		ei := libcontainerdtypes.EventInfo{
			ContainerID: p.ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(hcsProcess.Pid()),
			ExitCode:    uint32(exitCode),
			ExitedAt:    exitedAt,
			Error:       eventErr,
		}
		p.ctr.client.logger.WithFields(logrus.Fields{
			"container":  p.ctr.id,
			"event":      libcontainerdtypes.EventExit,
			"event-info": ei,
		}).Info("sending event")
		err := p.ctr.client.backend.ProcessEvent(p.ctr.id, libcontainerdtypes.EventExit, ei)
		if err != nil {
			p.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
				"container":  p.ctr.id,
				"event":      libcontainerdtypes.EventExit,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
}
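// Delete shuts down the compute system (terminating it if graceful
// shutdown fails) and releases the hcsshim.Container resources. It fails
// with a Conflict error if the container still has a running task.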
func (ctr *container) Delete(context.Context) error {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()

	if ctr.hcsContainer == nil {
		return errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", ctr.id)))
	}

	// Check that there is no task currently running.
	if ctr.task != nil {
		select {
		case <-ctr.task.waitCh:
		default:
			return errors.WithStack(errdefs.Conflict(errors.New("container is not stopped")))
		}
	}

	var (
		logger = ctr.client.logger.WithFields(logrus.Fields{
			"container": ctr.id,
		})
		thisErr error
	)

	if err := ctr.shutdownContainer(); err != nil {
		logger.WithError(err).Warn("failed to shutdown container")
		thisErr = errors.Wrap(err, "failed to shutdown container")
	} else {
		logger.Debug("completed container shutdown")
	}

	if err := ctr.hcsContainer.Close(); err != nil {
		logger.WithError(err).Error("failed to clean hcs container resources")
		thisErr = errors.Wrap(err, "failed to terminate container")
	}

	ctr.hcsContainer = nil
	return thisErr
}