client_local_windows.go

package libcontainerd

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	opengcs "github.com/Microsoft/opengcs/client"
	"github.com/containerd/containerd"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)
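
// InitProcessName is the name given to the first process of a container.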
const InitProcessName = "init"
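
// process keeps the state of a single process running in a container (the
// init process or an exec) together with its HCS handle.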
type process struct {
	id         string
	pid        int
	hcsProcess hcsshim.Process
}
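
// container tracks the state the daemon holds for a single HCS container.
// The embedded mutex guards the mutable fields below.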
type container struct {
	sync.Mutex

	// The ociSpec is required, as client.Create() needs a spec, but can
	// be called from the RestartManager context which does not otherwise
	// have access to the Spec
	ociSpec *specs.Spec

	isWindows           bool
	manualStopRequested bool
	hcsContainer        hcsshim.Container

	id            string
	status        Status
	exitedAt      time.Time
	exitCode      uint32
	waitCh        chan struct{}
	init          *process
	execs         map[string]*process
	updatePending bool
}

// Win32 error codes that are used for various workarounds.
// These really should be ALL_CAPS to match Go's syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard-code "docker" here.
const defaultOwner = "docker"
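
// Version returns the version of the containerd runtime. It is not
// implemented on Windows.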
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return containerd.Version{}, errors.New("not implemented on Windows")
}

// Create is the entrypoint to create a container from a spec.
// The table below shows the fields required for HCS JSON calling parameters;
// fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
// {
//	"SystemType": "Container",
//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Owner": "docker",
//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//	"IgnoreFlushesDuringBoot": true,
//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "5e0055c814a6",
//	"MappedDirectories": [],
//	"HvPartition": false,
//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	"Servicing": false
// }
//
// Isolation=Hyper-V example:
//
// {
//	"SystemType": "Container",
//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//	"Owner": "docker",
//	"IgnoreFlushesDuringBoot": true,
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "475c2c58933b",
//	"MappedDirectories": [],
//	"HvPartition": true,
//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//	"DNSSearchList": "a.com,b.com,c.com",
//	"HvRuntime": {
//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//	},
//	"Servicing": false
// }
func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}) error {
	if ctr := c.getContainer(id); ctr != nil {
		return errors.WithStack(newConflictError("id already in use"))
	}

	// spec.Linux must be nil for Windows containers, but spec.Windows
	// will be filled in regardless of container platform. This is a
	// temporary workaround due to LCOW requiring layer folder paths,
	// which are stored under spec.Windows.
	//
	// TODO: @darrenstahlmsft fix this once the OCI spec is updated to
	// support layer folder paths for LCOW
	if spec.Linux == nil {
		return c.createWindows(id, spec, runtimeOptions)
	}
	return c.createLinux(id, spec, runtimeOptions)
}
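
// createWindows performs the Windows-specific half of Create: it translates
// the OCI spec into an hcsshim.ContainerConfig (process- or Hyper-V-isolated),
// creates the HCS compute system, and starts it.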
func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
	logger := c.logger.WithField("container", id)
	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    id,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
		HostName:                spec.Hostname,
		HvPartition:             false,
		Servicing:               spec.Windows.Servicing,
	}

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HyperV != nil {
		configuration.HvPartition = true
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
		configuration.Credentials = cs
	}

	// We must have at least two layers in the spec, the bottom one being a
	// base image, the top one being the RW layer.
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	if configuration.HvPartition {
		// We don't currently support setting the utility VM image explicitly.
		// TODO @swernli/jhowardmsft circa RS3/4, this may be re-locatable.
		if spec.Windows.HyperV.UtilityVMPath != "" {
			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
		}

		// Find the upper-most utility VM image.
		var uvmImagePath string
		for _, path := range layerFolders {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}

		if spec.Root.Path != "" {
			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
		}
	} else {
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
		// Note: check the match result, not just the compile error; the
		// regex itself is a constant and always compiles.
		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
		}
		// HCS API requires the trailing backslash to be removed
		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
	}

	if spec.Root.Readonly {
		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
	}

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	var mds []hcsshim.MappedDir
	var mps []hcsshim.MappedPipe
	for _, mount := range spec.Mounts {
		const pipePrefix = `\\.\pipe\`
		if mount.Type != "" {
			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
		}
		if strings.HasPrefix(mount.Destination, pipePrefix) {
			mp := hcsshim.MappedPipe{
				HostPath:          mount.Source,
				ContainerPipeName: mount.Destination[len(pipePrefix):],
			}
			mps = append(mps, mp)
		} else {
			md := hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					md.ReadOnly = true
				}
			}
			mds = append(mds, md)
		}
	}
	configuration.MappedDirectories = mds
	if len(mps) > 0 && system.GetOSVersion().Build < 16210 { // replace with Win10 RS3 build number at RTM
		return errors.New("named pipe mounts are not supported on this version of Windows")
	}
	configuration.MappedPipes = mps

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	ctr := &container{
		id:           id,
		execs:        make(map[string]*process),
		isWindows:    true,
		ociSpec:      spec,
		hcsContainer: hcsContainer,
		status:       StatusCreated,
		waitCh:       make(chan struct{}),
	}

	// Start the container. If this is a servicing container, this call
	// will block until the container is done with the servicing
	// execution.
	logger.Debug("starting container")
	if err = hcsContainer.Start(); err != nil {
		c.logger.WithError(err).Error("failed to start container")
		ctr.debugGCS()
		if err := c.terminateContainer(ctr); err != nil {
			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			c.logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		return err
	}
	ctr.debugGCS()

	c.Lock()
	c.containers[id] = ctr
	c.Unlock()

	logger.Debug("createWindows() completed successfully")
	return nil
}
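
// createLinux performs the LCOW half of Create: it translates the OCI spec
// and the opengcs runtime options into an hcsshim.ContainerConfig for a
// Linux container hosted in a Hyper-V utility VM, creates the compute
// system, and starts it.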
func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error {
	logrus.Debugf("libcontainerd: createLinux(): containerId %s ", id)
	logger := c.logger.WithField("container", id)

	if runtimeOptions == nil {
		return fmt.Errorf("lcow option must be supplied to the runtime")
	}
	lcowConfig, ok := runtimeOptions.(*opengcs.Config)
	if !ok {
		return fmt.Errorf("lcow option must be supplied to the runtime")
	}

	configuration := &hcsshim.ContainerConfig{
		HvPartition:                 true,
		Name:                        id,
		SystemType:                  "container",
		ContainerType:               "linux",
		Owner:                       defaultOwner,
		TerminateOnLastHandleClosed: true,
	}

	if lcowConfig.ActualMode == opengcs.ModeActualVhdx {
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath:          lcowConfig.Vhdx,
			BootSource:         "Vhd",
			WritableBootSource: false,
		}
	} else {
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath:           lcowConfig.KirdPath,
			LinuxKernelFile:     lcowConfig.KernelFile,
			LinuxInitrdFile:     lcowConfig.InitrdFile,
			LinuxBootParameters: lcowConfig.BootParameters,
		}
	}

	if spec.Windows == nil {
		return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
	}

	// We must have at least one layer in the spec
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 {
		return fmt.Errorf("OCI spec is invalid - at least one LayerFolder must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: filepath.Join(layerPath, "layer.vhd"),
		})
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	// Add the mounts (volumes, bind mounts etc) to the structure. We have to do
	// some translation for both the mapped directories passed into HCS and in
	// the spec.
	//
	// For HCS, we only pass in the mounts from the spec which are type "bind".
	// Further, the "ContainerPath" field (which is a little misleadingly
	// named when it applies to the utility VM rather than the container in the
	// utility VM) is moved to under /tmp/gcs/<ID>/binds, where this is passed
	// by the caller through a 'uvmpath' option.
	//
	// We do similar translation for the mounts in the spec by stripping out
	// the uvmpath option, and translating the Source path to the location in the
	// utility VM calculated above.
	//
	// From inside the utility VM, you would see a 9p mount such as in the following
	// where a host folder has been mapped to /target. The line with /tmp/gcs/<ID>/binds
	// specifically:
	//
	// / # mount
	// rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934)
	// proc on /proc type proc (rw,relatime)
	// sysfs on /sys type sysfs (rw,relatime)
	// udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755)
	// tmpfs on /run type tmpfs (rw,relatime)
	// cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma)
	// mqueue on /dev/mqueue type mqueue (rw,relatime)
	// devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000)
	// /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6)
	// /dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
	// /dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
	// overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work)
	//
	// /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l
	// total 16
	// drwx------    3 0        0               60 Sep  7 18:54 binds
	// -rw-r--r--    1 0        0             3345 Sep  7 18:54 config.json
	// drwxr-xr-x   10 0        0             4096 Sep  6 17:26 layer0
	// drwxr-xr-x    1 0        0             4096 Sep  7 18:54 rootfs
	// drwxr-xr-x    5 0        0             4096 Sep  7 18:54 scratch
	//
	// /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds
	// total 0
	// drwxrwxrwt    2 0        0             4096 Sep  7 16:51 target
	mds := []hcsshim.MappedDir{}
	specMounts := []specs.Mount{}
	for _, mount := range spec.Mounts {
		specMount := mount
		if mount.Type == "bind" {
			// Strip out the uvmpath from the options
			updatedOptions := []string{}
			uvmPath := ""
			readonly := false
			for _, opt := range mount.Options {
				dropOption := false
				elements := strings.SplitN(opt, "=", 2)
				switch elements[0] {
				case "uvmpath":
					uvmPath = elements[1]
					dropOption = true
				case "rw":
				case "ro":
					readonly = true
				case "rbind":
				default:
					return fmt.Errorf("unsupported option %q", opt)
				}
				if !dropOption {
					updatedOptions = append(updatedOptions, opt)
				}
			}
			mount.Options = updatedOptions
			if uvmPath == "" {
				return fmt.Errorf("no uvmpath for bind mount %+v", mount)
			}
			md := hcsshim.MappedDir{
				HostPath:          mount.Source,
				ContainerPath:     path.Join(uvmPath, mount.Destination),
				CreateInUtilityVM: true,
				ReadOnly:          readonly,
			}
			mds = append(mds, md)
			specMount.Source = path.Join(uvmPath, mount.Destination)
		}
		specMounts = append(specMounts, specMount)
	}
	configuration.MappedDirectories = mds

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return err
	}

	spec.Mounts = specMounts

	// Construct a container object for calling start on it.
	ctr := &container{
		id:           id,
		execs:        make(map[string]*process),
		isWindows:    false,
		ociSpec:      spec,
		hcsContainer: hcsContainer,
		status:       StatusCreated,
		waitCh:       make(chan struct{}),
	}

	// Start the container. If this is a servicing container, this call
	// will block until the container is done with the servicing
	// execution.
	logger.Debug("starting container")
	if err = hcsContainer.Start(); err != nil {
		c.logger.WithError(err).Error("failed to start container")
		ctr.debugGCS()
		if err := c.terminateContainer(ctr); err != nil {
			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			c.logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		return err
	}
	ctr.debugGCS()

	c.Lock()
	c.containers[id] = ctr
	c.Unlock()

	c.eventQ.append(id, func() {
		ei := EventInfo{
			ContainerID: id,
		}
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     EventCreate,
		}).Info("sending event")
		err := c.backend.ProcessEvent(id, EventCreate, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": id,
				"event":     EventCreate,
			}).Error("failed to process event")
		}
	})

	logger.Debug("createLinux() completed successfully")
	return nil
}
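
// Start launches the init process of a previously created container, wiring
// its stdio through attachStdio. For servicing containers it instead waits
// synchronously for the servicing operation to complete.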
func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) {
	ctr := c.getContainer(id)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(newNotFoundError("no such container"))
	case ctr.init != nil:
		return -1, errors.WithStack(newConflictError("container already started"))
	}

	logger := c.logger.WithField("container", id)

	// Note we always tell HCS to create stdout as it's required
	// regardless of '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if we're not running with -t.
	var (
		emulateConsole   bool
		createStdErrPipe bool
	)
	if ctr.ociSpec.Process != nil {
		emulateConsole = ctr.ociSpec.Process.Terminal
		createStdErrPipe = !ctr.ociSpec.Process.Terminal && !ctr.ociSpec.Windows.Servicing
	}

	createProcessParms := &hcsshim.ProcessConfig{
		EmulateConsole:   emulateConsole,
		WorkingDirectory: ctr.ociSpec.Process.Cwd,
		CreateStdInPipe:  !ctr.ociSpec.Windows.Servicing,
		CreateStdOutPipe: !ctr.ociSpec.Windows.Servicing,
		CreateStdErrPipe: createStdErrPipe,
	}

	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
	if ctr.isWindows {
		createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ")
	} else {
		createProcessParms.CommandArgs = ctr.ociSpec.Process.Args
	}
	createProcessParms.User = ctr.ociSpec.Process.User.Username

	// LCOW requires the raw OCI spec passed through HCS and onwards to
	// GCS for the utility VM.
	if !ctr.isWindows {
		ociBuf, err := json.Marshal(ctr.ociSpec)
		if err != nil {
			return -1, err
		}
		ociRaw := json.RawMessage(ociBuf)
		createProcessParms.OCISpecification = &ociRaw
	}

	ctr.Lock()
	defer ctr.Unlock()

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Error("CreateProcess() failed")
		return -1, err
	}
	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()
	p := &process{
		hcsProcess: newProcess,
		id:         InitProcessName,
		pid:        newProcess.Pid(),
	}
	logger.WithField("pid", p.pid).Debug("init process started")

	// If this is a servicing container, wait on the process synchronously here
	// and, if it succeeds, wait for it to cleanly shut down and merge into the
	// parent container.
	if ctr.ociSpec.Windows.Servicing {
		// reapProcess takes the lock
		ctr.Unlock()
		defer ctr.Lock()
		exitCode := c.reapProcess(ctr, p)

		if exitCode != 0 {
			return -1, errors.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.id, exitCode)
		}

		return p.pid, nil
	}

	var (
		stdout, stderr io.ReadCloser
		stdin          io.WriteCloser
	)
	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return -1, err
	}

	iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}

	_, err = attachStdio(iopipe)
	if err != nil {
		logger.WithError(err).Error("failed to attach stdio")
		return -1, err
	}
	ctr.status = StatusRunning
	ctr.init = p

	// Spin up a goroutine waiting for exit to handle cleanup
	go c.reapProcess(ctr, p)

	// Generate the associated event
	c.eventQ.append(id, func() {
		ei := EventInfo{
			ContainerID: id,
			ProcessID:   InitProcessName,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      EventStart,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ei.ContainerID, EventStart, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  id,
				"event":      EventStart,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	logger.Debug("start() completed")
	return p.pid, nil
}

// Exec adds a process to a running container
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(newNotFoundError("no such container"))
	case ctr.hcsContainer == nil:
		return -1, errors.WithStack(newInvalidParameterError("container is not running"))
	case ctr.execs != nil && ctr.execs[processID] != nil:
		return -1, errors.WithStack(newConflictError("id already in use"))
	}
	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"exec":      processID,
	})

	// Note we always tell HCS to create stdout as it's required regardless of
	// '-i' or '-t' options, so that docker can always grab the output through
	// logs. We also tell HCS to always create stdin, even if it's not used -
	// it will be closed shortly. Stderr is only created if we're not running
	// with -t.
	createProcessParms := hcsshim.ProcessConfig{
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !spec.Terminal,
	}
	if spec.Terminal {
		createProcessParms.EmulateConsole = true
		if spec.ConsoleSize != nil {
			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
		}
	}

	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if spec.Cwd != "" {
		createProcessParms.WorkingDirectory = spec.Cwd
	} else {
		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
	if ctr.isWindows {
		createProcessParms.CommandLine = strings.Join(spec.Args, " ")
	} else {
		createProcessParms.CommandArgs = spec.Args
	}
	createProcessParms.User = spec.User.Username

	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var (
		stdout, stderr io.ReadCloser
		stdin          io.WriteCloser
	)
	newProcess, err := ctr.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logger.WithError(err).Errorf("exec's CreateProcess() failed")
		return -1, err
	}
	pid := newProcess.Pid()
	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logger.WithError(err).Error("getting std pipes failed")
		return -1, err
	}

	iopipe := &IOPipe{Terminal: spec.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}

	// Tell the engine to attach streams back to the client
	_, err = attachStdio(iopipe)
	if err != nil {
		return -1, err
	}

	p := &process{
		id:         processID,
		pid:        pid,
		hcsProcess: newProcess,
	}

	// Add the process to the container's list of processes
	ctr.Lock()
	ctr.execs[processID] = p
	ctr.Unlock()

	// Spin up a goroutine waiting for exit to handle cleanup
	go c.reapProcess(ctr, p)

	c.eventQ.append(ctr.id, func() {
		ei := EventInfo{
			ContainerID: ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      EventExecAdded,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ctr.id, EventExecAdded, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      EventExecAdded,
				"event-info": ei,
			}).Error("failed to process event")
		}
		err = c.backend.ProcessEvent(ctr.id, EventExecStarted, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      EventExecStarted,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})

	return pid, nil
}

// SignalProcess handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
	ctr, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	ctr.manualStopRequested = true

	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"pid":       p.pid,
		"signal":    signal,
	})
	logger.Debug("Signal()")

	if processID == InitProcessName {
		if syscall.Signal(signal) == syscall.SIGKILL {
			// Terminate the compute system
			if err := ctr.hcsContainer.Terminate(); err != nil {
				if !hcsshim.IsPending(err) {
					logger.WithError(err).Error("failed to terminate hcsshim container")
				}
			}
		} else {
			// Shut down the container
			if err := ctr.hcsContainer.Shutdown(); err != nil {
				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
					// ignore errors
					logger.WithError(err).Error("failed to shutdown hcsshim container")
				}
			}
		}
	} else {
		return p.hcsProcess.Kill()
	}
	return nil
}

// ResizeTerminal handles a CLI event to resize an interactive docker run or
// docker exec window.
func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"height":    height,
		"width":     width,
		"pid":       p.pid,
	}).Debug("resizing")
	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
}
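
// CloseStdin closes the stdin pipe of the given process in the given
// container.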
func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	return p.hcsProcess.CloseStdin()
}

// Pause handles pause requests for containers
func (c *client) Pause(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return err
	}

	if ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot pause Windows Server Containers")
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Pause(); err != nil {
		return err
	}

	ctr.status = StatusPaused

	c.eventQ.append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, EventPaused, EventInfo{
			ContainerID: containerID,
			ProcessID:   InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     EventPaused,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     EventPaused,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Resume handles resume requests for containers
func (c *client) Resume(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return err
	}

	if ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot resume Windows Server Containers")
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Resume(); err != nil {
		return err
	}

	ctr.status = StatusRunning

	c.eventQ.append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, EventResumed, EventInfo{
			ContainerID: containerID,
			ProcessID:   InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     EventResumed,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     EventResumed,
			}).Error("failed to process event")
		}
	})

	return nil
}

// Stats handles stats requests for containers
func (c *client) Stats(_ context.Context, containerID string) (*Stats, error) {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return nil, err
	}

	readAt := time.Now()
	s, err := ctr.hcsContainer.Statistics()
	if err != nil {
		return nil, err
	}
	return &Stats{
		Read:     readAt,
		HCSStats: &s,
	}, nil
}

// Restore is the handler for restoring a container
func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (bool, int, error) {
	c.logger.WithField("container", id).Debug("restore()")

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shoot it regardless of whether HCS
	// knows about it. We explicitly just log a warning if the terminate
	// fails. Then we tell the backend the container exited.
	if hc, err := hcsshim.OpenContainer(id); err == nil {
		const terminateTimeout = time.Minute * 2
		err := hc.Terminate()

		if hcsshim.IsPending(err) {
			err = hc.WaitTimeout(terminateTimeout)
		} else if hcsshim.IsAlreadyStopped(err) {
			err = nil
		}

		if err != nil {
			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
			return false, -1, err
		}
	}
	return false, -1, nil
}

// ListPids returns a list of process IDs running in a container. It is not
// implemented on Windows.
func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
	return nil, errors.New("not implemented on Windows")
}

// Summary returns a summary of the processes running in a container.
// This is present in Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (c *client) Summary(_ context.Context, containerID string) ([]Summary, error) {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return nil, err
	}

	p, err := ctr.hcsContainer.ProcessList()
	if err != nil {
		return nil, err
	}

	pl := make([]Summary, len(p))
	for i := range p {
		pl[i] = Summary(p[i])
	}
	return pl, nil
}
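
// DeleteTask returns the exit code and exit time of a container's init
// process once the container has stopped.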
func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
	ec := -1
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return uint32(ec), time.Now(), errors.WithStack(newNotFoundError("no such container"))
	}

	select {
	case <-ctx.Done():
		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
	case <-ctr.waitCh:
	default:
		return uint32(ec), time.Now(), errors.New("container is not stopped")
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.exitCode, ctr.exitedAt, nil
}
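
// Delete removes a created or stopped container from the set of containers
// tracked by this client.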
func (c *client) Delete(_ context.Context, containerID string) error {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return errors.WithStack(newNotFoundError("no such container"))
	}

	ctr.Lock()
	defer ctr.Unlock()

	switch ctr.status {
	case StatusCreated:
		if err := c.shutdownContainer(ctr); err != nil {
			return err
		}
		fallthrough
	case StatusStopped:
		delete(c.containers, containerID)
		return nil
	}

	return errors.WithStack(newInvalidParameterError("container is not stopped"))
}

func (c *client) Status(ctx context.Context, containerID string) (Status, error) {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.status, nil
}

func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error {
	// Updating resources isn't supported on Windows, but we return nil so
	// that updating a container does not fail outright.
	return nil
}

func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}
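
// getContainer returns the container tracked under the given id, or nil if
// it is unknown to this client.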
func (c *client) getContainer(id string) *container {
	c.Lock()
	ctr := c.containers[id]
	c.Unlock()

	return ctr
}
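
// getProcess looks up a container and one of its processes; processID is
// either InitProcessName or the id of an exec, e.g.:
//
//	ctr, p, err := c.getProcess(containerID, InitProcessName)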
func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return nil, nil, errors.WithStack(newNotFoundError("no such container"))
	case ctr.init == nil:
		return nil, nil, errors.WithStack(newNotFoundError("container is not running"))
	case processID == InitProcessName:
		return ctr, ctr.init, nil
	default:
		ctr.Lock()
		defer ctr.Unlock()
		if ctr.execs == nil {
			return nil, nil, errors.WithStack(newNotFoundError("no execs"))
		}
	}

	p := ctr.execs[processID]
	if p == nil {
		return nil, nil, errors.WithStack(newNotFoundError("no such exec"))
	}
	return ctr, p, nil
}
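
// shutdownContainer asks HCS to shut the container down cleanly, waiting up
// to shutdownTimeout for a pending shutdown to complete, and falls back to
// terminating the container if that fails.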
func (c *client) shutdownContainer(ctr *container) error {
	const shutdownTimeout = time.Minute * 5
	err := ctr.hcsContainer.Shutdown()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(shutdownTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to shutdown container, terminating it")
		return c.terminateContainer(ctr)
	}

	return nil
}
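
// terminateContainer forcibly stops the container via HCS, waiting up to
// terminateTimeout for a pending termination to complete.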
func (c *client) terminateContainer(ctr *container) error {
	const terminateTimeout = time.Minute * 5
	err := ctr.hcsContainer.Terminate()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to terminate container")
		return err
	}

	return nil
}
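
// reapProcess blocks until the process exits, records its exit code, and
// emits the exit event. For the init process it also updates the container
// status, shuts the container down, and releases its HCS resources.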
func (c *client) reapProcess(ctr *container, p *process) int {
	logger := c.logger.WithFields(logrus.Fields{
		"container": ctr.id,
		"process":   p.id,
	})

	// Block indefinitely for the process to exit.
	if err := p.hcsProcess.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
		}
		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}
	exitedAt := time.Now()

	exitCode, err := p.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("unable to get exit code for process")
		}
		// Since we got an error retrieving the exit code, make sure that the
		// code we return doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}

	if err := p.hcsProcess.Close(); err != nil {
		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
	}

	var pendingUpdates bool
	if p.id == InitProcessName {
		// Update container status
		ctr.Lock()
		ctr.status = StatusStopped
		ctr.exitedAt = exitedAt
		ctr.exitCode = uint32(exitCode)
		close(ctr.waitCh)
		ctr.Unlock()

		// Handle any servicing
		if exitCode == 0 && ctr.isWindows && !ctr.ociSpec.Windows.Servicing {
			pendingUpdates, err = ctr.hcsContainer.HasPendingUpdates()
			logger.Infof("Pending updates: %v", pendingUpdates)
			if err != nil {
				logger.WithError(err).
					Warnf("failed to check for pending updates (container may have been killed)")
			}
		}

		if err := c.shutdownContainer(ctr); err != nil {
			logger.WithError(err).Warn("failed to shutdown container")
		} else {
			logger.Debug("completed container shutdown")
		}

		if err := ctr.hcsContainer.Close(); err != nil {
			logger.WithError(err).Error("failed to clean hcs container resources")
		}
	}

	if !(ctr.isWindows && ctr.ociSpec.Windows.Servicing) {
		c.eventQ.append(ctr.id, func() {
			ei := EventInfo{
				ContainerID:   ctr.id,
				ProcessID:     p.id,
				Pid:           uint32(p.pid),
				ExitCode:      uint32(exitCode),
				ExitedAt:      exitedAt,
				UpdatePending: pendingUpdates,
			}
			c.logger.WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      EventExit,
				"event-info": ei,
			}).Info("sending event")
			err := c.backend.ProcessEvent(ctr.id, EventExit, ei)
			if err != nil {
				c.logger.WithError(err).WithFields(logrus.Fields{
					"container":  ctr.id,
					"event":      EventExit,
					"event-info": ei,
				}).Error("failed to process event")
			}
			if p.id != InitProcessName {
				ctr.Lock()
				delete(ctr.execs, p.id)
				ctr.Unlock()
			}
		})
	}

	return exitCode
}