client_local_windows.go

package libcontainerd

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	opengcs "github.com/Microsoft/opengcs/client"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)

// InitProcessName is the name given to the first process of a container.
const InitProcessName = "init"

type process struct {
	id         string
	pid        int
	hcsProcess hcsshim.Process
}

type container struct {
	sync.Mutex

	// The ociSpec is required, as client.Create() needs a spec, but can
	// be called from the RestartManager context which does not otherwise
	// have access to the Spec
	ociSpec *specs.Spec

	isWindows           bool
	manualStopRequested bool
	hcsContainer        hcsshim.Container

	id            string
	status        Status
	exitedAt      time.Time
	exitCode      uint32
	waitCh        chan struct{}
	init          *process
	execs         map[string]*process
	updatePending bool
}

// Win32 error codes that are used for various workarounds. These really
// should be ALL_CAPS to match Go's syscall library and standard Win32 error
// conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard-code "docker" here as this
// is the docker daemon.
const defaultOwner = "docker"

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return containerd.Version{}, errors.New("not implemented on Windows")
}

// Create is the entrypoint to create a container from a spec.
// The table below shows the fields required for HCS JSON calling parameters;
// fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
// {
//	"SystemType": "Container",
//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Owner": "docker",
//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//	"IgnoreFlushesDuringBoot": true,
//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "5e0055c814a6",
//	"MappedDirectories": [],
//	"HvPartition": false,
//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	"Servicing": false
// }
//
// Isolation=Hyper-V example:
//
// {
//	"SystemType": "Container",
//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//	"Owner": "docker",
//	"IgnoreFlushesDuringBoot": true,
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "475c2c58933b",
//	"MappedDirectories": [],
//	"HvPartition": true,
//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//	"DNSSearchList": "a.com,b.com,c.com",
//	"HvRuntime": {
//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//	},
//	"Servicing": false
// }
func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}) error {
	if ctr := c.getContainer(id); ctr != nil {
		return errors.WithStack(newConflictError("id already in use"))
	}

	// spec.Linux must be nil for Windows containers, but spec.Windows
	// will be filled in regardless of container platform. This is a
	// temporary workaround due to LCOW requiring layer folder paths,
	// which are stored under spec.Windows.
	//
	// TODO: @darrenstahlmsft fix this once the OCI spec is updated to
	// support layer folder paths for LCOW
	if spec.Linux == nil {
		return c.createWindows(id, spec, runtimeOptions)
	}
	return c.createLinux(id, spec, runtimeOptions)
}
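
// exampleProcessIsolationConfig is an illustrative sketch, not part of the
// original daemon code: it shows how the Isolation=Process column of the
// table on Create above maps onto an hcsshim.ContainerConfig, and how that
// structure becomes the HCS JSON calling parameters. The container ID, GUID
// and paths are hypothetical placeholders.
func exampleProcessIsolationConfig() ([]byte, error) {
	cfg := &hcsshim.ContainerConfig{
		SystemType:      "Container",
		Name:            "0123456789abcdef",                                  // hypothetical container ID
		Owner:           defaultOwner,
		VolumePath:      `\\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}`, // hypothetical GUID
		LayerFolderPath: `C:\control\windowsfilter\0123456789abcdef`,        // hypothetical %root% path
		Layers: []hcsshim.Layer{
			{ID: "18955d65-d45a-557b-bf1c-49d6dfefc526", Path: `C:\control\windowsfilter\layerID`},
		},
		HvPartition: false,
	}
	// HCS receives this configuration serialized as JSON, as in the
	// Isolation=Process example in the comment on Create.
	return json.Marshal(cfg)
}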

func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
	logger := c.logger.WithField("container", id)
	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    id,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
		HostName:                spec.Hostname,
		HvPartition:             false,
		Servicing:               spec.Windows.Servicing,
	}

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HyperV != nil {
		configuration.HvPartition = true
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
		configuration.Credentials = cs
	}

	// We must have at least two layers in the spec, the bottom one being a
	// base image, the top one being the RW layer.
	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	if configuration.HvPartition {
		// We don't currently support setting the utility VM image explicitly.
		// TODO @swernli/jhowardmsft circa RS3/4, this may be re-locatable.
		if spec.Windows.HyperV.UtilityVMPath != "" {
			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
		}

		// Find the upper-most utility VM image.
		var uvmImagePath string
		for _, path := range layerFolders {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}

		if spec.Root.Path != "" {
			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
		}
	} else {
		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
		// regexp.MatchString returns (matched, err); the path is invalid
		// when the match fails, not only when the regexp itself errors.
		// A valid path looks like `\\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}\`.
		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
		}
		// HCS API requires the trailing backslash to be removed
		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
	}

	if spec.Root.Readonly {
		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
	}

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure. Mounts
	// whose destination is under \\.\pipe\ are passed to HCS as mapped
	// pipes; everything else becomes a mapped directory, honoring a "ro"
	// option.
	var mds []hcsshim.MappedDir
	var mps []hcsshim.MappedPipe
	for _, mount := range spec.Mounts {
		const pipePrefix = `\\.\pipe\`
		if mount.Type != "" {
			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
		}
		if strings.HasPrefix(mount.Destination, pipePrefix) {
			mp := hcsshim.MappedPipe{
				HostPath:          mount.Source,
				ContainerPipeName: mount.Destination[len(pipePrefix):],
			}
			mps = append(mps, mp)
		} else {
			md := hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					md.ReadOnly = true
				}
			}
			mds = append(mds, md)
		}
	}
	configuration.MappedDirectories = mds
	if len(mps) > 0 && system.GetOSVersion().Build < 16210 { // replace with Win10 RS3 build number at RTM
		return errors.New("named pipe mounts are not supported on this version of Windows")
	}
	configuration.MappedPipes = mps

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	ctr := &container{
		id:           id,
		execs:        make(map[string]*process),
		isWindows:    true,
		ociSpec:      spec,
		hcsContainer: hcsContainer,
		status:       StatusCreated,
		waitCh:       make(chan struct{}),
	}

	// Start the container. If this is a servicing container, this call
	// will block until the container is done with the servicing
	// execution.
	logger.Debug("starting container")
	if err = hcsContainer.Start(); err != nil {
		c.logger.WithError(err).Error("failed to start container")
		ctr.debugGCS()
		if err := c.terminateContainer(ctr); err != nil {
			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			c.logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		return err
	}
	ctr.debugGCS()

	c.Lock()
	c.containers[id] = ctr
	c.Unlock()

	logger.Debug("createWindows() completed successfully")
	return nil
}
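
// layerToHCSExample is an illustrative sketch, not called by the daemon: it
// isolates how a layer folder path becomes an hcsshim.Layer in the loops in
// createWindows and createLinux. The folder name is deterministically
// converted to a GUID with hcsshim.NameToGuid and paired with the full path.
func layerToHCSExample(layerPath string) (hcsshim.Layer, error) {
	_, filename := filepath.Split(layerPath)
	g, err := hcsshim.NameToGuid(filename)
	if err != nil {
		return hcsshim.Layer{}, err
	}
	return hcsshim.Layer{ID: g.ToString(), Path: layerPath}, nil
}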

func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error {
	logrus.Debugf("libcontainerd: createLinux(): containerId %s ", id)
	logger := c.logger.WithField("container", id)

	if runtimeOptions == nil {
		return fmt.Errorf("lcow option must be supplied to the runtime")
	}
	lcowConfig, ok := runtimeOptions.(*opengcs.Config)
	if !ok {
		return fmt.Errorf("lcow option must be supplied to the runtime")
	}

	configuration := &hcsshim.ContainerConfig{
		HvPartition:                 true,
		Name:                        id,
		SystemType:                  "container",
		ContainerType:               "linux",
		Owner:                       defaultOwner,
		TerminateOnLastHandleClosed: true,
	}

	if lcowConfig.ActualMode == opengcs.ModeActualVhdx {
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath:          lcowConfig.Vhdx,
			BootSource:         "Vhd",
			WritableBootSource: false,
		}
	} else {
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath:           lcowConfig.KirdPath,
			LinuxKernelFile:     lcowConfig.KernelFile,
			LinuxInitrdFile:     lcowConfig.InitrdFile,
			LinuxBootParameters: lcowConfig.BootParameters,
		}
	}

	if spec.Windows == nil {
		return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
	}

	// We must have at least one layer in the spec.
	if len(spec.Windows.LayerFolders) == 0 {
		return fmt.Errorf("OCI spec is invalid - at least one LayerFolder must be supplied to the runtime")
	}

	// Strip off the top-most layer as that's passed in separately to HCS
	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]

	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: filepath.Join(layerPath, "layer.vhd"),
		})
	}

	if spec.Windows.Network != nil {
		configuration.EndpointList = spec.Windows.Network.EndpointList
		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
		if spec.Windows.Network.DNSSearchList != nil {
			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
		}
		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
	}

	// Add the mounts (volumes, bind mounts etc) to the structure. We have to do
	// some translation for both the mapped directories passed into HCS and in
	// the spec.
	//
	// For HCS, we only pass in the mounts from the spec which are type "bind".
	// Further, the "ContainerPath" field (which is a little misleadingly
	// named when it applies to the utility VM rather than the container in the
	// utility VM) is moved to under /tmp/gcs/<ID>/binds, where this is passed
	// by the caller through a 'uvmpath' option.
	//
	// We do similar translation for the mounts in the spec by stripping out
	// the uvmpath option, and translating the Source path to the location in the
	// utility VM calculated above.
	//
	// From inside the utility VM, you would see a 9p mount such as in the following
	// where a host folder has been mapped to /target. The line with /tmp/gcs/<ID>/binds
	// specifically:
	//
	// / # mount
	// rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934)
	// proc on /proc type proc (rw,relatime)
	// sysfs on /sys type sysfs (rw,relatime)
	// udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755)
	// tmpfs on /run type tmpfs (rw,relatime)
	// cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma)
	// mqueue on /dev/mqueue type mqueue (rw,relatime)
	// devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000)
	// /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6)
	// /dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
	// /dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
	// overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work)
	//
	// /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l
	// total 16
	// drwx------    3 0        0               60 Sep  7 18:54 binds
	// -rw-r--r--    1 0        0             3345 Sep  7 18:54 config.json
	// drwxr-xr-x   10 0        0             4096 Sep  6 17:26 layer0
	// drwxr-xr-x    1 0        0             4096 Sep  7 18:54 rootfs
	// drwxr-xr-x    5 0        0             4096 Sep  7 18:54 scratch
	//
	// /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds
	// total 0
	// drwxrwxrwt    2 0        0             4096 Sep  7 16:51 target
	mds := []hcsshim.MappedDir{}
	specMounts := []specs.Mount{}
	for _, mount := range spec.Mounts {
		specMount := mount
		if mount.Type == "bind" {
			// Strip out the uvmpath from the options
			updatedOptions := []string{}
			uvmPath := ""
			readonly := false
			for _, opt := range mount.Options {
				dropOption := false
				elements := strings.SplitN(opt, "=", 2)
				switch elements[0] {
				case "uvmpath":
					uvmPath = elements[1]
					dropOption = true
				case "rw":
				case "ro":
					readonly = true
				case "rbind":
				default:
					return fmt.Errorf("unsupported option %q", opt)
				}
				if !dropOption {
					updatedOptions = append(updatedOptions, opt)
				}
			}
			mount.Options = updatedOptions
			if uvmPath == "" {
				return fmt.Errorf("no uvmpath for bind mount %+v", mount)
			}
			md := hcsshim.MappedDir{
				HostPath:          mount.Source,
				ContainerPath:     path.Join(uvmPath, mount.Destination),
				CreateInUtilityVM: true,
				ReadOnly:          readonly,
			}
			mds = append(mds, md)
			specMount.Source = path.Join(uvmPath, mount.Destination)
		}
		specMounts = append(specMounts, specMount)
	}
	configuration.MappedDirectories = mds

	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
	if err != nil {
		return err
	}

	spec.Mounts = specMounts

	// Construct a container object for calling start on it.
	ctr := &container{
		id:           id,
		execs:        make(map[string]*process),
		isWindows:    false,
		ociSpec:      spec,
		hcsContainer: hcsContainer,
		status:       StatusCreated,
		waitCh:       make(chan struct{}),
	}

	// Start the container. If this is a servicing container, this call
	// will block until the container is done with the servicing
	// execution.
	logger.Debug("starting container")
	if err = hcsContainer.Start(); err != nil {
		c.logger.WithError(err).Error("failed to start container")
		ctr.debugGCS()
		if err := c.terminateContainer(ctr); err != nil {
			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
		} else {
			c.logger.Debug("cleaned up after failed Start by calling Terminate")
		}
		return err
	}
	ctr.debugGCS()

	c.Lock()
	c.containers[id] = ctr
	c.Unlock()

	c.eventQ.append(id, func() {
		ei := EventInfo{
			ContainerID: id,
		}
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     EventCreate,
		}).Info("sending event")
		err := c.backend.ProcessEvent(id, EventCreate, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": id,
				"event":     EventCreate,
			}).Error("failed to process event")
		}
	})

	logger.Debug("createLinux() completed successfully")
	return nil
}
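
// parseUVMPathExample is an illustrative sketch, not part of the original
// file: it isolates the bind-mount option handling used by createLinux
// above, returning the uvmpath value and whether the mount is read-only for
// options such as []string{"rbind", "ro", "uvmpath=/tmp/gcs/<ID>/binds"}.
func parseUVMPathExample(options []string) (uvmPath string, readonly bool, err error) {
	for _, opt := range options {
		elements := strings.SplitN(opt, "=", 2)
		switch elements[0] {
		case "uvmpath":
			uvmPath = elements[1]
		case "ro":
			readonly = true
		case "rw", "rbind":
			// accepted; no effect on the mapping
		default:
			return "", false, fmt.Errorf("unsupported option %q", opt)
		}
	}
	if uvmPath == "" {
		return "", false, fmt.Errorf("no uvmpath supplied")
	}
	return uvmPath, readonly, nil
}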

func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) {
	ctr := c.getContainer(id)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(newNotFoundError("no such container"))
	case ctr.init != nil:
		return -1, errors.WithStack(newConflictError("container already started"))
	}

	logger := c.logger.WithField("container", id)

	// Note we always tell HCS to create stdout as it's required
	// regardless of '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if we're not using -t.
	var (
		emulateConsole   bool
		createStdErrPipe bool
	)
	if ctr.ociSpec.Process != nil {
		emulateConsole = ctr.ociSpec.Process.Terminal
		createStdErrPipe = !ctr.ociSpec.Process.Terminal && !ctr.ociSpec.Windows.Servicing
	}

	createProcessParms := &hcsshim.ProcessConfig{
		EmulateConsole:   emulateConsole,
		WorkingDirectory: ctr.ociSpec.Process.Cwd,
		CreateStdInPipe:  !ctr.ociSpec.Windows.Servicing,
		CreateStdOutPipe: !ctr.ociSpec.Windows.Servicing,
		CreateStdErrPipe: createStdErrPipe,
	}

	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
	if ctr.isWindows {
		createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ")
	} else {
		createProcessParms.CommandArgs = ctr.ociSpec.Process.Args
	}
	createProcessParms.User = ctr.ociSpec.Process.User.Username

	// LCOW requires the raw OCI spec passed through HCS and onwards to
	// GCS for the utility VM.
	if !ctr.isWindows {
		ociBuf, err := json.Marshal(ctr.ociSpec)
		if err != nil {
			return -1, err
		}
		ociRaw := json.RawMessage(ociBuf)
		createProcessParms.OCISpecification = &ociRaw
	}

	ctr.Lock()
	defer ctr.Unlock()

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
	if err != nil {
		logger.WithError(err).Error("CreateProcess() failed")
		return -1, err
	}
	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	p := &process{
		hcsProcess: newProcess,
		id:         InitProcessName,
		pid:        newProcess.Pid(),
	}
	logger.WithField("pid", p.pid).Debug("init process started")

	// If this is a servicing container, wait on the process synchronously here
	// and, if it succeeds, wait for it to cleanly shut down and merge into the
	// parent container.
	if ctr.ociSpec.Windows.Servicing {
		// reapProcess takes the lock
		ctr.Unlock()
		defer ctr.Lock()
		exitCode := c.reapProcess(ctr, p)
		if exitCode != 0 {
			return -1, errors.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.id, exitCode)
		}
		return p.pid, nil
	}

	dio, err := newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return -1, err
	}
	_, err = attachStdio(dio)
	if err != nil {
		logger.WithError(err).Error("failed to attach stdio")
		return -1, err
	}
	ctr.status = StatusRunning
	ctr.init = p

	// Spin up a goroutine waiting for exit to handle cleanup
	go c.reapProcess(ctr, p)

	// Generate the associated event
	c.eventQ.append(id, func() {
		ei := EventInfo{
			ContainerID: id,
			ProcessID:   InitProcessName,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      EventStart,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ei.ContainerID, EventStart, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  id,
				"event":      EventStart,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
	logger.Debug("start() completed")
	return p.pid, nil
}
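
// Illustrative usage sketch, not part of the original file. A hypothetical
// caller starts the init process once per container, passing the engine's
// stdio callback, and receives the pid of the init process:
//
//	pid, err := c.Start(ctx, id, "", false, attachStdio)
//	if err != nil {
//		// the deferred Kill/Wait/Close in Start has already cleaned up
//	}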

func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
	stdin, stdout, stderr, err := newProcess.Stdio()
	if err != nil {
		return nil, err
	}

	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		dio.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		dio.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}
	return dio, nil
}
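
// An illustrative note, not part of the original file: wrapping the HCS
// stdout/stderr ReadClosers in autoClosingReader (defined elsewhere in this
// package) and then in ioutil.NopCloser means downstream consumers see plain
// io.Readers; the underlying pipe is closed by the wrapper when it drains,
// never by the consumer. A minimal sketch of the shape, with getPipe being a
// hypothetical source:
//
//	var r io.ReadCloser = getPipe()
//	rdr := ioutil.NopCloser(&autoClosingReader{ReadCloser: r})
//	_, _ = io.Copy(ioutil.Discard, rdr) // Close happens inside the wrapper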

// Exec adds a process in a running container
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return -1, errors.WithStack(newNotFoundError("no such container"))
	case ctr.hcsContainer == nil:
		return -1, errors.WithStack(newInvalidParameterError("container is not running"))
	case ctr.execs != nil && ctr.execs[processID] != nil:
		return -1, errors.WithStack(newConflictError("id already in use"))
	}

	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"exec":      processID,
	})

	// Note we always tell HCS to create stdout as it's required regardless
	// of '-i' or '-t' options, so that docker can always grab the output
	// through logs. We also tell HCS to always create stdin, even if it's
	// not used - it will be closed shortly. Stderr is only created if we're
	// not using -t.
	createProcessParms := hcsshim.ProcessConfig{
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !spec.Terminal,
	}
	if spec.Terminal {
		createProcessParms.EmulateConsole = true
		if spec.ConsoleSize != nil {
			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
		}
	}

	// Take the working directory from the process if it is defined,
	// otherwise take it from the first process.
	if spec.Cwd != "" {
		createProcessParms.WorkingDirectory = spec.Cwd
	} else {
		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
	if ctr.isWindows {
		createProcessParms.CommandLine = strings.Join(spec.Args, " ")
	} else {
		createProcessParms.CommandArgs = spec.Args
	}
	createProcessParms.User = spec.User.Username

	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	newProcess, err := ctr.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logger.WithError(err).Errorf("exec's CreateProcess() failed")
		return -1, err
	}
	pid := newProcess.Pid()
	defer func() {
		if err != nil {
			if err := newProcess.Kill(); err != nil {
				logger.WithError(err).Error("failed to kill process")
			}
			go func() {
				if err := newProcess.Wait(); err != nil {
					logger.WithError(err).Error("failed to wait for process")
				}
				if err := newProcess.Close(); err != nil {
					logger.WithError(err).Error("failed to clean process resources")
				}
			}()
		}
	}()

	dio, err := newIOFromProcess(newProcess, spec.Terminal)
	if err != nil {
		logger.WithError(err).Error("failed to get stdio pipes")
		return -1, err
	}
	// Tell the engine to attach streams back to the client
	_, err = attachStdio(dio)
	if err != nil {
		return -1, err
	}

	p := &process{
		id:         processID,
		pid:        pid,
		hcsProcess: newProcess,
	}

	// Add the process to the container's list of processes
	ctr.Lock()
	ctr.execs[processID] = p
	ctr.Unlock()

	// Spin up a goroutine waiting for exit to handle cleanup
	go c.reapProcess(ctr, p)

	c.eventQ.append(ctr.id, func() {
		ei := EventInfo{
			ContainerID: ctr.id,
			ProcessID:   p.id,
			Pid:         uint32(p.pid),
		}
		c.logger.WithFields(logrus.Fields{
			"container":  ctr.id,
			"event":      EventExecAdded,
			"event-info": ei,
		}).Info("sending event")
		err := c.backend.ProcessEvent(ctr.id, EventExecAdded, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      EventExecAdded,
				"event-info": ei,
			}).Error("failed to process event")
		}
		err = c.backend.ProcessEvent(ctr.id, EventExecStarted, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      EventExecStarted,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})

	return pid, nil
}

// SignalProcess handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
	ctr, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	ctr.manualStopRequested = true

	logger := c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"pid":       p.pid,
		"signal":    signal,
	})
	logger.Debug("Signal()")

	if processID == InitProcessName {
		if syscall.Signal(signal) == syscall.SIGKILL {
			// Terminate the compute system
			if err := ctr.hcsContainer.Terminate(); err != nil {
				if !hcsshim.IsPending(err) {
					logger.WithError(err).Error("failed to terminate hcsshim container")
				}
			}
		} else {
			// Shut down the container
			if err := ctr.hcsContainer.Shutdown(); err != nil {
				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
					// ignore errors
					logger.WithError(err).Error("failed to shutdown hcsshim container")
				}
			}
		}
		return nil
	}
	return p.hcsProcess.Kill()
}
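
// signalSketch is an illustrative sketch, not part of the original file, of
// the decision SignalProcess makes: for the init process, SIGKILL maps to
// terminating the compute system and everything else to a clean shutdown
// request; non-init processes are simply killed.
func signalSketch(ctr *container, p *process, signal int) error {
	if p.id != InitProcessName {
		return p.hcsProcess.Kill()
	}
	if syscall.Signal(signal) == syscall.SIGKILL {
		return ctr.hcsContainer.Terminate()
	}
	return ctr.hcsContainer.Shutdown()
}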

// ResizeTerminal handles a CLI event to resize an interactive docker run or
// docker exec window.
func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	c.logger.WithFields(logrus.Fields{
		"container": containerID,
		"process":   processID,
		"height":    height,
		"width":     width,
		"pid":       p.pid,
	}).Debug("resizing")
	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
}

func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
	_, p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	return p.hcsProcess.CloseStdin()
}

// Pause handles pause requests for containers
func (c *client) Pause(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return err
	}

	if ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot pause Windows Server Containers")
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Pause(); err != nil {
		return err
	}
	ctr.status = StatusPaused

	c.eventQ.append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, EventPaused, EventInfo{
			ContainerID: containerID,
			ProcessID:   InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     EventPaused,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     EventPaused,
			}).Error("failed to process event")
		}
	})
	return nil
}

// Resume handles resume requests for containers
func (c *client) Resume(_ context.Context, containerID string) error {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return err
	}

	if ctr.ociSpec.Windows.HyperV == nil {
		return errors.New("cannot resume Windows Server Containers")
	}

	ctr.Lock()
	defer ctr.Unlock()

	if err = ctr.hcsContainer.Resume(); err != nil {
		return err
	}
	ctr.status = StatusRunning

	c.eventQ.append(containerID, func() {
		err := c.backend.ProcessEvent(containerID, EventResumed, EventInfo{
			ContainerID: containerID,
			ProcessID:   InitProcessName,
		})
		c.logger.WithFields(logrus.Fields{
			"container": ctr.id,
			"event":     EventResumed,
		}).Info("sending event")
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"event":     EventResumed,
			}).Error("failed to process event")
		}
	})
	return nil
}

// Stats handles stats requests for containers
func (c *client) Stats(_ context.Context, containerID string) (*Stats, error) {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return nil, err
	}

	readAt := time.Now()
	s, err := ctr.hcsContainer.Statistics()
	if err != nil {
		return nil, err
	}
	return &Stats{
		Read:     readAt,
		HCSStats: &s,
	}, nil
}

// Restore is the handler for restoring a container
func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (bool, int, error) {
	c.logger.WithField("container", id).Debug("restore()")

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shoot it regardless of whether HCS
	// knows about it, and explicitly just log a warning if the terminate
	// fails. Then we tell the backend the container exited.
	if hc, err := hcsshim.OpenContainer(id); err == nil {
		const terminateTimeout = time.Minute * 2
		err := hc.Terminate()

		if hcsshim.IsPending(err) {
			err = hc.WaitTimeout(terminateTimeout)
		} else if hcsshim.IsAlreadyStopped(err) {
			err = nil
		}

		if err != nil {
			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
			return false, -1, err
		}
	}
	return false, -1, nil
}

// ListPids returns a list of process IDs running in a container.
// Not used on Windows.
func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
	return nil, errors.New("not implemented on Windows")
}

// Summary returns a summary of the processes running in a container.
// This is present on Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (c *client) Summary(_ context.Context, containerID string) ([]Summary, error) {
	ctr, _, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return nil, err
	}

	p, err := ctr.hcsContainer.ProcessList()
	if err != nil {
		return nil, err
	}

	pl := make([]Summary, len(p))
	for i := range p {
		pl[i] = Summary(p[i])
	}
	return pl, nil
}

func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
	ec := -1
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return uint32(ec), time.Now(), errors.WithStack(newNotFoundError("no such container"))
	}

	// This select never blocks: a closed waitCh means the container has
	// exited, a done context means the caller gave up, and otherwise the
	// default case reports that the container is still running.
	select {
	case <-ctx.Done():
		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
	case <-ctr.waitCh:
	default:
		return uint32(ec), time.Now(), errors.New("container is not stopped")
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.exitCode, ctr.exitedAt, nil
}

func (c *client) Delete(_ context.Context, containerID string) error {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return errors.WithStack(newNotFoundError("no such container"))
	}

	ctr.Lock()
	defer ctr.Unlock()

	switch ctr.status {
	case StatusCreated:
		if err := c.shutdownContainer(ctr); err != nil {
			return err
		}
		fallthrough
	case StatusStopped:
		delete(c.containers, containerID)
		return nil
	}

	return errors.WithStack(newInvalidParameterError("container is not stopped"))
}

func (c *client) Status(ctx context.Context, containerID string) (Status, error) {
	c.Lock()
	defer c.Unlock()
	ctr := c.containers[containerID]
	if ctr == nil {
		return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
	}

	ctr.Lock()
	defer ctr.Unlock()
	return ctr.status, nil
}

func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error {
	// Updating resources isn't supported on Windows, but we return nil so
	// that the container update path does not fail outright.
	return nil
}

func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (c *client) getContainer(id string) *container {
	c.Lock()
	ctr := c.containers[id]
	c.Unlock()

	return ctr
}

func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
	ctr := c.getContainer(containerID)
	switch {
	case ctr == nil:
		return nil, nil, errors.WithStack(newNotFoundError("no such container"))
	case ctr.init == nil:
		return nil, nil, errors.WithStack(newNotFoundError("container is not running"))
	case processID == InitProcessName:
		return ctr, ctr.init, nil
	default:
		ctr.Lock()
		defer ctr.Unlock()
		if ctr.execs == nil {
			return nil, nil, errors.WithStack(newNotFoundError("no execs"))
		}
	}

	p := ctr.execs[processID]
	if p == nil {
		return nil, nil, errors.WithStack(newNotFoundError("no such exec"))
	}
	return ctr, p, nil
}

func (c *client) shutdownContainer(ctr *container) error {
	const shutdownTimeout = time.Minute * 5
	err := ctr.hcsContainer.Shutdown()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(shutdownTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to shutdown container, terminating it")
		return c.terminateContainer(ctr)
	}

	return nil
}

func (c *client) terminateContainer(ctr *container) error {
	const terminateTimeout = time.Minute * 5
	err := ctr.hcsContainer.Terminate()

	if hcsshim.IsPending(err) {
		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
	} else if hcsshim.IsAlreadyStopped(err) {
		err = nil
	}

	if err != nil {
		c.logger.WithError(err).WithField("container", ctr.id).
			Debug("failed to terminate container")
		return err
	}

	return nil
}
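
// waitOnPendingSketch is an illustrative sketch, not part of the original
// file, of the pattern shared by shutdownContainer and terminateContainer
// above: an HCS Shutdown/Terminate call may return a "pending" error, in
// which case we wait (with a timeout) for it to complete, while "already
// stopped" counts as success.
func waitOnPendingSketch(ctr *container, err error, timeout time.Duration) error {
	if hcsshim.IsPending(err) {
		return ctr.hcsContainer.WaitTimeout(timeout)
	}
	if hcsshim.IsAlreadyStopped(err) {
		return nil
	}
	return err
}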

func (c *client) reapProcess(ctr *container, p *process) int {
	logger := c.logger.WithFields(logrus.Fields{
		"container": ctr.id,
		"process":   p.id,
	})

	// Block indefinitely for the process to exit.
	if err := p.hcsProcess.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
		}
		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}
	exitedAt := time.Now()

	exitCode, err := p.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
			logger.WithError(err).Warnf("unable to get exit code for process")
		}
		// Since we got an error retrieving the exit code, make sure that the
		// code we return doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we attempt to
		// continue the shutdown in HCS and tell the docker engine that the
		// process/container has exited to avoid a container being dropped on
		// the floor.
	}

	if err := p.hcsProcess.Close(); err != nil {
		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
	}

	var pendingUpdates bool
	if p.id == InitProcessName {
		// Update container status
		ctr.Lock()
		ctr.status = StatusStopped
		ctr.exitedAt = exitedAt
		ctr.exitCode = uint32(exitCode)
		close(ctr.waitCh)
		ctr.Unlock()

		// Handle any servicing
		if exitCode == 0 && ctr.isWindows && !ctr.ociSpec.Windows.Servicing {
			pendingUpdates, err = ctr.hcsContainer.HasPendingUpdates()
			logger.Infof("Pending updates: %v", pendingUpdates)
			if err != nil {
				logger.WithError(err).
					Warnf("failed to check for pending updates (container may have been killed)")
			}
		}

		if err := c.shutdownContainer(ctr); err != nil {
			logger.WithError(err).Warn("failed to shutdown container")
		} else {
			logger.Debug("completed container shutdown")
		}

		if err := ctr.hcsContainer.Close(); err != nil {
			logger.WithError(err).Error("failed to clean hcs container resources")
		}
	}

	if !(ctr.isWindows && ctr.ociSpec.Windows.Servicing) {
		c.eventQ.append(ctr.id, func() {
			ei := EventInfo{
				ContainerID:   ctr.id,
				ProcessID:     p.id,
				Pid:           uint32(p.pid),
				ExitCode:      uint32(exitCode),
				ExitedAt:      exitedAt,
				UpdatePending: pendingUpdates,
			}
			c.logger.WithFields(logrus.Fields{
				"container":  ctr.id,
				"event":      EventExit,
				"event-info": ei,
			}).Info("sending event")
			err := c.backend.ProcessEvent(ctr.id, EventExit, ei)
			if err != nil {
				c.logger.WithError(err).WithFields(logrus.Fields{
					"container":  ctr.id,
					"event":      EventExit,
					"event-info": ei,
				}).Error("failed to process event")
			}
			if p.id != InitProcessName {
				ctr.Lock()
				delete(ctr.execs, p.id)
				ctr.Unlock()
			}
		})
	}

	return exitCode
}