client_windows.go

package libcontainerd

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"syscall"
	"time"

	"golang.org/x/net/context"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/sysinfo"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

type client struct {
	clientCommon

	// Platform specific properties below here (none presently on Windows)
}

// Win32 error codes that are used for various workarounds
// These really should be ALL_CAPS to match Go's syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"

// Create is the entrypoint to create a container from a spec, and if successfully
// created, start it too. The table below shows the fields required for the HCS
// JSON calling parameters; fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | SandboxPath     |                                            | %root%\windowsfilter                              |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
// {
//	"SystemType": "Container",
//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Owner": "docker",
//	"IsDummy": false,
//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//	"IgnoreFlushesDuringBoot": true,
//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "5e0055c814a6",
//	"MappedDirectories": [],
//	"HvPartition": false,
//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	"Servicing": false
// }
//
// Isolation=Hyper-V example:
//
// {
//	"SystemType": "Container",
//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//	"Owner": "docker",
//	"IsDummy": false,
//	"IgnoreFlushesDuringBoot": true,
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "475c2c58933b",
//	"MappedDirectories": [],
//	"SandboxPath": "C:\\\\control\\\\windowsfilter",
//	"HvPartition": true,
//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//	"DNSSearchList": "a.com,b.com,c.com",
//	"HvRuntime": {
//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//	},
//	"Servicing": false
// }
func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	logrus.Debugln("libcontainerd: client.Create() with spec", spec)

	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    containerID,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: false,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Percent != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
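				// The OCI spec expresses the memory limit in bytes; the HCS field
				// below takes megabytes, hence the conversion.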
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	var layerOpt *LayerOption
	for _, option := range options {
		if s, ok := option.(*ServicingOption); ok {
			configuration.Servicing = s.IsServicing
			continue
		}
		if f, ok := option.(*FlushOption); ok {
			configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot
			continue
		}
		if h, ok := option.(*HyperVIsolationOption); ok {
			configuration.HvPartition = h.IsHyperV
			configuration.SandboxPath = h.SandboxPath
			continue
		}
		if l, ok := option.(*LayerOption); ok {
			layerOpt = l
		}
		if n, ok := option.(*NetworkEndpointsOption); ok {
			configuration.EndpointList = n.Endpoints
			configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
			if n.DNSSearchList != nil {
				configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",")
			}
			configuration.NetworkSharedContainerName = n.NetworkSharedContainerID
			continue
		}
		if c, ok := option.(*CredentialsOption); ok {
			configuration.Credentials = c.Credentials
			continue
		}
	}

	// We must have a layer option with at least one path
	if layerOpt == nil || layerOpt.LayerPaths == nil {
		return fmt.Errorf("no layer option or paths were supplied to the runtime")
	}

	if configuration.HvPartition {
		// Find the upper-most utility VM image, since the utility VM does not
		// use layering in RS1.
		// TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable.
		var uvmImagePath string
		for _, path := range layerOpt.LayerPaths {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
	} else {
		configuration.VolumePath = spec.Root.Path
	}

	configuration.LayerFolderPath = layerOpt.LayerFolderPath
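
	// Convert each layer path into the ID/Path pair HCS expects (the Layers[] row
	// in the table above). The ID is a GUID derived from the layer's directory
	// name via hcsshim.NameToGuid.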
	for _, layerPath := range layerOpt.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
	for i, mount := range spec.Mounts {
		mds[i] = hcsshim.MappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      false,
		}
		for _, o := range mount.Options {
			if strings.ToLower(o) == "ro" {
				mds[i].ReadOnly = true
			}
		}
	}
	configuration.MappedDirectories = mds

	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	container := &container{
		containerCommon: containerCommon{
			process: process{
				processCommon: processCommon{
					containerID:  containerID,
					client:       clnt,
					friendlyName: InitFriendlyName,
				},
			},
			processes: make(map[string]*process),
		},
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	container.options = options
	for _, option := range options {
		if err := option.Apply(container); err != nil {
			logrus.Errorf("libcontainerd: %v", err)
		}
	}

	// Call start, and if it fails, delete the container from our
	// internal structure; start will keep HCS in sync by deleting the
	// container there.
	logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
	if err := container.start(attachStdio); err != nil {
		clnt.deleteContainer(containerID)
		return err
	}

	logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
	return nil
}

// AddProcess is the handler for adding a process to an already running
// container. It's called through docker exec. It returns the system pid of the
// exec'd process.
func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return -1, err
	}

	// Note we always tell HCS to create stdout as it's required
	// regardless of the '-i' or '-t' options, so that docker can always grab
	// the output through logs. We also tell HCS to always create stdin,
	// even if it's not used - it will be closed shortly. Stderr is only
	// created if we're not running with -t.
	createProcessParms := hcsshim.ProcessConfig{
		EmulateConsole:   procToAdd.Terminal,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !procToAdd.Terminal,
	}
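	// Note: HCS takes the console size as {height, width}, in that order.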
	createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height)
	createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width)

	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if procToAdd.Cwd != "" {
		createProcessParms.WorkingDirectory = procToAdd.Cwd
	} else {
		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
	createProcessParms.User = procToAdd.User.Username

	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var stdout, stderr io.ReadCloser
	var stdin io.WriteCloser
	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
		return -1, err
	}

	pid := newProcess.Pid()

	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
		return -1, err
	}

	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}

	proc := &process{
		processCommon: processCommon{
			containerID:  containerID,
			friendlyName: processFriendlyName,
			client:       clnt,
			systemPid:    uint32(pid),
		},
		hcsProcess: newProcess,
	}

	// Add the process to the container's list of processes
	container.processes[processFriendlyName] = proc

	// Tell the engine to attach streams back to the client
	if err := attachStdio(*iopipe); err != nil {
		return -1, err
	}

	// Spin up a go routine waiting for exit to handle cleanup
	go container.waitExit(proc, false)

	return pid, nil
}

// Signal handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (clnt *client) Signal(containerID string, sig int) error {
	var (
		cont *container
		err  error
	)

	// Get the container as we need it to get the container handle.
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	if cont, err = clnt.getContainer(containerID); err != nil {
		return err
	}

	cont.manualStopRequested = true

	logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)
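
	// Only a force kill (SIGKILL) terminates the compute system outright;
	// any other signal is treated as a request for a graceful shutdown.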
	if syscall.Signal(sig) == syscall.SIGKILL {
		// Terminate the compute system
		if err := cont.hcsContainer.Terminate(); err != nil {
			if !hcsshim.IsPending(err) {
				logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
			}
		}
	} else {
		// Shut down the container
		if err := cont.hcsContainer.Shutdown(); err != nil {
			if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
				// ignore errors
				logrus.Warnf("libcontainerd: failed to shutdown container %s: %q", containerID, err)
			}
		}
	}

	return nil
}

// While Linux has support for the full range of signals, signals aren't really implemented on Windows.
// We try to terminate the specified process whatever signal is requested.
func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			return p.hcsProcess.Kill()
		}
	}

	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
}

// Resize handles a CLI event to resize an interactive docker run or docker exec
// window.
func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	h, w := uint16(height), uint16(width)

	if processFriendlyName == InitFriendlyName {
		logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid)
		return cont.process.hcsProcess.ResizeConsole(w, h)
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid)
			return p.hcsProcess.ResizeConsole(w, h)
		}
	}

	return fmt.Errorf("Resize could not find containerID %s to resize", containerID)
}

// Pause handles pause requests for containers
func (clnt *client) Pause(containerID string) error {
	unlockContainer := true
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer func() {
		if unlockContainer {
			clnt.unlock(containerID)
		}
	}()
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	for _, option := range container.options {
		if h, ok := option.(*HyperVIsolationOption); ok {
			if !h.IsHyperV {
				return errors.New("cannot pause Windows Server Containers")
			}
			break
		}
	}

	err = container.hcsContainer.Pause()
	if err != nil {
		return err
	}

	// Unlock container before calling back into the daemon
	unlockContainer = false
	clnt.unlock(containerID)

	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State: StatePause,
		}})
}

// Resume handles resume requests for containers
func (clnt *client) Resume(containerID string) error {
	unlockContainer := true
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer func() {
		if unlockContainer {
			clnt.unlock(containerID)
		}
	}()
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	// This should never happen, since Windows Server Containers cannot be paused
	for _, option := range container.options {
		if h, ok := option.(*HyperVIsolationOption); ok {
			if !h.IsHyperV {
				return errors.New("cannot resume Windows Server Containers")
			}
			break
		}
	}

	err = container.hcsContainer.Resume()
	if err != nil {
		return err
	}

	// Unlock container before calling back into the daemon
	unlockContainer = false
	clnt.unlock(containerID)

	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State: StateResume,
		}})
}

// Stats handles stats requests for containers
func (clnt *client) Stats(containerID string) (*Stats, error) {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}
	s, err := container.hcsContainer.Statistics()
	if err != nil {
		return nil, err
	}
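	// Stats shares its underlying type with hcsshim.Statistics (it is defined
	// elsewhere in this package), so a direct type conversion is sufficient.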
	st := Stats(s)
	return &st, nil
}

// Restore is the handler for restoring a container
func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
	logrus.Debugf("libcontainerd: Restore(%s)", containerID)

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shut it down if HCS still knows about it.
	// We explicitly just log a warning if the terminate fails.
	// Then we tell the backend the container exited.
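	// Note: the 1<<31 exit code reported below appears to be used as a sentinel,
	// since the container's real exit status is not known at this point.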
	if hc, err := hcsshim.OpenContainer(containerID); err == nil {
		const terminateTimeout = time.Minute * 2
		err := hc.Terminate()

		if hcsshim.IsPending(err) {
			err = hc.WaitTimeout(terminateTimeout)
		} else if hcsshim.IsAlreadyStopped(err) {
			err = nil
		}

		if err != nil {
			logrus.Warnf("libcontainerd: failed to terminate %s on restore - %q", containerID, err)
			return err
		}
	}

	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State:    StateExit,
			ExitCode: 1 << 31,
		}})
}

// GetPidsForContainer returns a list of process IDs running in a container.
// Not used on Windows.
func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
	return nil, errors.New("not implemented on Windows")
}

// Summary returns a summary of the processes running in a container.
// This is present on Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, their processes would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (clnt *client) Summary(containerID string) ([]Summary, error) {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}
	p, err := container.hcsContainer.ProcessList()
	if err != nil {
		return nil, err
	}
	pl := make([]Summary, len(p))
	for i := range p {
		pl[i] = Summary(p[i])
	}
	return pl, nil
}

// UpdateResources updates resources for a running container.
func (clnt *client) UpdateResources(containerID string, resources Resources) error {
	// Updating resources isn't supported on Windows, but we return nil
	// so that the container update path can still proceed.
	return nil
}

func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
	return nil, errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
	return &ServerVersion{}, nil
}