client_windows.go

package libcontainerd

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"syscall"
	"time"

	"golang.org/x/net/context"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/sysinfo"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)
type client struct {
	clientCommon

	// Platform specific properties below here (none presently on Windows)
}
// Win32 error codes that are used for various workarounds.
// These really should be ALL_CAPS to match golang's syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)
// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"
// Create is the entrypoint to create a container from a spec and, if successfully
// created, start it too. The table below shows the fields required for the HCS JSON
// calling parameters; fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
// {
//	"SystemType": "Container",
//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Owner": "docker",
//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//	"IgnoreFlushesDuringBoot": true,
//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "5e0055c814a6",
//	"MappedDirectories": [],
//	"HvPartition": false,
//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	"Servicing": false
// }
//
// Isolation=Hyper-V example:
//
// {
//	"SystemType": "Container",
//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//	"Owner": "docker",
//	"IgnoreFlushesDuringBoot": true,
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "475c2c58933b",
//	"MappedDirectories": [],
//	"HvPartition": true,
//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//	"DNSSearchList": "a.com,b.com,c.com",
//	"HvRuntime": {
//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//	},
//	"Servicing": false
// }
func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	logrus.Debugln("libcontainerd: client.Create() with spec", spec)

	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    containerID,
		Owner:                   defaultOwner,
		IgnoreFlushesDuringBoot: false,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Maximum != nil {
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}
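
	// Process the CreateOptions. These carry Windows-specific settings such as
	// servicing mode, flush behaviour during boot, Hyper-V isolation, layer
	// paths, network endpoints and credentials.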
	var layerOpt *LayerOption
	for _, option := range options {
		if s, ok := option.(*ServicingOption); ok {
			configuration.Servicing = s.IsServicing
			continue
		}
		if f, ok := option.(*FlushOption); ok {
			configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot
			continue
		}
		if h, ok := option.(*HyperVIsolationOption); ok {
			configuration.HvPartition = h.IsHyperV
			continue
		}
		if l, ok := option.(*LayerOption); ok {
			layerOpt = l
		}
		if n, ok := option.(*NetworkEndpointsOption); ok {
			configuration.EndpointList = n.Endpoints
			configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
			if n.DNSSearchList != nil {
				configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",")
			}
			configuration.NetworkSharedContainerName = n.NetworkSharedContainerID
			continue
		}
		if c, ok := option.(*CredentialsOption); ok {
			configuration.Credentials = c.Credentials
			continue
		}
	}
	// We must have a layer option with at least one path
	if layerOpt == nil || layerOpt.LayerPaths == nil {
		return fmt.Errorf("no layer option or paths were supplied to the runtime")
	}

	if configuration.HvPartition {
		// Find the upper-most utility VM image, since the utility VM does not
		// use layering in RS1.
		// TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable.
		var uvmImagePath string
		for _, path := range layerOpt.LayerPaths {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
	} else {
		configuration.VolumePath = spec.Root.Path
	}
	configuration.LayerFolderPath = layerOpt.LayerFolderPath
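
	// Convert the layer folder paths into the hcsshim layer structures,
	// deriving each layer's GUID from its folder name via hcsshim.NameToGuid.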
	for _, layerPath := range layerOpt.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}
	// Add the mounts (volumes, bind mounts etc) to the structure
	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
	for i, mount := range spec.Mounts {
		mds[i] = hcsshim.MappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      false,
		}
		for _, o := range mount.Options {
			if strings.ToLower(o) == "ro" {
				mds[i].ReadOnly = true
			}
		}
	}
	configuration.MappedDirectories = mds
	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	container := &container{
		containerCommon: containerCommon{
			process: process{
				processCommon: processCommon{
					containerID:  containerID,
					client:       clnt,
					friendlyName: InitFriendlyName,
				},
			},
			processes: make(map[string]*process),
		},
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	container.options = options
	for _, option := range options {
		if err := option.Apply(container); err != nil {
			logrus.Errorf("libcontainerd: %v", err)
		}
	}

	// Call start, and if it fails, delete the container from our
	// internal structure; start will keep HCS in sync by deleting the
	// container there.
	logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
	if err := container.start(attachStdio); err != nil {
		clnt.deleteContainer(containerID)
		return err
	}

	logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
	return nil
}
// AddProcess is the handler for adding a process to an already running
// container. It's called through docker exec. It returns the system pid of the
// exec'd process.
func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return -1, err
	}

	// Note we always tell HCS to create stdout as it's required regardless
	// of '-i' or '-t' options, so that docker can always grab the output
	// through logs. We also tell HCS to always create stdin, even if it's
	// not used - it will be closed shortly. Stderr is only created if we're
	// not using -t.
	createProcessParms := hcsshim.ProcessConfig{
		EmulateConsole:   procToAdd.Terminal,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !procToAdd.Terminal,
	}
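	// ConsoleSize is ordered {height, width} in the HCS process configuration.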
	createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height)
	createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width)

	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if procToAdd.Cwd != "" {
		createProcessParms.WorkingDirectory = procToAdd.Cwd
	} else {
		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
	createProcessParms.User = procToAdd.User.Username

	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var stdout, stderr io.ReadCloser
	var stdin io.WriteCloser
	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
		return -1, err
	}

	pid := newProcess.Pid()

	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
		return -1, err
	}

	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}

	proc := &process{
		processCommon: processCommon{
			containerID:  containerID,
			friendlyName: processFriendlyName,
			client:       clnt,
			systemPid:    uint32(pid),
		},
		hcsProcess: newProcess,
	}

	// Add the process to the container's list of processes
	container.processes[processFriendlyName] = proc

	// Tell the engine to attach streams back to the client
	if err := attachStdio(*iopipe); err != nil {
		return -1, err
	}

	// Spin up a go routine waiting for exit to handle cleanup
	go container.waitExit(proc, false)

	return pid, nil
}
// Signal handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
func (clnt *client) Signal(containerID string, sig int) error {
	var (
		cont *container
		err  error
	)

	// Get the container as we need it to get the container handle.
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	if cont, err = clnt.getContainer(containerID); err != nil {
		return err
	}
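
	// Record that this stop was explicitly requested, so the exit handling
	// path can distinguish it from an unexpected exit.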
	cont.manualStopRequested = true

	logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)

	if syscall.Signal(sig) == syscall.SIGKILL {
		// Terminate the compute system
		if err := cont.hcsContainer.Terminate(); err != nil {
			if !hcsshim.IsPending(err) {
				logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
			}
		}
	} else {
		// Shut down the container
		if err := cont.hcsContainer.Shutdown(); err != nil {
			if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
				// ignore errors
				logrus.Warnf("libcontainerd: failed to shutdown container %s: %q", containerID, err)
			}
		}
	}

	return nil
}
// While Linux has support for the full range of signals, signals aren't really implemented on Windows.
// We try to terminate the specified process whatever signal is requested.
func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			return p.hcsProcess.Kill()
		}
	}

	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
}
// Resize handles a CLI event to resize an interactive docker run or docker exec
// window.
func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	h, w := uint16(height), uint16(width)

	if processFriendlyName == InitFriendlyName {
		logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid)
		return cont.process.hcsProcess.ResizeConsole(w, h)
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid)
			return p.hcsProcess.ResizeConsole(w, h)
		}
	}

	return fmt.Errorf("Resize could not find containerID %s to resize", containerID)
}
// Pause handles pause requests for containers
func (clnt *client) Pause(containerID string) error {
	unlockContainer := true
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer func() {
		if unlockContainer {
			clnt.unlock(containerID)
		}
	}()
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}
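
	// Pause is only supported for Hyper-V isolated containers;
	// Windows Server Containers cannot be paused.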
	for _, option := range container.options {
		if h, ok := option.(*HyperVIsolationOption); ok {
			if !h.IsHyperV {
				return errors.New("cannot pause Windows Server Containers")
			}
			break
		}
	}

	err = container.hcsContainer.Pause()
	if err != nil {
		return err
	}

	// Unlock container before calling back into the daemon
	unlockContainer = false
	clnt.unlock(containerID)

	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State: StatePause,
		}})
}
// Resume handles resume requests for containers
func (clnt *client) Resume(containerID string) error {
	unlockContainer := true
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer func() {
		if unlockContainer {
			clnt.unlock(containerID)
		}
	}()
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	// This should never happen, since Windows Server Containers cannot be paused
	for _, option := range container.options {
		if h, ok := option.(*HyperVIsolationOption); ok {
			if !h.IsHyperV {
				return errors.New("cannot resume Windows Server Containers")
			}
			break
		}
	}

	err = container.hcsContainer.Resume()
	if err != nil {
		return err
	}

	// Unlock container before calling back into the daemon
	unlockContainer = false
	clnt.unlock(containerID)

	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State: StateResume,
		}})
}
// Stats handles stats requests for containers
func (clnt *client) Stats(containerID string) (*Stats, error) {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}
	s, err := container.hcsContainer.Statistics()
	if err != nil {
		return nil, err
	}
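	// Stats is a type conversion of the hcsshim statistics structure,
	// so a direct conversion is all that is needed.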
	st := Stats(s)
	return &st, nil
}
// Restore is the handler for restoring a container
func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
	logrus.Debugf("libcontainerd: Restore(%s)", containerID)

	// TODO Windows: On RS1, a re-attach isn't possible.
	// However, there is a scenario in which there is an issue.
	// Consider a background container. The daemon dies unexpectedly.
	// HCS will still have the compute service alive and running.
	// For consistency, we call in to shoot it, regardless of whether HCS
	// knows about it. We explicitly just log a warning if the terminate fails.
	// Then we tell the backend the container exited.
	if hc, err := hcsshim.OpenContainer(containerID); err == nil {
		const terminateTimeout = time.Minute * 2
		err := hc.Terminate()

		if hcsshim.IsPending(err) {
			err = hc.WaitTimeout(terminateTimeout)
		} else if hcsshim.IsAlreadyStopped(err) {
			err = nil
		}

		if err != nil {
			logrus.Warnf("libcontainerd: failed to terminate %s on restore - %q", containerID, err)
			return err
		}
	}
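
	// Tell the backend the container has exited. The real exit code is not
	// known after a daemon restart, so a sentinel value (1 << 31) is reported.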
	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State:    StateExit,
			ExitCode: 1 << 31,
		}})
}
// GetPidsForContainer returns a list of process IDs running in a container.
// Not used on Windows.
func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
	return nil, errors.New("not implemented on Windows")
}
// Summary returns a summary of the processes running in a container.
// This is present in Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (clnt *client) Summary(containerID string) ([]Summary, error) {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}
	p, err := container.hcsContainer.ProcessList()
	if err != nil {
		return nil, err
	}
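	// Summary is a type conversion of the hcsshim process list entry,
	// so each entry converts directly.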
	pl := make([]Summary, len(p))
	for i := range p {
		pl[i] = Summary(p[i])
	}
	return pl, nil
}
// UpdateResources updates resources for a running container.
func (clnt *client) UpdateResources(containerID string, resources Resources) error {
	// Updating resources isn't supported on Windows, but we return nil so
	// that the container update path does not fail.
	return nil
}
func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
	return nil, errors.New("Windows: Containers do not support checkpoints")
}
func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
	return &ServerVersion{}, nil
}