client_windows.go 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616
  1. package libcontainerd
  2. import (
  3. "errors"
  4. "fmt"
  5. "io"
  6. "io/ioutil"
  7. "os"
  8. "path/filepath"
  9. "strings"
  10. "syscall"
  11. "golang.org/x/net/context"
  12. "github.com/Microsoft/hcsshim"
  13. "github.com/Sirupsen/logrus"
  14. "github.com/opencontainers/runtime-spec/specs-go"
  15. )
// client is the Windows (HCS-backed) implementation of the libcontainerd
// client. All container state shared with the daemon lives in the embedded
// clientCommon.
type client struct {
	clientCommon

	// Platform specific properties below here (none presently on Windows)
}

// Win32 error codes that are used for various workarounds
// These really should be ALL_CAPS to match golangs syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker.
const defaultOwner = "docker"
  33. // Create is the entrypoint to create a container from a spec, and if successfully
  34. // created, start it too. Table below shows the fields required for HCS JSON calling parameters,
  35. // where if not populated, is omitted.
  36. // +-----------------+--------------------------------------------+---------------------------------------------------+
  37. // | | Isolation=Process | Isolation=Hyper-V |
  38. // +-----------------+--------------------------------------------+---------------------------------------------------+
  39. // | VolumePath | \\?\\Volume{GUIDa} | |
  40. // | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) |
  41. // | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID |
  42. // | SandboxPath | | %root%\windowsfilter |
  43. // | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM |
  44. // +-----------------+--------------------------------------------+---------------------------------------------------+
  45. //
  46. // Isolation=Process example:
  47. //
  48. // {
  49. // "SystemType": "Container",
  50. // "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
  51. // "Owner": "docker",
  52. // "IsDummy": false,
  53. // "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
  54. // "IgnoreFlushesDuringBoot": true,
  55. // "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
  56. // "Layers": [{
  57. // "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
  58. // "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
  59. // }],
  60. // "HostName": "5e0055c814a6",
  61. // "MappedDirectories": [],
  62. // "HvPartition": false,
  63. // "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
  64. // "Servicing": false
  65. //}
  66. //
  67. // Isolation=Hyper-V example:
  68. //
  69. //{
  70. // "SystemType": "Container",
  71. // "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
  72. // "Owner": "docker",
  73. // "IsDummy": false,
  74. // "IgnoreFlushesDuringBoot": true,
  75. // "Layers": [{
  76. // "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
  77. // "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
  78. // }],
  79. // "HostName": "475c2c58933b",
  80. // "MappedDirectories": [],
  81. // "SandboxPath": "C:\\\\control\\\\windowsfilter",
  82. // "HvPartition": true,
  83. // "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
  84. // "HvRuntime": {
  85. // "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
  86. // },
  87. // "Servicing": false
  88. //}
  89. func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
  90. clnt.lock(containerID)
  91. defer clnt.unlock(containerID)
  92. logrus.Debugln("libcontainerd: client.Create() with spec", spec)
  93. configuration := &hcsshim.ContainerConfig{
  94. SystemType: "Container",
  95. Name: containerID,
  96. Owner: defaultOwner,
  97. IgnoreFlushesDuringBoot: false,
  98. HostName: spec.Hostname,
  99. HvPartition: false,
  100. }
  101. if spec.Windows.Resources != nil {
  102. if spec.Windows.Resources.CPU != nil {
  103. if spec.Windows.Resources.CPU.Count != nil {
  104. configuration.ProcessorCount = uint32(*spec.Windows.Resources.CPU.Count)
  105. }
  106. if spec.Windows.Resources.CPU.Shares != nil {
  107. configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
  108. }
  109. if spec.Windows.Resources.CPU.Percent != nil {
  110. configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000
  111. }
  112. }
  113. if spec.Windows.Resources.Memory != nil {
  114. if spec.Windows.Resources.Memory.Limit != nil {
  115. configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
  116. }
  117. }
  118. if spec.Windows.Resources.Storage != nil {
  119. if spec.Windows.Resources.Storage.Bps != nil {
  120. configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
  121. }
  122. if spec.Windows.Resources.Storage.Iops != nil {
  123. configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
  124. }
  125. }
  126. }
  127. var layerOpt *LayerOption
  128. for _, option := range options {
  129. if s, ok := option.(*ServicingOption); ok {
  130. configuration.Servicing = s.IsServicing
  131. continue
  132. }
  133. if f, ok := option.(*FlushOption); ok {
  134. configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot
  135. continue
  136. }
  137. if h, ok := option.(*HyperVIsolationOption); ok {
  138. configuration.HvPartition = h.IsHyperV
  139. configuration.SandboxPath = h.SandboxPath
  140. continue
  141. }
  142. if l, ok := option.(*LayerOption); ok {
  143. layerOpt = l
  144. }
  145. if n, ok := option.(*NetworkEndpointsOption); ok {
  146. configuration.EndpointList = n.Endpoints
  147. configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
  148. continue
  149. }
  150. if c, ok := option.(*CredentialsOption); ok {
  151. configuration.Credentials = c.Credentials
  152. continue
  153. }
  154. }
  155. // We must have a layer option with at least one path
  156. if layerOpt == nil || layerOpt.LayerPaths == nil {
  157. return fmt.Errorf("no layer option or paths were supplied to the runtime")
  158. }
  159. if configuration.HvPartition {
  160. // Find the upper-most utility VM image, since the utility VM does not
  161. // use layering in RS1.
  162. // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable.
  163. var uvmImagePath string
  164. for _, path := range layerOpt.LayerPaths {
  165. fullPath := filepath.Join(path, "UtilityVM")
  166. _, err := os.Stat(fullPath)
  167. if err == nil {
  168. uvmImagePath = fullPath
  169. break
  170. }
  171. if !os.IsNotExist(err) {
  172. return err
  173. }
  174. }
  175. if uvmImagePath == "" {
  176. return errors.New("utility VM image could not be found")
  177. }
  178. configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
  179. } else {
  180. configuration.VolumePath = spec.Root.Path
  181. }
  182. configuration.LayerFolderPath = layerOpt.LayerFolderPath
  183. for _, layerPath := range layerOpt.LayerPaths {
  184. _, filename := filepath.Split(layerPath)
  185. g, err := hcsshim.NameToGuid(filename)
  186. if err != nil {
  187. return err
  188. }
  189. configuration.Layers = append(configuration.Layers, hcsshim.Layer{
  190. ID: g.ToString(),
  191. Path: layerPath,
  192. })
  193. }
  194. // Add the mounts (volumes, bind mounts etc) to the structure
  195. mds := make([]hcsshim.MappedDir, len(spec.Mounts))
  196. for i, mount := range spec.Mounts {
  197. mds[i] = hcsshim.MappedDir{
  198. HostPath: mount.Source,
  199. ContainerPath: mount.Destination,
  200. ReadOnly: false,
  201. }
  202. for _, o := range mount.Options {
  203. if strings.ToLower(o) == "ro" {
  204. mds[i].ReadOnly = true
  205. }
  206. }
  207. }
  208. configuration.MappedDirectories = mds
  209. hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
  210. if err != nil {
  211. return err
  212. }
  213. // Construct a container object for calling start on it.
  214. container := &container{
  215. containerCommon: containerCommon{
  216. process: process{
  217. processCommon: processCommon{
  218. containerID: containerID,
  219. client: clnt,
  220. friendlyName: InitFriendlyName,
  221. },
  222. commandLine: strings.Join(spec.Process.Args, " "),
  223. },
  224. processes: make(map[string]*process),
  225. },
  226. ociSpec: spec,
  227. hcsContainer: hcsContainer,
  228. }
  229. container.options = options
  230. for _, option := range options {
  231. if err := option.Apply(container); err != nil {
  232. logrus.Errorf("libcontainerd: %v", err)
  233. }
  234. }
  235. // Call start, and if it fails, delete the container from our
  236. // internal structure, start will keep HCS in sync by deleting the
  237. // container there.
  238. logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
  239. if err := container.start(attachStdio); err != nil {
  240. clnt.deleteContainer(containerID)
  241. return err
  242. }
  243. logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
  244. return nil
  245. }
// AddProcess is the handler for adding a process to an already running
// container. It's called through docker exec. It returns the system pid of the
// exec'd process.
func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return -1, err
	}
	// Note we always tell HCS to
	// create stdout as it's required regardless of '-i' or '-t' options, so that
	// docker can always grab the output through logs. We also tell HCS to always
	// create stdin, even if it's not used - it will be closed shortly. Stderr
	// is only created if it we're not -t.
	createProcessParms := hcsshim.ProcessConfig{
		EmulateConsole:   procToAdd.Terminal,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !procToAdd.Terminal,
	}
	// ConsoleSize is ordered [height, width] per the assignments below.
	createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height)
	createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width)
	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if procToAdd.Cwd != "" {
		createProcessParms.WorkingDirectory = procToAdd.Cwd
	} else {
		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
	}
	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
	// HCS takes a single command line string rather than an argv vector.
	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)
	// Start the command running in the container.
	var stdout, stderr io.ReadCloser
	var stdin io.WriteCloser
	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
		return -1, err
	}
	pid := newProcess.Pid()
	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
		return -1, err
	}
	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
	// stdin is wrapped so closing it also signals the HCS process.
	iopipe.Stdin = createStdInCloser(stdin, newProcess)
	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}
	proc := &process{
		processCommon: processCommon{
			containerID:  containerID,
			friendlyName: processFriendlyName,
			client:       clnt,
			systemPid:    uint32(pid),
		},
		commandLine: createProcessParms.CommandLine,
		hcsProcess:  newProcess,
	}
	// Add the process to the container's list of processes
	container.processes[processFriendlyName] = proc
	// Tell the engine to attach streams back to the client
	if err := attachStdio(*iopipe); err != nil {
		return -1, err
	}
	// Spin up a go routine waiting for exit to handle cleanup
	go container.waitExit(proc, false)
	return pid, nil
}
  323. // Signal handles `docker stop` on Windows. While Linux has support for
  324. // the full range of signals, signals aren't really implemented on Windows.
  325. // We fake supporting regular stop and -9 to force kill.
  326. func (clnt *client) Signal(containerID string, sig int) error {
  327. var (
  328. cont *container
  329. err error
  330. )
  331. // Get the container as we need it to get the container handle.
  332. clnt.lock(containerID)
  333. defer clnt.unlock(containerID)
  334. if cont, err = clnt.getContainer(containerID); err != nil {
  335. return err
  336. }
  337. cont.manualStopRequested = true
  338. logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)
  339. if syscall.Signal(sig) == syscall.SIGKILL {
  340. // Terminate the compute system
  341. if err := cont.hcsContainer.Terminate(); err != nil {
  342. if !hcsshim.IsPending(err) {
  343. logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
  344. }
  345. }
  346. } else {
  347. // Terminate Process
  348. if err := cont.hcsProcess.Kill(); err != nil && !hcsshim.IsAlreadyStopped(err) {
  349. // ignore errors
  350. logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err)
  351. }
  352. }
  353. return nil
  354. }
  355. // While Linux has support for the full range of signals, signals aren't really implemented on Windows.
  356. // We try to terminate the specified process whatever signal is requested.
  357. func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
  358. clnt.lock(containerID)
  359. defer clnt.unlock(containerID)
  360. cont, err := clnt.getContainer(containerID)
  361. if err != nil {
  362. return err
  363. }
  364. for _, p := range cont.processes {
  365. if p.friendlyName == processFriendlyName {
  366. return p.hcsProcess.Kill()
  367. }
  368. }
  369. return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
  370. }
  371. // Resize handles a CLI event to resize an interactive docker run or docker exec
  372. // window.
  373. func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
  374. // Get the libcontainerd container object
  375. clnt.lock(containerID)
  376. defer clnt.unlock(containerID)
  377. cont, err := clnt.getContainer(containerID)
  378. if err != nil {
  379. return err
  380. }
  381. h, w := uint16(height), uint16(width)
  382. if processFriendlyName == InitFriendlyName {
  383. logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid)
  384. return cont.process.hcsProcess.ResizeConsole(w, h)
  385. }
  386. for _, p := range cont.processes {
  387. if p.friendlyName == processFriendlyName {
  388. logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid)
  389. return p.hcsProcess.ResizeConsole(w, h)
  390. }
  391. }
  392. return fmt.Errorf("Resize could not find containerID %s to resize", containerID)
  393. }
  394. // Pause handles pause requests for containers
  395. func (clnt *client) Pause(containerID string) error {
  396. unlockContainer := true
  397. // Get the libcontainerd container object
  398. clnt.lock(containerID)
  399. defer func() {
  400. if unlockContainer {
  401. clnt.unlock(containerID)
  402. }
  403. }()
  404. container, err := clnt.getContainer(containerID)
  405. if err != nil {
  406. return err
  407. }
  408. for _, option := range container.options {
  409. if h, ok := option.(*HyperVIsolationOption); ok {
  410. if !h.IsHyperV {
  411. return errors.New("cannot pause Windows Server Containers")
  412. }
  413. break
  414. }
  415. }
  416. err = container.hcsContainer.Pause()
  417. if err != nil {
  418. return err
  419. }
  420. // Unlock container before calling back into the daemon
  421. unlockContainer = false
  422. clnt.unlock(containerID)
  423. return clnt.backend.StateChanged(containerID, StateInfo{
  424. CommonStateInfo: CommonStateInfo{
  425. State: StatePause,
  426. }})
  427. }
  428. // Resume handles resume requests for containers
  429. func (clnt *client) Resume(containerID string) error {
  430. unlockContainer := true
  431. // Get the libcontainerd container object
  432. clnt.lock(containerID)
  433. defer func() {
  434. if unlockContainer {
  435. clnt.unlock(containerID)
  436. }
  437. }()
  438. container, err := clnt.getContainer(containerID)
  439. if err != nil {
  440. return err
  441. }
  442. // This should never happen, since Windows Server Containers cannot be paused
  443. for _, option := range container.options {
  444. if h, ok := option.(*HyperVIsolationOption); ok {
  445. if !h.IsHyperV {
  446. return errors.New("cannot resume Windows Server Containers")
  447. }
  448. break
  449. }
  450. }
  451. err = container.hcsContainer.Resume()
  452. if err != nil {
  453. return err
  454. }
  455. // Unlock container before calling back into the daemon
  456. unlockContainer = false
  457. clnt.unlock(containerID)
  458. return clnt.backend.StateChanged(containerID, StateInfo{
  459. CommonStateInfo: CommonStateInfo{
  460. State: StateResume,
  461. }})
  462. }
  463. // Stats handles stats requests for containers
  464. func (clnt *client) Stats(containerID string) (*Stats, error) {
  465. // Get the libcontainerd container object
  466. clnt.lock(containerID)
  467. defer clnt.unlock(containerID)
  468. container, err := clnt.getContainer(containerID)
  469. if err != nil {
  470. return nil, err
  471. }
  472. s, err := container.hcsContainer.Statistics()
  473. if err != nil {
  474. return nil, err
  475. }
  476. st := Stats(s)
  477. return &st, nil
  478. }
  479. // Restore is the handler for restoring a container
  480. func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
  481. // TODO Windows: Implement this. For now, just tell the backend the container exited.
  482. logrus.Debugf("libcontainerd: Restore(%s)", containerID)
  483. return clnt.backend.StateChanged(containerID, StateInfo{
  484. CommonStateInfo: CommonStateInfo{
  485. State: StateExit,
  486. ExitCode: 1 << 31,
  487. }})
  488. }
  489. // GetPidsForContainer returns a list of process IDs running in a container.
  490. // Although implemented, this is not used in Windows.
  491. func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
  492. var pids []int
  493. clnt.lock(containerID)
  494. defer clnt.unlock(containerID)
  495. cont, err := clnt.getContainer(containerID)
  496. if err != nil {
  497. return nil, err
  498. }
  499. // Add the first process
  500. pids = append(pids, int(cont.containerCommon.systemPid))
  501. // And add all the exec'd processes
  502. for _, p := range cont.processes {
  503. pids = append(pids, int(p.processCommon.systemPid))
  504. }
  505. return pids, nil
  506. }
  507. // Summary returns a summary of the processes running in a container.
  508. // This is present in Windows to support docker top. In linux, the
  509. // engine shells out to ps to get process information. On Windows, as
  510. // the containers could be Hyper-V containers, they would not be
  511. // visible on the container host. However, libcontainerd does have
  512. // that information.
  513. func (clnt *client) Summary(containerID string) ([]Summary, error) {
  514. // Get the libcontainerd container object
  515. clnt.lock(containerID)
  516. defer clnt.unlock(containerID)
  517. container, err := clnt.getContainer(containerID)
  518. if err != nil {
  519. return nil, err
  520. }
  521. p, err := container.hcsContainer.ProcessList()
  522. if err != nil {
  523. return nil, err
  524. }
  525. pl := make([]Summary, len(p))
  526. for i := range p {
  527. pl[i] = Summary(p[i])
  528. }
  529. return pl, nil
  530. }
// UpdateResources updates resources for a running container.
//
// NOTE(review): this is deliberately a successful no-op on Windows; per the
// comment below, returning nil keeps `docker update` usable for callers even
// though no resource change actually happens here.
func (clnt *client) UpdateResources(containerID string, resources Resources) error {
	// Updating resource isn't supported on Windows
	// but we should return nil for enabling updating container
	return nil
}
// CreateCheckpoint always fails: checkpointing is not supported for Windows
// containers, so callers receive an explanatory error.
func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

// DeleteCheckpoint always fails; checkpoints are unsupported on Windows.
func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

// ListCheckpoints always fails; checkpoints are unsupported on Windows.
func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
	return nil, errors.New("Windows: Containers do not support checkpoints")
}