Merge pull request #429 from thaJeztah/19.03_backport_windows_1903_fixes
[19.03 backport] bump hcsshim to fix docker build failing on Windows 1903
commit 6949793bb1
137 changed files with 4026 additions and 1593 deletions
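Note: this backport bumps the vendored Microsoft/hcsshim (fixing docker build on Windows 1903, per the title) together with containerd/cgroups, whose metrics types now live in the versioned stats/v1 package; the hcsshim protobufs are regenerated with a newer protoc-gen-gogo, and a round of lint-style cleanups (redundant string, []byte, and time.Duration conversions) rides along in the integration tests.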
@@ -16,7 +16,7 @@ import (
     "strings"
     "time"

-    containerd_cgroups "github.com/containerd/cgroups"
+    statsV1 "github.com/containerd/cgroups/stats/v1"
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/blkiodev"
     pblkiodev "github.com/docker/docker/api/types/blkiodev"
@@ -1349,7 +1349,7 @@ func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container
     return daemon.Unmount(container)
 }

-func copyBlkioEntry(entries []*containerd_cgroups.BlkIOEntry) []types.BlkioStatEntry {
+func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry {
     out := make([]types.BlkioStatEntry, len(entries))
     for i, re := range entries {
         out[i] = types.BlkioStatEntry{
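Note: the containerd cgroups metrics types moved out of the package root into a versioned stats/v1 package, so every containerd_cgroups.* reference becomes statsV1.*. A minimal sketch of the pattern, with the BlkIOEntry/BlkioStatEntry field names assumed from the two APIs:

    package main

    import (
        "fmt"

        statsV1 "github.com/containerd/cgroups/stats/v1"
        "github.com/docker/docker/api/types"
    )

    // copyBlkioEntry mirrors the function in the hunk above: only the
    // parameter type changes, from containerd_cgroups.BlkIOEntry to
    // statsV1.BlkIOEntry; the copied fields stay the same.
    func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry {
        out := make([]types.BlkioStatEntry, len(entries))
        for i, re := range entries {
            out[i] = types.BlkioStatEntry{
                Major: re.Major,
                Minor: re.Minor,
                Op:    re.Op,
                Value: re.Value,
            }
        }
        return out
    }

    func main() {
        entries := []*statsV1.BlkIOEntry{{Op: "Read", Major: 8, Minor: 0, Value: 1024}}
        fmt.Println(copyBlkioEntry(entries))
    }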
@@ -94,7 +94,7 @@ func (d *Daemon) CheckActiveContainerCount(c *testing.T) (interface{}, string) {
     if len(strings.TrimSpace(out)) == 0 {
         return 0, ""
     }
-    return len(strings.Split(strings.TrimSpace(out), "\n")), fmt.Sprintf("output: %q", string(out))
+    return len(strings.Split(strings.TrimSpace(out), "\n")), fmt.Sprintf("output: %q", out)
 }

 // WaitRun waits for a container to be running for 10s
@@ -27,7 +27,7 @@ func (s *DockerSuite) TestGetContainersAttachWebsocket(c *testing.T) {
     testRequires(c, DaemonIsLinux)
     out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat")

-    rwc, err := request.SockConn(time.Duration(10*time.Second), request.DaemonHost())
+    rwc, err := request.SockConn(10*time.Second, request.DaemonHost())
     assert.NilError(c, err)

     cleanedContainerID := strings.TrimSpace(out)
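Note: many cleanups in this diff have the same shape: 10*time.Second is already a time.Duration (an untyped constant multiplied by a typed constant takes the typed operand's type), so wrapping it in time.Duration(...) converts a value to the type it already has. A standalone sketch:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        a := 10 * time.Second                // untyped constant 10 adopts time.Duration
        b := time.Duration(10 * time.Second) // redundant conversion, identical value
        fmt.Println(a == b, a)               // true 10s
    }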
@@ -237,7 +237,7 @@ func sockRequestHijack(method, endpoint string, data io.Reader, ct string, daemo
-// Deprecated: Use New instead of NewRequestClient
+// Deprecated: use request.Do (or Get, Delete, Post) instead
 func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string, modifiers ...func(*http.Request)) (*http.Request, *httputil.ClientConn, error) {
-    c, err := request.SockConn(time.Duration(10*time.Second), daemon)
+    c, err := request.SockConn(10*time.Second, daemon)
     if err != nil {
         return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err)
     }
@@ -394,7 +394,7 @@ func (s *DockerSuite) TestContainerAPIPause(c *testing.T) {
 func (s *DockerSuite) TestContainerAPITop(c *testing.T) {
     testRequires(c, DaemonIsLinux)
     out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top")
-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     assert.NilError(c, waitRun(id))

     cli, err := client.NewClientWithOpts(client.FromEnv)
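Note: dockerCmd already returns its output as a string, so string(out) is an identity conversion; the same no-op recurs throughout these tests and is the kind of issue linters such as unconvert report. A trivial sketch:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        out := "4a1b2c3d\n" // already a string, as returned by dockerCmd in these tests
        id := strings.TrimSpace(out)
        same := strings.TrimSpace(string(out)) // string(out) converts string -> string
        fmt.Println(id == same)                // true
    }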
@@ -417,7 +417,7 @@ func (s *DockerSuite) TestContainerAPITop(c *testing.T) {
 func (s *DockerSuite) TestContainerAPITopWindows(c *testing.T) {
     testRequires(c, DaemonIsWindows)
     out := runSleepingContainer(c, "-d")
-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     assert.NilError(c, waitRun(id))

     cli, err := client.NewClientWithOpts(client.FromEnv)
@@ -614,7 +614,7 @@ func UtilCreateNetworkMode(c *testing.T, networkMode containertypes.NetworkMode)
     containerJSON, err := cli.ContainerInspect(context.Background(), container.ID)
     assert.NilError(c, err)

-    assert.Equal(c, containerJSON.HostConfig.NetworkMode, containertypes.NetworkMode(networkMode), "Mismatched NetworkMode")
+    assert.Equal(c, containerJSON.HostConfig.NetworkMode, networkMode, "Mismatched NetworkMode")
 }

 func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *testing.T) {
@@ -84,7 +84,7 @@ func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *testing.T) {
     assert.Equal(c, res.StatusCode, http.StatusOK)

     inspectOut := cli.InspectCmd(c, id, cli.Format(".Id")).Combined()
-    assert.Equal(c, strings.TrimSpace(string(inspectOut)), id, "load did not work properly")
+    assert.Equal(c, strings.TrimSpace(inspectOut), id, "load did not work properly")
 }

 func (s *DockerSuite) TestAPIImagesDelete(c *testing.T) {
@@ -595,7 +595,7 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *testing.T) {

 func simpleTestService(s *swarm.Service) {
     ureplicas := uint64(1)
-    restartDelay := time.Duration(100 * time.Millisecond)
+    restartDelay := 100 * time.Millisecond

     s.Spec = swarm.ServiceSpec{
         TaskTemplate: swarm.TaskSpec{
@@ -618,7 +618,7 @@ func simpleTestService(s *swarm.Service) {

 func serviceForUpdate(s *swarm.Service) {
     ureplicas := uint64(1)
-    restartDelay := time.Duration(100 * time.Millisecond)
+    restartDelay := 100 * time.Millisecond

     s.Spec = swarm.ServiceSpec{
         TaskTemplate: swarm.TaskSpec{
@@ -2082,7 +2082,7 @@ CMD ["cat", "/foo"]`),
     }).Assert(c, icmd.Success)

     res := inspectField(c, name, "Config.Cmd")
-    assert.Equal(c, strings.TrimSpace(string(res)), `[cat /foo]`)
+    assert.Equal(c, strings.TrimSpace(res), `[cat /foo]`)
 }

 // FIXME(vdemeester) migrate to docker/cli tests (unit or e2e)
@@ -40,7 +40,7 @@ func (s *DockerSuite) TestCreateArgs(c *testing.T) {
     assert.Equal(c, len(containers), 1)

     cont := containers[0]
-    assert.Equal(c, string(cont.Path), "command", fmt.Sprintf("Unexpected container path. Expected command, received: %s", cont.Path))
+    assert.Equal(c, cont.Path, "command", fmt.Sprintf("Unexpected container path. Expected command, received: %s", cont.Path))

     b := false
     expected := []string{"arg1", "arg2", "arg with space", "-c", "flags"}
@@ -333,7 +333,7 @@ func (s *DockerSuite) TestCreateWithInvalidLogOpts(c *testing.T) {
 // #20972
 func (s *DockerSuite) TestCreate64ByteHexID(c *testing.T) {
     out := inspectField(c, "busybox", "Id")
-    imageID := strings.TrimPrefix(strings.TrimSpace(string(out)), "sha256:")
+    imageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:")

     dockerCmd(c, "create", imageID)
 }
@@ -1854,11 +1854,11 @@ func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *testing.T) {

     out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup")
     assert.NilError(c, err)
-    cgroupPaths := ParseCgroupPaths(string(out))
-    assert.Assert(c, len(cgroupPaths) != 0, "unexpected output - %q", string(out))
+    cgroupPaths := ParseCgroupPaths(out)
+    assert.Assert(c, len(cgroupPaths) != 0, "unexpected output - %q", out)
     out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name)
     assert.NilError(c, err)
-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     expectedCgroup := path.Join(cgroupParent, id)
     found := false
     for _, path := range cgroupPaths {
@@ -329,7 +329,7 @@ func (s *DockerSuite) TestImagesFormat(c *testing.T) {
     dockerCmd(c, "tag", "busybox", tag+":v2")

     out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag)
-    lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines := strings.Split(strings.TrimSpace(out), "\n")

     expected := []string{"myimage", "myimage"}
     var names []string
@@ -161,7 +161,7 @@ func (s *DockerSuite) TestLinksUpdateOnRestart(c *testing.T) {
     testRequires(c, testEnv.IsLocalDaemon)
     dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top")
     out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top")
-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)

     realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress")
     content := readContainerFileWithExec(c, id, "/etc/hosts")
@@ -812,14 +812,14 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *
     // verify first container's etc/hosts file has not changed after spawning the second named container
     hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile)
     assert.NilError(c, err)
-    assert.Equal(c, string(hosts), string(hostsPost), fmt.Sprintf("Unexpected %s change on second container creation", hostsFile))
+    assert.Equal(c, hosts, hostsPost, fmt.Sprintf("Unexpected %s change on second container creation", hostsFile))
     // stop container 2 and verify first container's etc/hosts has not changed
     _, err = s.d.Cmd("stop", cid2)
     assert.NilError(c, err)

     hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile)
     assert.NilError(c, err)
-    assert.Equal(c, string(hosts), string(hostsPost), fmt.Sprintf("Unexpected %s change on second container creation", hostsFile))
+    assert.Equal(c, hosts, hostsPost, fmt.Sprintf("Unexpected %s change on second container creation", hostsFile))
     // but discovery is on when connecting to non default bridge network
     network := "anotherbridge"
     out, err = s.d.Cmd("network", "create", network)
@@ -834,7 +834,7 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *

     hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile)
     assert.NilError(c, err)
-    assert.Equal(c, string(hosts), string(hostsPost), fmt.Sprintf("Unexpected %s change on second network connection", hostsFile))
+    assert.Equal(c, hosts, hostsPost, fmt.Sprintf("Unexpected %s change on second network connection", hostsFile))
 }

 func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *testing.T) {
@@ -1683,7 +1683,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *testing.T)
     // Cleanup because these containers will not be shut down by daemon
     out, err = s.d.Cmd("stop", newCon)
     if err != nil {
-        t.Fatalf("err: %v %v", err, string(out))
+        t.Fatalf("err: %v %v", err, out)
     }
     _, err = s.d.Cmd("stop", strings.TrimSpace(id))
     if err != nil {
@@ -478,22 +478,22 @@ func (s *DockerSuite) TestPsRightTagName(c *testing.T) {

     var id1 string
     out := runSleepingContainer(c)
-    id1 = strings.TrimSpace(string(out))
+    id1 = strings.TrimSpace(out)

     var id2 string
     out = runSleepingContainerInImage(c, tag)
-    id2 = strings.TrimSpace(string(out))
+    id2 = strings.TrimSpace(out)

     var imageID string
     out = inspectField(c, "busybox", "Id")
-    imageID = strings.TrimSpace(string(out))
+    imageID = strings.TrimSpace(out)

     var id3 string
     out = runSleepingContainerInImage(c, imageID)
-    id3 = strings.TrimSpace(string(out))
+    id3 = strings.TrimSpace(out)

     out, _ = dockerCmd(c, "ps", "--no-trunc")
-    lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines := strings.Split(strings.TrimSpace(out), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     // skip header
     lines = lines[1:]
@@ -562,7 +562,7 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *testing.T) {
     result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc")
     result.Assert(c, icmd.Success)

-    lines := strings.Split(strings.TrimSpace(string(result.Combined())), "\n")
+    lines := strings.Split(strings.TrimSpace(result.Combined()), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     // skip header
     lines = lines[1:]
@@ -579,7 +579,7 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *testing.T) {
     result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc")
     result.Assert(c, icmd.Success)

-    lines = strings.Split(strings.TrimSpace(string(result.Combined())), "\n")
+    lines = strings.Split(strings.TrimSpace(result.Combined()), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     // skip header
     lines = lines[1:]
@@ -597,7 +597,7 @@ func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *testing.T) {
     dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top")
     assert.Assert(c, waitRun("foo") == nil)
     out, _ := dockerCmd(c, "ps")
-    lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines := strings.Split(strings.TrimSpace(out), "\n")
     expected := "0.0.0.0:5000->5000/tcp"
     fields := strings.Fields(lines[1])
     assert.Equal(c, fields[len(fields)-2], expected, fmt.Sprintf("Expected: %v, got: %v", expected, fields[len(fields)-2]))
@@ -605,7 +605,7 @@ func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *testing.T) {
     dockerCmd(c, "kill", "foo")
     dockerCmd(c, "wait", "foo")
     out, _ = dockerCmd(c, "ps", "-l")
-    lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines = strings.Split(strings.TrimSpace(out), "\n")
     fields = strings.Fields(lines[1])
     assert.Assert(c, fields[len(fields)-2] != expected, "Should not got %v", expected)
 }
@@ -638,7 +638,7 @@ func (s *DockerSuite) TestPsShowMounts(c *testing.T) {

     out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}")

-    lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines := strings.Split(strings.TrimSpace(out), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     assert.Equal(c, len(lines), 3)

@@ -658,7 +658,7 @@ func (s *DockerSuite) TestPsShowMounts(c *testing.T) {
     // filter by volume name
     out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test")

-    lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines = strings.Split(strings.TrimSpace(out), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     assert.Equal(c, len(lines), 1)

@@ -667,12 +667,12 @@ func (s *DockerSuite) TestPsShowMounts(c *testing.T) {

     // empty results filtering by unknown volume
     out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=this-volume-should-not-exist")
-    assert.Equal(c, len(strings.TrimSpace(string(out))), 0)
+    assert.Equal(c, len(strings.TrimSpace(out)), 0)

     // filter by mount destination
     out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp)

-    lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines = strings.Split(strings.TrimSpace(out), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     assert.Equal(c, len(lines), 2)

@@ -684,7 +684,7 @@ func (s *DockerSuite) TestPsShowMounts(c *testing.T) {
     // filter by bind mount source
     out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource)

-    lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines = strings.Split(strings.TrimSpace(out), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     assert.Equal(c, len(lines), 1)

@@ -696,7 +696,7 @@ func (s *DockerSuite) TestPsShowMounts(c *testing.T) {
     // filter by bind mount destination
     out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination)

-    lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines = strings.Split(strings.TrimSpace(out), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     assert.Equal(c, len(lines), 1)

@@ -707,7 +707,7 @@ func (s *DockerSuite) TestPsShowMounts(c *testing.T) {

     // empty results filtering by unknown mount point
     out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+prefix+slash+"this-path-was-never-mounted")
-    assert.Equal(c, len(strings.TrimSpace(string(out))), 0)
+    assert.Equal(c, len(strings.TrimSpace(out)), 0)
 }

 func (s *DockerSuite) TestPsListContainersFilterNetwork(c *testing.T) {
@@ -723,7 +723,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *testing.T) {

     // Filter docker ps on non existing network
     out, _ := dockerCmd(c, "ps", "--filter", "network=doesnotexist")
-    containerOut := strings.TrimSpace(string(out))
+    containerOut := strings.TrimSpace(out)
     lines := strings.Split(containerOut, "\n")

     // skip header
@@ -734,7 +734,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *testing.T) {

     // Filter docker ps on network bridge
     out, _ = dockerCmd(c, "ps", "--filter", "network=bridge")
-    containerOut = strings.TrimSpace(string(out))
+    containerOut = strings.TrimSpace(out)

     lines = strings.Split(containerOut, "\n")

@@ -748,7 +748,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *testing.T) {
     assert.Assert(c, strings.Contains(containerOut, "onbridgenetwork"), "Missing the container on network\n")
     // Filter docker ps on networks bridge and none
     out, _ = dockerCmd(c, "ps", "--filter", "network=bridge", "--filter", "network=none")
-    containerOut = strings.TrimSpace(string(out))
+    containerOut = strings.TrimSpace(out)

     lines = strings.Split(containerOut, "\n")

@@ -765,15 +765,15 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *testing.T) {

     // Filter by network ID
     out, _ = dockerCmd(c, "ps", "--filter", "network="+nwID)
-    containerOut = strings.TrimSpace(string(out))
+    containerOut = strings.TrimSpace(out)

     assert.Assert(c, is.Contains(containerOut, "onbridgenetwork"))

     // Filter by partial network ID
-    partialnwID := string(nwID[0:4])
+    partialnwID := nwID[0:4]

     out, _ = dockerCmd(c, "ps", "--filter", "network="+partialnwID)
-    containerOut = strings.TrimSpace(string(out))
+    containerOut = strings.TrimSpace(out)

     lines = strings.Split(containerOut, "\n")

@@ -849,7 +849,7 @@ func (s *DockerSuite) TestPsNotShowLinknamesOfDeletedContainer(c *testing.T) {
     dockerCmd(c, "create", "--name=bbb", "--link=aaa", "busybox", "top")

     out, _ := dockerCmd(c, "ps", "--no-trunc", "-a", "--format", "{{.Names}}")
-    lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines := strings.Split(strings.TrimSpace(out), "\n")
     lines = RemoveLinesForExistingElements(lines, existingContainers)
     expected := []string{"bbb", "aaa,bbb/aaa"}
     var names []string
@@ -333,7 +333,7 @@ func (s *DockerRegistrySuite) TestPullManifestList(c *testing.T) {
     err = os.MkdirAll(blobDir, 0755)
     assert.NilError(c, err, "error creating blob dir")
     blobPath := filepath.Join(blobDir, "data")
-    err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644)
+    err = ioutil.WriteFile(blobPath, manifestListJSON, 0644)
     assert.NilError(c, err, "error writing manifest list")

     // Add to revision store
@@ -18,13 +18,13 @@ func unescapeBackslashSemicolonParens(s string) string {
     ret := re.ReplaceAll([]byte(s), []byte(";"))

     re = regexp.MustCompile(`\\\(`)
-    ret = re.ReplaceAll([]byte(ret), []byte("("))
+    ret = re.ReplaceAll(ret, []byte("("))

     re = regexp.MustCompile(`\\\)`)
-    ret = re.ReplaceAll([]byte(ret), []byte(")"))
+    ret = re.ReplaceAll(ret, []byte(")"))

     re = regexp.MustCompile(`\\\\`)
-    ret = re.ReplaceAll([]byte(ret), []byte(`\`))
+    ret = re.ReplaceAll(ret, []byte(`\`))

     return string(ret)
 }
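Note: Regexp.ReplaceAll takes and returns []byte, so ret never needs to be re-wrapped in []byte(...) between calls. A standalone sketch of the same unescaping chain:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        ret := regexp.MustCompile(`\\;`).ReplaceAll([]byte(`a\;b\(c\)`), []byte(";"))
        ret = regexp.MustCompile(`\\\(`).ReplaceAll(ret, []byte("(")) // ret is already []byte
        ret = regexp.MustCompile(`\\\)`).ReplaceAll(ret, []byte(")"))
        fmt.Println(string(ret)) // a;b(c)
    }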
@@ -98,7 +98,7 @@ func (s *DockerSuite) TestRestartDisconnectedContainer(c *testing.T) {
 func (s *DockerSuite) TestRestartPolicyNO(c *testing.T) {
     out, _ := dockerCmd(c, "create", "--restart=no", "busybox")

-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     name := inspectField(c, id, "HostConfig.RestartPolicy.Name")
     assert.Equal(c, name, "no")
 }
@@ -106,7 +106,7 @@ func (s *DockerSuite) TestRestartPolicyNO(c *testing.T) {
 func (s *DockerSuite) TestRestartPolicyAlways(c *testing.T) {
     out, _ := dockerCmd(c, "create", "--restart=always", "busybox")

-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     name := inspectField(c, id, "HostConfig.RestartPolicy.Name")
     assert.Equal(c, name, "always")

@@ -123,7 +123,7 @@ func (s *DockerSuite) TestRestartPolicyOnFailure(c *testing.T) {

     out, _ = dockerCmd(c, "create", "--restart=on-failure:1", "busybox")

-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     name := inspectField(c, id, "HostConfig.RestartPolicy.Name")
     maxRetry := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")

@@ -132,7 +132,7 @@ func (s *DockerSuite) TestRestartPolicyOnFailure(c *testing.T) {

     out, _ = dockerCmd(c, "create", "--restart=on-failure:0", "busybox")

-    id = strings.TrimSpace(string(out))
+    id = strings.TrimSpace(out)
     name = inspectField(c, id, "HostConfig.RestartPolicy.Name")
     maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")

@@ -141,7 +141,7 @@ func (s *DockerSuite) TestRestartPolicyOnFailure(c *testing.T) {

     out, _ = dockerCmd(c, "create", "--restart=on-failure", "busybox")

-    id = strings.TrimSpace(string(out))
+    id = strings.TrimSpace(out)
     name = inspectField(c, id, "HostConfig.RestartPolicy.Name")
     maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")

@@ -154,7 +154,7 @@ func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *testing.T) {
 func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *testing.T) {
     out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true")

-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 30*time.Second)
     assert.NilError(c, err)

@@ -281,8 +281,8 @@ func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *testing.T) {
     out1, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false")
     out2, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false")

-    id1 := strings.TrimSpace(string(out1))
-    id2 := strings.TrimSpace(string(out2))
+    id1 := strings.TrimSpace(out1)
+    id2 := strings.TrimSpace(out2)
     waitTimeout := 15 * time.Second
     if testEnv.OSType == "windows" {
         waitTimeout = 150 * time.Second
@@ -311,7 +311,7 @@ func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *testing.T) {
 func (s *DockerSuite) TestRestartAutoRemoveContainer(c *testing.T) {
     out := runSleepingContainer(c, "--rm")

-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     dockerCmd(c, "restart", id)
     err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second)
     assert.NilError(c, err)
@@ -1332,8 +1332,8 @@ func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *testing.T) {
     var out string
     out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")

-    if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" {
-        c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
+    if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); actualNameservers[0] != "127.0.0.1" {
+        c.Fatalf("expected '127.0.0.1', but says: %q", actualNameservers[0])
     }

     actualSearch := resolvconf.GetSearchDomains([]byte(out))
@@ -1358,8 +1358,8 @@ func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *testing.T) {
         }
     }

-    if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
-        c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
+    if actualSearch = resolvconf.GetSearchDomains([]byte(out)); actualSearch[0] != "mydomain" {
+        c.Fatalf("expected 'mydomain', but says: %q", actualSearch[0])
     }

     // test with file
@@ -1382,7 +1382,7 @@ func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *testing.T) {
     hostSearch = resolvconf.GetSearchDomains(resolvConf)

     out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf")
-    if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
+    if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); actualNameservers[0] != "12.34.56.78" || len(actualNameservers) != 1 {
         c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
     }

@@ -1458,8 +1458,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
     containerID1 := getIDByName(c, "first")

     // replace resolv.conf with our temporary copy
-    bytesResolvConf := []byte(tmpResolvConf)
-    if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+    if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
         c.Fatal(err)
     }

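Note: tmpResolvConf is already a []byte in this test, so the intermediate bytesResolvConf := []byte(tmpResolvConf) added nothing; ioutil.WriteFile takes the slice directly. A minimal sketch (the path and contents here are made up for illustration):

    package main

    import (
        "io/ioutil"
        "log"
        "os"
        "path/filepath"
    )

    func main() {
        tmpResolvConf := []byte("nameserver 12.34.56.78\nsearch mydomain")
        path := filepath.Join(os.TempDir(), "resolv.conf")
        // WriteFile accepts []byte directly; no []byte(...) re-conversion needed
        if err := ioutil.WriteFile(path, tmpResolvConf, 0644); err != nil {
            log.Fatal(err)
        }
    }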
@@ -1468,7 +1467,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {

     // check for update in container
     containerResolv := readContainerFile(c, containerID1, "resolv.conf")
-    if !bytes.Equal(containerResolv, bytesResolvConf) {
+    if !bytes.Equal(containerResolv, tmpResolvConf) {
         c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
     }

@@ -1500,13 +1499,13 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
     runningContainerID := strings.TrimSpace(out)

     // replace resolv.conf
-    if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+    if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
         c.Fatal(err)
     }

     // check for update in container
     containerResolv = readContainerFile(c, runningContainerID, "resolv.conf")
-    if bytes.Equal(containerResolv, bytesResolvConf) {
+    if bytes.Equal(containerResolv, tmpResolvConf) {
         c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
     }

@@ -1516,16 +1515,15 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {

     // check for update in container
     containerResolv = readContainerFile(c, runningContainerID, "resolv.conf")
-    if !bytes.Equal(containerResolv, bytesResolvConf) {
-        c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
+    if !bytes.Equal(containerResolv, tmpResolvConf) {
+        c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(tmpResolvConf), string(containerResolv))
     }

     //5. test that additions of a localhost resolver are cleaned from
     //   host resolv.conf before updating container's resolv.conf copies

     // replace resolv.conf with a localhost-only nameserver copy
-    bytesResolvConf = []byte(tmpLocalhostResolvConf)
-    if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+    if err = ioutil.WriteFile("/etc/resolv.conf", tmpLocalhostResolvConf, 0644); err != nil {
         c.Fatal(err)
     }

@@ -1553,8 +1551,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
     containerID3 := getIDByName(c, "third")

     // Create a modified resolv.conf.aside and override resolv.conf with it
-    bytesResolvConf = []byte(tmpResolvConf)
-    if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
+    if err := ioutil.WriteFile("/etc/resolv.conf.aside", tmpResolvConf, 0644); err != nil {
         c.Fatal(err)
     }

@@ -1568,7 +1565,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {

     // check for update in container
     containerResolv = readContainerFile(c, containerID3, "resolv.conf")
-    if !bytes.Equal(containerResolv, bytesResolvConf) {
+    if !bytes.Equal(containerResolv, tmpResolvConf) {
         c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
     }

@@ -2661,7 +2658,7 @@ func (s *DockerSuite) TestRunRestartMaxRetries(c *testing.T) {
         timeout = 120 * time.Second
     }

-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil {
         c.Fatal(err)
     }
@@ -2704,7 +2701,7 @@ func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *testing.T) {
         c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.")
     }
     expected := "type devpts (rw,"
-    if !strings.Contains(string(out), expected) {
+    if !strings.Contains(out, expected) {
         c.Fatalf("expected output to contain %s but contains %s", expected, out)
     }
 }
@@ -2739,7 +2736,7 @@ func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *
     dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top")

     out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts")
-    if !strings.Contains(string(out), "testlinked") {
+    if !strings.Contains(out, "testlinked") {
         c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled")
     }
 }
@@ -2749,7 +2746,7 @@ func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *testing.T
     testRequires(c, DaemonIsLinux, UserNamespaceROMount)

     out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf")
-    if !strings.Contains(string(out), "1.1.1.1") {
+    if !strings.Contains(out, "1.1.1.1") {
         c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used")
     }
 }
@@ -2759,7 +2756,7 @@ func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *testi
     testRequires(c, DaemonIsLinux, UserNamespaceROMount)

     out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts")
-    if !strings.Contains(string(out), "testreadonly") {
+    if !strings.Contains(out, "testreadonly") {
         c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used")
     }
 }
@@ -3249,11 +3246,11 @@ func (s *DockerSuite) TestRunContainerWithCgroupParent(c *testing.T) {
 func testRunContainerWithCgroupParent(c *testing.T, cgroupParent, name string) {
     out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
     if err != nil {
-        c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
+        c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", out, err)
     }
-    cgroupPaths := ParseCgroupPaths(string(out))
+    cgroupPaths := ParseCgroupPaths(out)
     if len(cgroupPaths) == 0 {
-        c.Fatalf("unexpected output - %q", string(out))
+        c.Fatalf("unexpected output - %q", out)
     }
     id := getIDByName(c, name)
     expectedCgroup := path.Join(cgroupParent, id)
@@ -3283,7 +3280,7 @@ func testRunInvalidCgroupParent(c *testing.T, cgroupParent, cleanCgroupParent, n
     out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
     if err != nil {
         // XXX: This may include a daemon crash.
-        c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
+        c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", out, err)
     }

     // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue.
@@ -3291,9 +3288,9 @@ func testRunInvalidCgroupParent(c *testing.T, cgroupParent, cleanCgroupParent, n
         c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!")
     }

-    cgroupPaths := ParseCgroupPaths(string(out))
+    cgroupPaths := ParseCgroupPaths(out)
     if len(cgroupPaths) == 0 {
-        c.Fatalf("unexpected output - %q", string(out))
+        c.Fatalf("unexpected output - %q", out)
     }
     id := getIDByName(c, name)
     expectedCgroup := path.Join(cleanCgroupParent, id)
@@ -3947,11 +3944,12 @@ func (s *DockerSuite) TestRunAttachFailedNoLeak(c *testing.T) {
     assert.Assert(c, err != nil, "Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)
     // check for windows error as well
     // TODO Windows Post TP5. Fix the error message string
-    assert.Assert(c, strings.Contains(string(out), "port is already allocated") ||
-        strings.Contains(string(out), "were not connected because a duplicate name exists") ||
-        strings.Contains(string(out), "The specified port already exists") ||
-        strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") ||
-        strings.Contains(string(out), "HNS failed with error : The object already exists"), fmt.Sprintf("Output: %s", out))
+    outLowerCase := strings.ToLower(out)
+    assert.Assert(c, strings.Contains(outLowerCase, "port is already allocated") ||
+        strings.Contains(outLowerCase, "were not connected because a duplicate name exists") ||
+        strings.Contains(outLowerCase, "the specified port already exists") ||
+        strings.Contains(outLowerCase, "hns failed with error : failed to create endpoint") ||
+        strings.Contains(outLowerCase, "hns failed with error : the object already exists"), fmt.Sprintf("Output: %s", out))
     dockerCmd(c, "rm", "-f", "test")

     // NGoroutines is not updated right away, so we need to wait before failing
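Note: lowercasing the output once and matching lowercase needles makes the assertion insensitive to casing differences in the daemon's error strings, which is the apparent motivation here given that the expected HNS messages can differ between Windows releases. A standalone sketch of the pattern:

    package main

    import (
        "fmt"
        "strings"
    )

    // containsAny reports whether s contains any of the needles, ignoring case.
    func containsAny(s string, needles ...string) bool {
        s = strings.ToLower(s) // lowercase once instead of per comparison
        for _, n := range needles {
            if strings.Contains(s, strings.ToLower(n)) {
                return true
            }
        }
        return false
    }

    func main() {
        out := "HNS failed with error : The object already exists"
        fmt.Println(containsAny(out, "the object already exists", "port is already allocated")) // true
    }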
@@ -65,7 +65,7 @@ func countLogLines(d *daemon.Daemon, name string) func(*testing.T) (interface{},
             return 0, "Empty stdout"
         }
         lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n")
-        return len(lines), fmt.Sprintf("output, %q", string(result.Stdout()))
+        return len(lines), fmt.Sprintf("output, %q", result.Stdout())
     }
 }

@@ -900,15 +900,15 @@ func (s *DockerSwarmSuite) TestSwarmServiceNetworkUpdate(c *testing.T) {

     result := icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "foo"))
     result.Assert(c, icmd.Success)
-    fooNetwork := strings.TrimSpace(string(result.Combined()))
+    fooNetwork := strings.TrimSpace(result.Combined())

     result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "bar"))
     result.Assert(c, icmd.Success)
-    barNetwork := strings.TrimSpace(string(result.Combined()))
+    barNetwork := strings.TrimSpace(result.Combined())

     result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "baz"))
     result.Assert(c, icmd.Success)
-    bazNetwork := strings.TrimSpace(string(result.Combined()))
+    bazNetwork := strings.TrimSpace(result.Combined())

     // Create a service
     name := "top"
@@ -50,7 +50,7 @@ func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
     }

     assert.NilError(c, json.NewDecoder(strings.NewReader(out)).Decode(&mounts))
-    assert.Equal(c, len(mounts), 1, string(out))
+    assert.Equal(c, len(mounts), 1, out)
     assert.Equal(c, mounts[0].Name, "my-volume")
     assert.Equal(c, mounts[0].Driver, "customvolumedriver")
 }
@@ -271,7 +271,7 @@ func (s *DockerSuite) TestUpdateNotAffectMonitorRestartPolicy(c *testing.T) {
     testRequires(c, DaemonIsLinux, cpuShare)

     out, _ := dockerCmd(c, "run", "-tid", "--restart=always", "busybox", "sh")
-    id := strings.TrimSpace(string(out))
+    id := strings.TrimSpace(out)
     dockerCmd(c, "update", "--cpu-shares", "512", id)

     cpty, tty, err := pty.Open()
@@ -106,7 +106,7 @@ func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *testing.T) {
 }

 func assertVolumesInList(c *testing.T, out string, expected []string) {
-    lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+    lines := strings.Split(strings.TrimSpace(out), "\n")
     for _, expect := range expected {
         found := false
         for _, v := range lines {
@@ -82,8 +82,8 @@ func UnixCli() bool {

 func Network() bool {
     // Set a timeout on the GET at 15s
-    var timeout = time.Duration(15 * time.Second)
-    var url = "https://hub.docker.com"
+    const timeout = 15 * time.Second
+    const url = "https://hub.docker.com"

     client := http.Client{
         Timeout: timeout,
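Note: both values are fixed at compile time, so const states the intent better than var, and 15 * time.Second is already a typed time.Duration constant. A minimal sketch of the same shape:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    const timeout = 15 * time.Second // typed constant; no time.Duration(...) wrapper needed

    func newClient() *http.Client {
        return &http.Client{Timeout: timeout}
    }

    func main() {
        fmt.Println(newClient().Timeout) // 15s
    }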
@@ -3,8 +3,8 @@ package types // import "github.com/docker/docker/libcontainerd/types"
 import (
     "time"

-    "github.com/containerd/cgroups"
-    "github.com/opencontainers/runtime-spec/specs-go"
+    statsV1 "github.com/containerd/cgroups/stats/v1"
+    specs "github.com/opencontainers/runtime-spec/specs-go"
 )

 // Summary is not used on linux
@@ -13,13 +13,13 @@ type Summary struct{}
 // Stats holds metrics properties as returned by containerd
 type Stats struct {
     Read    time.Time
-    Metrics *cgroups.Metrics
+    Metrics *statsV1.Metrics
 }

 // InterfaceToStats returns a stats object from the platform-specific interface.
 func InterfaceToStats(read time.Time, v interface{}) *Stats {
     return &Stats{
-        Metrics: v.(*cgroups.Metrics),
+        Metrics: v.(*statsV1.Metrics),
         Read: read,
     }
 }
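Note: with the cgroups bump, the metrics payload handed back by containerd is a *statsV1.Metrics, and the plain type assertion above panics on anything else. A sketch of the same conversion with an explicit check (the error handling is illustrative, not what the daemon does):

    package main

    import (
        "fmt"
        "time"

        statsV1 "github.com/containerd/cgroups/stats/v1"
    )

    type Stats struct {
        Read    time.Time
        Metrics *statsV1.Metrics
    }

    // interfaceToStats is the comma-ok variant of the assertion above.
    func interfaceToStats(read time.Time, v interface{}) (*Stats, error) {
        m, ok := v.(*statsV1.Metrics)
        if !ok {
            return nil, fmt.Errorf("unexpected metrics type %T", v)
        }
        return &Stats{Read: read, Metrics: m}, nil
    }

    func main() {
        s, err := interfaceToStats(time.Now(), &statsV1.Metrics{})
        fmt.Println(s != nil, err)
    }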
@@ -1,5 +1,5 @@
 github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
-github.com/Microsoft/hcsshim 672e52e9209d1e53718c1b6a7d68cc9272654ab5
+github.com/Microsoft/hcsshim b3f49c06ffaeef24d09c6c08ec8ec8425a0303e2
 github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14
 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
 github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a
@@ -120,7 +120,7 @@ google.golang.org/genproto 694d95ba50e67b2e363f3483057d
 github.com/containerd/containerd 7c1e88399ec0b0b077121d9d5ad97e647b11c870
 github.com/containerd/fifo a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c
 github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7
-github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
+github.com/containerd/cgroups 5fbad35c2a7e855762d3c60f2e474ffcad0d470a
 github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
 github.com/containerd/go-runc e029b79d8cda8374981c64eba71f28ec38e5526f
 github.com/containerd/typeurl 2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d
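Note: these two vendor.conf pins are what actually deliver the fix: bumping Microsoft/hcsshim to b3f49c06ffae picks up the Windows 1903 build fixes named in the PR title, and bumping containerd/cgroups to 5fbad35c2a7e appears to be what moves the metrics types into the stats/v1 package referenced throughout the hunks above.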
336 vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go (generated, vendored)
@@ -1,33 +1,19 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto

-/*
-Package options is a generated protocol buffer package.
-
-It is generated from these files:
-    github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto
-
-It has these top-level messages:
-    Options
-    ProcessDetails
-*/
 package options

-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/gogo/protobuf/types"
-
-import time "time"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+    fmt "fmt"
+    proto "github.com/gogo/protobuf/proto"
+    _ "github.com/gogo/protobuf/types"
+    github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+    io "io"
+    math "math"
+    reflect "reflect"
+    strings "strings"
+    time "time"
+)

 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
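Note: the rest of the changes to this file follow mechanically from regenerating it with a newer protoc-gen-gogo: imports are grouped into a single block, the descriptor variable is renamed from fileDescriptorRunhcs to fileDescriptor_b643df6839c75082, and each message gains the XXX_* boilerplate (unknown-field preservation, size caching, and the xxx_messageInfo marshalling hooks).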
@@ -54,6 +40,7 @@ var Options_DebugType_name = map[int32]string{
     1: "FILE",
     2: "ETW",
 }
+
 var Options_DebugType_value = map[string]int32{
     "NPIPE": 0,
     "FILE": 1,
@@ -63,7 +50,10 @@ var Options_DebugType_value = map[string]int32{
 func (x Options_DebugType) String() string {
     return proto.EnumName(Options_DebugType_name, int32(x))
 }
-func (Options_DebugType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRunhcs, []int{0, 0} }
+
+func (Options_DebugType) EnumDescriptor() ([]byte, []int) {
+    return fileDescriptor_b643df6839c75082, []int{0, 0}
+}

 type Options_SandboxIsolation int32

@@ -76,6 +66,7 @@ var Options_SandboxIsolation_name = map[int32]string{
     0: "PROCESS",
     1: "HYPERVISOR",
 }
+
 var Options_SandboxIsolation_value = map[string]int32{
     "PROCESS": 0,
     "HYPERVISOR": 1,
@@ -84,8 +75,9 @@ var Options_SandboxIsolation_value = map[string]int32{
 func (x Options_SandboxIsolation) String() string {
     return proto.EnumName(Options_SandboxIsolation_name, int32(x))
 }
+
 func (Options_SandboxIsolation) EnumDescriptor() ([]byte, []int) {
-    return fileDescriptorRunhcs, []int{0, 1}
+    return fileDescriptor_b643df6839c75082, []int{0, 1}
 }

 // Options are the set of customizations that can be passed at Create time.
@@ -109,18 +101,49 @@ type Options struct {
     SandboxIsolation Options_SandboxIsolation `protobuf:"varint,6,opt,name=sandbox_isolation,json=sandboxIsolation,proto3,enum=containerd.runhcs.v1.Options_SandboxIsolation" json:"sandbox_isolation,omitempty"`
     // boot_files_root_path is the path to the directory containing the LCOW
     // kernel and root FS files.
-    BootFilesRootPath string `protobuf:"bytes,7,opt,name=boot_files_root_path,json=bootFilesRootPath,proto3" json:"boot_files_root_path,omitempty"`
+    BootFilesRootPath    string   `protobuf:"bytes,7,opt,name=boot_files_root_path,json=bootFilesRootPath,proto3" json:"boot_files_root_path,omitempty"`
+    XXX_NoUnkeyedLiteral struct{} `json:"-"`
+    XXX_unrecognized     []byte   `json:"-"`
+    XXX_sizecache        int32    `json:"-"`
 }

-func (m *Options) Reset()      { *m = Options{} }
-func (*Options) ProtoMessage() {}
-func (*Options) Descriptor() ([]byte, []int) { return fileDescriptorRunhcs, []int{0} }
+func (m *Options) Reset()      { *m = Options{} }
+func (*Options) ProtoMessage() {}
+func (*Options) Descriptor() ([]byte, []int) {
+    return fileDescriptor_b643df6839c75082, []int{0}
+}
+func (m *Options) XXX_Unmarshal(b []byte) error {
+    return m.Unmarshal(b)
+}
+func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+    if deterministic {
+        return xxx_messageInfo_Options.Marshal(b, m, deterministic)
+    } else {
+        b = b[:cap(b)]
+        n, err := m.MarshalTo(b)
+        if err != nil {
+            return nil, err
+        }
+        return b[:n], nil
+    }
+}
+func (m *Options) XXX_Merge(src proto.Message) {
+    xxx_messageInfo_Options.Merge(m, src)
+}
+func (m *Options) XXX_Size() int {
+    return m.Size()
+}
+func (m *Options) XXX_DiscardUnknown() {
+    xxx_messageInfo_Options.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Options proto.InternalMessageInfo

 // ProcessDetails contains additional information about a process. This is the additional
 // info returned in the Pids query.
 type ProcessDetails struct {
     ImageName string `protobuf:"bytes,1,opt,name=image_name,json=imageName,proto3" json:"image_name,omitempty"`
-    CreatedAt time.Time `protobuf:"bytes,2,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+    CreatedAt time.Time `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"`
     KernelTime_100Ns uint64 `protobuf:"varint,3,opt,name=kernel_time_100_ns,json=kernelTime100Ns,proto3" json:"kernel_time_100_ns,omitempty"`
     MemoryCommitBytes uint64 `protobuf:"varint,4,opt,name=memory_commit_bytes,json=memoryCommitBytes,proto3" json:"memory_commit_bytes,omitempty"`
     MemoryWorkingSetPrivateBytes uint64 `protobuf:"varint,5,opt,name=memory_working_set_private_bytes,json=memoryWorkingSetPrivateBytes,proto3" json:"memory_working_set_private_bytes,omitempty"`
@@ -128,18 +151,102 @@ type ProcessDetails struct {
     ProcessID uint32 `protobuf:"varint,7,opt,name=process_id,json=processId,proto3" json:"process_id,omitempty"`
     UserTime_100Ns uint64 `protobuf:"varint,8,opt,name=user_time_100_ns,json=userTime100Ns,proto3" json:"user_time_100_ns,omitempty"`
     ExecID string `protobuf:"bytes,9,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+    XXX_NoUnkeyedLiteral struct{} `json:"-"`
+    XXX_unrecognized     []byte   `json:"-"`
+    XXX_sizecache        int32    `json:"-"`
 }

-func (m *ProcessDetails) Reset()      { *m = ProcessDetails{} }
-func (*ProcessDetails) ProtoMessage() {}
-func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorRunhcs, []int{1} }
+func (m *ProcessDetails) Reset()      { *m = ProcessDetails{} }
+func (*ProcessDetails) ProtoMessage() {}
+func (*ProcessDetails) Descriptor() ([]byte, []int) {
+    return fileDescriptor_b643df6839c75082, []int{1}
+}
+func (m *ProcessDetails) XXX_Unmarshal(b []byte) error {
+    return m.Unmarshal(b)
+}
+func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+    if deterministic {
+        return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
+    } else {
+        b = b[:cap(b)]
+        n, err := m.MarshalTo(b)
+        if err != nil {
+            return nil, err
+        }
+        return b[:n], nil
+    }
+}
+func (m *ProcessDetails) XXX_Merge(src proto.Message) {
+    xxx_messageInfo_ProcessDetails.Merge(m, src)
+}
+func (m *ProcessDetails) XXX_Size() int {
+    return m.Size()
+}
+func (m *ProcessDetails) XXX_DiscardUnknown() {
+    xxx_messageInfo_ProcessDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo

 func init() {
-    proto.RegisterType((*Options)(nil), "containerd.runhcs.v1.Options")
-    proto.RegisterType((*ProcessDetails)(nil), "containerd.runhcs.v1.ProcessDetails")
     proto.RegisterEnum("containerd.runhcs.v1.Options_DebugType", Options_DebugType_name, Options_DebugType_value)
     proto.RegisterEnum("containerd.runhcs.v1.Options_SandboxIsolation", Options_SandboxIsolation_name, Options_SandboxIsolation_value)
+    proto.RegisterType((*Options)(nil), "containerd.runhcs.v1.Options")
+    proto.RegisterType((*ProcessDetails)(nil), "containerd.runhcs.v1.ProcessDetails")
 }

+func init() {
+    proto.RegisterFile("github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto", fileDescriptor_b643df6839c75082)
+}
+
+var fileDescriptor_b643df6839c75082 = []byte{
+    // 704 bytes of a gzipped FileDescriptorProto
+    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xda, 0x48,
+    0x18, 0xc6, 0xe1, 0xd3, 0x6f, 0x96, 0xc4, 0x99, 0xe5, 0x80, 0xb2, 0xbb, 0x80, 0xc8, 0x21, 0x89,
+    0x76, 0x63, 0x43, 0xf6, 0xd8, 0x53, 0x09, 0xa0, 0xba, 0x6a, 0x83, 0x65, 0xa2, 0xa6, 0x1f, 0x07,
+    0xcb, 0xd8, 0x83, 0xb1, 0x82, 0x3d, 0xd6, 0xcc, 0x90, 0x86, 0x5b, 0x7f, 0x42, 0x7f, 0x55, 0x95,
+    0x63, 0x8f, 0x95, 0x2a, 0xa5, 0x0d, 0xbf, 0xa4, 0x9a, 0xb1, 0x49, 0xd4, 0x28, 0xea, 0xa5, 0x27,
+    0xc6, 0xcf, 0xf3, 0xbc, 0xcf, 0xfb, 0x29, 0x60, 0x14, 0x84, 0x7c, 0xb6, 0x98, 0xe8, 0x1e, 0x89,
+    0x8c, 0x97, 0xa1, 0x47, 0x09, 0x23, 0x53, 0x6e, 0xcc, 0x3c, 0xc6, 0x66, 0x61, 0x64, 0x78, 0x91,
+    0x6f, 0x78, 0x24, 0xe6, 0x6e, 0x18, 0x63, 0xea, 0x1f, 0x09, 0xec, 0x88, 0x2e, 0xe2, 0x99, 0xc7,
+    0x8e, 0x2e, 0xbb, 0x06, 0x49, 0x78, 0x48, 0x62, 0x66, 0xa4, 0x88, 0x9e, 0x50, 0xc2, 0x09, 0xaa,
+    0xdd, 0xeb, 0xf5, 0x8c, 0xb8, 0xec, 0xee, 0xd6, 0x02, 0x12, 0x10, 0x29, 0x30, 0xc4, 0x2b, 0xd5,
+    0xee, 0x36, 0x03, 0x42, 0x82, 0x39, 0x36, 0xe4, 0xd7, 0x64, 0x31, 0x35, 0x78, 0x18, 0x61, 0xc6,
+    0xdd, 0x28, 0x49, 0x05, 0xed, 0x4f, 0x79, 0x28, 0x8f, 0xd2, 0x2c, 0xa8, 0x06, 0x45, 0x1f, 0x4f,
+    0x16, 0x41, 0x5d, 0x69, 0x29, 0x07, 0x15, 0x3b, 0xfd, 0x40, 0x43, 0x00, 0xf9, 0x70, 0xf8, 0x32,
+    0xc1, 0xf5, 0x8d, 0x96, 0x72, 0xb0, 0x75, 0xbc, 0xaf, 0x3f, 0x56, 0x83, 0x9e, 0x19, 0xe9, 0x7d,
+    0xa1, 0x3f, 0x5b, 0x26, 0xd8, 0x56, 0xfd, 0xf5, 0x13, 0xed, 0x41, 0x95, 0xe2, 0x20, 0x64, 0x9c,
+    0x2e, 0x1d, 0x4a, 0x08, 0xaf, 0xe7, 0x5b, 0xca, 0x81, 0x6a, 0xff, 0xb1, 0x06, 0x6d, 0x42, 0xb8,
+    0x10, 0x31, 0x37, 0xf6, 0x27, 0xe4, 0xca, 0x09, 0x23, 0x37, 0xc0, 0xf5, 0x42, 0x2a, 0xca, 0x40,
+    0x53, 0x60, 0xe8, 0x10, 0xb4, 0xb5, 0x28, 0x99, 0xbb, 0x7c, 0x4a, 0x68, 0x54, 0x2f, 0x4a, 0xdd,
+    0x76, 0x86, 0x5b, 0x19, 0x8c, 0xde, 0xc1, 0xce, 0x9d, 0x1f, 0x23, 0x73, 0x57, 0xd4, 0x57, 0x2f,
+    0xc9, 0x1e, 0xf4, 0x5f, 0xf7, 0x30, 0xce, 0x32, 0xae, 0xa3, 0xec, 0x75, 0xce, 0x3b, 0x04, 0x19,
+    0x50, 0x9b, 0x10, 0xc2, 0x9d, 0x69, 0x38, 0xc7, 0x4c, 0xf6, 0xe4, 0x24, 0x2e, 0x9f, 0xd5, 0xcb,
+    0xb2, 0x96, 0x1d, 0xc1, 0x0d, 0x05, 0x25, 0x3a, 0xb3, 0x5c, 0x3e, 0x6b, 0x1f, 0x82, 0x7a, 0x37,
+    0x1a, 0xa4, 0x42, 0xf1, 0xd4, 0x32, 0xad, 0x81, 0x96, 0x43, 0x15, 0x28, 0x0c, 0xcd, 0x17, 0x03,
+    0x4d, 0x41, 0x65, 0xc8, 0x0f, 0xce, 0xce, 0xb5, 0x8d, 0xb6, 0x01, 0xda, 0xc3, 0x0a, 0xd0, 0x26,
+    0x94, 0x2d, 0x7b, 0x74, 0x32, 0x18, 0x8f, 0xb5, 0x1c, 0xda, 0x02, 0x78, 0xf6, 0xc6, 0x1a, 0xd8,
+    0xaf, 0xcc, 0xf1, 0xc8, 0xd6, 0x94, 0xf6, 0xd7, 0x3c, 0x6c, 0x59, 0x94, 0x78, 0x98, 0xb1, 0x3e,
+    0xe6, 0x6e, 0x38, 0x67, 0xe8, 0x1f, 0x00, 0x39, 0x44, 0x27, 0x76, 0x23, 0x2c, 0x97, 0xaa, 0xda,
+    0xaa, 0x44, 0x4e, 0xdd, 0x08, 0xa3, 0x13, 0x00, 0x8f, 0x62, 0x97, 0x63, 0xdf, 0x71, 0xb9, 0x5c,
+    0xec, 0xe6, 0xf1, 0xae, 0x9e, 0x1e, 0x8c, 0xbe, 0x3e, 0x18, 0xfd, 0x6c, 0x7d, 0x30, 0xbd, 0xca,
+    0xf5, 0x4d, 0x33, 0xf7, 0xf1, 0x5b, 0x53, 0xb1, 0xd5, 0x2c, 0xee, 0x29, 0x47, 0xff, 0x02, 0xba,
+    0xc0, 0x34, 0xc6, 0x73, 0x47, 0x5c, 0x96, 0xd3, 0xed, 0x74, 0x9c, 0x98, 0xc9, 0xd5, 0x16, 0xec,
+    0xed, 0x94, 0x11, 0x0e, 0xdd, 0x4e, 0xe7, 0x94, 0x21, 0x1d, 0xfe, 0x8c, 0x70, 0x44, 0xe8, 0xd2,
+    0xf1, 0x48, 0x14, 0x85, 0xdc, 0x99, 0x2c, 0x39, 0x66, 0x72, 0xc7, 0x05, 0x7b, 0x27, 0xa5, 0x4e,
+    0x24, 0xd3, 0x13, 0x04, 0x1a, 0x42, 0x2b, 0xd3, 0xbf, 0x27, 0xf4, 0x22, 0x8c, 0x03, 0x87, 0x61,
+    0xee, 0x24, 0x34, 0xbc, 0x74, 0x39, 0xce, 0x82, 0x8b, 0x32, 0xf8, 0xef, 0x54, 0x77, 0x9e, 0xca,
+    0xc6, 0x98, 0x5b, 0xa9, 0x28, 0xf5, 0xe9, 0x43, 0xf3, 0x11, 0x1f, 0x36, 0x73, 0x29, 0xf6, 0x33,
+    0x9b, 0x92, 0xb4, 0xf9, 0xeb, 0xa1, 0xcd, 0x58, 0x6a, 0x52, 0x97, 0xff, 0x00, 0x92, 0x74, 0xc0,
+    0x4e, 0xe8, 0xcb, 0x25, 0x57, 0x7b, 0xd5, 0xd5, 0x4d, 0x53, 0xcd, 0xc6, 0x6e, 0xf6, 0x6d, 0x35,
+    0x13, 0x98, 0x3e, 0xda, 0x07, 0x6d, 0xc1, 0x30, 0xfd, 0x69, 0x2c, 0x15, 0x99, 0xa4, 0x2a, 0xf0,
+    0xfb, 0xa1, 0xec, 0x41, 0x19, 0x5f, 0x61, 0x4f, 0x78, 0xaa, 0x62, 0x45, 0x3d, 0x58, 0xdd, 0x34,
+    0x4b, 0x83, 0x2b, 0xec, 0x99, 0x7d, 0xbb, 0x24, 0x28, 0xd3, 0xef, 0xf9, 0xd7, 0xb7, 0x8d, 0xdc,
+    0x97, 0xdb, 0x46, 0xee, 0xc3, 0xaa, 0xa1, 0x5c, 0xaf, 0x1a, 0xca, 0xe7, 0x55, 0x43, 0xf9, 0xbe,
+    0x6a, 0x28, 0x6f, 0x9f, 0xff, 0xfe, 0xdf, 0xcb, 0x93, 0xec, 0xf7, 0x75, 0x6e, 0x52, 0x92, 0x7b,
+    0xff, 0xff, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x9a, 0x54, 0x17, 0xb5, 0x04, 0x00, 0x00,
+}

 func (m *Options) Marshal() (dAtA []byte, err error) {
     size := m.Size()
     dAtA = make([]byte, size)
@@ -199,6 +306,9 @@ func (m *Options) MarshalTo(dAtA []byte) (int, error) {
         i = encodeVarintRunhcs(dAtA, i, uint64(len(m.BootFilesRootPath)))
         i += copy(dAtA[i:], m.BootFilesRootPath)
     }
+    if m.XXX_unrecognized != nil {
+        i += copy(dAtA[i:], m.XXX_unrecognized)
+    }
     return i, nil
 }

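Note: carrying XXX_unrecognized through MarshalTo here (and through Size below) is what preserves unknown fields across an unmarshal/marshal round trip, so a message produced against a newer schema survives being re-encoded by this older one.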
@@ -225,8 +335,8 @@ func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
     }
     dAtA[i] = 0x12
     i++
-    i = encodeVarintRunhcs(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt)))
-    n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+    i = encodeVarintRunhcs(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+    n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
     if err != nil {
         return 0, err
     }
@@ -267,6 +377,9 @@ func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
         i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ExecID)))
         i += copy(dAtA[i:], m.ExecID)
     }
+    if m.XXX_unrecognized != nil {
+        i += copy(dAtA[i:], m.XXX_unrecognized)
+    }
     return i, nil
 }

@@ -280,6 +393,9 @@ func encodeVarintRunhcs(dAtA []byte, offset int, v uint64) int {
     return offset + 1
 }
 func (m *Options) Size() (n int) {
+    if m == nil {
+        return 0
+    }
     var l int
     _ = l
     if m.Debug {
@@ -307,17 +423,23 @@ func (m *Options) Size() (n int) {
     if l > 0 {
         n += 1 + l + sovRunhcs(uint64(l))
     }
+    if m.XXX_unrecognized != nil {
+        n += len(m.XXX_unrecognized)
+    }
     return n
 }

 func (m *ProcessDetails) Size() (n int) {
+    if m == nil {
+        return 0
+    }
     var l int
     _ = l
     l = len(m.ImageName)
     if l > 0 {
         n += 1 + l + sovRunhcs(uint64(l))
     }
-    l = types.SizeOfStdTime(m.CreatedAt)
+    l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
     n += 1 + l + sovRunhcs(uint64(l))
     if m.KernelTime_100Ns != 0 {
         n += 1 + sovRunhcs(uint64(m.KernelTime_100Ns))
@@ -341,6 +463,9 @@ func (m *ProcessDetails) Size() (n int) {
     if l > 0 {
         n += 1 + l + sovRunhcs(uint64(l))
     }
+    if m.XXX_unrecognized != nil {
+        n += len(m.XXX_unrecognized)
+    }
     return n
 }

@ -369,6 +494,7 @@ func (this *Options) String() string {
|
|||
`SandboxPlatform:` + fmt.Sprintf("%v", this.SandboxPlatform) + `,`,
|
||||
`SandboxIsolation:` + fmt.Sprintf("%v", this.SandboxIsolation) + `,`,
|
||||
`BootFilesRootPath:` + fmt.Sprintf("%v", this.BootFilesRootPath) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -379,7 +505,7 @@ func (this *ProcessDetails) String() string {
|
|||
}
|
||||
s := strings.Join([]string{`&ProcessDetails{`,
|
||||
`ImageName:` + fmt.Sprintf("%v", this.ImageName) + `,`,
|
||||
`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`KernelTime_100Ns:` + fmt.Sprintf("%v", this.KernelTime_100Ns) + `,`,
|
||||
`MemoryCommitBytes:` + fmt.Sprintf("%v", this.MemoryCommitBytes) + `,`,
|
||||
`MemoryWorkingSetPrivateBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetPrivateBytes) + `,`,
|
||||
|
@ -387,6 +513,7 @@ func (this *ProcessDetails) String() string {
|
|||
`ProcessID:` + fmt.Sprintf("%v", this.ProcessID) + `,`,
|
||||
`UserTime_100Ns:` + fmt.Sprintf("%v", this.UserTime_100Ns) + `,`,
|
||||
`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -414,7 +541,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -442,7 +569,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -462,7 +589,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.DebugType |= (Options_DebugType(b) & 0x7F) << shift
|
||||
m.DebugType |= Options_DebugType(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -481,7 +608,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -491,6 +618,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
@ -510,7 +640,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -520,6 +650,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
@ -539,7 +672,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -549,6 +682,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
@ -568,7 +704,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.SandboxIsolation |= (Options_SandboxIsolation(b) & 0x7F) << shift
|
||||
m.SandboxIsolation |= Options_SandboxIsolation(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -587,7 +723,7 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -597,6 +733,9 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
@ -611,9 +750,13 @@ func (m *Options) Unmarshal(dAtA []byte) error {
|
|||
if skippy < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
@ -638,7 +781,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -666,7 +809,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -676,6 +819,9 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
@ -695,7 +841,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -704,10 +850,13 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
|
@ -725,7 +874,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.KernelTime_100Ns |= (uint64(b) & 0x7F) << shift
|
||||
m.KernelTime_100Ns |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -744,7 +893,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.MemoryCommitBytes |= (uint64(b) & 0x7F) << shift
|
||||
m.MemoryCommitBytes |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -763,7 +912,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.MemoryWorkingSetPrivateBytes |= (uint64(b) & 0x7F) << shift
|
||||
m.MemoryWorkingSetPrivateBytes |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -782,7 +931,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.MemoryWorkingSetSharedBytes |= (uint64(b) & 0x7F) << shift
|
||||
m.MemoryWorkingSetSharedBytes |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -801,7 +950,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ProcessID |= (uint32(b) & 0x7F) << shift
|
||||
m.ProcessID |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -820,7 +969,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.UserTime_100Ns |= (uint64(b) & 0x7F) << shift
|
||||
m.UserTime_100Ns |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -839,7 +988,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
|
@ -849,6 +998,9 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
@ -863,9 +1015,13 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
|
|||
if skippy < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthRunhcs
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
@ -929,10 +1085,13 @@ func skipRunhcs(dAtA []byte) (n int, err error) {
|
|||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthRunhcs
|
||||
}
|
||||
iNdEx += length
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthRunhcs
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
|
@ -961,6 +1120,9 @@ func skipRunhcs(dAtA []byte) (n int, err error) {
|
|||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthRunhcs
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
|
@ -979,55 +1141,3 @@ var (
|
|||
ErrInvalidLengthRunhcs = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowRunhcs = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto", fileDescriptorRunhcs)
|
||||
}
|
||||
|
||||
var fileDescriptorRunhcs = []byte{
|
||||
// 704 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xda, 0x48,
|
||||
0x18, 0xc6, 0xe1, 0xd3, 0x6f, 0x96, 0xc4, 0x99, 0xe5, 0x80, 0xb2, 0xbb, 0x80, 0xc8, 0x21, 0x89,
|
||||
0x76, 0x63, 0x43, 0xf6, 0xd8, 0x53, 0x09, 0xa0, 0xba, 0x6a, 0x83, 0x65, 0xa2, 0xa6, 0x1f, 0x07,
|
||||
0xcb, 0xd8, 0x83, 0xb1, 0x82, 0x3d, 0xd6, 0xcc, 0x90, 0x86, 0x5b, 0x7f, 0x42, 0x7f, 0x55, 0x95,
|
||||
0x63, 0x8f, 0x95, 0x2a, 0xa5, 0x0d, 0xbf, 0xa4, 0x9a, 0xb1, 0x49, 0xd4, 0x28, 0xea, 0xa5, 0x27,
|
||||
0xc6, 0xcf, 0xf3, 0xbc, 0xcf, 0xfb, 0x29, 0x60, 0x14, 0x84, 0x7c, 0xb6, 0x98, 0xe8, 0x1e, 0x89,
|
||||
0x8c, 0x97, 0xa1, 0x47, 0x09, 0x23, 0x53, 0x6e, 0xcc, 0x3c, 0xc6, 0x66, 0x61, 0x64, 0x78, 0x91,
|
||||
0x6f, 0x78, 0x24, 0xe6, 0x6e, 0x18, 0x63, 0xea, 0x1f, 0x09, 0xec, 0x88, 0x2e, 0xe2, 0x99, 0xc7,
|
||||
0x8e, 0x2e, 0xbb, 0x06, 0x49, 0x78, 0x48, 0x62, 0x66, 0xa4, 0x88, 0x9e, 0x50, 0xc2, 0x09, 0xaa,
|
||||
0xdd, 0xeb, 0xf5, 0x8c, 0xb8, 0xec, 0xee, 0xd6, 0x02, 0x12, 0x10, 0x29, 0x30, 0xc4, 0x2b, 0xd5,
|
||||
0xee, 0x36, 0x03, 0x42, 0x82, 0x39, 0x36, 0xe4, 0xd7, 0x64, 0x31, 0x35, 0x78, 0x18, 0x61, 0xc6,
|
||||
0xdd, 0x28, 0x49, 0x05, 0xed, 0x4f, 0x79, 0x28, 0x8f, 0xd2, 0x2c, 0xa8, 0x06, 0x45, 0x1f, 0x4f,
|
||||
0x16, 0x41, 0x5d, 0x69, 0x29, 0x07, 0x15, 0x3b, 0xfd, 0x40, 0x43, 0x00, 0xf9, 0x70, 0xf8, 0x32,
|
||||
0xc1, 0xf5, 0x8d, 0x96, 0x72, 0xb0, 0x75, 0xbc, 0xaf, 0x3f, 0x56, 0x83, 0x9e, 0x19, 0xe9, 0x7d,
|
||||
0xa1, 0x3f, 0x5b, 0x26, 0xd8, 0x56, 0xfd, 0xf5, 0x13, 0xed, 0x41, 0x95, 0xe2, 0x20, 0x64, 0x9c,
|
||||
0x2e, 0x1d, 0x4a, 0x08, 0xaf, 0xe7, 0x5b, 0xca, 0x81, 0x6a, 0xff, 0xb1, 0x06, 0x6d, 0x42, 0xb8,
|
||||
0x10, 0x31, 0x37, 0xf6, 0x27, 0xe4, 0xca, 0x09, 0x23, 0x37, 0xc0, 0xf5, 0x42, 0x2a, 0xca, 0x40,
|
||||
0x53, 0x60, 0xe8, 0x10, 0xb4, 0xb5, 0x28, 0x99, 0xbb, 0x7c, 0x4a, 0x68, 0x54, 0x2f, 0x4a, 0xdd,
|
||||
0x76, 0x86, 0x5b, 0x19, 0x8c, 0xde, 0xc1, 0xce, 0x9d, 0x1f, 0x23, 0x73, 0x57, 0xd4, 0x57, 0x2f,
|
||||
0xc9, 0x1e, 0xf4, 0x5f, 0xf7, 0x30, 0xce, 0x32, 0xae, 0xa3, 0xec, 0x75, 0xce, 0x3b, 0x04, 0x19,
|
||||
0x50, 0x9b, 0x10, 0xc2, 0x9d, 0x69, 0x38, 0xc7, 0x4c, 0xf6, 0xe4, 0x24, 0x2e, 0x9f, 0xd5, 0xcb,
|
||||
0xb2, 0x96, 0x1d, 0xc1, 0x0d, 0x05, 0x25, 0x3a, 0xb3, 0x5c, 0x3e, 0x6b, 0x1f, 0x82, 0x7a, 0x37,
|
||||
0x1a, 0xa4, 0x42, 0xf1, 0xd4, 0x32, 0xad, 0x81, 0x96, 0x43, 0x15, 0x28, 0x0c, 0xcd, 0x17, 0x03,
|
||||
0x4d, 0x41, 0x65, 0xc8, 0x0f, 0xce, 0xce, 0xb5, 0x8d, 0xb6, 0x01, 0xda, 0xc3, 0x0a, 0xd0, 0x26,
|
||||
0x94, 0x2d, 0x7b, 0x74, 0x32, 0x18, 0x8f, 0xb5, 0x1c, 0xda, 0x02, 0x78, 0xf6, 0xc6, 0x1a, 0xd8,
|
||||
0xaf, 0xcc, 0xf1, 0xc8, 0xd6, 0x94, 0xf6, 0xd7, 0x3c, 0x6c, 0x59, 0x94, 0x78, 0x98, 0xb1, 0x3e,
|
||||
0xe6, 0x6e, 0x38, 0x67, 0xe8, 0x1f, 0x00, 0x39, 0x44, 0x27, 0x76, 0x23, 0x2c, 0x97, 0xaa, 0xda,
|
||||
0xaa, 0x44, 0x4e, 0xdd, 0x08, 0xa3, 0x13, 0x00, 0x8f, 0x62, 0x97, 0x63, 0xdf, 0x71, 0xb9, 0x5c,
|
||||
0xec, 0xe6, 0xf1, 0xae, 0x9e, 0x1e, 0x8c, 0xbe, 0x3e, 0x18, 0xfd, 0x6c, 0x7d, 0x30, 0xbd, 0xca,
|
||||
0xf5, 0x4d, 0x33, 0xf7, 0xf1, 0x5b, 0x53, 0xb1, 0xd5, 0x2c, 0xee, 0x29, 0x47, 0xff, 0x02, 0xba,
|
||||
0xc0, 0x34, 0xc6, 0x73, 0x47, 0x5c, 0x96, 0xd3, 0xed, 0x74, 0x9c, 0x98, 0xc9, 0xd5, 0x16, 0xec,
|
||||
0xed, 0x94, 0x11, 0x0e, 0xdd, 0x4e, 0xe7, 0x94, 0x21, 0x1d, 0xfe, 0x8c, 0x70, 0x44, 0xe8, 0xd2,
|
||||
0xf1, 0x48, 0x14, 0x85, 0xdc, 0x99, 0x2c, 0x39, 0x66, 0x72, 0xc7, 0x05, 0x7b, 0x27, 0xa5, 0x4e,
|
||||
0x24, 0xd3, 0x13, 0x04, 0x1a, 0x42, 0x2b, 0xd3, 0xbf, 0x27, 0xf4, 0x22, 0x8c, 0x03, 0x87, 0x61,
|
||||
0xee, 0x24, 0x34, 0xbc, 0x74, 0x39, 0xce, 0x82, 0x8b, 0x32, 0xf8, 0xef, 0x54, 0x77, 0x9e, 0xca,
|
||||
0xc6, 0x98, 0x5b, 0xa9, 0x28, 0xf5, 0xe9, 0x43, 0xf3, 0x11, 0x1f, 0x36, 0x73, 0x29, 0xf6, 0x33,
|
||||
0x9b, 0x92, 0xb4, 0xf9, 0xeb, 0xa1, 0xcd, 0x58, 0x6a, 0x52, 0x97, 0xff, 0x00, 0x92, 0x74, 0xc0,
|
||||
0x4e, 0xe8, 0xcb, 0x25, 0x57, 0x7b, 0xd5, 0xd5, 0x4d, 0x53, 0xcd, 0xc6, 0x6e, 0xf6, 0x6d, 0x35,
|
||||
0x13, 0x98, 0x3e, 0xda, 0x07, 0x6d, 0xc1, 0x30, 0xfd, 0x69, 0x2c, 0x15, 0x99, 0xa4, 0x2a, 0xf0,
|
||||
0xfb, 0xa1, 0xec, 0x41, 0x19, 0x5f, 0x61, 0x4f, 0x78, 0xaa, 0x62, 0x45, 0x3d, 0x58, 0xdd, 0x34,
|
||||
0x4b, 0x83, 0x2b, 0xec, 0x99, 0x7d, 0xbb, 0x24, 0x28, 0xd3, 0xef, 0xf9, 0xd7, 0xb7, 0x8d, 0xdc,
|
||||
0x97, 0xdb, 0x46, 0xee, 0xc3, 0xaa, 0xa1, 0x5c, 0xaf, 0x1a, 0xca, 0xe7, 0x55, 0x43, 0xf9, 0xbe,
|
||||
0x6a, 0x28, 0x6f, 0x9f, 0xff, 0xfe, 0xdf, 0xcb, 0x93, 0xec, 0xf7, 0x75, 0x6e, 0x52, 0x92, 0x7b,
|
||||
0xff, 0xff, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x9a, 0x54, 0x17, 0xb5, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
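The regenerated Unmarshal code above makes two systematic changes: the varint accumulation drops the redundant parentheses (`wire |= uint64(b&0x7F) << shift`), and every computed index gains a `postIndex < 0` / `iNdEx < 0` guard so that an attacker-supplied length that overflows `int` cannot produce a negative slice index. Below is a minimal standalone sketch of the same base-128 varint decode loop; the names `decodeUvarint` and `errOverflow` are illustrative, not part of the vendored package.

package main

import (
    "errors"
    "fmt"
)

var errOverflow = errors.New("varint overflows a 64-bit integer")

// decodeUvarint reads one base-128 varint from dAtA, returning the value and
// the number of bytes consumed. The accumulation line has the same form as
// the regenerated gogo/protobuf code above.
func decodeUvarint(dAtA []byte) (uint64, int, error) {
    var v uint64
    for shift, i := uint(0), 0; ; shift += 7 {
        if shift >= 64 {
            return 0, 0, errOverflow
        }
        if i >= len(dAtA) {
            return 0, 0, errors.New("unexpected EOF")
        }
        b := dAtA[i]
        i++
        v |= uint64(b&0x7F) << shift // low 7 bits carry payload
        if b < 0x80 {                // high bit clear: last byte
            return v, i, nil
        }
    }
}

func main() {
    v, n, err := decodeUvarint([]byte{0xAC, 0x02}) // 300 encoded as a varint
    fmt.Println(v, n, err)                         // 300 2 <nil>
}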
75 vendor/github.com/Microsoft/hcsshim/container.go (generated, vendored)
@@ -1,8 +1,10 @@
package hcsshim

import (
"context"
"fmt"
"os"
"sync"
"time"

"github.com/Microsoft/hcsshim/internal/hcs"

@@ -52,7 +54,10 @@ const (
type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse

type container struct {
system *hcs.System
system *hcs.System
waitOnce sync.Once
waitErr error
waitCh chan struct{}
}

// createComputeSystemAdditionalJSON is read from the environment at initialisation

@@ -71,61 +76,87 @@ func CreateContainer(id string, c *ContainerConfig) (Container, error) {
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
}

system, err := hcs.CreateComputeSystem(id, fullConfig)
system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig)
if err != nil {
return nil, err
}
return &container{system}, err
return &container{system: system}, err
}

// OpenContainer opens an existing container by ID.
func OpenContainer(id string) (Container, error) {
system, err := hcs.OpenComputeSystem(id)
system, err := hcs.OpenComputeSystem(context.Background(), id)
if err != nil {
return nil, err
}
return &container{system}, err
return &container{system: system}, err
}

// GetContainers gets a list of the containers on the system that match the query
func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
return hcs.GetComputeSystems(q)
return hcs.GetComputeSystems(context.Background(), q)
}

// Start synchronously starts the container.
func (container *container) Start() error {
return convertSystemError(container.system.Start(), container)
return convertSystemError(container.system.Start(context.Background()), container)
}

// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds.
func (container *container) Shutdown() error {
return convertSystemError(container.system.Shutdown(), container)
err := container.system.Shutdown(context.Background())
if err != nil {
return convertSystemError(err, container)
}
return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"}
}

// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
func (container *container) Terminate() error {
return convertSystemError(container.system.Terminate(), container)
err := container.system.Terminate(context.Background())
if err != nil {
return convertSystemError(err, container)
}
return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"}
}

// Waits synchronously waits for the container to shutdown or terminate.
func (container *container) Wait() error {
return convertSystemError(container.system.Wait(), container)
err := container.system.Wait()
if err == nil {
err = container.system.ExitError()
}
return convertSystemError(err, container)
}

// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
// returns false if timeout occurs.
func (container *container) WaitTimeout(t time.Duration) error {
return convertSystemError(container.system.WaitTimeout(t), container)
func (container *container) WaitTimeout(timeout time.Duration) error {
container.waitOnce.Do(func() {
container.waitCh = make(chan struct{})
go func() {
container.waitErr = container.Wait()
close(container.waitCh)
}()
})
t := time.NewTimer(timeout)
defer t.Stop()
select {
case <-t.C:
return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"}
case <-container.waitCh:
return container.waitErr
}
}

// Pause pauses the execution of a container.
func (container *container) Pause() error {
return convertSystemError(container.system.Pause(), container)
return convertSystemError(container.system.Pause(context.Background()), container)
}

// Resume resumes the execution of a container.
func (container *container) Resume() error {
return convertSystemError(container.system.Resume(), container)
return convertSystemError(container.system.Resume(context.Background()), container)
}

// HasPendingUpdates returns true if the container has updates pending to install

@@ -135,7 +166,7 @@ func (container *container) HasPendingUpdates() (bool, error) {

// Statistics returns statistics for the container. This is a legacy v1 call
func (container *container) Statistics() (Statistics, error) {
properties, err := container.system.Properties(schema1.PropertyTypeStatistics)
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)
if err != nil {
return Statistics{}, convertSystemError(err, container)
}

@@ -145,7 +176,7 @@ func (container *container) Statistics() (Statistics, error) {

// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call
func (container *container) ProcessList() ([]ProcessListItem, error) {
properties, err := container.system.Properties(schema1.PropertyTypeProcessList)
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList)
if err != nil {
return nil, convertSystemError(err, container)
}

@@ -155,7 +186,7 @@ func (container *container) ProcessList() ([]ProcessListItem, error) {

// This is a legacy v1 call
func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
properties, err := container.system.Properties(schema1.PropertyTypeMappedVirtualDisk)
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk)
if err != nil {
return nil, convertSystemError(err, container)
}

@@ -165,20 +196,20 @@ func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskContr

// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
p, err := container.system.CreateProcess(c)
p, err := container.system.CreateProcess(context.Background(), c)
if err != nil {
return nil, convertSystemError(err, container)
}
return &process{p}, nil
return &process{p: p.(*hcs.Process)}, nil
}

// OpenProcess gets an interface to an existing process within the container.
func (container *container) OpenProcess(pid int) (Process, error) {
p, err := container.system.OpenProcess(pid)
p, err := container.system.OpenProcess(context.Background(), pid)
if err != nil {
return nil, convertSystemError(err, container)
}
return &process{p}, nil
return &process{p: p}, nil
}

// Close cleans up any state associated with the container but does not terminate or wait for it.

@@ -188,5 +219,5 @@ func (container *container) Close() error {

// Modify the System
func (container *container) Modify(config *ResourceModificationRequestResponse) error {
return convertSystemError(container.system.Modify(config), container)
return convertSystemError(container.system.Modify(context.Background(), config), container)
}
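The new WaitTimeout above replaces a dedicated HCS wait call with a lazy background wait: sync.Once starts the blocking Wait exactly once, and every caller races the shared completion channel against its own timer. A self-contained sketch of that pattern, with hypothetical names (waiter, errTimeout):

package main

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

var errTimeout = errors.New("timeout")

type waiter struct {
    once sync.Once
    err  error
    ch   chan struct{}
}

// waitTimeout starts the blocking wait exactly once, then races its
// completion against a timer, mirroring container.WaitTimeout above.
// Repeated calls share the same background goroutine and result.
func (w *waiter) waitTimeout(block func() error, timeout time.Duration) error {
    w.once.Do(func() {
        w.ch = make(chan struct{})
        go func() {
            w.err = block()
            close(w.ch)
        }()
    })
    t := time.NewTimer(timeout)
    defer t.Stop()
    select {
    case <-t.C:
        return errTimeout
    case <-w.ch:
        return w.err
    }
}

func main() {
    w := &waiter{}
    slow := func() error { time.Sleep(50 * time.Millisecond); return nil }
    fmt.Println(w.waitTimeout(slow, time.Second)) // <nil>
}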
37 vendor/github.com/Microsoft/hcsshim/go.mod (generated, vendored, new file)
@@ -0,0 +1,37 @@
module github.com/Microsoft/hcsshim

go 1.13

require (
    github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
    github.com/blang/semver v3.1.0+incompatible // indirect
    github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
    github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1
    github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69
    github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
    github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
    github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3
    github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de
    github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd
    github.com/gogo/protobuf v1.2.1
    github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect
    github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 // indirect
    github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect
    github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect
    github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
    github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39
    github.com/pkg/errors v0.8.1
    github.com/prometheus/procfs v0.0.5 // indirect
    github.com/sirupsen/logrus v1.4.1
    github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 // indirect
    github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5
    github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
    github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect
    go.opencensus.io v0.22.0
    golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
    golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
    google.golang.org/grpc v1.20.1
    gotest.tools v2.2.0+incompatible // indirect
    k8s.io/kubernetes v1.13.0
)
10 vendor/github.com/Microsoft/hcsshim/hnsendpoint.go (generated, vendored)
@@ -39,11 +39,21 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) {

// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container
func HotAttachEndpoint(containerID string, endpointID string) error {
endpoint, err := GetHNSEndpointByID(endpointID)
isAttached, err := endpoint.IsAttached(containerID)
if isAttached {
return err
}
return modifyNetworkEndpoint(containerID, endpointID, Add)
}

// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container
func HotDetachEndpoint(containerID string, endpointID string) error {
endpoint, err := GetHNSEndpointByID(endpointID)
isAttached, err := endpoint.IsAttached(containerID)
if !isAttached {
return err
}
return modifyNetworkEndpoint(containerID, endpointID, Remove)
}
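The guards added above make HotAttachEndpoint and HotDetachEndpoint early-return when the endpoint is already in the requested state. A hypothetical caller sketch; the IDs are placeholders (real values come from the container runtime and HNS), and hcsshim only builds on Windows:

package main

import (
    "fmt"

    "github.com/Microsoft/hcsshim"
)

// ensureAttached relies on the idempotency the new guard introduces: a second
// attach for an already-attached endpoint returns without another HCS call.
func ensureAttached(containerID, endpointID string) error {
    return hcsshim.HotAttachEndpoint(containerID, endpointID)
}

func main() {
    // Placeholder IDs for illustration only.
    if err := ensureAttached("container-id", "endpoint-id"); err != nil {
        fmt.Println("attach failed:", err)
    }
}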
83 vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go (generated, vendored, new file)
@@ -0,0 +1,83 @@
package cow

import (
    "context"
    "io"

    "github.com/Microsoft/hcsshim/internal/schema1"
    hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

// Process is the interface for an OS process running in a container or utility VM.
type Process interface {
    // Close releases resources associated with the process and closes the
    // writer and readers returned by Stdio. Depending on the implementation,
    // this may also terminate the process.
    Close() error
    // CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever
    // is appropriate to indicate that no more data is available.
    CloseStdin(ctx context.Context) error
    // Pid returns the process ID.
    Pid() int
    // Stdio returns the stdio streams for a process. These may be nil if a stream
    // was not requested during CreateProcess.
    Stdio() (_ io.Writer, _ io.Reader, _ io.Reader)
    // ResizeConsole resizes the virtual terminal associated with the process.
    ResizeConsole(ctx context.Context, width, height uint16) error
    // Kill sends a SIGKILL or equivalent signal to the process and returns whether
    // the signal was delivered. It does not wait for the process to terminate.
    Kill(ctx context.Context) (bool, error)
    // Signal sends a signal to the process and returns whether the signal was
    // delivered. The input is OS specific (either
    // guestrequest.SignalProcessOptionsWCOW or
    // guestrequest.SignalProcessOptionsLCOW). It does not wait for the process
    // to terminate.
    Signal(ctx context.Context, options interface{}) (bool, error)
    // Wait waits for the process to complete, or for a connection to the process to be
    // terminated by some error condition (including calling Close).
    Wait() error
    // ExitCode returns the exit code of the process. Returns an error if the process is
    // not running.
    ExitCode() (int, error)
}

// ProcessHost is the interface for creating processes.
type ProcessHost interface {
    // CreateProcess creates a process. The configuration is host specific
    // (either hcsschema.ProcessParameters or lcow.ProcessParameters).
    CreateProcess(ctx context.Context, config interface{}) (Process, error)
    // OS returns the host's operating system, "linux" or "windows".
    OS() string
    // IsOCI specifies whether this is an OCI-compliant process host. If true,
    // then the configuration passed to CreateProcess should have an OCI process
    // spec (or nil if this is the initial process in an OCI container).
    // Otherwise, it should have the HCS-specific process parameters.
    IsOCI() bool
}

// Container is the interface for container objects, either running on the host or
// in a utility VM.
type Container interface {
    ProcessHost
    // Close releases the resources associated with the container. Depending on
    // the implementation, this may also terminate the container.
    Close() error
    // ID returns the container ID.
    ID() string
    // Properties returns the requested container properties targeting a V1 schema container.
    Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error)
    // PropertiesV2 returns the requested container properties targeting a V2 schema container.
    PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error)
    // Start starts a container.
    Start(ctx context.Context) error
    // Shutdown sends a shutdown request to the container (but does not wait for
    // the shutdown to complete).
    Shutdown(ctx context.Context) error
    // Terminate sends a terminate request to the container (but does not wait
    // for the terminate to complete).
    Terminate(ctx context.Context) error
    // Wait waits for the container to terminate, or for the connection to the
    // container to be terminated by some error condition (including calling
    // Close).
    Wait() error
}
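The new cow package defines the common interfaces that both HCS containers and utility-VM-hosted containers implement. A sketch of how code inside hcsshim might drive them; runOne is hypothetical, cow is an internal package (importable only within the module), and the concrete config payload depends on the host (hcsschema.ProcessParameters for WCOW, LCOW parameters otherwise):

package hcsutil // hypothetical package inside hcsshim, for illustration

import (
    "context"

    "github.com/Microsoft/hcsshim/internal/cow"
)

// runOne starts one process in an existing container and waits for it.
func runOne(ctx context.Context, c cow.Container, config interface{}) (int, error) {
    p, err := c.CreateProcess(ctx, config)
    if err != nil {
        return -1, err
    }
    defer p.Close()
    if err := p.Wait(); err != nil {
        return -1, err
    }
    // ExitCode errors while the process is still running; after Wait it is not.
    return p.ExitCode()
}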
69 vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go (generated, vendored, deleted)
@@ -1,69 +0,0 @@
package guid

import (
    "crypto/rand"
    "encoding/json"
    "fmt"
    "io"
    "strconv"
    "strings"
)

var _ = (json.Marshaler)(&GUID{})
var _ = (json.Unmarshaler)(&GUID{})

type GUID [16]byte

func New() GUID {
    g := GUID{}
    _, err := io.ReadFull(rand.Reader, g[:])
    if err != nil {
        panic(err)
    }
    return g
}

func (g GUID) String() string {
    return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}

func FromString(s string) GUID {
    if len(s) != 36 {
        panic(fmt.Sprintf("invalid GUID length: %d", len(s)))
    }
    if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
        panic("invalid GUID format")
    }
    indexOrder := [16]int{
        0, 2, 4, 6,
        9, 11,
        14, 16,
        19, 21,
        24, 26, 28, 30, 32, 34,
    }
    byteOrder := [16]int{
        3, 2, 1, 0,
        5, 4,
        7, 6,
        8, 9,
        10, 11, 12, 13, 14, 15,
    }
    var g GUID
    for i, x := range indexOrder {
        b, err := strconv.ParseInt(s[x:x+2], 16, 16)
        if err != nil {
            panic(err)
        }
        g[byteOrder[i]] = byte(b)
    }
    return g
}

func (g GUID) MarshalJSON() ([]byte, error) {
    return json.Marshal(g.String())
}

func (g *GUID) UnmarshalJSON(data []byte) error {
    *g = FromString(strings.Trim(string(data), "\""))
    return nil
}
42 vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go (generated, vendored)
@@ -7,6 +7,7 @@ import (

"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/vmcompute"
"github.com/sirupsen/logrus"
)

@@ -88,7 +89,7 @@ type notificationChannel chan error

type notifcationWatcherContext struct {
channels notificationChannels
handle hcsCallback
handle vmcompute.HcsCallback

systemID string
processID int

@@ -98,21 +99,27 @@ type notificationChannels map[hcsNotification]notificationChannel

func newSystemChannels() notificationChannels {
channels := make(notificationChannels)

channels[hcsNotificationSystemExited] = make(notificationChannel, 1)
channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)

for _, notif := range []hcsNotification{
hcsNotificationServiceDisconnect,
hcsNotificationSystemExited,
hcsNotificationSystemCreateCompleted,
hcsNotificationSystemStartCompleted,
hcsNotificationSystemPauseCompleted,
hcsNotificationSystemResumeCompleted,
} {
channels[notif] = make(notificationChannel, 1)
}
return channels
}

func newProcessChannels() notificationChannels {
channels := make(notificationChannels)

channels[hcsNotificationProcessExited] = make(notificationChannel, 1)

for _, notif := range []hcsNotification{
hcsNotificationServiceDisconnect,
hcsNotificationProcessExited,
} {
channels[notif] = make(notificationChannel, 1)
}
return channels
}

@@ -143,18 +150,7 @@ func notificationWatcher(notificationType hcsNotification, callbackNumber uintpt
if context.processID != 0 {
log.Data[logfields.ProcessID] = context.processID
}
log.Debug("")

// The HCS notification system can grow overtime. We explicitly opt-in to
// the notifications we would like to handle, all others we simply return.
// This means that as it grows we don't have issues associated with new
// notification types the code didn't know about.
switch notificationType {
case hcsNotificationSystemExited, hcsNotificationSystemCreateCompleted, hcsNotificationSystemStartCompleted, hcsNotificationSystemPauseCompleted, hcsNotificationSystemResumeCompleted:
case hcsNotificationProcessExited:
default:
return 0
}
log.Debug("HCS notification")

if channel, ok := context.channels[notificationType]; ok {
channel <- result
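The refactored channel setup and the removed opt-in switch above share one idea: each notification the code cares about gets a one-slot buffered channel so the HCS callback can deliver a result without blocking, and any notification type the code has not registered is simply dropped. A minimal sketch of that pattern with illustrative names:

package main

import "fmt"

type notification int

const (
    systemExited notification = iota
    processExited
    unknownFutureNotification
)

// newChannels registers a one-slot buffered channel per notification of
// interest, so a sender never blocks waiting for a receiver.
func newChannels(interested ...notification) map[notification]chan error {
    channels := make(map[notification]chan error)
    for _, n := range interested {
        channels[n] = make(chan error, 1)
    }
    return channels
}

// deliver is what a callback would do: route known notifications, drop the
// rest. The opt-in model keeps new HCS notification types from breaking us.
func deliver(channels map[notification]chan error, n notification, result error) {
    if ch, ok := channels[n]; ok {
        ch <- result
    }
}

func main() {
    chs := newChannels(systemExited, processExited)
    deliver(chs, unknownFutureNotification, nil) // dropped silently
    deliver(chs, systemExited, nil)
    fmt.Println(<-chs[systemExited]) // <nil>
}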
68 vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go (generated, vendored)
@@ -1,14 +1,14 @@
package hcs

import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"syscall"

"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/log"
)

var (

@@ -117,17 +117,11 @@ func (ev *ErrorEvent) String() string {
return evs
}

func processHcsResult(resultp *uint16) []ErrorEvent {
if resultp != nil {
resultj := interop.ConvertAndFreeCoTaskMemString(resultp)
logrus.WithField(logfields.JSON, resultj).
Debug("HCS Result")
func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent {
if resultJSON != "" {
result := &hcsResult{}
if err := json.Unmarshal([]byte(resultj), result); err != nil {
logrus.WithFields(logrus.Fields{
logfields.JSON: resultj,
logrus.ErrorKey: err,
}).Warning("Could not unmarshal HCS result")
if err := json.Unmarshal([]byte(resultJSON), result); err != nil {
log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result")
return nil
}
return result.ErrorEvents

@@ -141,6 +135,8 @@ type HcsError struct {
Events []ErrorEvent
}

var _ net.Error = &HcsError{}

func (e *HcsError) Error() string {
s := e.Op + ": " + e.Err.Error()
for _, ev := range e.Events {

@@ -149,6 +145,16 @@ func (e *HcsError) Error() string {
return s
}

func (e *HcsError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary()
}

func (e *HcsError) Timeout() bool {
err, ok := e.Err.(net.Error)
return ok && err.Timeout()
}

// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
SystemID string

@@ -158,6 +164,8 @@ type ProcessError struct {
Events []ErrorEvent
}

var _ net.Error = &ProcessError{}

// SystemError is an error encountered in HCS during an operation on a Container object
type SystemError struct {
ID string

@@ -167,6 +175,8 @@ type SystemError struct {
Events []ErrorEvent
}

var _ net.Error = &SystemError{}

func (e *SystemError) Error() string {
s := e.Op + " " + e.ID + ": " + e.Err.Error()
for _, ev := range e.Events {

@@ -178,6 +188,16 @@ func (e *SystemError) Error() string {
return s
}

func (e *SystemError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary()
}

func (e *SystemError) Timeout() bool {
err, ok := e.Err.(net.Error)
return ok && err.Timeout()
}

func makeSystemError(system *System, op string, extra string, err error, events []ErrorEvent) error {
// Don't double wrap errors
if _, ok := err.(*SystemError); ok {

@@ -200,6 +220,16 @@ func (e *ProcessError) Error() string {
return s
}

func (e *ProcessError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary()
}

func (e *ProcessError) Timeout() bool {
err, ok := e.Err.(net.Error)
return ok && err.Timeout()
}

func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error {
// Don't double wrap errors
if _, ok := err.(*ProcessError); ok {

@@ -242,6 +272,9 @@ func IsPending(err error) bool {
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
if err, ok := err.(net.Error); ok && err.Timeout() {
return true
}
err = getInnerError(err)
return err == ErrTimeout
}

@@ -292,3 +325,12 @@ func getInnerError(err error) error {
}
return err
}

func getOperationLogResult(err error) (string, error) {
switch err {
case nil:
return "Success", nil
default:
return "Error", err
}
}
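With the var _ net.Error assertions and the Temporary/Timeout methods above, HcsError, SystemError, and ProcessError now satisfy net.Error, so callers can classify failures with a plain interface assertion instead of unwrapping. A sketch of such a check; isRetryable is hypothetical, and the vendored package itself exposes IsTimeout for the same purpose:

package main

import (
    "fmt"
    "net"
    "time"
)

// isRetryable treats any net.Error that reports itself as temporary or as a
// timeout as worth retrying; other errors are considered fatal.
func isRetryable(err error) bool {
    if nerr, ok := err.(net.Error); ok {
        return nerr.Temporary() || nerr.Timeout()
    }
    return false
}

func main() {
    // Dialing an unroutable address with a short deadline typically yields
    // a timeout net.Error, which the check above classifies as retryable.
    _, err := net.DialTimeout("tcp", "10.255.255.1:80", 10*time.Millisecond)
    fmt.Println(isRetryable(err))
}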
48 vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go (generated, vendored, deleted)
@@ -1,48 +0,0 @@
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.

package hcs

import (
    "syscall"
)

//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hcs.go

//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?

//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?

type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle

type hcsProcessInformation struct {
    ProcessId uint32
    Reserved  uint32
    StdInput  syscall.Handle
    StdOutput syscall.Handle
    StdError  syscall.Handle
}
20 vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go (generated, vendored, deleted)
@@ -1,20 +0,0 @@
package hcs

import "github.com/sirupsen/logrus"

func logOperationBegin(ctx logrus.Fields, msg string) {
    logrus.WithFields(ctx).Debug(msg)
}

func logOperationEnd(ctx logrus.Fields, msg string, err error) {
    // Copy the log and fields first.
    log := logrus.WithFields(ctx)
    if err == nil {
        log.Debug(msg)
    } else {
        // Edit only the copied field data to avoid race conditions on the
        // write.
        log.Data[logrus.ErrorKey] = err
        log.Error(msg)
    }
}
421 vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go (generated, vendored)
@ -1,52 +1,47 @@
|
|||
package hcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/interop"
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/Microsoft/hcsshim/internal/log"
|
||||
"github.com/Microsoft/hcsshim/internal/oc"
|
||||
"github.com/Microsoft/hcsshim/internal/vmcompute"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ContainerError is an error encountered in HCS
|
||||
type Process struct {
|
||||
handleLock sync.RWMutex
|
||||
handle hcsProcess
|
||||
handle vmcompute.HcsProcess
|
||||
processID int
|
||||
system *System
|
||||
cachedPipes *cachedPipes
|
||||
hasCachedStdio bool
|
||||
stdioLock sync.Mutex
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderr io.ReadCloser
|
||||
callbackNumber uintptr
|
||||
|
||||
logctx logrus.Fields
|
||||
|
||||
closedWaitOnce sync.Once
|
||||
waitBlock chan struct{}
|
||||
exitCode int
|
||||
waitError error
|
||||
}
|
||||
|
||||
func newProcess(process hcsProcess, processID int, computeSystem *System) *Process {
|
||||
func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process {
|
||||
return &Process{
|
||||
handle: process,
|
||||
processID: processID,
|
||||
system: computeSystem,
|
||||
logctx: logrus.Fields{
|
||||
logfields.ContainerID: computeSystem.ID(),
|
||||
logfields.ProcessID: processID,
|
||||
},
|
||||
waitBlock: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
type cachedPipes struct {
|
||||
stdIn syscall.Handle
|
||||
stdOut syscall.Handle
|
||||
stdErr syscall.Handle
|
||||
}
|
||||
|
||||
type processModifyRequest struct {
|
||||
Operation string
|
||||
ConsoleSize *consoleSize `json:",omitempty"`
|
||||
|
@ -62,7 +57,7 @@ type closeHandle struct {
|
|||
Handle string
|
||||
}
|
||||
|
||||
type ProcessStatus struct {
|
||||
type processStatus struct {
|
||||
ProcessID uint32
|
||||
Exited bool
|
||||
ExitCode uint32
|
||||
|
@ -90,24 +85,32 @@ func (process *Process) SystemID() string {
|
|||
return process.system.ID()
|
||||
}
|
||||
|
||||
func (process *Process) logOperationBegin(operation string) {
|
||||
logOperationBegin(
|
||||
process.logctx,
|
||||
operation+" - Begin Operation")
|
||||
}
|
||||
|
||||
func (process *Process) logOperationEnd(operation string, err error) {
|
||||
var result string
|
||||
if err == nil {
|
||||
result = "Success"
|
||||
} else {
|
||||
result = "Error"
|
||||
func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
|
||||
switch err {
|
||||
case nil:
|
||||
return true, nil
|
||||
case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
|
||||
select {
|
||||
case <-process.waitBlock:
|
||||
// The process exit notification has already arrived.
|
||||
default:
|
||||
// The process should be gone, but we have not received the notification.
|
||||
// After a second, force unblock the process wait to work around a possible
|
||||
// deadlock in the HCS.
|
||||
go func() {
|
||||
time.Sleep(time.Second)
|
||||
process.closedWaitOnce.Do(func() {
|
||||
log.G(ctx).WithError(err).Warn("force unblocking process waits")
|
||||
process.exitCode = -1
|
||||
process.waitError = err
|
||||
close(process.waitBlock)
|
||||
})
|
||||
}()
|
||||
}
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
|
||||
logOperationEnd(
|
||||
process.logctx,
|
||||
operation+" - End Operation - "+result,
|
||||
err)
|
||||
}
|
||||
|
||||
// Signal signals the process with `options`.
|
||||
|
@ -115,115 +118,120 @@ func (process *Process) logOperationEnd(operation string, err error) {
|
|||
// For LCOW `guestrequest.SignalProcessOptionsLCOW`.
|
||||
//
|
||||
// For WCOW `guestrequest.SignalProcessOptionsWCOW`.
|
||||
func (process *Process) Signal(options interface{}) (err error) {
|
||||
func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::Process::Signal"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
if process.handle == 0 {
|
||||
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
optionsb, err := json.Marshal(options)
|
||||
if err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
optionsStr := string(optionsb)
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(process.logctx, func() {
|
||||
err = hcsSignalProcess(process.handle, optionsStr, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb))
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
delivered, err := process.processSignalResult(ctx, err)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, err, events)
|
||||
err = makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
return delivered, err
|
||||
}
|
||||
|
||||
// Kill signals the process to terminate but does not wait for it to finish terminating.
|
||||
func (process *Process) Kill() (err error) {
|
||||
func (process *Process) Kill(ctx context.Context) (bool, error) {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::Process::Kill"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
if process.handle == 0 {
|
||||
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(process.logctx, func() {
|
||||
err = hcsTerminateProcess(process.handle, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
delivered, err := process.processSignalResult(ctx, err)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, err, events)
|
||||
err = makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
return delivered, err
|
||||
}
|
||||
|
||||
// waitBackground waits for the process exit notification. Once received sets
|
||||
// `process.waitError` (if any) and unblocks all `Wait` and `WaitTimeout` calls.
|
||||
// `process.waitError` (if any) and unblocks all `Wait` calls.
|
||||
//
|
||||
// This MUST be called exactly once per `process.handle` but `Wait` and
|
||||
// `WaitTimeout` are safe to call multiple times.
|
||||
// This MUST be called exactly once per `process.handle` but `Wait` is safe to
|
||||
// call multiple times.
|
||||
func (process *Process) waitBackground() {
|
||||
process.waitError = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
|
||||
operation := "hcsshim::Process::waitBackground"
|
||||
ctx, span := trace.StartSpan(context.Background(), operation)
|
||||
defer span.End()
|
||||
span.AddAttributes(
|
||||
trace.StringAttribute("cid", process.SystemID()),
|
||||
trace.Int64Attribute("pid", int64(process.processID)))
|
||||
|
||||
var (
|
||||
err error
|
||||
exitCode = -1
|
||||
)
|
||||
|
||||
err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil)
|
||||
if err != nil {
|
||||
err = makeProcessError(process, operation, err, nil)
|
||||
log.G(ctx).WithError(err).Error("failed wait")
|
||||
} else {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
|
||||
// Make sure we didnt race with Close() here
|
||||
if process.handle != 0 {
|
||||
propertiesJSON, resultJSON, err := vmcompute.HcsGetProcessProperties(ctx, process.handle)
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
err = makeProcessError(process, operation, err, events)
|
||||
} else {
|
||||
properties := &processStatus{}
|
||||
err = json.Unmarshal([]byte(propertiesJSON), properties)
|
||||
if err != nil {
|
||||
err = makeProcessError(process, operation, err, nil)
|
||||
} else {
|
||||
if properties.LastWaitResult != 0 {
|
||||
log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result")
|
||||
} else {
|
||||
exitCode = int(properties.ExitCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
log.G(ctx).WithField("exitCode", exitCode).Debug("process exited")
|
||||
|
||||
process.closedWaitOnce.Do(func() {
|
||||
process.exitCode = exitCode
|
||||
process.waitError = err
|
||||
close(process.waitBlock)
|
||||
})
|
||||
oc.SetSpanStatus(span, err)
|
||||
}
|
||||
|
||||
// Wait waits for the process to exit. If the process has already exited returns
|
||||
// the pervious error (if any).
|
||||
func (process *Process) Wait() (err error) {
|
||||
operation := "hcsshim::Process::Wait"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
func (process *Process) Wait() error {
|
||||
<-process.waitBlock
|
||||
if process.waitError != nil {
|
||||
return makeProcessError(process, operation, process.waitError, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitTimeout waits for the process to exit or the duration to elapse. If the
|
||||
// process has already exited returns the pervious error (if any). If a timeout
|
||||
// occurs returns `ErrTimeout`.
|
||||
func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
|
||||
operation := "hcssshim::Process::WaitTimeout"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
select {
|
||||
case <-process.waitBlock:
|
||||
if process.waitError != nil {
|
||||
return makeProcessError(process, operation, process.waitError, nil)
|
||||
}
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
return makeProcessError(process, operation, ErrTimeout, nil)
|
||||
}
|
||||
return process.waitError
|
||||
}
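// A minimal sketch, assuming a *Process value p: with WaitTimeout removed,
// a caller can still bound the wait by racing the blocking Wait against a
// timer, reusing the package's existing ErrTimeout sentinel.
func waitWithTimeout(p *Process, d time.Duration) error {
	done := make(chan error, 1) // buffered so the goroutine can always finish
	go func() { done <- p.Wait() }()
	select {
	case err := <-done:
		return err
	case <-time.After(d):
		return ErrTimeout
	}
}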
|
||||
|
||||
// ResizeConsole resizes the console of the process.
|
||||
func (process *Process) ResizeConsole(width, height uint16) (err error) {
|
||||
func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::Process::ResizeConsole"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
if process.handle == 0 {
|
||||
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
|
@@ -242,11 +250,8 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
modifyRequestStr := string(modifyRequestb)
|
||||
|
||||
var resultp *uint16
|
||||
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
|
||||
events := processHcsResult(resultp)
|
||||
resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
@@ -254,109 +259,55 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (process *Process) Properties() (_ *ProcessStatus, err error) {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::Process::Properties"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
if process.handle == 0 {
|
||||
return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
var (
|
||||
resultp *uint16
|
||||
propertiesp *uint16
|
||||
)
|
||||
syscallWatcher(process.logctx, func() {
|
||||
err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
if err != nil {
|
||||
return nil, makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
||||
if propertiesp == nil {
|
||||
return nil, ErrUnexpectedValue
|
||||
}
|
||||
propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp)
|
||||
|
||||
properties := &ProcessStatus{}
|
||||
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
|
||||
return nil, makeProcessError(process, operation, err, nil)
|
||||
}
|
||||
|
||||
return properties, nil
|
||||
}
|
||||
|
||||
// ExitCode returns the exit code of the process. The process must have
|
||||
// already terminated.
|
||||
func (process *Process) ExitCode() (_ int, err error) {
|
||||
operation := "hcsshim::Process::ExitCode"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
properties, err := process.Properties()
|
||||
if err != nil {
|
||||
return -1, makeProcessError(process, operation, err, nil)
|
||||
func (process *Process) ExitCode() (int, error) {
|
||||
select {
|
||||
case <-process.waitBlock:
|
||||
if process.waitError != nil {
|
||||
return -1, process.waitError
|
||||
}
|
||||
return process.exitCode, nil
|
||||
default:
|
||||
return -1, makeProcessError(process, "hcsshim::Process::ExitCode", ErrInvalidProcessState, nil)
|
||||
}
|
||||
|
||||
if properties.Exited == false {
|
||||
return -1, makeProcessError(process, operation, ErrInvalidProcessState, nil)
|
||||
}
|
||||
|
||||
if properties.LastWaitResult != 0 {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
logfields.ContainerID: process.SystemID(),
|
||||
logfields.ProcessID: process.processID,
|
||||
"wait-result": properties.LastWaitResult,
|
||||
}).Warn("hcsshim::Process::ExitCode - Non-zero last wait result")
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
return int(properties.ExitCode), nil
|
||||
}
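// A minimal sketch of the intended pairing, assuming p *Process: ExitCode is
// now non-blocking, so it only succeeds after Wait has unblocked.
func runToCompletion(p *Process) (int, error) {
	if err := p.Wait(); err != nil {
		return -1, err
	}
	return p.ExitCode()
}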
|
||||
|
||||
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
|
||||
// these pipes does not close the underlying pipes; it should be possible to
|
||||
// call this multiple times to get multiple interfaces.
|
||||
func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
|
||||
// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing
|
||||
// these pipes does not close the underlying pipes. Once returned, these pipes
|
||||
// are the responsibility of the caller to close.
|
||||
func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
|
||||
operation := "hcsshim::Process::StdioLegacy"
|
||||
ctx, span := trace.StartSpan(context.Background(), operation)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(
|
||||
trace.StringAttribute("cid", process.SystemID()),
|
||||
trace.Int64Attribute("pid", int64(process.processID)))
|
||||
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::Process::Stdio"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
if process.handle == 0 {
|
||||
return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
var stdIn, stdOut, stdErr syscall.Handle
|
||||
|
||||
if process.cachedPipes == nil {
|
||||
var (
|
||||
processInfo hcsProcessInformation
|
||||
resultp *uint16
|
||||
)
|
||||
err = hcsGetProcessInfo(process.handle, &processInfo, &resultp)
|
||||
events := processHcsResult(resultp)
|
||||
if err != nil {
|
||||
return nil, nil, nil, makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
||||
stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
|
||||
} else {
|
||||
// Use cached pipes
|
||||
stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
|
||||
|
||||
// Invalidate the cache
|
||||
process.cachedPipes = nil
|
||||
process.stdioLock.Lock()
|
||||
defer process.stdioLock.Unlock()
|
||||
if process.hasCachedStdio {
|
||||
stdin, stdout, stderr := process.stdin, process.stdout, process.stderr
|
||||
process.stdin, process.stdout, process.stderr = nil, nil, nil
|
||||
process.hasCachedStdio = false
|
||||
return stdin, stdout, stderr, nil
|
||||
}
|
||||
|
||||
pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
|
||||
processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle)
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return nil, nil, nil, makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
||||
pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
|
||||
if err != nil {
|
||||
return nil, nil, nil, makeProcessError(process, operation, err, nil)
|
||||
}
|
||||
|
@@ -364,15 +315,21 @@ func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadClo
|
|||
return pipes[0], pipes[1], pipes[2], nil
|
||||
}
|
||||
|
||||
// Stdio returns the stdin, stdout, and stderr pipes, respectively.
|
||||
// To close them, close the process handle.
|
||||
func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
|
||||
process.stdioLock.Lock()
|
||||
defer process.stdioLock.Unlock()
|
||||
return process.stdin, process.stdout, process.stderr
|
||||
}
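// A minimal sketch, assuming p *Process was created with cached stdio: the
// new Stdio accessor lends the pipes out without transferring ownership,
// while StdioLegacy hands them over exactly once and leaves closing to the
// caller.
func writeGreeting(p *Process) error {
	stdin, _, _ := p.Stdio() // still owned by p; do not close
	if stdin == nil {
		return nil
	}
	_, err := io.WriteString(stdin, "hello\r\n") // hypothetical payload
	return err
}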
|
||||
|
||||
// CloseStdin closes the write side of the stdin pipe so that the process is
|
||||
// notified on the read side that there is no more data in stdin.
|
||||
func (process *Process) CloseStdin() (err error) {
|
||||
func (process *Process) CloseStdin(ctx context.Context) error {
|
||||
process.handleLock.RLock()
|
||||
defer process.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::Process::CloseStdin"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
if process.handle == 0 {
|
||||
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
|
@@ -390,51 +347,76 @@ func (process *Process) CloseStdin() (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
modifyRequestStr := string(modifyRequestb)
|
||||
|
||||
var resultp *uint16
|
||||
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
|
||||
events := processHcsResult(resultp)
|
||||
resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
||||
process.stdioLock.Lock()
|
||||
if process.stdin != nil {
|
||||
process.stdin.Close()
|
||||
process.stdin = nil
|
||||
}
|
||||
process.stdioLock.Unlock()
|
||||
|
||||
return nil
|
||||
}
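// A minimal sketch, assuming ctx context.Context, p *Process, and a borrowed
// stdin writer: after the final write, CloseStdin signals EOF so the reader
// inside the container unblocks.
func finishInput(ctx context.Context, p *Process, stdin io.Writer, data string) error {
	if _, err := io.WriteString(stdin, data); err != nil {
		return err
	}
	return p.CloseStdin(ctx)
}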
|
||||
|
||||
// Close cleans up any state associated with the process but does not kill
|
||||
// or wait on it.
|
||||
func (process *Process) Close() (err error) {
|
||||
operation := "hcsshim::Process::Close"
|
||||
ctx, span := trace.StartSpan(context.Background(), operation)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(
|
||||
trace.StringAttribute("cid", process.SystemID()),
|
||||
trace.Int64Attribute("pid", int64(process.processID)))
|
||||
|
||||
process.handleLock.Lock()
|
||||
defer process.handleLock.Unlock()
|
||||
|
||||
operation := "hcsshim::Process::Close"
|
||||
process.logOperationBegin(operation)
|
||||
defer func() { process.logOperationEnd(operation, err) }()
|
||||
|
||||
// Don't double free this
|
||||
if process.handle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = process.unregisterCallback(); err != nil {
|
||||
process.stdioLock.Lock()
|
||||
if process.stdin != nil {
|
||||
process.stdin.Close()
|
||||
process.stdin = nil
|
||||
}
|
||||
if process.stdout != nil {
|
||||
process.stdout.Close()
|
||||
process.stdout = nil
|
||||
}
|
||||
if process.stderr != nil {
|
||||
process.stderr.Close()
|
||||
process.stderr = nil
|
||||
}
|
||||
process.stdioLock.Unlock()
|
||||
|
||||
if err = process.unregisterCallback(ctx); err != nil {
|
||||
return makeProcessError(process, operation, err, nil)
|
||||
}
|
||||
|
||||
if err = hcsCloseProcess(process.handle); err != nil {
|
||||
if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil {
|
||||
return makeProcessError(process, operation, err, nil)
|
||||
}
|
||||
|
||||
process.handle = 0
|
||||
process.closedWaitOnce.Do(func() {
|
||||
process.exitCode = -1
|
||||
process.waitError = ErrAlreadyClosed
|
||||
close(process.waitBlock)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
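// A minimal sketch of the close/wait handshake used above: closedWaitOnce
// guarantees that whichever of Close or waitBackground settles first
// publishes the terminal state exactly once, and every Wait caller observes
// the same result.
type waitState struct {
	once      sync.Once
	waitBlock chan struct{}
	waitError error
}

func newWaitState() *waitState {
	return &waitState{waitBlock: make(chan struct{})}
}

func (w *waitState) settle(err error) {
	w.once.Do(func() {
		w.waitError = err
		close(w.waitBlock) // unblocks every waiter
	})
}

func (w *waitState) wait() error {
	<-w.waitBlock
	return w.waitError
}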
|
||||
|
||||
func (process *Process) registerCallback() error {
|
||||
context := ¬ifcationWatcherContext{
|
||||
func (process *Process) registerCallback(ctx context.Context) error {
|
||||
callbackContext := ¬ifcationWatcherContext{
|
||||
channels: newProcessChannels(),
|
||||
systemID: process.SystemID(),
|
||||
processID: process.processID,
|
||||
|
@@ -443,45 +425,44 @@ func (process *Process) registerCallback() error {
|
|||
callbackMapLock.Lock()
|
||||
callbackNumber := nextCallback
|
||||
nextCallback++
|
||||
callbackMap[callbackNumber] = context
|
||||
callbackMap[callbackNumber] = callbackContext
|
||||
callbackMapLock.Unlock()
|
||||
|
||||
var callbackHandle hcsCallback
|
||||
err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
|
||||
callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
context.handle = callbackHandle
|
||||
callbackContext.handle = callbackHandle
|
||||
process.callbackNumber = callbackNumber
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (process *Process) unregisterCallback() error {
|
||||
func (process *Process) unregisterCallback(ctx context.Context) error {
|
||||
callbackNumber := process.callbackNumber
|
||||
|
||||
callbackMapLock.RLock()
|
||||
context := callbackMap[callbackNumber]
|
||||
callbackContext := callbackMap[callbackNumber]
|
||||
callbackMapLock.RUnlock()
|
||||
|
||||
if context == nil {
|
||||
if callbackContext == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
handle := context.handle
|
||||
handle := callbackContext.handle
|
||||
|
||||
if handle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// hcsUnregisterProcessCallback has its own syncronization
|
||||
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
|
||||
err := hcsUnregisterProcessCallback(handle)
|
||||
// vmcompute.HcsUnregisterProcessCallback has its own synchronization to
|
||||
// wait for all callbacks to complete. We must NOT hold the callbackMapLock.
|
||||
err := vmcompute.HcsUnregisterProcessCallback(ctx, handle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
closeChannels(context.channels)
|
||||
closeChannels(callbackContext.channels)
|
||||
|
||||
callbackMapLock.Lock()
|
||||
delete(callbackMap, callbackNumber)
|
||||
|
|
619 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go generated vendored
|
@@ -1,18 +1,24 @@
|
|||
package hcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/interop"
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
"github.com/Microsoft/hcsshim/internal/cow"
|
||||
"github.com/Microsoft/hcsshim/internal/log"
|
||||
"github.com/Microsoft/hcsshim/internal/oc"
|
||||
"github.com/Microsoft/hcsshim/internal/schema1"
|
||||
hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/internal/timeout"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/Microsoft/hcsshim/internal/vmcompute"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// currentContainerStarts is used to limit the number of concurrent container
|
||||
|
@@ -38,54 +44,37 @@ func init() {
|
|||
|
||||
type System struct {
|
||||
handleLock sync.RWMutex
|
||||
handle hcsSystem
|
||||
handle vmcompute.HcsSystem
|
||||
id string
|
||||
callbackNumber uintptr
|
||||
|
||||
logctx logrus.Fields
|
||||
|
||||
closedWaitOnce sync.Once
|
||||
waitBlock chan struct{}
|
||||
waitError error
|
||||
exitError error
|
||||
|
||||
os, typ string
|
||||
}
|
||||
|
||||
func newSystem(id string) *System {
|
||||
return &System{
|
||||
id: id,
|
||||
logctx: logrus.Fields{
|
||||
logfields.ContainerID: id,
|
||||
},
|
||||
id: id,
|
||||
waitBlock: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (computeSystem *System) logOperationBegin(operation string) {
|
||||
logOperationBegin(
|
||||
computeSystem.logctx,
|
||||
operation+" - Begin Operation")
|
||||
}
|
||||
|
||||
func (computeSystem *System) logOperationEnd(operation string, err error) {
|
||||
var result string
|
||||
if err == nil {
|
||||
result = "Success"
|
||||
} else {
|
||||
result = "Error"
|
||||
}
|
||||
|
||||
logOperationEnd(
|
||||
computeSystem.logctx,
|
||||
operation+" - End Operation - "+result,
|
||||
err)
|
||||
}
|
||||
|
||||
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
|
||||
func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System, err error) {
|
||||
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
|
||||
operation := "hcsshim::CreateComputeSystem"
|
||||
|
||||
// hcsCreateComputeSystemContext is an async operation. Start the outer span
|
||||
// here to measure the full create time.
|
||||
ctx, span := trace.StartSpan(ctx, operation)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(trace.StringAttribute("cid", id))
|
||||
|
||||
computeSystem := newSystem(id)
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
|
||||
if err != nil {
|
||||
|
@@ -94,129 +83,114 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System
|
|||
|
||||
hcsDocument := string(hcsDocumentB)
|
||||
|
||||
logrus.WithFields(computeSystem.logctx).
|
||||
WithField(logfields.JSON, hcsDocument).
|
||||
Debug("HCS ComputeSystem Document")
|
||||
|
||||
var (
|
||||
resultp *uint16
|
||||
identity syscall.Handle
|
||||
resultJSON string
|
||||
createError error
|
||||
)
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
|
||||
})
|
||||
|
||||
computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity)
|
||||
if createError == nil || IsPending(createError) {
|
||||
if err = computeSystem.registerCallback(); err != nil {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
computeSystem.Close()
|
||||
}
|
||||
}()
|
||||
if err = computeSystem.registerCallback(ctx); err != nil {
|
||||
// Terminate the compute system if it still exists. We're okay to
|
||||
// ignore a failure here.
|
||||
computeSystem.Terminate()
|
||||
computeSystem.Terminate(ctx)
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
}
|
||||
|
||||
events, err := processAsyncHcsResult(createError, resultp, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
|
||||
events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
|
||||
if err != nil {
|
||||
if err == ErrTimeout {
|
||||
// Terminate the compute system if it still exists. We're okay to
|
||||
// ignore a failure here.
|
||||
computeSystem.Terminate()
|
||||
computeSystem.Terminate(ctx)
|
||||
}
|
||||
return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
|
||||
}
|
||||
|
||||
go computeSystem.waitBackground()
|
||||
|
||||
if err = computeSystem.getCachedProperties(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return computeSystem, nil
|
||||
}
|
||||
|
||||
// OpenComputeSystem opens an existing compute system by ID.
|
||||
func OpenComputeSystem(id string) (_ *System, err error) {
|
||||
func OpenComputeSystem(ctx context.Context, id string) (*System, error) {
|
||||
operation := "hcsshim::OpenComputeSystem"
|
||||
|
||||
computeSystem := newSystem(id)
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() {
|
||||
if IsNotExist(err) {
|
||||
computeSystem.logOperationEnd(operation, nil)
|
||||
} else {
|
||||
computeSystem.logOperationEnd(operation, err)
|
||||
}
|
||||
}()
|
||||
|
||||
var (
|
||||
handle hcsSystem
|
||||
resultp *uint16
|
||||
)
|
||||
err = hcsOpenComputeSystem(id, &handle, &resultp)
|
||||
events := processHcsResult(resultp)
|
||||
handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id)
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
computeSystem.handle = handle
|
||||
|
||||
if err = computeSystem.registerCallback(); err != nil {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
computeSystem.Close()
|
||||
}
|
||||
}()
|
||||
if err = computeSystem.registerCallback(ctx); err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
go computeSystem.waitBackground()
|
||||
|
||||
if err = computeSystem.getCachedProperties(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return computeSystem, nil
|
||||
}
|
||||
|
||||
func (computeSystem *System) getCachedProperties(ctx context.Context) error {
|
||||
props, err := computeSystem.Properties(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
computeSystem.typ = strings.ToLower(props.SystemType)
|
||||
computeSystem.os = strings.ToLower(props.RuntimeOSType)
|
||||
if computeSystem.os == "" && computeSystem.typ == "container" {
|
||||
// Pre-RS5 HCS did not return the OS, but it only supported containers
|
||||
// that ran Windows.
|
||||
computeSystem.os = "windows"
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OS returns the operating system of the compute system, "linux" or "windows".
|
||||
func (computeSystem *System) OS() string {
|
||||
return computeSystem.os
|
||||
}
|
||||
|
||||
// IsOCI returns whether processes in the compute system should be created via
|
||||
// OCI.
|
||||
func (computeSystem *System) IsOCI() bool {
|
||||
return computeSystem.os == "linux" && computeSystem.typ == "container"
|
||||
}
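// A minimal sketch, assuming cs *System: callers branch on the cached
// metadata instead of re-querying HCS for every decision.
func launchStrategy(cs *System) string {
	if cs.IsOCI() {
		return "oci-spec" // Linux container processes go through OCI
	}
	if cs.OS() == "windows" {
		return "process-config"
	}
	return "unknown"
}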
|
||||
|
||||
// GetComputeSystems gets a list of the compute systems on the system that match the query
|
||||
func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
|
||||
func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) {
|
||||
operation := "hcsshim::GetComputeSystems"
|
||||
fields := logrus.Fields{}
|
||||
logOperationBegin(
|
||||
fields,
|
||||
operation+" - Begin Operation")
|
||||
|
||||
defer func() {
|
||||
var result string
|
||||
if err == nil {
|
||||
result = "Success"
|
||||
} else {
|
||||
result = "Error"
|
||||
}
|
||||
|
||||
logOperationEnd(
|
||||
fields,
|
||||
operation+" - End Operation - "+result,
|
||||
err)
|
||||
}()
|
||||
|
||||
queryb, err := json.Marshal(q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
query := string(queryb)
|
||||
|
||||
logrus.WithFields(fields).
|
||||
WithField(logfields.JSON, query).
|
||||
Debug("HCS ComputeSystem Query")
|
||||
|
||||
var (
|
||||
resultp *uint16
|
||||
computeSystemsp *uint16
|
||||
)
|
||||
|
||||
syscallWatcher(fields, func() {
|
||||
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb))
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return nil, &HcsError{Op: operation, Err: err, Events: events}
|
||||
}
|
||||
|
||||
if computeSystemsp == nil {
|
||||
if computeSystemsJSON == "" {
|
||||
return nil, ErrUnexpectedValue
|
||||
}
|
||||
computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp)
|
||||
computeSystems := []schema1.ContainerProperties{}
|
||||
if err = json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
|
||||
if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@@ -224,16 +198,21 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope
|
|||
}
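// A minimal sketch, assuming ctx and that schema1.ComputeSystemQuery exposes
// Types and Owners fields as in this vintage of hcsshim: list the containers
// a particular runtime owns.
func listOwned(ctx context.Context, owner string) ([]schema1.ContainerProperties, error) {
	q := schema1.ComputeSystemQuery{
		Types:  []string{"Container"},
		Owners: []string{owner},
	}
	return GetComputeSystems(ctx, q)
}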
|
||||
|
||||
// Start synchronously starts the computeSystem.
|
||||
func (computeSystem *System) Start() (err error) {
|
||||
func (computeSystem *System) Start(ctx context.Context) (err error) {
|
||||
operation := "hcsshim::System::Start"
|
||||
|
||||
// hcsStartComputeSystemContext is an async operation. Start the outer span
|
||||
// here to measure the full start time.
|
||||
ctx, span := trace.StartSpan(ctx, operation)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
|
||||
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Start"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
|
||||
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
// This is a very simple backoff-retry loop to limit the number
|
||||
|
@@ -262,13 +241,10 @@ func (computeSystem *System) Start() (err error) {
|
|||
}()
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsStartComputeSystem(computeSystem.handle, "", &resultp)
|
||||
})
|
||||
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
|
||||
resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "")
|
||||
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
|
||||
if err != nil {
|
||||
return makeSystemError(computeSystem, "Start", "", err, events)
|
||||
return makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@@ -279,273 +255,257 @@ func (computeSystem *System) ID() string {
|
|||
return computeSystem.id
|
||||
}
|
||||
|
||||
// Shutdown requests a compute system shutdown, if IsPending() on the error returned is true,
|
||||
// it may not actually be shut down until Wait() succeeds.
|
||||
func (computeSystem *System) Shutdown() (err error) {
|
||||
// Shutdown requests a compute system shutdown.
|
||||
func (computeSystem *System) Shutdown(ctx context.Context) error {
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Shutdown"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() {
|
||||
if IsAlreadyClosed(err) || IsAlreadyStopped(err) || IsPending(err) {
|
||||
computeSystem.logOperationEnd(operation, nil)
|
||||
} else {
|
||||
computeSystem.logOperationEnd(operation, err)
|
||||
}
|
||||
}()
|
||||
operation := "hcsshim::System::Shutdown"
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
if err != nil {
|
||||
return makeSystemError(computeSystem, "Shutdown", "", err, events)
|
||||
resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
switch err {
|
||||
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
|
||||
default:
|
||||
return makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
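// A minimal sketch, assuming ctx and cs *System: because the switch above
// treats already-stopped, does-not-exist, and pending answers as success,
// teardown paths can call Shutdown unconditionally and only surface genuine
// failures.
func teardown(ctx context.Context, cs *System) error {
	if err := cs.Shutdown(ctx); err != nil {
		return err // a real HCS failure, not "already gone"
	}
	return cs.Wait()
}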
|
||||
|
||||
// Terminate requests a compute system terminate, if IsPending() on the error returned is true,
|
||||
// it may not actually be shut down until Wait() succeeds.
|
||||
func (computeSystem *System) Terminate() (err error) {
|
||||
// Terminate requests a compute system terminate.
|
||||
func (computeSystem *System) Terminate(ctx context.Context) error {
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Terminate"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() {
|
||||
if IsAlreadyClosed(err) || IsAlreadyStopped(err) || IsPending(err) {
|
||||
computeSystem.logOperationEnd(operation, nil)
|
||||
} else {
|
||||
computeSystem.logOperationEnd(operation, err)
|
||||
}
|
||||
}()
|
||||
operation := "hcsshim::System::Terminate"
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
if err != nil && err != ErrVmcomputeAlreadyStopped {
|
||||
return makeSystemError(computeSystem, "Terminate", "", err, events)
|
||||
resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
switch err {
|
||||
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
|
||||
default:
|
||||
return makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitBackground waits for the compute system exit notification. Once received
|
||||
// sets `computeSystem.waitError` (if any) and unblocks all `Wait`,
|
||||
// `WaitExpectedError`, and `WaitTimeout` calls.
|
||||
// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls.
|
||||
//
|
||||
// This MUST be called exactly once per `computeSystem.handle` but `Wait`,
|
||||
// `WaitExpectedError`, and `WaitTimeout` are safe to call multiple times.
|
||||
// This MUST be called exactly once per `computeSystem.handle` but `Wait` is
|
||||
// safe to call multiple times.
|
||||
func (computeSystem *System) waitBackground() {
|
||||
computeSystem.waitError = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
|
||||
operation := "hcsshim::System::waitBackground"
|
||||
ctx, span := trace.StartSpan(context.Background(), operation)
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
|
||||
|
||||
err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
|
||||
switch err {
|
||||
case nil:
|
||||
log.G(ctx).Debug("system exited")
|
||||
case ErrVmcomputeUnexpectedExit:
|
||||
log.G(ctx).Debug("unexpected system exit")
|
||||
computeSystem.exitError = makeSystemError(computeSystem, operation, "", err, nil)
|
||||
err = nil
|
||||
default:
|
||||
err = makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
computeSystem.closedWaitOnce.Do(func() {
|
||||
computeSystem.waitError = err
|
||||
close(computeSystem.waitBlock)
|
||||
})
|
||||
oc.SetSpanStatus(span, err)
|
||||
}
|
||||
|
||||
// Wait synchronously waits for the compute system to shutdown or terminate. If
|
||||
// the compute system has already exited returns the previous error (if any).
|
||||
func (computeSystem *System) Wait() (err error) {
|
||||
operation := "hcsshim::ComputeSystem::Wait"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
func (computeSystem *System) Wait() error {
|
||||
<-computeSystem.waitBlock
|
||||
if computeSystem.waitError != nil {
|
||||
return makeSystemError(computeSystem, "Wait", "", computeSystem.waitError, nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
return computeSystem.waitError
|
||||
}
|
||||
|
||||
// WaitExpectedError synchronously waits for the compute system to shutdown or
|
||||
// terminate and returns the error (if any) as long as it does not match
|
||||
// `expected`. If the compute system has already exited returns the previous
|
||||
// error (if any) as long as it does not match `expected`.
|
||||
func (computeSystem *System) WaitExpectedError(expected error) (err error) {
|
||||
operation := "hcsshim::ComputeSystem::WaitExpectedError"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
<-computeSystem.waitBlock
|
||||
if computeSystem.waitError != nil && getInnerError(computeSystem.waitError) != expected {
|
||||
return makeSystemError(computeSystem, "WaitExpectedError", "", computeSystem.waitError, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitTimeout synchronously waits for the compute system to terminate or the
|
||||
// duration to elapse. If the timeout expires, `IsTimeout(err) == true`. If
|
||||
// the compute system has already exited returns the previous error (if any).
|
||||
func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
|
||||
operation := "hcsshim::ComputeSystem::WaitTimeout"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
// ExitError returns an error describing the reason the compute system terminated.
|
||||
func (computeSystem *System) ExitError() error {
|
||||
select {
|
||||
case <-computeSystem.waitBlock:
|
||||
if computeSystem.waitError != nil {
|
||||
return makeSystemError(computeSystem, "WaitTimeout", "", computeSystem.waitError, nil)
|
||||
return computeSystem.waitError
|
||||
}
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
return makeSystemError(computeSystem, "WaitTimeout", "", ErrTimeout, nil)
|
||||
return computeSystem.exitError
|
||||
default:
|
||||
return errors.New("container not exited")
|
||||
}
|
||||
}
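// A minimal sketch, assuming cs *System: Wait reports failures of the wait
// machinery itself, while ExitError (readable only once the system has
// exited) reports why the compute system stopped, such as an unexpected exit.
func exitReason(cs *System) error {
	if err := cs.Wait(); err != nil {
		return err
	}
	return cs.ExitError()
}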
|
||||
|
||||
func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) {
|
||||
// Properties returns the requested container properties targeting a V1 schema container.
|
||||
func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Properties"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
operation := "hcsshim::System::Properties"
|
||||
|
||||
queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types})
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
|
||||
queryString := string(queryBytes)
|
||||
logrus.WithFields(computeSystem.logctx).
|
||||
WithField(logfields.JSON, queryString).
|
||||
Debug("HCS ComputeSystem Properties Query")
|
||||
|
||||
var resultp, propertiesp *uint16
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryString), &propertiesp, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes))
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, "Properties", "", err, events)
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
if propertiesp == nil {
|
||||
if propertiesJSON == "" {
|
||||
return nil, ErrUnexpectedValue
|
||||
}
|
||||
propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp)
|
||||
properties := &schema1.ContainerProperties{}
|
||||
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
|
||||
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
|
||||
if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
|
||||
return properties, nil
|
||||
}
|
||||
|
||||
// PropertiesV2 returns the requested container properties targeting a V2 schema container.
|
||||
func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::System::PropertiesV2"
|
||||
|
||||
queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
|
||||
propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes))
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
if propertiesJSON == "" {
|
||||
return nil, ErrUnexpectedValue
|
||||
}
|
||||
properties := &hcsschema.Properties{}
|
||||
if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
|
||||
return properties, nil
|
||||
}
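// A minimal sketch, assuming ctx, cs *System, and a hypothetical V2
// property-type constant hcsschema.PTStatistics: the V2 path round-trips a
// typed query exactly like the V1 Properties call above.
func statistics(ctx context.Context, cs *System) (*hcsschema.Properties, error) {
	return cs.PropertiesV2(ctx, hcsschema.PTStatistics)
}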
|
||||
|
||||
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
|
||||
func (computeSystem *System) Pause() (err error) {
|
||||
func (computeSystem *System) Pause(ctx context.Context) (err error) {
|
||||
operation := "hcsshim::System::Pause"
|
||||
|
||||
// hcsPauseComputeSystemContext is an async operation. Start the outer span
|
||||
// here to measure the full pause time.
|
||||
ctx, span := trace.StartSpan(ctx, operation)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
|
||||
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Pause"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
|
||||
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
|
||||
})
|
||||
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
|
||||
resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "")
|
||||
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
|
||||
if err != nil {
|
||||
return makeSystemError(computeSystem, "Pause", "", err, events)
|
||||
return makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
|
||||
func (computeSystem *System) Resume() (err error) {
|
||||
func (computeSystem *System) Resume(ctx context.Context) (err error) {
|
||||
operation := "hcsshim::System::Resume"
|
||||
|
||||
// hcsResumeComputeSystemContext is an async operation. Start the outer span
|
||||
// here to measure the full restore time.
|
||||
ctx, span := trace.StartSpan(ctx, operation)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
|
||||
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Resume"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
|
||||
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
|
||||
})
|
||||
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
|
||||
resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "")
|
||||
events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
|
||||
if err != nil {
|
||||
return makeSystemError(computeSystem, "Resume", "", err, events)
|
||||
return makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateProcess launches a new process within the computeSystem.
|
||||
func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error) {
|
||||
func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) {
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::CreateProcess"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
var (
|
||||
processInfo hcsProcessInformation
|
||||
processHandle hcsProcess
|
||||
resultp *uint16
|
||||
)
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return nil, makeSystemError(computeSystem, "CreateProcess", "", ErrAlreadyClosed, nil)
|
||||
return nil, nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
configurationb, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
|
||||
return nil, nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
|
||||
configuration := string(configurationb)
|
||||
|
||||
logrus.WithFields(computeSystem.logctx).
|
||||
WithField(logfields.JSON, configuration).
|
||||
Debug("HCS ComputeSystem Process Document")
|
||||
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration)
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events)
|
||||
return nil, nil, makeSystemError(computeSystem, operation, configuration, err, events)
|
||||
}
|
||||
|
||||
logrus.WithFields(computeSystem.logctx).
|
||||
WithField(logfields.ProcessID, processInfo.ProcessId).
|
||||
Debug("HCS ComputeSystem CreateProcess PID")
|
||||
log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid")
|
||||
return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil
|
||||
}
|
||||
|
||||
process := newProcess(processHandle, int(processInfo.ProcessId), computeSystem)
|
||||
process.cachedPipes = &cachedPipes{
|
||||
stdIn: processInfo.StdInput,
|
||||
stdOut: processInfo.StdOutput,
|
||||
stdErr: processInfo.StdError,
|
||||
// CreateProcess launches a new process within the computeSystem.
|
||||
func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) {
|
||||
operation := "hcsshim::System::CreateProcess"
|
||||
process, processInfo, err := computeSystem.createProcess(ctx, operation, c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
process.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if err = process.registerCallback(); err != nil {
|
||||
return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
|
||||
pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
process.stdin = pipes[0]
|
||||
process.stdout = pipes[1]
|
||||
process.stderr = pipes[2]
|
||||
process.hasCachedStdio = true
|
||||
|
||||
if err = process.registerCallback(ctx); err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
go process.waitBackground()
|
||||
|
||||
|
@@ -553,38 +513,25 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error
|
|||
}
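// A minimal sketch, assuming ctx, cs *System, and schema1.ProcessConfig
// fields as in this vintage of hcsshim: launch a process with a stdout pipe
// wired up by createProcess above.
func echo(ctx context.Context, cs *System) (cow.Process, error) {
	return cs.CreateProcess(ctx, &schema1.ProcessConfig{
		CommandLine:      "cmd /c echo hello",
		CreateStdOutPipe: true,
	})
}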
|
||||
|
||||
// OpenProcess gets an interface to an existing process within the computeSystem.
|
||||
func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
|
||||
func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) {
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
// Add PID for the context of this operation
|
||||
computeSystem.logctx[logfields.ProcessID] = pid
|
||||
defer delete(computeSystem.logctx, logfields.ProcessID)
|
||||
|
||||
operation := "hcsshim::ComputeSystem::OpenProcess"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
var (
|
||||
processHandle hcsProcess
|
||||
resultp *uint16
|
||||
)
|
||||
operation := "hcsshim::System::OpenProcess"
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil)
|
||||
return nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid))
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events)
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, events)
|
||||
}
|
||||
|
||||
process := newProcess(processHandle, pid, computeSystem)
|
||||
if err = process.registerCallback(); err != nil {
|
||||
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil)
|
||||
if err = process.registerCallback(ctx); err != nil {
|
||||
return nil, makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
go process.waitBackground()
|
||||
|
||||
|
@@ -593,39 +540,40 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
|
|||
|
||||
// Close cleans up any state associated with the compute system but does not terminate or wait for it.
|
||||
func (computeSystem *System) Close() (err error) {
|
||||
operation := "hcsshim::System::Close"
|
||||
ctx, span := trace.StartSpan(context.Background(), operation)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
|
||||
|
||||
computeSystem.handleLock.Lock()
|
||||
defer computeSystem.handleLock.Unlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Close"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
|
||||
// Don't double free this
|
||||
if computeSystem.handle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = computeSystem.unregisterCallback(); err != nil {
|
||||
return makeSystemError(computeSystem, "Close", "", err, nil)
|
||||
if err = computeSystem.unregisterCallback(ctx); err != nil {
|
||||
return makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsCloseComputeSystem(computeSystem.handle)
|
||||
})
|
||||
err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle)
|
||||
if err != nil {
|
||||
return makeSystemError(computeSystem, "Close", "", err, nil)
|
||||
return makeSystemError(computeSystem, operation, "", err, nil)
|
||||
}
|
||||
|
||||
computeSystem.handle = 0
|
||||
computeSystem.closedWaitOnce.Do(func() {
|
||||
computeSystem.waitError = ErrAlreadyClosed
|
||||
close(computeSystem.waitBlock)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (computeSystem *System) registerCallback() error {
|
||||
context := ¬ifcationWatcherContext{
|
||||
func (computeSystem *System) registerCallback(ctx context.Context) error {
|
||||
callbackContext := ¬ifcationWatcherContext{
|
||||
channels: newSystemChannels(),
|
||||
systemID: computeSystem.id,
|
||||
}
|
||||
|
@@ -633,32 +581,31 @@ func (computeSystem *System) registerCallback() error {
|
|||
callbackMapLock.Lock()
|
||||
callbackNumber := nextCallback
|
||||
nextCallback++
|
||||
callbackMap[callbackNumber] = context
|
||||
callbackMap[callbackNumber] = callbackContext
|
||||
callbackMapLock.Unlock()
|
||||
|
||||
var callbackHandle hcsCallback
|
||||
err := hcsRegisterComputeSystemCallback(computeSystem.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
|
||||
callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
context.handle = callbackHandle
|
||||
callbackContext.handle = callbackHandle
|
||||
computeSystem.callbackNumber = callbackNumber
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (computeSystem *System) unregisterCallback() error {
|
||||
func (computeSystem *System) unregisterCallback(ctx context.Context) error {
|
||||
callbackNumber := computeSystem.callbackNumber
|
||||
|
||||
callbackMapLock.RLock()
|
||||
context := callbackMap[callbackNumber]
|
||||
callbackContext := callbackMap[callbackNumber]
|
||||
callbackMapLock.RUnlock()
|
||||
|
||||
if context == nil {
|
||||
if callbackContext == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
handle := context.handle
|
||||
handle := callbackContext.handle
|
||||
|
||||
if handle == 0 {
|
||||
return nil
|
||||
|
@@ -666,12 +613,12 @@ func (computeSystem *System) unregisterCallback() error {
|
|||
|
||||
// hcsUnregisterComputeSystemCallback has its own synchronization
|
||||
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
|
||||
err := hcsUnregisterComputeSystemCallback(handle)
|
||||
err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
closeChannels(context.channels)
|
||||
closeChannels(callbackContext.channels)
|
||||
|
||||
callbackMapLock.Lock()
|
||||
delete(callbackMap, callbackNumber)
|
||||
|
@@ -683,36 +630,26 @@ func (computeSystem *System) unregisterCallback() error {
|
|||
}
|
||||
|
||||
// Modify the System by sending a request to HCS
|
||||
func (computeSystem *System) Modify(config interface{}) (err error) {
|
||||
func (computeSystem *System) Modify(ctx context.Context, config interface{}) error {
|
||||
computeSystem.handleLock.RLock()
|
||||
defer computeSystem.handleLock.RUnlock()
|
||||
|
||||
operation := "hcsshim::ComputeSystem::Modify"
|
||||
computeSystem.logOperationBegin(operation)
|
||||
defer func() { computeSystem.logOperationEnd(operation, err) }()
|
||||
operation := "hcsshim::System::Modify"
|
||||
|
||||
if computeSystem.handle == 0 {
|
||||
return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
|
||||
return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
requestJSON, err := json.Marshal(config)
|
||||
requestBytes, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
requestString := string(requestJSON)
|
||||
|
||||
logrus.WithFields(computeSystem.logctx).
|
||||
WithField(logfields.JSON, requestString).
|
||||
Debug("HCS ComputeSystem Modify Document")
|
||||
|
||||
var resultp *uint16
|
||||
syscallWatcher(computeSystem.logctx, func() {
|
||||
err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
|
||||
})
|
||||
events := processHcsResult(resultp)
|
||||
requestJSON := string(requestBytes)
|
||||
resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON)
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if err != nil {
|
||||
return makeSystemError(computeSystem, "Modify", requestString, err, events)
|
||||
return makeSystemError(computeSystem, operation, requestJSON, err, events)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
15 vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go generated vendored
|
@@ -1,25 +1,26 @@
|
|||
package hcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/Microsoft/hcsshim/internal/log"
|
||||
)
|
||||
|
||||
func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) {
|
||||
events := processHcsResult(resultp)
|
||||
func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) {
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
if IsPending(err) {
|
||||
return nil, waitForNotification(callbackNumber, expectedNotification, timeout)
|
||||
return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout)
|
||||
}
|
||||
|
||||
return events, err
|
||||
}
|
||||
|
||||
func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
|
||||
func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
|
||||
callbackMapLock.RLock()
|
||||
if _, ok := callbackMap[callbackNumber]; !ok {
|
||||
callbackMapLock.RUnlock()
|
||||
logrus.Errorf("failed to waitForNotification: callbackNumber %d does not exist in callbackMap", callbackNumber)
|
||||
log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap")
|
||||
return ErrHandleClose
|
||||
}
|
||||
channels := callbackMap[callbackNumber].channels
|
||||
|
@@ -27,7 +28,7 @@ func waitForNotification(callbackNumber uintptr, expectedNotific
|
|||
|
||||
expectedChannel := channels[expectedNotification]
|
||||
if expectedChannel == nil {
|
||||
logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification)
|
||||
log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification")
|
||||
return ErrInvalidNotificationType
|
||||
}
|
||||
|
||||
|
|
41 vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go generated vendored
|
@@ -1,41 +0,0 @@
|
|||
package hcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
"github.com/Microsoft/hcsshim/internal/timeout"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// syscallWatcher is used as a very simple goroutine around calls into
|
||||
// the platform. In some cases, we have seen HCS APIs not returning due to
|
||||
// various bugs, and the goroutine making the syscall ends up not returning,
|
||||
// prior to its async callback. By spinning up a syscallWatcher, it allows
|
||||
// us to at least log a warning if a syscall doesn't complete in a reasonable
|
||||
// amount of time.
|
||||
//
|
||||
// Usage is:
|
||||
//
|
||||
// syscallWatcher(logContext, func() {
|
||||
// err = <syscall>(args...)
|
||||
// })
|
||||
//
|
||||
|
||||
func syscallWatcher(logContext logrus.Fields, syscallLambda func()) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher)
|
||||
defer cancel()
|
||||
go watchFunc(ctx, logContext)
|
||||
syscallLambda()
|
||||
}
|
||||
|
||||
func watchFunc(ctx context.Context, logContext logrus.Fields) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if ctx.Err() != context.Canceled {
|
||||
logrus.WithFields(logContext).
|
||||
WithField(logfields.Timeout, timeout.SyscallWatcher).
|
||||
Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.")
|
||||
}
|
||||
}
|
||||
}
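// A minimal sketch of one possible replacement, assuming the vmcompute
// wrappers observe the context they are handed: the watcher goroutine's job
// is now expressible as a plain context deadline around the call.
func startWithDeadline(handle vmcompute.HcsSystem) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher)
	defer cancel()
	return vmcompute.HcsStartComputeSystem(ctx, handle, "")
}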
|
22 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go generated vendored
|
@@ -3,6 +3,7 @@ package hns
 import (
 	"encoding/json"
 	"net"
+	"strings"
 
 	"github.com/sirupsen/logrus"
 )
 
@@ -94,6 +95,27 @@ func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
 	return nil, EndpointNotFoundError{EndpointName: endpointName}
 }
 
+type endpointAttachInfo struct {
+	SharedContainers json.RawMessage `json:",omitempty"`
+}
+
+func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) {
+	attachInfo := endpointAttachInfo{}
+	err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo)
+
+	// Return false allows us to just return the err
+	if err != nil {
+		return false, err
+	}
+
+	if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) {
+		return true, nil
+	}
+
+	return false, nil
+
+}
+
 // Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
 func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
 	operation := "Create"
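Hedged usage sketch for the new IsAttached helper: check whether an HNS endpoint is already attached before hot-adding it. The endpoint name and container ID are made up, the function name is an assumption, and this builds on Windows only; the root hcsshim package re-exports the hns endpoint types used here.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

// ensureAttached guards an attach call with the new IsAttached check, which
// substring-matches the ID against the endpoint's SharedContainers.
func ensureAttached(endpointName, containerID string) error {
	ep, err := hcsshim.GetHNSEndpointByName(endpointName)
	if err != nil {
		return err
	}
	attached, err := ep.IsAttached(containerID)
	if err != nil {
		return err
	}
	if attached {
		return nil // nothing to do
	}
	fmt.Println("endpoint not attached; hot-add it here")
	return nil
}

func main() {
	_ = ensureAttached("example-endpoint", "example-container-id")
}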
15 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go (generated, vendored)
@@ -9,23 +9,30 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-func hnsCall(method, path, request string, returnResponse interface{}) error {
+func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) {
 	var responseBuffer *uint16
 	logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)
 
 	err := _hnsCall(method, path, request, &responseBuffer)
 	if err != nil {
-		return hcserror.New(err, "hnsCall ", "")
+		return nil, hcserror.New(err, "hnsCall ", "")
 	}
 	response := interop.ConvertAndFreeCoTaskMemString(responseBuffer)
 
 	hnsresponse := &hnsResponse{}
 	if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
-		return err
+		return nil, err
 	}
+	return hnsresponse, nil
+}
+
+func hnsCall(method, path, request string, returnResponse interface{}) error {
+	hnsresponse, err := hnsCallRawResponse(method, path, request)
+	if err != nil {
+		return fmt.Errorf("failed during hnsCallRawResponse: %v", err)
+	}
 	if !hnsresponse.Success {
-		return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
+		return fmt.Errorf("hns failed with error : %s", hnsresponse.Error)
 	}
 
 	if len(hnsresponse.Output) == 0 {
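The refactor's shape, reduced to a runnable sketch: an envelope-returning raw call plus a typed wrapper that keeps the old error-only contract. Everything below except the envelope field names from the hunk is illustrative.

package main

import (
	"encoding/json"
	"fmt"
)

// response mirrors the hnsResponse envelope seen above.
type response struct {
	Success bool            `json:"Success"`
	Error   string          `json:"Error"`
	Output  json.RawMessage `json:"Output"`
}

// callRaw stands in for hnsCallRawResponse: it hands back the decoded
// envelope so callers can inspect Output or Error directly.
func callRaw(raw string) (*response, error) {
	r := &response{}
	if err := json.Unmarshal([]byte(raw), r); err != nil {
		return nil, err
	}
	return r, nil
}

// call layers the old error-only contract on top of callRaw, the same
// layering the refactor introduces for hnsCall.
func call(raw string, out interface{}) error {
	resp, err := callRaw(raw)
	if err != nil {
		return fmt.Errorf("failed during callRaw: %v", err)
	}
	if !resp.Success {
		return fmt.Errorf("hns failed with error : %s", resp.Error)
	}
	return json.Unmarshal(resp.Output, out)
}

func main() {
	var out struct{ ID string }
	err := call(`{"Success":true,"Output":{"ID":"ep1"}}`, &out)
	fmt.Println(out.ID, err) // ep1 <nil>
}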
5 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go (generated, vendored)
@@ -55,8 +55,9 @@ type PaPolicy struct {
 
 type OutboundNatPolicy struct {
 	Policy
-	VIP        string   `json:"VIP,omitempty"`
-	Exceptions []string `json:"ExceptionList,omitempty"`
+	VIP          string   `json:"VIP,omitempty"`
+	Exceptions   []string `json:"ExceptionList,omitempty"`
+	Destinations []string `json:",omitempty"`
 }
 
 type ActionType string
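A sketch of what the new Destinations field enables: scoping an outbound NAT policy to specific destination prefixes. The import is an internal package, shown for illustration only, and the addresses are examples.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hns" // internal package; illustration only
)

func main() {
	// Build an outbound NAT policy limited to one destination prefix.
	p := hns.OutboundNatPolicy{
		Policy:       hns.Policy{Type: hns.OutboundNat},
		Exceptions:   []string{"10.0.0.0/8"},
		Destinations: []string{"192.168.1.0/24"},
	}
	b, _ := json.Marshal(p)
	fmt.Println(string(b))
}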
4 vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go (generated, vendored)
@@ -15,10 +15,6 @@ func ConvertAndFreeCoTaskMemString(buffer *uint16) string {
 	return str
 }
 
-func ConvertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
-	return []byte(ConvertAndFreeCoTaskMemString(buffer))
-}
-
 func Win32FromHresult(hr uintptr) syscall.Errno {
 	if hr&0x1fff0000 == 0x00070000 {
 		return syscall.Errno(hr & 0xffff)
23 vendor/github.com/Microsoft/hcsshim/internal/log/g.go (generated, vendored, new file)
@@ -0,0 +1,23 @@
+package log
+
+import (
+	"context"
+
+	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
+)
+
+// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx`
+// contains an OpenCensus `trace.Span`.
+func G(ctx context.Context) *logrus.Entry {
+	span := trace.FromContext(ctx)
+	if span != nil {
+		sctx := span.SpanContext()
+		return logrus.WithFields(logrus.Fields{
+			"traceID": sctx.TraceID.String(),
+			"spanID":  sctx.SpanID.String(),
+			// "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this?
+		})
+	}
+	return logrus.NewEntry(logrus.StandardLogger())
+}
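Usage sketch for G. Note the import is internal to hcsshim, so external code cannot actually import it; it is shown only to illustrate the calling convention.

package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/log" // internal package; illustration only
	"go.opencensus.io/trace"
)

func main() {
	ctx, span := trace.StartSpan(context.Background(), "operation")
	defer span.End()

	// Every line logged through log.G(ctx) carries traceID/spanID fields,
	// so output from one operation can be correlated with its span.
	log.G(ctx).WithField("step", "start").Info("doing work")
}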
43 vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go (generated, vendored, new file)
@@ -0,0 +1,43 @@
+package oc
+
+import (
+	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
+)
+
+var _ = (trace.Exporter)(&LogrusExporter{})
+
+// LogrusExporter is an OpenCensus `trace.Exporter` that exports
+// `trace.SpanData` to logrus output.
+type LogrusExporter struct {
+}
+
+// ExportSpan exports `s` based on the the following rules:
+//
+// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
+// `s.ParentSpanID` for correlation
+//
+// 2. Any calls to .Annotate will not be supported.
+//
+// 3. The span itself will be written at `logrus.InfoLevel` unless
+// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
+// providing `s.Status.Message` as the error value.
+func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
+	// Combine all span annotations with traceID, spanID, parentSpanID
+	baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
+	baseEntry.Data["traceID"] = s.TraceID.String()
+	baseEntry.Data["spanID"] = s.SpanID.String()
+	baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
+	baseEntry.Data["startTime"] = s.StartTime
+	baseEntry.Data["endTime"] = s.EndTime
+	baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
+	baseEntry.Data["name"] = s.Name
+	baseEntry.Time = s.StartTime
+
+	level := logrus.InfoLevel
+	if s.Status.Code != 0 {
+		level = logrus.ErrorLevel
+		baseEntry.Data[logrus.ErrorKey] = s.Status.Message
+	}
+	baseEntry.Log(level, "Span")
+}
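Wiring sketch: registering the exporter so ended spans are logged. AlwaysSample is an example choice for demonstration, not something this diff prescribes, and the internal import is for illustration only.

package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc" // internal package; illustration only
	"go.opencensus.io/trace"
)

func main() {
	// Route every sampled span to logrus via the exporter above.
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	_, span := trace.StartSpan(context.Background(), "demo")
	span.End() // exported (and logged) once the span ends
}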
17 vendor/github.com/Microsoft/hcsshim/internal/oc/span.go (generated, vendored, new file)
@@ -0,0 +1,17 @@
+package oc
+
+import (
+	"go.opencensus.io/trace"
+)
+
+// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If
+// `err` is `nil` assumes `trace.StatusCodeOk`.
+func SetSpanStatus(span *trace.Span, err error) {
+	status := trace.Status{}
+	if err != nil {
+		// TODO: JTERRY75 - Handle errors in a non-generic way
+		status.Code = trace.StatusCodeUnknown
+		status.Message = err.Error()
+	}
+	span.SetStatus(status)
+}
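Intended call pattern, as a sketch: capture the named return in a deferred SetSpanStatus so failures record StatusCodeUnknown plus the message, while success leaves the zero (OK) status. Internal import shown for illustration only.

package main

import (
	"context"
	"errors"

	"github.com/Microsoft/hcsshim/internal/oc" // internal package; illustration only
	"go.opencensus.io/trace"
)

// doWork defers SetSpanStatus over the named return err; deferred calls run
// LIFO, so the status is set before span.End() exports the span.
func doWork(ctx context.Context) (err error) {
	_, span := trace.StartSpan(ctx, "doWork")
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()

	return errors.New("example failure")
}

func main() { _ = doWork(context.Background()) }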
9 vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go (generated, vendored)
@@ -4,7 +4,8 @@ import (
 	"encoding/json"
 	"time"
 
-	"github.com/Microsoft/hcsshim/internal/schema2"
+	"github.com/Microsoft/go-winio/pkg/guid"
+	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
 )
 
 // ProcessConfig is used as both the input of Container.CreateProcess
@@ -62,7 +63,7 @@ type MappedVirtualDisk struct {
 	CreateInUtilityVM bool   `json:",omitempty"`
 	ReadOnly          bool   `json:",omitempty"`
 	Cache             string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
-	AttachOnly        bool   `json:",omitempty:`
+	AttachOnly        bool   `json:",omitempty"`
 }
 
 // AssignedDevice represents a device that has been directly assigned to a container
@@ -133,9 +134,10 @@ type ContainerProperties struct {
 	State             string
 	Name              string
 	SystemType        string
+	RuntimeOSType     string `json:"RuntimeOsType,omitempty"`
 	Owner             string
 	SiloGUID          string `json:"SiloGuid,omitempty"`
-	RuntimeID         string `json:"RuntimeId,omitempty"`
+	RuntimeID         guid.GUID `json:"RuntimeId,omitempty"`
 	IsRuntimeTemplate bool `json:",omitempty"`
 	RuntimeImagePath  string `json:",omitempty"`
 	Stopped           bool `json:",omitempty"`
@@ -214,6 +216,7 @@ type MappedVirtualDiskController struct {
 type GuestDefinedCapabilities struct {
 	NamespaceAddRequestSupported bool `json:",omitempty"`
 	SignalProcessSupported       bool `json:",omitempty"`
+	DumpStacksSupported          bool `json:",omitempty"`
 }
 
 // GuestConnectionInfo is the structure of an iterm return by a GuestConnection call on a utility VM
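The RuntimeID change from string to guid.GUID keeps the string wire format, assuming the vendored go-winio guid type implements the text-marshaler interfaces (which is what the schema1 JSON round-trip relies on). A sketch with an arbitrary GUID:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// The GUID value here is arbitrary, for demonstration only.
	g, err := guid.FromString("f14a0ead-340e-4e95-b94a-4f6ca3e0b22f")
	if err != nil {
		panic(err)
	}
	// Marshal a struct shaped like the changed field; the GUID should
	// serialize as its quoted string form, same as before the change.
	doc, _ := json.Marshal(struct {
		RuntimeID guid.GUID `json:"RuntimeId,omitempty"`
	}{RuntimeID: g})
	fmt.Println(string(doc))
}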
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Attachment struct {
-
 	Type_ string `json:"Type,omitempty"`
 
 	Path string `json:"Path,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type CacheQueryStatsResponse struct {
-
 	L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"`
 
 	L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go (generated, vendored)

@@ -10,6 +10,5 @@
 package hcsschema
 
 type CloseHandle struct {
-
 	Handle string `json:"Handle,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go (generated, vendored)

@@ -11,7 +11,6 @@ package hcsschema
 
 // ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port.
 type ComPort struct {
-
 	NamedPipe string `json:"NamedPipe,omitempty"`
 
 	OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"`
3 vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go (generated, vendored)

@@ -10,14 +10,13 @@
 package hcsschema
 
 type ComputeSystem struct {
-
 	Owner string `json:"Owner,omitempty"`
 
 	SchemaVersion *Version `json:"SchemaVersion,omitempty"`
 
 	HostingSystemId string `json:"HostingSystemId,omitempty"`
 
-	HostedSystem *HostedSystem `json:"HostedSystem,omitempty"`
+	HostedSystem interface{} `json:"HostedSystem,omitempty"`
 
 	Container *Container `json:"Container,omitempty"`
32 vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go (generated, vendored)

@@ -25,37 +25,37 @@ func (c contextKey) String() string {
 
 var (
 	// ContextOAuth2 takes a oauth2.TokenSource as authentication for the request.
 	ContextOAuth2 = contextKey("token")
 
 	// ContextBasicAuth takes BasicAuth as authentication for the request.
 	ContextBasicAuth = contextKey("basic")
 
 	// ContextAccessToken takes a string oauth2 access token as authentication for the request.
 	ContextAccessToken = contextKey("accesstoken")
 
 	// ContextAPIKey takes an APIKey as authentication for the request
 	ContextAPIKey = contextKey("apikey")
 )
 
 // BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth
 type BasicAuth struct {
 	UserName string `json:"userName,omitempty"`
 	Password string `json:"password,omitempty"`
 }
 
 // APIKey provides API key based authentication to a request passed via context using ContextAPIKey
 type APIKey struct {
 	Key    string
 	Prefix string
 }
 
 type Configuration struct {
 	BasePath      string            `json:"basePath,omitempty"`
 	Host          string            `json:"host,omitempty"`
 	Scheme        string            `json:"scheme,omitempty"`
 	DefaultHeader map[string]string `json:"defaultHeader,omitempty"`
 	UserAgent     string            `json:"userAgent,omitempty"`
 	HTTPClient    *http.Client
 }
 
 func NewConfiguration() *Configuration {

@@ -69,4 +69,4 @@ func NewConfiguration() *Configuration {
 
 func (c *Configuration) AddDefaultHeader(key string, value string) {
 	c.DefaultHeader[key] = value
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type ConsoleSize struct {
-
 	Height int32 `json:"Height,omitempty"`
 
 	Width int32 `json:"Width,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Container struct {
-
 	GuestOs *GuestOs `json:"GuestOs,omitempty"`
 
 	Storage *Storage `json:"Storage,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go (generated, vendored)

@@ -11,7 +11,6 @@ package hcsschema
 
 // memory usage as viewed from within the container
 type ContainerMemoryInformation struct {
-
 	TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"`
 
 	TotalUsage int32 `json:"TotalUsage,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Devices struct {
-
 	ComPorts map[string]ComPort `json:"ComPorts,omitempty"`
 
 	Scsi map[string]Scsi `json:"Scsi,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go (generated, vendored)

@@ -10,6 +10,5 @@
 package hcsschema
 
 type EnhancedModeVideo struct {
-
 	ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type FlexibleIoDevice struct {
-
 	EmulatorId string `json:"EmulatorId,omitempty"`
 
 	HostingModel string `json:"HostingModel,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go (generated, vendored)

@@ -10,6 +10,5 @@
 package hcsschema
 
 type GuestCrashReporting struct {
-
 	WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go (generated, vendored)

@@ -10,6 +10,5 @@
 package hcsschema
 
 type GuestOs struct {
-
 	HostName string `json:"HostName,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type HostedSystem struct {
-
 	SchemaVersion *Version `json:"SchemaVersion,omitempty"`
 
 	Container *Container `json:"Container,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type HvSocket struct {
-
 	Config *HvSocketSystemConfig `json:"Config,omitempty"`
 
 	EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go (generated, vendored)

@@ -11,6 +11,5 @@ package hcsschema
 
 // HvSocket configuration for a VM
 type HvSocket2 struct {
-
 	HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Layer struct {
-
 	Id string `json:"Id,omitempty"`
 
 	Path string `json:"Path,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type MappedDirectory struct {
-
 	HostPath string `json:"HostPath,omitempty"`
 
 	HostPathType string `json:"HostPathType,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type MappedPipe struct {
-
 	ContainerPipeName string `json:"ContainerPipeName,omitempty"`
 
 	HostPath string `json:"HostPath,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go (generated, vendored)

@@ -10,6 +10,5 @@
 package hcsschema
 
 type Memory struct {
-
 	SizeInMB int32 `json:"SizeInMB,omitempty"`
 }
5 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go (generated, vendored)

@@ -22,4 +22,9 @@ type Memory2 struct {
 
 	// EnableDeferredCommit is private in the schema. If regenerated need to add back.
 	EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
+
+	// EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed
+	// to the VM, allowing it to trim non-zeroed pages from the working set (if supported by
+	// the guest operating system).
+	EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
 }
3 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go (generated, vendored)

@@ -10,8 +10,7 @@
 package hcsschema
 
 type MemoryInformationForVm struct {
-
-	VirtualNodeCount int32 `json:"VirtualNodeCount,omitempty"`
+	VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"`
 
 	VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"`
7 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go (generated, vendored)

@@ -11,10 +11,9 @@ package hcsschema
 
 // Memory runtime statistics
 type MemoryStats struct {
-
-	MemoryUsageCommitBytes int32 `json:"MemoryUsageCommitBytes,omitempty"`
+	MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
 
-	MemoryUsageCommitPeakBytes int32 `json:"MemoryUsageCommitPeakBytes,omitempty"`
+	MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
 
-	MemoryUsagePrivateWorkingSetBytes int32 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
+	MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type NetworkAdapter struct {
-
 	EndpointId string `json:"EndpointId,omitempty"`
 
 	MacAddress string `json:"MacAddress,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Networking struct {
-
 	AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"`
 
 	DnsSearchList string `json:"DnsSearchList,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go (generated, vendored)

@@ -11,6 +11,5 @@ package hcsschema
 
 // Notification data that is indicated to components running in the Virtual Machine.
 type PauseNotification struct {
-
 	Reason string `json:"Reason,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go (generated, vendored)

@@ -11,7 +11,6 @@ package hcsschema
 
 // Options for HcsPauseComputeSystem
 type PauseOptions struct {
-
 	SuspensionLevel string `json:"SuspensionLevel,omitempty"`
 
 	HostedNotification *PauseNotification `json:"HostedNotification,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go (generated, vendored)

@@ -10,6 +10,5 @@
 package hcsschema
 
 type Plan9 struct {
-
 	Shares []Plan9Share `json:"Shares,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go (generated, vendored)

@@ -15,7 +15,6 @@ import (
 
 // Information about a process running in a container
 type ProcessDetails struct {
-
 	ProcessId int32 `json:"ProcessId,omitempty"`
 
 	ImageName string `json:"ImageName,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go (generated, vendored)

@@ -11,7 +11,6 @@ package hcsschema
 
 // Passed to HcsRpc_ModifyProcess
 type ProcessModifyRequest struct {
-
 	Operation string `json:"Operation,omitempty"`
 
 	ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type ProcessParameters struct {
-
 	ApplicationName string `json:"ApplicationName,omitempty"`
 
 	CommandLine string `json:"CommandLine,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go (generated, vendored)

@@ -11,7 +11,6 @@ package hcsschema
 
 // Status of a process running in a container
 type ProcessStatus struct {
-
 	ProcessId int32 `json:"ProcessId,omitempty"`
 
 	Exited bool `json:"Exited,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Processor struct {
-
 	Count int32 `json:"Count,omitempty"`
 
 	Maximum int32 `json:"Maximum,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Processor2 struct {
-
 	Count int32 `json:"Count,omitempty"`
 
 	Limit int32 `json:"Limit,omitempty"`
7 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go (generated, vendored)

@@ -11,10 +11,9 @@ package hcsschema
 
 // CPU runtime statistics
 type ProcessorStats struct {
-
-	TotalRuntime100ns int32 `json:"TotalRuntime100ns,omitempty"`
+	TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"`
 
-	RuntimeUser100ns int32 `json:"RuntimeUser100ns,omitempty"`
+	RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"`
 
-	RuntimeKernel100ns int32 `json:"RuntimeKernel100ns,omitempty"`
+	RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"`
 }
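Context for the int32 to uint64 moves in these stats structs: a signed 32-bit counter of 100ns ticks wraps in under four minutes of accumulated CPU time, and byte counters wrap at 2 GiB. Quick arithmetic:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Largest duration an int32 can represent in 100ns ticks.
	wrap := time.Duration(int64(math.MaxInt32) * 100) // 100ns per tick
	fmt.Println(wrap)                                 // 3m34.7483647s
}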
9 vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go (generated, vendored)

@@ -9,8 +9,11 @@
 
 package hcsschema
 
-type Properties struct {
+import (
+	v1 "github.com/containerd/cgroups/stats/v1"
+)
 
+type Properties struct {
 	Id string `json:"Id,omitempty"`
 
 	SystemType string `json:"SystemType,omitempty"`
@@ -44,4 +47,8 @@ type Properties struct {
 	SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"`
 
 	GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"`
+
+	// Metrics is not part of the API for HCS but this is used for LCOW v2 to
+	// return the full cgroup metrics from the guest.
+	Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"`
 }
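Sketch of what the new Metrics hook carries. The internal import is shown for illustration only, the JSON field names follow the containerd cgroups v1 stats types as vendored here (an assumption worth verifying against the vendored source), and the payload is made up.

package main

import (
	"encoding/json"
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2" // internal package; illustration only
)

func main() {
	// Minimal made-up payload: the guest's cgroup metrics ride along
	// under the LCOWMetrics key and land in Properties.Metrics.
	raw := `{"Id":"c1","LCOWMetrics":{"memory":{"usage":{"usage":1048576}}}}`
	var p hcsschema.Properties
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	if p.Metrics != nil && p.Metrics.Memory != nil && p.Metrics.Memory.Usage != nil {
		fmt.Println(p.Metrics.Memory.Usage.Usage) // 1048576
	}
}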
5 vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go (generated, vendored)

@@ -9,8 +9,7 @@
 
 package hcsschema
 
-//  By default the basic properties will be returned. This query provides a way to request specific properties.
+// By default the basic properties will be returned. This query provides a way to request specific properties.
 type PropertyQuery struct {
-
-	PropertyTypes []string `json:"PropertyTypes,omitempty"`
+	PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"`
 }
23 vendor/github.com/Microsoft/hcsshim/internal/schema2/property_type.go (generated, vendored, new file)

@@ -0,0 +1,23 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.1
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type PropertyType string
+
+const (
+	PTMemory                      PropertyType = "Memory"
+	PTGuestMemory                 PropertyType = "GuestMemory"
+	PTStatistics                  PropertyType = "Statistics"
+	PTProcessList                 PropertyType = "ProcessList"
+	PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed"
+	PTSharedMemoryRegion          PropertyType = "SharedMemoryRegion"
+	PTGuestConnection             PropertyType = "GuestConnection"
+	PTICHeartbeatStatus           PropertyType = "ICHeartbeatStatus"
+)
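With PropertyTypes now typed, queries are built from the PT* constants rather than raw strings, so a typo becomes a compile error instead of a silently empty result. Sketch (internal import, illustration only):

package main

import (
	"encoding/json"
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2" // internal package; illustration only
)

func main() {
	q := hcsschema.PropertyQuery{
		PropertyTypes: []hcsschema.PropertyType{
			hcsschema.PTStatistics,
			hcsschema.PTProcessList,
		},
	}
	b, _ := json.Marshal(q)
	fmt.Println(string(b)) // {"PropertyTypes":["Statistics","ProcessList"]}
}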
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type RdpConnectionOptions struct {
-
 	AccessSids []string `json:"AccessSids,omitempty"`
 
 	NamedPipe string `json:"NamedPipe,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type RegistryChanges struct {
-
 	AddValues []RegistryValue `json:"AddValues,omitempty"`
 
 	DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type RegistryKey struct {
-
 	Hive string `json:"Hive,omitempty"`
 
 	Name string `json:"Name,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type RegistryValue struct {
-
 	Key *RegistryKey `json:"Key,omitempty"`
 
 	Name string `json:"Name,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go (generated, vendored)

@@ -10,6 +10,5 @@
 package hcsschema
 
 type SharedMemoryConfiguration struct {
-
 	Regions []SharedMemoryRegion `json:"Regions,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type SharedMemoryRegion struct {
-
 	SectionName string `json:"SectionName,omitempty"`
 
 	StartOffset int32 `json:"StartOffset,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type SharedMemoryRegionInfo struct {
-
 	SectionName string `json:"SectionName,omitempty"`
 
 	GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go (generated, vendored)

@@ -11,7 +11,6 @@ package hcsschema
 
 // Silo job information
 type SiloProperties struct {
-
 	Enabled bool `json:"Enabled,omitempty"`
 
 	JobName string `json:"JobName,omitempty"`
3 vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go (generated, vendored)

@@ -15,12 +15,11 @@ import (
 
 // Runtime statistics for a container
 type Statistics struct {
-
 	Timestamp time.Time `json:"Timestamp,omitempty"`
 
 	ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"`
 
-	Uptime100ns int32 `json:"Uptime100ns,omitempty"`
+	Uptime100ns uint64 `json:"Uptime100ns,omitempty"`
 
 	Processor *ProcessorStats `json:"Processor,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type StorageQoS struct {
-
 	IopsMaximum int32 `json:"IopsMaximum,omitempty"`
 
 	BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"`
9 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go (generated, vendored)

@@ -11,12 +11,11 @@ package hcsschema
 
 // Storage runtime statistics
 type StorageStats struct {
-
-	ReadCountNormalized int32 `json:"ReadCountNormalized,omitempty"`
+	ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"`
 
-	ReadSizeBytes int32 `json:"ReadSizeBytes,omitempty"`
+	ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"`
 
-	WriteCountNormalized int32 `json:"WriteCountNormalized,omitempty"`
+	WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"`
 
-	WriteSizeBytes int32 `json:"WriteSizeBytes,omitempty"`
+	WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"`
 }
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Topology struct {
-
 	Memory *Memory2 `json:"Memory,omitempty"`
 
 	Processor *Processor2 `json:"Processor,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type Uefi struct {
-
 	EnableDebugger bool `json:"EnableDebugger,omitempty"`
 
 	SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"`
1 vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go (generated, vendored)

@@ -10,7 +10,6 @@
 package hcsschema
 
 type UefiBootEntry struct {
-
 	DeviceType string `json:"DeviceType,omitempty"`
 
 	DevicePath string `json:"DevicePath,omitempty"`
Some files were not shown because too many files have changed in this diff.