Merge pull request #47200 from thaJeztah/24.0_backport_test_fixes
[24.0 backport] assorted test- and CI fixes
Commit 7e8b823636
26 changed files with 191 additions and 164 deletions
.github/workflows/bin-image.yml (2 changes, vendored)
@@ -144,9 +144,9 @@ jobs:
   merge:
     runs-on: ubuntu-20.04
-    if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
     needs:
       - build
+    if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'
     steps:
       -
         name: Download meta bake definition
.github/workflows/ci.yml (2 changes, vendored)
@@ -10,8 +10,6 @@ on:
     branches:
       - 'master'
       - '[0-9]+.[0-9]+'
-    tags:
-      - 'v*'
   pull_request:
 
 env:
Makefile (2 changes)
@@ -4,7 +4,7 @@ DOCKER ?= docker
 BUILDX ?= $(DOCKER) buildx
 
 # set the graph driver as the current graphdriver if not set
-DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
+DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info -f {{ .Driver }} 2>&1))
 export DOCKER_GRAPHDRIVER
 
 DOCKER_GITCOMMIT := $(shell git rev-parse HEAD)
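The Makefile change above swaps the grep/sed pipeline for the CLI's built-in Go-template formatter: `docker info -f {{ .Driver }}` prints only the storage-driver name. For reference, a minimal Go sketch (not part of this PR, purely illustrative) that reads the same field through the Docker API client:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Connect to the daemon the same way the docker CLI does (DOCKER_HOST etc.).
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Info.Driver is the storage driver; `docker info -f '{{ .Driver }}'`
	// formats this same field on the command line.
	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Driver) // e.g. "overlay2"
}
```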
@@ -10,6 +10,9 @@ import (
     "testing"
 
     "github.com/docker/docker/errdefs"
+    "github.com/docker/docker/testutil"
+    "gotest.tools/v3/assert"
+    is "gotest.tools/v3/assert/cmp"
 )
 
 func TestImageTagError(t *testing.T) {
@@ -36,15 +39,52 @@ func TestImageTagInvalidReference(t *testing.T) {
     }
 }
 
+// Ensure we don't allow the use of invalid repository names or tags; these tag operations should fail.
 func TestImageTagInvalidSourceImageName(t *testing.T) {
+    ctx := context.Background()
+
     client := &Client{
-        client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+        client: newMockClient(errorMock(http.StatusInternalServerError, "client should not have made an API call")),
     }
 
-    err := client.ImageTag(context.Background(), "invalid_source_image_name_", "repo:tag")
-    if err == nil || err.Error() != `Error parsing reference: "invalid_source_image_name_" is not a valid repository/tag: invalid reference format` {
-        t.Fatalf("expected Parsing Reference Error, got %v", err)
+    invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "aa/asdf$$^/aa"}
+    for _, repo := range invalidRepos {
+        repo := repo
+        t.Run("invalidRepo/"+repo, func(t *testing.T) {
+            t.Parallel()
+            err := client.ImageTag(ctx, "busybox", repo)
+            assert.Check(t, is.ErrorContains(err, "not a valid repository/tag"))
+        })
+    }
+
+    longTag := testutil.GenerateRandomAlphaOnlyString(121)
+    invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
+    for _, repotag := range invalidTags {
+        repotag := repotag
+        t.Run("invalidTag/"+repotag, func(t *testing.T) {
+            t.Parallel()
+            err := client.ImageTag(ctx, "busybox", repotag)
+            assert.Check(t, is.ErrorContains(err, "not a valid repository/tag"))
+        })
+    }
+
+    t.Run("test repository name begin with '-'", func(t *testing.T) {
+        t.Parallel()
+        err := client.ImageTag(ctx, "busybox:latest", "-busybox:test")
+        assert.Check(t, is.ErrorContains(err, "Error parsing reference"))
+    })
+
+    t.Run("test namespace name begin with '-'", func(t *testing.T) {
+        t.Parallel()
+        err := client.ImageTag(ctx, "busybox:latest", "-test/busybox:test")
+        assert.Check(t, is.ErrorContains(err, "Error parsing reference"))
+    })
+
+    t.Run("test index name begin with '-'", func(t *testing.T) {
+        t.Parallel()
+        err := client.ImageTag(ctx, "busybox:latest", "-index:5000/busybox:test")
+        assert.Check(t, is.ErrorContains(err, "Error parsing reference"))
+    })
 }
 
 func TestImageTagHexSource(t *testing.T) {
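The rewritten client test above changes the mock's error message from "Server error" to "client should not have made an API call", making explicit that reference validation must happen client-side, before any HTTP request. The `newMockClient`/`errorMock` helpers are internal to the client package and not shown in this diff; the sketch below is an assumed, simplified equivalent built on a plain `http.RoundTripper` to illustrate the idea:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// roundTripFunc adapts a function to http.RoundTripper, a common way to build
// HTTP test doubles; the client package's newMockClient helper is assumed to
// be wired up along these lines.
type roundTripFunc func(*http.Request) (*http.Response, error)

func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

func main() {
	// Every request through this client fails loudly, so a test using it only
	// passes if invalid references are rejected before any API call is made.
	httpClient := &http.Client{
		Transport: roundTripFunc(func(*http.Request) (*http.Response, error) {
			return nil, errors.New("client should not have made an API call")
		}),
	}
	_, err := httpClient.Get("http://daemon.invalid/images/busybox/tag")
	fmt.Println(err)
}
```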
@@ -174,7 +174,6 @@ test_env() {
     DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \
     DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \
     DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \
-    DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \
     DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
     DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
     DOCKER_HOST="$DOCKER_HOST" \
@@ -57,4 +57,4 @@ source hack/make/.integration-test-helpers
     exec docker run --rm ${run_opts} --mount type=bind,"src=${ABS_DEST}","dst=/src/${DEST}" "${docker_py_image}" pytest ${PY_TEST_OPTIONS} tests/integration
 )
 bundle .integration-daemon-stop
-) 2>&1 | tee -a "$DEST/test.log"
+) &> >(tee -a "$DEST/test.log")
@@ -24,4 +24,4 @@ fi
     set -x
     exit ${testexit}
 
-) 2>&1 | tee -a "$DEST/test.log"
+) &> >(tee -a "$DEST/test.log")
@@ -53,7 +53,6 @@ test_env() {
     DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \
     DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \
     DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \
-    DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \
     DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
     DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
     DOCKER_HOST="$DOCKER_HOST" \
@@ -576,6 +576,9 @@ func (s *DockerSwarmSuite) TearDownTest(c *testing.T) {
     s.daemonsLock.Lock()
     for _, d := range s.daemons {
         if d != nil {
+            if c.Failed() {
+                d.TailLogsT(c, 100)
+            }
             d.Stop(c)
             d.Cleanup(c)
         }
@@ -81,7 +81,7 @@ func (s *DockerCLIEventSuite) TestEventsOOMDisableFalse(c *testing.T) {
 }
 
 func (s *DockerCLIEventSuite) TestEventsOOMDisableTrue(c *testing.T) {
-    testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport, NotPpc64le)
+    testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport, NotPpc64le)
     skip.If(c, GitHubActions, "FIXME: https://github.com/moby/moby/pull/36541")
 
     errChan := make(chan error, 1)
@@ -835,7 +835,6 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *testing.T) {
-    testRequires(c, NotArm)
     hostsFile := "/etc/hosts"
     cstmBridgeNw := "custom-bridge-nw"
     cstmBridgeNw1 := "custom-bridge-nw1"
@@ -1137,7 +1136,6 @@ func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *testing.T) {
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *testing.T) {
-    testRequires(c, NotArm)
     dockerCmd(c, "network", "create", "test1")
     dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top")
     assert.Assert(c, waitRun("c1") == nil)
@@ -1158,7 +1156,6 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c
     // host port mapping to/from networks which do cause and do not cause
     // the container default gateway to change, and verify docker port cmd
     // returns congruent information
-    testRequires(c, NotArm)
     cnt := "c1"
     dockerCmd(c, "network", "create", "aaa")
     dockerCmd(c, "network", "create", "ccc")
@@ -1416,7 +1413,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *testing.T) {
 }
 
 func (s *DockerCLINetworkSuite) TestUserDefinedNetworkConnectDisconnectLink(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     dockerCmd(c, "network", "create", "-d", "bridge", "foo1")
     dockerCmd(c, "network", "create", "-d", "bridge", "foo2")
 
@@ -1477,7 +1474,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *testing.T) {
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
 
     defaults := []string{"bridge", "host", "none"}
     out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top")
@@ -1490,7 +1487,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(
 }
 
 func (s *DockerCLINetworkSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     dockerCmd(c, "network", "create", "-d", "bridge", "net1")
     dockerCmd(c, "network", "create", "-d", "bridge", "net2")
 
@@ -283,7 +283,6 @@ func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *testing.T) {
 }
 
 func (s *DockerRegistrySuite) TestPullManifestList(c *testing.T) {
-    testRequires(c, NotArm)
     pushDigest, err := setupImage(c)
     assert.NilError(c, err, "error setting up image")
 
@@ -92,7 +92,7 @@ func (s *DockerCLIRestartSuite) TestRestartWithVolumes(c *testing.T) {
 }
 
 func (s *DockerCLIRestartSuite) TestRestartDisconnectedContainer(c *testing.T) {
-    testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon, NotUserNamespace)
 
     // Run a container on the default bridge network
     out, _ := dockerCmd(c, "run", "-d", "--name", "c0", "busybox", "top")
@@ -213,7 +213,7 @@ func (s *DockerCLIRestartSuite) TestRestartContainerSuccess(c *testing.T) {
 
 func (s *DockerCLIRestartSuite) TestRestartWithPolicyUserDefinedNetwork(c *testing.T) {
     // TODO Windows. This may be portable following HNS integration post TP5.
-    testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon, NotUserNamespace)
     dockerCmd(c, "network", "create", "-d", "bridge", "udNet")
 
     dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top")
@@ -81,8 +81,8 @@ func (s *DockerCLIRunSuite) TestRunLeakyFileDescriptors(c *testing.T) {
 // it should be possible to lookup Google DNS
 // this will fail when Internet access is unavailable
 func (s *DockerCLIRunSuite) TestRunLookupGoogleDNS(c *testing.T) {
-    testRequires(c, Network, NotArm)
-    if testEnv.OSType == "windows" {
+    testRequires(c, Network)
+    if testEnv.DaemonInfo.OSType == "windows" {
         // nslookup isn't present in Windows busybox. Is built-in. Further,
         // nslookup isn't present in nanoserver. Hence just use PowerShell...
         dockerCmd(c, "run", testEnv.PlatformDefaults.BaseImage, "powershell", "Resolve-DNSName", "google.com")
@@ -216,7 +216,7 @@ func (s *DockerCLIRunSuite) TestRunLinksContainerWithContainerID(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestUserDefinedNetworkLinks(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
 
     dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
@@ -252,7 +252,7 @@ func (s *DockerCLIRunSuite) TestUserDefinedNetworkLinks(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestUserDefinedNetworkLinksWithRestart(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
 
     dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
@@ -290,7 +290,7 @@ func (s *DockerCLIRunSuite) TestUserDefinedNetworkLinksWithRestart(c *testing.T)
 }
 
 func (s *DockerCLIRunSuite) TestRunWithNetAliasOnDefaultNetworks(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
 
     defaults := []string{"bridge", "host", "none"}
     for _, net := range defaults {
@@ -301,7 +301,7 @@ func (s *DockerCLIRunSuite) TestRunWithNetAliasOnDefaultNetworks(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestUserDefinedNetworkAlias(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     dockerCmd(c, "network", "create", "-d", "bridge", "net1")
 
     cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox:glibc", "top")
@@ -753,7 +753,7 @@ func (s *DockerCLIRunSuite) TestRunUserByID(c *testing.T) {
 func (s *DockerCLIRunSuite) TestRunUserByIDBig(c *testing.T) {
     // TODO Windows: This test cannot run on a Windows daemon as Windows does
     // not support the use of -u
-    testRequires(c, DaemonIsLinux, NotArm)
+    testRequires(c, DaemonIsLinux)
     out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id")
     if err == nil {
         c.Fatal("No error, but must be.", out)
@@ -1115,7 +1115,7 @@ func (s *DockerCLIRunSuite) TestRunUnprivilegedCannotMount(c *testing.T) {
 
 func (s *DockerCLIRunSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *testing.T) {
     // Not applicable for Windows as there is no concept of unprivileged
-    testRequires(c, DaemonIsLinux, NotArm)
+    testRequires(c, DaemonIsLinux)
     if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 {
         c.Fatal("sys should not be writable in a non privileged container")
     }
@@ -1123,7 +1123,7 @@ func (s *DockerCLIRunSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *te
 
 func (s *DockerCLIRunSuite) TestRunSysWritableInPrivilegedContainers(c *testing.T) {
     // Not applicable for Windows as there is no concept of unprivileged
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 {
         c.Fatalf("sys should be writable in privileged container")
     }
@@ -1410,7 +1410,7 @@ func (s *DockerCLIRunSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *testing.T)
 // check if the container resolv.conf file has at least 0644 perm.
 func (s *DockerCLIRunSuite) TestRunNonRootUserResolvName(c *testing.T) {
     // Not applicable on Windows as Windows does not support --user
-    testRequires(c, testEnv.IsLocalDaemon, Network, DaemonIsLinux, NotArm)
+    testRequires(c, testEnv.IsLocalDaemon, Network, DaemonIsLinux)
 
     dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "example.com")
 
@@ -3436,7 +3436,7 @@ func (s *DockerCLIRunSuite) TestTwoContainersInNetHost(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestContainersInUserDefinedNetwork(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork")
     dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top")
     assert.Assert(c, waitRun("first") == nil)
@@ -3444,7 +3444,7 @@ func (s *DockerCLIRunSuite) TestContainersInUserDefinedNetwork(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestContainersInMultipleNetworks(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     // Create 2 networks using bridge driver
     dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
     dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
@@ -3463,7 +3463,7 @@ func (s *DockerCLIRunSuite) TestContainersInMultipleNetworks(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestContainersNetworkIsolation(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     // Create 2 networks using bridge driver
     dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
     dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
@@ -3508,7 +3508,7 @@ func (s *DockerCLIRunSuite) TestNetworkRmWithActiveContainers(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestContainerRestartInMultipleNetworks(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     // Create 2 networks using bridge driver
     dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
     dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
@@ -81,7 +81,7 @@ func (s *DockerCLIRunSuite) TestRunWithVolumesIsRecursive(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestRunDeviceDirectory(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+    testRequires(c, DaemonIsLinux, NotUserNamespace)
     if _, err := os.Stat("/dev/snd"); err != nil {
         c.Skip("Host does not have /dev/snd")
     }
@@ -891,7 +891,7 @@ func (s *DockerCLIRunSuite) TestRunSysctls(c *testing.T) {
 
 // TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:bullseye-slim unshare' exits with operation not permitted.
 func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyUnshare(c *testing.T) {
-    testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, NotArm, Apparmor)
+    testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, Apparmor)
     jsonData := `{
     "defaultAction": "SCMP_ACT_ALLOW",
     "syscalls": [
@@ -955,7 +955,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyChmod(c *testing.T) {
 // TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:bullseye-slim unshare --map-root-user --user sh -c whoami' with a specific profile to
 // deny unshare of a userns exits with operation not permitted.
 func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyUnshareUserns(c *testing.T) {
-    testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, NotArm, Apparmor)
+    testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, Apparmor)
     // from sched.h
     jsonData := fmt.Sprintf(`{
     "defaultAction": "SCMP_ACT_ALLOW",
@@ -1344,7 +1344,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompWithDefaultProfile(c *testing.T) {
 
 // TestRunDeviceSymlink checks run with device that follows symlink (#13840 and #22271)
 func (s *DockerCLIRunSuite) TestRunDeviceSymlink(c *testing.T) {
-    testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, testEnv.IsLocalDaemon)
+    testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
     if _, err := os.Stat("/dev/zero"); err != nil {
         c.Skip("Host does not have /dev/zero")
     }
@@ -27,6 +27,7 @@ import (
     "github.com/docker/docker/libnetwork/driverapi"
     "github.com/docker/docker/libnetwork/ipamapi"
     remoteipam "github.com/docker/docker/libnetwork/ipams/remote/api"
+    testdaemon "github.com/docker/docker/testutil/daemon"
     "github.com/moby/swarmkit/v2/ca/keyutils"
     "github.com/vishvananda/netlink"
     "gotest.tools/v3/assert"
@@ -1223,7 +1224,10 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *testing.T) {
     poll.WaitOn(c, pollCheck(c, d3.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
 }
 
+const swarmIsEncryptedMsg = "Swarm is encrypted and needs to be unlocked"
+
 func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
+    ctx := context.TODO()
     d := s.AddDaemon(c, true, true)
 
     outs, err := d.Cmd("swarm", "update", "--autolock")
@@ -1242,12 +1246,16 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
     d.RestartNode(c)
     assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateLocked)
 
-    outs, _ = d.Cmd("node", "ls")
-    assert.Assert(c, strings.Contains(outs, "Swarm is encrypted and needs to be unlocked"), outs)
-    cmd := d.Command("swarm", "unlock")
-    cmd.Stdin = bytes.NewBufferString(unlockKey)
-    result := icmd.RunCmd(cmd)
+    unlock := func(d *daemon.Daemon, key string) *icmd.Result {
+        cmd := d.Command("swarm", "unlock")
+        cmd.Stdin = strings.NewReader(key)
+        return icmd.RunCmd(cmd)
+    }
+
+    outs, _ = d.Cmd("node", "ls")
+    assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)
+
+    result := unlock(d, unlockKey)
     if result.Error == nil {
         // On occasion, the daemon may not have finished
         // rotating the KEK before restarting. The test is
@@ -1257,13 +1265,16 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
         // restart again, the new key should be required this
         // time.
 
-        time.Sleep(3 * time.Second)
+        // Wait for the rotation to happen
+        // Since there are multiple rotations, we need to wait until for the number of rotations we are currently on to be reflected in the logs
+        // This is a little janky... its depending on specific log messages AND these are debug logs... but it is the best we can do for now.
+        matcher := testdaemon.ScanLogsMatchCount(testdaemon.ScanLogsMatchString("successfully rotated KEK"), i+1)
+        poll.WaitOn(c, d.PollCheckLogs(ctx, matcher), poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))
-        d.Restart(c)
+        d.RestartNode(c)
 
-        cmd = d.Command("swarm", "unlock")
-        cmd.Stdin = bytes.NewBufferString(unlockKey)
-        result = icmd.RunCmd(cmd)
+        result = unlock(d, unlockKey)
     }
     result.Assert(c, icmd.Expected{
         ExitCode: 1,
@@ -1271,28 +1282,20 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
         })
 
         outs, _ = d.Cmd("node", "ls")
-        assert.Assert(c, strings.Contains(outs, "Swarm is encrypted and needs to be unlocked"), outs)
-        cmd = d.Command("swarm", "unlock")
-        cmd.Stdin = bytes.NewBufferString(newUnlockKey)
-        icmd.RunCmd(cmd).Assert(c, icmd.Success)
+        assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)
+        unlock(d, newUnlockKey).Assert(c, icmd.Success)
 
         assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateActive)
 
-        retry := 0
-        for {
+        checkNodeLs := func(t poll.LogT) poll.Result {
             // an issue sometimes prevents leader to be available right away
-            outs, err = d.Cmd("node", "ls")
-            if err != nil && retry < 5 {
-                if strings.Contains(outs, "swarm does not have a leader") {
-                    retry++
-                    time.Sleep(3 * time.Second)
-                    continue
-                }
+            out, err := d.Cmd("node", "ls")
+            if err != nil {
+                return poll.Continue("error running node ls: %v: %s", err, out)
             }
-            assert.NilError(c, err)
-            assert.Assert(c, !strings.Contains(outs, "Swarm is encrypted and needs to be unlocked"), outs)
-            break
+            return poll.Success()
         }
+        poll.WaitOn(c, checkNodeLs, poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))
 
         unlockKey = newUnlockKey
     }
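The hand-rolled retry loops above are replaced by `poll.WaitOn` with check functions that return `poll.Continue` to retry and `poll.Success` when the condition holds. As a reference for the pattern (not code from this PR; the marker-file path is purely illustrative):

```go
package example

import (
	"os"
	"testing"
	"time"

	"gotest.tools/v3/poll"
)

// TestWaitForMarkerFile illustrates the poll.Check shape used above: return
// poll.Continue to keep retrying and poll.Success once the condition holds.
func TestWaitForMarkerFile(t *testing.T) {
	check := func(poll.LogT) poll.Result {
		if _, err := os.Stat("/tmp/ready"); err != nil {
			return poll.Continue("marker file not present yet: %v", err)
		}
		return poll.Success()
	}
	poll.WaitOn(t, check, poll.WithDelay(100*time.Millisecond), poll.WithTimeout(5*time.Second))
}
```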
@@ -1308,6 +1311,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
     if runtime.GOARCH == "ppc64le" {
         c.Skip("Disabled on ppc64le")
     }
+    ctx := context.TODO()
 
     d1 := s.AddDaemon(c, true, true) // leader - don't restart this one, we don't want leader election delays
     d2 := s.AddDaemon(c, true, true)
@@ -1329,15 +1333,23 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
     d2.RestartNode(c)
     d3.RestartNode(c)
 
+    unlock := func(d *daemon.Daemon, key string) *icmd.Result {
+        cmd := d.Command("swarm", "unlock")
+        cmd.Stdin = strings.NewReader(key)
+        return icmd.RunCmd(cmd)
+    }
+
+    const swarmIsEncryptedMsg = "Swarm is encrypted and needs to be unlocked"
+
     for _, d := range []*daemon.Daemon{d2, d3} {
         assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateLocked)
 
         outs, _ := d.Cmd("node", "ls")
-        assert.Assert(c, strings.Contains(outs, "Swarm is encrypted and needs to be unlocked"), outs)
-        cmd := d.Command("swarm", "unlock")
-        cmd.Stdin = bytes.NewBufferString(unlockKey)
-        result := icmd.RunCmd(cmd)
+        assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)
+
+        // unlock with the original key should fail
+        // Use poll here because the daemon may not have finished
+        result := unlock(d, unlockKey)
         if result.Error == nil {
             // On occasion, the daemon may not have finished
             // rotating the KEK before restarting. The test is
@@ -1347,13 +1359,14 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
             // restart again, the new key should be required this
             // time.
 
-            time.Sleep(3 * time.Second)
+            // Wait for the rotation to happen
+            // Since there are multiple rotations, we need to wait until for the number of rotations we are currently on to be reflected in the logs
+            // This is a little janky... its depending on specific log messages AND these are debug logs... but it is the best we can do for now.
+            matcher := testdaemon.ScanLogsMatchCount(testdaemon.ScanLogsMatchString("successfully rotated KEK"), i+1)
+            poll.WaitOn(c, d.PollCheckLogs(ctx, matcher), poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))
-            d.Restart(c)
+            d.RestartNode(c)
 
-            cmd = d.Command("swarm", "unlock")
-            cmd.Stdin = bytes.NewBufferString(unlockKey)
-            result = icmd.RunCmd(cmd)
+            result = unlock(d, unlockKey)
         }
         result.Assert(c, icmd.Expected{
             ExitCode: 1,
@@ -1361,31 +1374,21 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
             })
 
             outs, _ = d.Cmd("node", "ls")
-            assert.Assert(c, strings.Contains(outs, "Swarm is encrypted and needs to be unlocked"), outs)
-            cmd = d.Command("swarm", "unlock")
-            cmd.Stdin = bytes.NewBufferString(newUnlockKey)
-            icmd.RunCmd(cmd).Assert(c, icmd.Success)
+            assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)
+
+            // now unlock with the rotated key, this should succeed
+            unlock(d, newUnlockKey).Assert(c, icmd.Success)
             assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateActive)
 
-            retry := 0
-            for {
+            checkNodeLs := func(t poll.LogT) poll.Result {
                 // an issue sometimes prevents leader to be available right away
-                outs, err = d.Cmd("node", "ls")
-                if err != nil && retry < 5 {
-                    if strings.Contains(outs, "swarm does not have a leader") {
-                        retry++
-                        c.Logf("[%s] got 'swarm does not have a leader'. retrying (attempt %d/5)", d.ID(), retry)
-                        time.Sleep(3 * time.Second)
-                        continue
-                    } else {
-                        c.Logf("[%s] gave error: '%v'. retrying (attempt %d/5): %s", d.ID(), err, retry, outs)
-                    }
+                out, err := d.Cmd("node", "ls")
+                if err != nil {
+                    return poll.Continue("error running node ls: %v: %s", err, out)
                 }
-                assert.NilError(c, err, "[%s] failed after %d retries: %v (%s)", d.ID(), retry, err, outs)
-                assert.Assert(c, !strings.Contains(outs, "Swarm is encrypted and needs to be unlocked"), outs)
-                break
+                return poll.Success()
             }
+            poll.WaitOn(c, checkNodeLs, poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))
         }
 
         unlockKey = newUnlockKey
@@ -19,10 +19,6 @@ import (
     "github.com/docker/docker/testutil/registry"
 )
 
-func ArchitectureIsNot(arch string) bool {
-    return os.Getenv("DOCKER_ENGINE_GOARCH") != arch
-}
-
 func DaemonIsWindows() bool {
     return testEnv.OSType == "windows"
 }
@@ -50,19 +46,15 @@ func OnlyDefaultNetworks() bool {
 }
 
 func IsAmd64() bool {
-    return os.Getenv("DOCKER_ENGINE_GOARCH") == "amd64"
-}
-
-func NotArm() bool {
-    return ArchitectureIsNot("arm")
+    return testEnv.DaemonVersion.Arch == "amd64"
 }
 
 func NotArm64() bool {
-    return ArchitectureIsNot("arm64")
+    return testEnv.DaemonVersion.Arch != "arm64"
 }
 
 func NotPpc64le() bool {
-    return ArchitectureIsNot("ppc64le")
+    return testEnv.DaemonVersion.Arch != "ppc64le"
 }
 
 func UnixCli() bool {
@@ -32,7 +32,7 @@ func TestRemoveImageGarbageCollector(t *testing.T) {
     // This test uses very platform specific way to prevent
     // daemon for remove image layer.
     skip.If(t, testEnv.DaemonInfo.OSType != "linux")
-    skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+    skip.If(t, testEnv.NotAmd64)
     skip.If(t, testEnv.IsRootless, "rootless mode doesn't support overlay2 on most distros")
 
     // Create daemon with overlay2 graphdriver because vfs uses disk differently
@@ -5,7 +5,6 @@ import (
     "fmt"
     "testing"
 
-    "github.com/docker/docker/testutil"
     "gotest.tools/v3/assert"
     is "gotest.tools/v3/assert/cmp"
 )
@@ -27,43 +26,11 @@ func TestTagUnprefixedRepoByNameOrName(t *testing.T) {
     assert.NilError(t, err)
 }
 
-// ensure we don't allow the use of invalid repository names or tags; these tag operations should fail
-// TODO (yongtang): Migrate to unit tests
-func TestTagInvalidReference(t *testing.T) {
+func TestTagUsingDigestAlgorithmAsName(t *testing.T) {
     defer setupTest(t)()
     client := testEnv.APIClient()
     ctx := context.Background()
 
-    invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"}
-
-    for _, repo := range invalidRepos {
-        err := client.ImageTag(ctx, "busybox", repo)
-        assert.Check(t, is.ErrorContains(err, "not a valid repository/tag"))
-    }
-
-    longTag := testutil.GenerateRandomAlphaOnlyString(121)
-
-    invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
-
-    for _, repotag := range invalidTags {
-        err := client.ImageTag(ctx, "busybox", repotag)
-        assert.Check(t, is.ErrorContains(err, "not a valid repository/tag"))
-    }
-
-    // test repository name begin with '-'
-    err := client.ImageTag(ctx, "busybox:latest", "-busybox:test")
-    assert.Check(t, is.ErrorContains(err, "Error parsing reference"))
-
-    // test namespace name begin with '-'
-    err = client.ImageTag(ctx, "busybox:latest", "-test/busybox:test")
-    assert.Check(t, is.ErrorContains(err, "Error parsing reference"))
-
-    // test index name begin with '-'
-    err = client.ImageTag(ctx, "busybox:latest", "-index:5000/busybox:test")
-    assert.Check(t, is.ErrorContains(err, "Error parsing reference"))
-
-    // test setting tag fails
-    err = client.ImageTag(ctx, "busybox:latest", "sha256:sometag")
+    err := client.ImageTag(ctx, "busybox:latest", "sha256:sometag")
     assert.Check(t, is.ErrorContains(err, "refusing to create an ambiguous tag using digest algorithm as name"))
 }
@@ -76,8 +43,12 @@ func TestTagValidPrefixedRepo(t *testing.T) {
     validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"}
 
     for _, repo := range validRepos {
-        err := client.ImageTag(ctx, "busybox", repo)
-        assert.NilError(t, err)
+        repo := repo
+        t.Run(repo, func(t *testing.T) {
+            t.Parallel()
+            err := client.ImageTag(ctx, "busybox", repo)
+            assert.NilError(t, err)
+        })
     }
 }
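The subtests above shadow the range variable (`repo := repo`) before calling `t.Parallel()`. Under the pre-Go 1.22 loop-variable semantics this branch builds with, that shadowing is what keeps each parallel closure bound to its own value; a small illustrative test (not from this PR):

```go
package example

import "testing"

// With the pre-Go 1.22 loop-variable semantics used on this branch, parallel
// subtest closures would all observe the final value of the range variable.
// Shadowing it per iteration, as the diff above does, pins a private copy.
func TestLoopVariableCapture(t *testing.T) {
	repos := []string{"fooo/bar", "fooaa/test", "foooo:t"}
	for _, repo := range repos {
		repo := repo // capture a per-iteration copy for the parallel closure
		t.Run(repo, func(t *testing.T) {
			t.Parallel()
			t.Log("tagging", repo)
		})
	}
}
```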
@@ -107,18 +78,20 @@ func TestTagOfficialNames(t *testing.T) {
     }
 
     for _, name := range names {
-        err := client.ImageTag(ctx, "busybox", name+":latest")
-        assert.NilError(t, err)
+        name := name
+        t.Run("tag from busybox to "+name, func(t *testing.T) {
+            err := client.ImageTag(ctx, "busybox", name+":latest")
+            assert.NilError(t, err)
+
+            // ensure we don't have multiple tag names.
+            insp, _, err := client.ImageInspectWithRaw(ctx, "busybox")
+            assert.NilError(t, err)
+            assert.Assert(t, !is.Contains(insp.RepoTags, name)().Success())
-    }
-
-        // ensure we don't have multiple tag names.
-        insp, _, err := client.ImageInspectWithRaw(ctx, "busybox")
-        assert.NilError(t, err)
+            // TODO(vvoland): Not sure what's actually being tested here. Is is still doing anything useful?
-        assert.Assert(t, !is.Contains(insp.RepoTags, name)().Success())
 
-    for _, name := range names {
-        err := client.ImageTag(ctx, name+":latest", "fooo/bar:latest")
-        assert.NilError(t, err)
+            err = client.ImageTag(ctx, name+":latest", "test-tag-official-names/foobar:latest")
+            assert.NilError(t, err)
+        })
     }
 }
@@ -7,7 +7,6 @@ import (
     "context"
     "fmt"
     "io"
-    "os"
     "strings"
     "testing"
 
@@ -41,7 +40,7 @@ func setupTestV2(t *testing.T) func() {
 }
 
 func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) {
-    skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+    skip.If(t, testEnv.NotAmd64)
     defer setupTestV2(t)()
 
     c := d.NewClientT(t)
@@ -63,7 +62,7 @@ func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) {
 }
 
 func TestAuthZPluginV2Disable(t *testing.T) {
-    skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+    skip.If(t, testEnv.NotAmd64)
    defer setupTestV2(t)()
 
     c := d.NewClientT(t)
@@ -89,7 +88,7 @@ func TestAuthZPluginV2Disable(t *testing.T) {
 }
 
 func TestAuthZPluginV2RejectVolumeRequests(t *testing.T) {
-    skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+    skip.If(t, testEnv.NotAmd64)
     defer setupTestV2(t)()
 
     c := d.NewClientT(t)
@@ -124,7 +123,7 @@ func TestAuthZPluginV2RejectVolumeRequests(t *testing.T) {
 }
 
 func TestAuthZPluginV2BadManifestFailsDaemonStart(t *testing.T) {
-    skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+    skip.If(t, testEnv.NotAmd64)
     defer setupTestV2(t)()
 
     c := d.NewClientT(t)
|
@ -407,7 +407,7 @@ func TestGraphdriverPluginV2(t *testing.T) {
|
|||
skip.If(t, runtime.GOOS == "windows")
|
||||
skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
|
||||
skip.If(t, !requirement.HasHubConnectivity(t))
|
||||
skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
|
||||
skip.If(t, testEnv.NotAmd64)
|
||||
skip.If(t, !requirement.Overlay2Supported(testEnv.DaemonInfo.KernelVersion))
|
||||
|
||||
d := daemon.New(t, daemon.WithExperimental())
|
||||
|
|
|
@@ -3,7 +3,6 @@ package service
 import (
     "context"
     "io"
-    "os"
     "path"
     "strings"
     "testing"
@@ -24,7 +23,7 @@
 func TestServicePlugin(t *testing.T) {
     skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
     skip.If(t, testEnv.DaemonInfo.OSType == "windows")
-    skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+    skip.If(t, testEnv.NotAmd64)
     defer setupTest(t)()
 
     reg := registry.NewV2(t)
@@ -11,8 +11,6 @@ import (
     "golang.org/x/sys/unix"
 )
 
-const imageSize = 64 * 1024 * 1024
-
 // CanTestQuota - checks if xfs prjquota can be tested
 // returns a reason if not
 func CanTestQuota() (string, bool) {
@@ -29,6 +27,12 @@ func CanTestQuota() (string, bool) {
 // PrepareQuotaTestImage - prepares an xfs prjquota test image
 // returns the path the the image on success
 func PrepareQuotaTestImage(t *testing.T) (string, error) {
+    // imageSize is the size of the test-image. The minimum size allowed
+    // is 300MB.
+    //
+    // See https://git.kernel.org/pub/scm/fs/xfs/xfsprogs-dev.git/commit/?id=6e0ed3d19c54603f0f7d628ea04b550151d8a262
+    const imageSize = 300 * 1024 * 1024
+
     mkfs, err := exec.LookPath("mkfs.xfs")
     if err != nil {
         return "", err
@@ -335,6 +335,17 @@ func ScanLogsMatchString(contains string) func(string) bool {
     }
 }
 
+// ScanLogsMatchCount returns a function that can be used to scan the daemon logs until the passed in matcher function matches `count` times
+func ScanLogsMatchCount(f func(string) bool, count int) func(string) bool {
+    matched := 0
+    return func(line string) bool {
+        if f(line) {
+            matched++
+        }
+        return matched == count
+    }
+}
+
 // ScanLogsMatchAll returns a function that can be used to scan the daemon logs until *all* the passed in strings are matched
 func ScanLogsMatchAll(contains ...string) func(string) bool {
     matched := make(map[string]bool)
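`ScanLogsMatchCount` composes with the existing `ScanLogsMatchString` helper: the swarm tests above build `ScanLogsMatchCount(ScanLogsMatchString("successfully rotated KEK"), i+1)` and hand the result to `d.PollCheckLogs` via `poll.WaitOn`. A standalone sketch of the same closure-counting idea, using local stand-ins rather than the testutil package:

```go
package main

import (
	"fmt"
	"strings"
)

// matchString and matchCount are local stand-ins mirroring ScanLogsMatchString
// and ScanLogsMatchCount above.
func matchString(s string) func(string) bool {
	return func(line string) bool { return strings.Contains(line, s) }
}

func matchCount(f func(string) bool, n int) func(string) bool {
	matched := 0
	return func(line string) bool {
		if f(line) {
			matched++
		}
		return matched == n
	}
}

func main() {
	done := matchCount(matchString("successfully rotated KEK"), 2)
	logs := []string{"starting", "successfully rotated KEK", "idle", "successfully rotated KEK"}
	for _, line := range logs {
		if done(line) {
			fmt.Println("second rotation observed at:", line)
		}
	}
}
```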
@@ -21,6 +21,7 @@ import (
 type Execution struct {
     client            client.APIClient
     DaemonInfo        types.Info
+    DaemonVersion     types.Version
     OSType            string
     PlatformDefaults  PlatformDefaults
     protectedElements protectedElements
@@ -49,12 +50,17 @@ func FromClient(c *client.Client) (*Execution, error) {
     if err != nil {
         return nil, errors.Wrapf(err, "failed to get info from daemon")
     }
+    v, err := c.ServerVersion(context.Background())
+    if err != nil {
+        return nil, errors.Wrapf(err, "failed to get version info from daemon")
+    }
 
     osType := getOSType(info)
 
     return &Execution{
         client:            c,
         DaemonInfo:        info,
+        DaemonVersion:     v,
         OSType:            osType,
         PlatformDefaults:  getPlatformDefaults(info, osType),
         protectedElements: newProtectedElements(),
@@ -232,3 +238,8 @@ func EnsureFrozenImagesLinux(testEnv *Execution) error {
 func (e *Execution) GitHubActions() bool {
     return os.Getenv("GITHUB_ACTIONS") != ""
 }
+
+// NotAmd64 returns true if the daemon's architecture is not amd64
+func (e *Execution) NotAmd64() bool {
+    return e.DaemonVersion.Arch != "amd64"
+}
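`Execution.NotAmd64` is used throughout this PR as `skip.If(t, testEnv.NotAmd64)`: gotest.tools' `skip.If` accepts either a bool or a `func() bool`, so the method value is passed uncalled and evaluated by the skip helper. A hypothetical test showing the usage (the `environment.New` initialization in TestMain is assumed, not shown in this diff):

```go
package example

import (
	"testing"

	"github.com/docker/docker/testutil/environment"
	"gotest.tools/v3/skip"
)

// testEnv would normally be created in TestMain (e.g. via environment.New,
// assumed here); only the skip pattern itself is the point of this sketch.
var testEnv *environment.Execution

func TestNeedsAmd64Daemon(t *testing.T) {
	// skip.If accepts a bool or a func() bool, so the NotAmd64 method value is
	// passed without being called and evaluated by the skip helper itself.
	skip.If(t, testEnv.NotAmd64, "requires an amd64 daemon")
	// ... test body ...
}
```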
@@ -39,7 +39,7 @@ func ensureHTTPServerImage(t testing.TB) {
     if goos == "" {
         goos = "linux"
     }
-    goarch := os.Getenv("DOCKER_ENGINE_GOARCH")
+    goarch := testEnv.DaemonVersion.Arch
     if goarch == "" {
         goarch = "amd64"
     }