integration: change container.Run signature to fix linting

Line 59: warning: context.Context should be the first parameter of a function (golint)
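
For context, golint expects context.Context to be the first parameter of a function. The test helper's signature therefore changes from

    func Run(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string

to

    func Run(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(*TestContainerConfig)) string

and every call site flips accordingly, e.g. container.Run(t, ctx, client, ...) becomes container.Run(ctx, t, client, ...).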

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 9f9b4290b9)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2019-06-06 13:15:31 +02:00
parent 9c49308cce
commit 3ade7ca12b
GPG key ID: 76698F39D527CE8C
34 changed files with 105 additions and 106 deletions


@ -79,7 +79,7 @@ func TestBuildSquashParent(t *testing.T) {
resp.Body.Close()
assert.NilError(t, err)
cid := container.Run(t, ctx, client,
cid := container.Run(ctx, t, client,
container.WithImage(name),
container.WithCmd("/bin/sh", "-c", "cat /hello"),
)
@ -94,11 +94,11 @@ func TestBuildSquashParent(t *testing.T) {
assert.NilError(t, err)
assert.Check(t, is.Equal(strings.TrimSpace(actualStdout.String()), "hello\nworld"))
container.Run(t, ctx, client,
container.Run(ctx, t, client,
container.WithImage(name),
container.WithCmd("/bin/sh", "-c", "[ ! -f /remove_me ]"),
)
container.Run(t, ctx, client,
container.Run(ctx, t, client,
container.WithImage(name),
container.WithCmd("/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`),
)


@ -50,7 +50,7 @@ func TestCheckpoint(t *testing.T) {
}
t.Log("Start a container")
cID := container.Run(t, ctx, client, container.WithMount(mnt))
cID := container.Run(ctx, t, client, container.WithMount(mnt))
poll.WaitOn(t,
container.IsInState(ctx, client, cID, "running"),
poll.WithDelay(100*time.Millisecond),


@ -94,7 +94,7 @@ func TestDaemonRestartIpcMode(t *testing.T) {
ctx := context.Background()
// check the container is created with private ipc mode as per daemon default
cID := container.Run(t, ctx, c,
cID := container.Run(ctx, t, c,
container.WithCmd("top"),
container.WithRestartPolicy("always"),
)
@ -113,7 +113,7 @@ func TestDaemonRestartIpcMode(t *testing.T) {
assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private"))
// check a new container is created with shareable ipc mode as per new daemon default
cID = container.Run(t, ctx, c)
cID = container.Run(ctx, t, c)
defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
inspect, err = c.ContainerInspect(ctx, cID)


@ -19,7 +19,7 @@ func TestDiff(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", `mkdir /foo; echo xyzzy > /foo/bar`))
cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", `mkdir /foo; echo xyzzy > /foo/bar`))
// Wait for it to exit as cannot diff a running container on Windows, and
// it will take a few seconds to exit. Also there's no way in Windows to


@ -24,7 +24,7 @@ func TestExecWithCloseStdin(t *testing.T) {
client := testEnv.APIClient()
// run top with detached mode
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
expected := "closeIO"
execResp, err := client.ContainerExecCreate(ctx, cID,
@ -90,7 +90,7 @@ func TestExec(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client, container.WithTty(true), container.WithWorkingDir("/root"))
cID := container.Run(ctx, t, client, container.WithTty(true), container.WithWorkingDir("/root"))
id, err := client.ContainerExecCreate(ctx, cID,
types.ExecConfig{
@ -125,7 +125,7 @@ func TestExecUser(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client, container.WithTty(true), container.WithUser("1:1"))
cID := container.Run(ctx, t, client, container.WithTty(true), container.WithUser("1:1"))
result, err := container.Exec(ctx, client, cID, []string{"id"})
assert.NilError(t, err)


@ -25,7 +25,7 @@ func TestExportContainerAndImportImage(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithCmd("true"))
cID := container.Run(ctx, t, client, container.WithCmd("true"))
poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond))
reference := "repo/testexp:v1"


@ -22,7 +22,7 @@ func TestHealthCheckWorkdir(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client, container.WithTty(true), container.WithWorkingDir("/foo"), func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, container.WithTty(true), container.WithWorkingDir("/foo"), func(c *container.TestContainerConfig) {
c.Config.Healthcheck = &containertypes.HealthConfig{
Test: []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"},
Interval: 50 * time.Millisecond,


@ -24,7 +24,7 @@ func TestInspectCpusetInConfigPre120(t *testing.T) {
name := "cpusetinconfig-pre120-" + t.Name()
// Create container with up to-date-API
container.Run(t, ctx, request.NewAPIClient(t), container.WithName(name),
container.Run(ctx, t, request.NewAPIClient(t), container.WithName(name),
container.WithCmd("true"),
func(c *container.TestContainerConfig) {
c.HostConfig.Resources.CpusetCpus = "0"


@ -18,7 +18,7 @@ func TestKillContainerInvalidSignal(t *testing.T) {
defer setupTest(t)()
client := testEnv.APIClient()
ctx := context.Background()
id := container.Run(t, ctx, client)
id := container.Run(ctx, t, client)
err := client.ContainerKill(ctx, id, "0")
assert.Error(t, err, "Error response from daemon: Invalid signal: 0")
@ -60,7 +60,7 @@ func TestKillContainer(t *testing.T) {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
ctx := context.Background()
id := container.Run(t, ctx, client)
id := container.Run(ctx, t, client)
err := client.ContainerKill(ctx, id, tc.signal)
assert.NilError(t, err)
@ -95,7 +95,7 @@ func TestKillWithStopSignalAndRestartPolicies(t *testing.T) {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
ctx := context.Background()
id := container.Run(t, ctx, client,
id := container.Run(ctx, t, client,
container.WithRestartPolicy("always"),
func(c *container.TestContainerConfig) {
c.Config.StopSignal = tc.stopsignal
@ -137,7 +137,7 @@ func TestKillDifferentUserContainer(t *testing.T) {
ctx := context.Background()
client := request.NewAPIClient(t, client.WithVersion("1.19"))
id := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
c.Config.User = "daemon"
})
poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond))
@ -154,7 +154,7 @@ func TestInspectOomKilledTrue(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) {
c.HostConfig.Resources.Memory = 32 * 1024 * 1024
})
@ -172,7 +172,7 @@ func TestInspectOomKilledFalse(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "echo hello world"))
cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world"))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))


@ -24,7 +24,7 @@ func TestLinksEtcHostsContentMatch(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithNetworkMode("host"))
cID := container.Run(ctx, t, client, container.WithNetworkMode("host"))
res, err := container.Exec(ctx, client, cID, []string{"cat", "/etc/hosts"})
assert.NilError(t, err)
assert.Assert(t, is.Len(res.Stderr(), 0))
@ -42,8 +42,8 @@ func TestLinksContainerNames(t *testing.T) {
containerA := "first_" + t.Name()
containerB := "second_" + t.Name()
container.Run(t, ctx, client, container.WithName(containerA))
container.Run(t, ctx, client, container.WithName(containerB), container.WithLinks(containerA+":"+containerA))
container.Run(ctx, t, client, container.WithName(containerA))
container.Run(ctx, t, client, container.WithName(containerB), container.WithLinks(containerA+":"+containerA))
f := filters.NewArgs(filters.Arg("name", containerA))


@ -21,7 +21,7 @@ func TestLogsFollowTailEmpty(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
id := container.Run(t, ctx, client, container.WithCmd("sleep", "100000"))
id := container.Run(ctx, t, client, container.WithCmd("sleep", "100000"))
logs, err := client.ContainerLogs(ctx, id, types.ContainerLogsOptions{ShowStdout: true, Tail: "2"})
if logs != nil {


@ -256,9 +256,9 @@ func TestContainerBindMountNonRecursive(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
containers := []string{
container.Run(t, ctx, client, container.WithMount(implicit), container.WithCmd(recursiveVerifier...)),
container.Run(t, ctx, client, container.WithMount(recursive), container.WithCmd(recursiveVerifier...)),
container.Run(t, ctx, client, container.WithMount(nonRecursive), container.WithCmd(nonRecursiveVerifier...)),
container.Run(ctx, t, client, container.WithMount(implicit), container.WithCmd(recursiveVerifier...)),
container.Run(ctx, t, client, container.WithMount(recursive), container.WithCmd(recursiveVerifier...)),
container.Run(ctx, t, client, container.WithMount(nonRecursive), container.WithCmd(nonRecursiveVerifier...)),
}
for _, c := range containers {


@ -71,7 +71,7 @@ func TestNetworkLoopbackNat(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:"+serverContainerID))
cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:"+serverContainerID))
poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond))
@ -93,7 +93,7 @@ func startServerContainer(t *testing.T, msg string, port int) string {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithName("server-"+t.Name()), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, container.WithName("server-"+t.Name()), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) {
c.HostConfig.PortBindings = nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", port)): []nat.PortBinding{
{


@ -25,7 +25,7 @@ func TestPause(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
since := request.DaemonUnixTime(ctx, t, client, testEnv)
@ -57,7 +57,7 @@ func TestPauseFailsOnWindowsServerContainers(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
err := client.ContainerPause(ctx, cID)
@ -71,7 +71,7 @@ func TestPauseStopPausedContainer(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
err := client.ContainerPause(ctx, cID)


@ -36,7 +36,7 @@ func TestRemoveContainerWithRemovedVolume(t *testing.T) {
tempDir := fs.NewDir(t, "test-rm-container-with-removed-volume", fs.WithMode(0755))
defer tempDir.Remove()
cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithBind(tempDir.Path(), prefix+slash+"test"))
cID := container.Run(ctx, t, client, container.WithCmd("true"), container.WithBind(tempDir.Path(), prefix+slash+"test"))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
err := os.RemoveAll(tempDir.Path())
@ -59,7 +59,7 @@ func TestRemoveContainerWithVolume(t *testing.T) {
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithVolume(prefix+slash+"srv"))
cID := container.Run(ctx, t, client, container.WithCmd("true"), container.WithVolume(prefix+slash+"srv"))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
insp, _, err := client.ContainerInspectWithRaw(ctx, cID, true)
@ -82,7 +82,7 @@ func TestRemoveContainerRunning(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{})
assert.Check(t, is.ErrorContains(err, "cannot remove a running container"))
@ -93,7 +93,7 @@ func TestRemoveContainerForceRemoveRunning(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{
Force: true,


@ -30,18 +30,18 @@ func TestRenameLinkedContainer(t *testing.T) {
aName := "a0" + t.Name()
bName := "b0" + t.Name()
aID := container.Run(t, ctx, client, container.WithName(aName))
bID := container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName))
aID := container.Run(ctx, t, client, container.WithName(aName))
bID := container.Run(ctx, t, client, container.WithName(bName), container.WithLinks(aName))
err := client.ContainerRename(ctx, aID, "a1"+t.Name())
assert.NilError(t, err)
container.Run(t, ctx, client, container.WithName(aName))
container.Run(ctx, t, client, container.WithName(aName))
err = client.ContainerRemove(ctx, bID, types.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
bID = container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName))
bID = container.Run(ctx, t, client, container.WithName(bName), container.WithLinks(aName))
inspect, err := client.ContainerInspect(ctx, bID)
assert.NilError(t, err)
@ -54,7 +54,7 @@ func TestRenameStoppedContainer(t *testing.T) {
client := testEnv.APIClient()
oldName := "first_name" + t.Name()
cID := container.Run(t, ctx, client, container.WithName(oldName), container.WithCmd("sh"))
cID := container.Run(ctx, t, client, container.WithName(oldName), container.WithCmd("sh"))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
inspect, err := client.ContainerInspect(ctx, cID)
@ -76,7 +76,7 @@ func TestRenameRunningContainerAndReuse(t *testing.T) {
client := testEnv.APIClient()
oldName := "first_name" + t.Name()
cID := container.Run(t, ctx, client, container.WithName(oldName))
cID := container.Run(ctx, t, client, container.WithName(oldName))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
newName := "new_name" + stringid.GenerateRandomID()
@ -90,7 +90,7 @@ func TestRenameRunningContainerAndReuse(t *testing.T) {
_, err = client.ContainerInspect(ctx, oldName)
assert.Check(t, is.ErrorContains(err, "No such container: "+oldName))
cID = container.Run(t, ctx, client, container.WithName(oldName))
cID = container.Run(ctx, t, client, container.WithName(oldName))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
inspect, err = client.ContainerInspect(ctx, cID)
@ -104,7 +104,7 @@ func TestRenameInvalidName(t *testing.T) {
client := testEnv.APIClient()
oldName := "first_name" + t.Name()
cID := container.Run(t, ctx, client, container.WithName(oldName))
cID := container.Run(ctx, t, client, container.WithName(oldName))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
err := client.ContainerRename(ctx, oldName, "new:invalid")
@ -132,7 +132,7 @@ func TestRenameAnonymousContainer(t *testing.T) {
_, err := client.NetworkCreate(ctx, networkName, types.NetworkCreate{})
assert.NilError(t, err)
cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
networkName: {},
}
@ -155,7 +155,7 @@ func TestRenameAnonymousContainer(t *testing.T) {
if testEnv.OSType == "windows" {
count = "-n"
}
cID = container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
cID = container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
networkName: {},
}
@ -175,7 +175,7 @@ func TestRenameContainerWithSameName(t *testing.T) {
client := testEnv.APIClient()
oldName := "old" + t.Name()
cID := container.Run(t, ctx, client, container.WithName(oldName))
cID := container.Run(ctx, t, client, container.WithName(oldName))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
err := client.ContainerRename(ctx, oldName, oldName)
@ -198,12 +198,12 @@ func TestRenameContainerWithLinkedContainer(t *testing.T) {
client := testEnv.APIClient()
db1Name := "db1" + t.Name()
db1ID := container.Run(t, ctx, client, container.WithName(db1Name))
db1ID := container.Run(ctx, t, client, container.WithName(db1Name))
poll.WaitOn(t, container.IsInState(ctx, client, db1ID, "running"), poll.WithDelay(100*time.Millisecond))
app1Name := "app1" + t.Name()
app2Name := "app2" + t.Name()
app1ID := container.Run(t, ctx, client, container.WithName(app1Name), container.WithLinks(db1Name+":/mysql"))
app1ID := container.Run(ctx, t, client, container.WithName(app1Name), container.WithLinks(db1Name+":/mysql"))
poll.WaitOn(t, container.IsInState(ctx, client, app1ID, "running"), poll.WithDelay(100*time.Millisecond))
err := client.ContainerRename(ctx, app1Name, app2Name)


@ -22,7 +22,7 @@ func TestResize(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
@ -40,7 +40,7 @@ func TestResizeWithInvalidSize(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
@ -55,7 +55,7 @@ func TestResizeWhenContainerNotStarted(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithCmd("echo"))
cID := container.Run(ctx, t, client, container.WithCmd("echo"))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))


@ -29,7 +29,7 @@ func TestKernelTCPMemory(t *testing.T) {
kernelMemoryTCP int64 = 200 * 1024 * 1024
)
cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
c.HostConfig.Resources = containertypes.Resources{
KernelMemoryTCP: kernelMemoryTCP,
}
@ -65,7 +65,7 @@ func TestNISDomainname(t *testing.T) {
domainname = "baz.cyphar.com"
)
cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
c.Config.Hostname = hostname
c.Config.Domainname = domainname
})


@ -25,7 +25,7 @@ func TestStats(t *testing.T) {
info, err := client.Info(ctx)
assert.NilError(t, err)
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))


@ -54,7 +54,7 @@ func TestStopContainerWithTimeout(t *testing.T) {
d := d
t.Run(strconv.Itoa(d.timeout), func(t *testing.T) {
t.Parallel()
id := container.Run(t, ctx, client, testCmd)
id := container.Run(ctx, t, client, testCmd)
timeout := time.Duration(d.timeout) * time.Second
err := client.ContainerStop(ctx, id, &timeout)
@ -78,7 +78,7 @@ func TestDeleteDevicemapper(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
id := container.Run(t, ctx, client, container.WithName("foo-"+t.Name()), container.WithCmd("echo"))
id := container.Run(ctx, t, client, container.WithName("foo-"+t.Name()), container.WithCmd("echo"))
poll.WaitOn(t, container.IsStopped(ctx, client, id), poll.WithDelay(100*time.Millisecond))


@ -17,7 +17,7 @@ func TestStopContainerWithRestartPolicyAlways(t *testing.T) {
names := []string{"verifyRestart1-" + t.Name(), "verifyRestart2-" + t.Name()}
for _, name := range names {
container.Run(t, ctx, client,
container.Run(ctx, t, client,
container.WithName(name),
container.WithCmd("false"),
container.WithRestartPolicy("always"),


@ -51,7 +51,7 @@ func TestStopContainerWithTimeout(t *testing.T) {
d := d
t.Run(strconv.Itoa(d.timeout), func(t *testing.T) {
t.Parallel()
id := container.Run(t, ctx, client, testCmd)
id := container.Run(ctx, t, client, testCmd)
timeout := time.Duration(d.timeout) * time.Second
err := client.ContainerStop(ctx, id, &timeout)


@ -26,7 +26,7 @@ func TestUpdateMemory(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
c.HostConfig.Resources = containertypes.Resources{
Memory: 200 * 1024 * 1024,
}
@ -72,7 +72,7 @@ func TestUpdateCPUQuota(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
for _, test := range []struct {
desc string
@ -140,7 +140,7 @@ func TestUpdatePidsLimit(t *testing.T) {
t.Run(test.desc, func(t *testing.T) {
// Using "network=host" to speed up creation (13.96s vs 6.54s)
cID := container.Run(t, ctx, apiClient, container.WithPidsLimit(test.initial), container.WithNetworkMode("host"))
cID := container.Run(ctx, t, apiClient, container.WithPidsLimit(test.initial), container.WithNetworkMode("host"))
_, err := c.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{
Resources: containertypes.Resources{


@ -17,7 +17,7 @@ func TestUpdateRestartPolicy(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "sleep 1 && false"), func(c *container.TestContainerConfig) {
cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "sleep 1 && false"), func(c *container.TestContainerConfig) {
c.HostConfig.RestartPolicy = containertypes.RestartPolicy{
Name: "on-failure",
MaximumRetryCount: 3,
@ -50,7 +50,7 @@ func TestUpdateRestartWithAutoRemove(t *testing.T) {
client := testEnv.APIClient()
ctx := context.Background()
cID := container.Run(t, ctx, client, container.WithAutoRemove)
cID := container.Run(ctx, t, client, container.WithAutoRemove)
_, err := client.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{
RestartPolicy: containertypes.RestartPolicy{


@ -39,7 +39,7 @@ func TestWaitNonBlocked(t *testing.T) {
t.Run(tc.doc, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
containerID := container.Run(t, ctx, cli, container.WithCmd("sh", "-c", tc.cmd))
containerID := container.Run(ctx, t, cli, container.WithCmd("sh", "-c", tc.cmd))
poll.WaitOn(t, container.IsInState(ctx, cli, containerID, "exited"), poll.WithTimeout(30*time.Second), poll.WithDelay(100*time.Millisecond))
waitresC, errC := cli.ContainerWait(ctx, containerID, "")
@ -81,7 +81,7 @@ func TestWaitBlocked(t *testing.T) {
t.Run(tc.doc, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
containerID := container.Run(t, ctx, cli, container.WithCmd("sh", "-c", tc.cmd))
containerID := container.Run(ctx, t, cli, container.WithCmd("sh", "-c", tc.cmd))
poll.WaitOn(t, container.IsInState(ctx, cli, containerID, "running"), poll.WithTimeout(30*time.Second), poll.WithDelay(100*time.Millisecond))
waitresC, errC := cli.ContainerWait(ctx, containerID, "")


@ -48,8 +48,7 @@ func Create(ctx context.Context, t *testing.T, client client.APIClient, ops ...f
}
// Run creates and start a container with the specified options
// nolint: golint
func Run(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint
func Run(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(*TestContainerConfig)) string {
t.Helper()
id := Create(ctx, t, client, ops...)


@ -153,8 +153,8 @@ func testIpvlanL2NilParent(client dclient.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netName))
ctx := context.Background()
id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
_, err := container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1})
assert.NilError(t, err)
@ -171,8 +171,8 @@ func testIpvlanL2InternalMode(client dclient.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netName))
ctx := context.Background()
id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
@ -197,11 +197,11 @@ func testIpvlanL3NilParent(client dclient.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netName))
ctx := context.Background()
id1 := container.Run(t, ctx, client,
id1 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.220.10"),
)
id2 := container.Run(t, ctx, client,
id2 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.230.10"),
)
@ -223,11 +223,11 @@ func testIpvlanL3InternalMode(client dclient.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netName))
ctx := context.Background()
id1 := container.Run(t, ctx, client,
id1 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.220.10"),
)
id2 := container.Run(t, ctx, client,
id2 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.230.10"),
)
@ -259,12 +259,12 @@ func testIpvlanL2MultiSubnet(client dclient.APIClient) func(*testing.T) {
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64
ctx := context.Background()
id1 := container.Run(t, ctx, client,
id1 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.200.20"),
container.WithIPv6(netName, "2001:db8:abc8::20"),
)
id2 := container.Run(t, ctx, client,
id2 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.200.21"),
container.WithIPv6(netName, "2001:db8:abc8::21"),
@ -280,12 +280,12 @@ func testIpvlanL2MultiSubnet(client dclient.APIClient) func(*testing.T) {
assert.NilError(t, err)
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64
id3 := container.Run(t, ctx, client,
id3 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.202.20"),
container.WithIPv6(netName, "2001:db8:abc6::20"),
)
id4 := container.Run(t, ctx, client,
id4 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.202.21"),
container.WithIPv6(netName, "2001:db8:abc6::21"),
@ -326,12 +326,12 @@ func testIpvlanL3MultiSubnet(client dclient.APIClient) func(*testing.T) {
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64
ctx := context.Background()
id1 := container.Run(t, ctx, client,
id1 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.10.20"),
container.WithIPv6(netName, "2001:db8:abc9::20"),
)
id2 := container.Run(t, ctx, client,
id2 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.10.21"),
container.WithIPv6(netName, "2001:db8:abc9::21"),
@ -347,12 +347,12 @@ func testIpvlanL3MultiSubnet(client dclient.APIClient) func(*testing.T) {
assert.NilError(t, err)
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64
id3 := container.Run(t, ctx, client,
id3 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.12.20"),
container.WithIPv6(netName, "2001:db8:abc7::20"),
)
id4 := container.Run(t, ctx, client,
id4 := container.Run(ctx, t, client,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.12.21"),
container.WithIPv6(netName, "2001:db8:abc7::21"),
@ -392,7 +392,7 @@ func testIpvlanAddressing(client dclient.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netNameL2))
ctx := context.Background()
id1 := container.Run(t, ctx, client,
id1 := container.Run(ctx, t, client,
container.WithNetworkMode(netNameL2),
)
// Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet
@ -414,7 +414,7 @@ func testIpvlanAddressing(client dclient.APIClient) func(*testing.T) {
)
assert.Check(t, n.IsNetworkAvailable(client, netNameL3))
id2 := container.Run(t, ctx, client,
id2 := container.Run(ctx, t, client,
container.WithNetworkMode(netNameL3),
)
// Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops


@ -141,8 +141,8 @@ func testMacvlanNilParent(client client.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netName))
ctx := context.Background()
id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
_, err := container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1})
assert.Check(t, err == nil)
@ -160,8 +160,8 @@ func testMacvlanInternalMode(client client.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netName))
ctx := context.Background()
id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
@ -191,12 +191,12 @@ func testMacvlanMultiSubnet(client client.APIClient) func(*testing.T) {
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64
ctx := context.Background()
id1 := container.Run(t, ctx, client,
id1 := container.Run(ctx, t, client,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.100.20"),
container.WithIPv6("dualstackbridge", "2001:db8:abc2::20"),
)
id2 := container.Run(t, ctx, client,
id2 := container.Run(ctx, t, client,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.100.21"),
container.WithIPv6("dualstackbridge", "2001:db8:abc2::21"),
@ -212,12 +212,12 @@ func testMacvlanMultiSubnet(client client.APIClient) func(*testing.T) {
assert.NilError(t, err)
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64
id3 := container.Run(t, ctx, client,
id3 := container.Run(ctx, t, client,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.102.20"),
container.WithIPv6("dualstackbridge", "2001:db8:abc4::20"),
)
id4 := container.Run(t, ctx, client,
id4 := container.Run(ctx, t, client,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.102.21"),
container.WithIPv6("dualstackbridge", "2001:db8:abc4::21"),
@ -257,7 +257,7 @@ func testMacvlanAddressing(client client.APIClient) func(*testing.T) {
assert.Check(t, n.IsNetworkAvailable(client, netName))
ctx := context.Background()
id1 := container.Run(t, ctx, client,
id1 := container.Run(ctx, t, client,
container.WithNetworkMode("dualstackbridge"),
)


@ -29,14 +29,14 @@ func TestRunContainerWithBridgeNone(t *testing.T) {
c := d.NewClientT(t)
ctx := context.Background()
id1 := container.Run(t, ctx, c)
id1 := container.Run(ctx, t, c)
defer c.ContainerRemove(ctx, id1, types.ContainerRemoveOptions{Force: true})
result, err := container.Exec(ctx, c, id1, []string{"ip", "l"})
assert.NilError(t, err)
assert.Check(t, is.Equal(false, strings.Contains(result.Combined(), "eth0")), "There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled")
id2 := container.Run(t, ctx, c, container.WithNetworkMode("bridge"))
id2 := container.Run(ctx, t, c, container.WithNetworkMode("bridge"))
defer c.ContainerRemove(ctx, id2, types.ContainerRemoveOptions{Force: true})
result, err = container.Exec(ctx, c, id2, []string{"ip", "l"})
@ -50,7 +50,7 @@ func TestRunContainerWithBridgeNone(t *testing.T) {
err = cmd.Run()
assert.NilError(t, err, "Failed to get current process network namespace: %+v", err)
id3 := container.Run(t, ctx, c, container.WithNetworkMode("host"))
id3 := container.Run(ctx, t, c, container.WithNetworkMode("host"))
defer c.ContainerRemove(ctx, id3, types.ContainerRemoveOptions{Force: true})
result, err = container.Exec(ctx, c, id3, []string{"sh", "-c", nsCommand})


@ -92,7 +92,7 @@ func TestAuthZPluginAllowRequest(t *testing.T) {
ctx := context.Background()
// Ensure command successful
cID := container.Run(t, ctx, c)
cID := container.Run(ctx, t, c)
assertURIRecorded(t, ctrl.requestsURIs, "/containers/create")
assertURIRecorded(t, ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", cID))
@ -224,7 +224,7 @@ func TestAuthZPluginAllowEventStream(t *testing.T) {
defer cancel()
// Create a container and wait for the creation events
cID := container.Run(t, ctx, c)
cID := container.Run(ctx, t, c)
poll.WaitOn(t, container.IsInState(ctx, c, cID, "running"))
created := false
@ -348,7 +348,7 @@ func TestAuthZPluginEnsureLoadImportWorking(t *testing.T) {
exportedImagePath := filepath.Join(tmp, "export.tar")
cID := container.Run(t, ctx, c)
cID := container.Run(ctx, t, c)
responseReader, err := c.ContainerExport(context.Background(), cID)
assert.NilError(t, err)
@ -388,7 +388,7 @@ func TestAuthzPluginEnsureContainerCopyToFrom(t *testing.T) {
c := d.NewClientT(t)
ctx := context.Background()
cID := container.Run(t, ctx, c)
cID := container.Run(ctx, t, c)
defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
_, err = f.Seek(0, io.SeekStart)


@ -55,7 +55,7 @@ func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) {
d.LoadBusybox(t)
// Ensure docker run command and accompanying docker ps are successful
cID := container.Run(t, ctx, c)
cID := container.Run(ctx, t, c)
_, err = c.ContainerInspect(ctx, cID)
assert.NilError(t, err)


@ -399,7 +399,7 @@ func testGraphDriverPull(c client.APIClient, d *daemon.Daemon) func(*testing.T)
_, err = io.Copy(ioutil.Discard, r)
assert.NilError(t, err)
container.Run(t, ctx, c, container.WithImage("busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0"))
container.Run(ctx, t, c, container.WithImage("busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0"))
}
}
@ -439,7 +439,7 @@ func TestGraphdriverPluginV2(t *testing.T) {
// nolint: golint
func testGraphDriver(t *testing.T, c client.APIClient, ctx context.Context, driverName string, afterContainerRunFn func(*testing.T)) { //nolint: golint
id := container.Run(t, ctx, c, container.WithCmd("sh", "-c", "echo hello > /hello"))
id := container.Run(ctx, t, c, container.WithCmd("sh", "-c", "echo hello > /hello"))
if afterContainerRunFn != nil {
afterContainerRunFn(t)


@ -33,7 +33,7 @@ func TestContinueAfterPluginCrash(t *testing.T) {
ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second)
id := container.Run(t, ctx, client,
id := container.Run(ctx, t, client,
container.WithAutoRemove,
container.WithLogDriver("test"),
container.WithCmd(


@ -30,7 +30,7 @@ func TestEventsExecDie(t *testing.T) {
ctx := context.Background()
client := testEnv.APIClient()
cID := container.Run(t, ctx, client)
cID := container.Run(ctx, t, client)
id, err := client.ContainerExecCreate(ctx, cID,
types.ExecConfig{