Wire up tests to support otel tracing
Integration tests will now configure clients to propagate traces, and a span is created for every test. Some extra changes were needed (or were desirable for trace propagation) in the test helpers so that tracing spans are passed through via context.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
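A minimal sketch of the pattern this commit wires up, for orientation. The helpers (`testutil.ConfigureTracing`, `testutil.StartSpan`, `testutil.GetContext`) are the ones used in the diff below; the surrounding `TestMain`/`TestExample` scaffolding here is illustrative only and not the verbatim implementation.

```go
// Sketch only: mirrors how the integration-cli suites below wire tracing.
package main

import (
	"context"
	"os"
	"testing"

	"github.com/docker/docker/testutil"
)

var baseContext context.Context

func TestMain(m *testing.M) {
	// ConfigureTracing reads the standard OTEL_* environment variables
	// (forwarded by the Makefile/hack changes below) and returns a shutdown
	// func that flushes spans.
	shutdown := testutil.ConfigureTracing()
	baseContext = context.Background()

	code := m.Run()
	shutdown(baseContext)
	os.Exit(code)
}

func TestExample(t *testing.T) {
	// One span per test; the returned context carries the span, so clients
	// and daemons given this context propagate the trace.
	ctx := testutil.StartSpan(baseContext, t)
	_ = ctx // hypothetical test body would pass ctx to API clients, helpers, etc.
}
```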
parent 642e9917ff
commit e8dc902781

211 changed files with 3830 additions and 2712 deletions
Makefile | 5 ++++-
@@ -75,7 +75,10 @@ DOCKER_ENVS := \
 	-e PLATFORM \
 	-e DEFAULT_PRODUCT_LICENSE \
 	-e PRODUCT \
-	-e PACKAGER_NAME
+	-e PACKAGER_NAME \
+	-e OTEL_EXPORTER_OTLP_ENDPOINT \
+	-e OTEL_EXPORTER_OTLP_PROTOCOL \
+	-e OTEL_SERVICE_NAME
 # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
 
 # to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
@@ -197,6 +197,8 @@ test_env() {
 		TEMP="$TEMP" \
 		TEST_CLIENT_BINARY="$TEST_CLIENT_BINARY" \
 		TEST_INTEGRATION_USE_SNAPSHOTTER="$TEST_INTEGRATION_USE_SNAPSHOTTER" \
+		OTEL_EXPORTER_OTLP_ENDPOINT="$OTEL_EXPORTER_OTLP_ENDPOINT" \
+		OTEL_SERVICE_NAME="$OTEL_SERVICE_NAME" \
 		"$@"
 	)
 }
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"runtime"
@@ -16,8 +17,8 @@ type DockerBenchmarkSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerBenchmarkSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerBenchmarkSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerBenchmarkSuite) OnTimeout(c *testing.T) {
@@ -18,11 +18,15 @@ import (
 	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/docker/integration-cli/environment"
 	"github.com/docker/docker/internal/test/suite"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	ienv "github.com/docker/docker/testutil/environment"
 	"github.com/docker/docker/testutil/fakestorage"
 	"github.com/docker/docker/testutil/fixtures/plugin"
 	"github.com/docker/docker/testutil/registry"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
 	"gotest.tools/v3/assert"
 )
 
@@ -38,37 +42,64 @@ const (
 )
 
 var (
-	testEnv     *environment.Execution
-	testEnvOnce sync.Once
+	testEnv *environment.Execution
+
+	// the docker client binary to use
+	dockerBinary = ""
+
+	testEnvOnce sync.Once
+	baseContext context.Context
 )
 
-func init() {
-	var err error
-
-	testEnv, err = environment.New()
-	if err != nil {
-		panic(err)
-	}
-}
-
 func TestMain(m *testing.M) {
 	flag.Parse()
 
 	os.Exit(testRun(m))
 }
 
 func testRun(m *testing.M) (ret int) {
 	// Global set up
-	dockerBinary = testEnv.DockerBinary()
-	err := ienv.EnsureFrozenImagesLinux(&testEnv.Execution)
+
+	var err error
+
+	shutdown := testutil.ConfigureTracing()
+	ctx, span := otel.Tracer("").Start(context.Background(), "integration-cli/TestMain")
+	defer func() {
+		if err != nil {
+			span.SetStatus(codes.Error, err.Error())
+			ret = 255
+		} else {
+			if ret != 0 {
+				span.SetAttributes(attribute.Int("exitCode", ret))
+				span.SetStatus(codes.Error, "m.Run() exited with non-zero code")
+			}
+		}
+		span.End()
+		shutdown(ctx)
+	}()
+
+	baseContext = ctx
+
+	testEnv, err = environment.New(ctx)
 	if err != nil {
 		fmt.Println(err)
 		os.Exit(1)
 		return
 	}
 
+	if testEnv.IsLocalDaemon() {
+		setupLocalInfo()
+	}
+
+	dockerBinary = testEnv.DockerBinary()
+
+	err = ienv.EnsureFrozenImagesLinux(ctx, &testEnv.Execution)
+	if err != nil {
+		return
+	}
+
 	testEnv.Print()
 	printCliVersion()
-	os.Exit(m.Run())
+
+	return m.Run()
 }
 
 func printCliVersion() {
@@ -84,262 +115,311 @@ func printCliVersion() {
 	fmt.Println(cmd.Stdout())
 }
 
-func ensureTestEnvSetup(t *testing.T) {
+func ensureTestEnvSetup(ctx context.Context, t *testing.T) {
 	testEnvOnce.Do(func() {
 		cli.SetTestEnvironment(testEnv)
 		fakestorage.SetTestEnvironment(&testEnv.Execution)
-		ienv.ProtectAll(t, &testEnv.Execution)
+		ienv.ProtectAll(ctx, t, &testEnv.Execution)
 	})
 }
 
 func TestDockerAPISuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerAPISuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerAPISuite{ds: &DockerSuite{}})
 }
 
 func TestDockerBenchmarkSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerBenchmarkSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerBenchmarkSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIAttachSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIAttachSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIAttachSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIBuildSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIBuildSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIBuildSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLICommitSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLICommitSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLICommitSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLICpSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLICpSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLICpSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLICreateSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLICreateSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLICreateSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIEventSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIEventSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIEventSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIExecSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIExecSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIExecSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIHealthSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIHealthSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIHealthSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIHistorySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIHistorySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIHistorySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIImagesSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIImagesSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIImagesSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIImportSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIImportSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIImportSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIInfoSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIInfoSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIInfoSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIInspectSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIInspectSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIInspectSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLILinksSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLILinksSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLILinksSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLILoginSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLILoginSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLILoginSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLILogsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLILogsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLILogsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLINetmodeSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLINetmodeSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLINetmodeSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLINetworkSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLINetworkSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLINetworkSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPluginLogDriverSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPluginLogDriverSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPluginLogDriverSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPluginsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPluginsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPluginsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPortSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPortSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPortSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIProxySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIProxySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIProxySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPruneSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPruneSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPruneSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPullSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPullSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPullSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPushSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPushSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPushSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIRestartSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIRestartSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIRestartSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIRmiSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIRmiSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIRmiSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIRunSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIRunSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIRunSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLISaveLoadSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLISaveLoadSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLISaveLoadSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLISearchSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLISearchSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLISearchSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLISNISuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLISNISuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLISNISuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIStartSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIStartSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIStartSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIStatsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIStatsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIStatsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLITopSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLITopSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLITopSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIUpdateSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIUpdateSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIUpdateSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIVolumeSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIVolumeSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIVolumeSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerRegistrySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerRegistrySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerRegistrySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerSchema1RegistrySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerSchema1RegistrySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerSchema1RegistrySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerRegistryAuthHtpasswdSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerRegistryAuthHtpasswdSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerRegistryAuthHtpasswdSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerRegistryAuthTokenSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerRegistryAuthTokenSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerRegistryAuthTokenSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerDaemonSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerDaemonSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerDaemonSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerSwarmSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerSwarmSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerSwarmSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerPluginSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerPluginSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerPluginSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerExternalVolumeSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
 	testRequires(t, DaemonIsLinux)
-	suite.Run(t, &DockerExternalVolumeSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerExternalVolumeSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerNetworkSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
 	testRequires(t, DaemonIsLinux)
-	suite.Run(t, &DockerNetworkSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerNetworkSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerHubPullSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
 	// FIXME. Temporarily turning this off for Windows as GH16039 was breaking
 	// Windows to Linux CI @icecrime
 	testRequires(t, DaemonIsLinux)
-	suite.Run(t, newDockerHubPullSuite())
+	suite.Run(ctx, t, newDockerHubPullSuite())
 }
 
 type DockerSuite struct{}
@@ -365,8 +445,8 @@ func (s *DockerSuite) OnTimeout(c *testing.T) {
 	}
 }
 
-func (s *DockerSuite) TearDownTest(c *testing.T) {
-	testEnv.Clean(c)
+func (s *DockerSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	testEnv.Clean(ctx, c)
 }
 
 type DockerRegistrySuite struct {
@@ -379,21 +459,21 @@ func (s *DockerRegistrySuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerRegistrySuite) SetUpTest(c *testing.T) {
+func (s *DockerRegistrySuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
 	s.reg = registry.NewV2(c)
 	s.reg.WaitReady(c)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerRegistrySuite) TearDownTest(c *testing.T) {
+func (s *DockerRegistrySuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		s.reg.Close()
 	}
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerSchema1RegistrySuite struct {
@@ -406,21 +486,21 @@ func (s *DockerSchema1RegistrySuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerSchema1RegistrySuite) SetUpTest(c *testing.T) {
+func (s *DockerSchema1RegistrySuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, testEnv.IsLocalDaemon)
 	s.reg = registry.NewV2(c, registry.Schema1)
 	s.reg.WaitReady(c)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerSchema1RegistrySuite) TearDownTest(c *testing.T) {
+func (s *DockerSchema1RegistrySuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		s.reg.Close()
 	}
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerRegistryAuthHtpasswdSuite struct {
@@ -433,14 +513,14 @@ func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *testing.T) {
+func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
 	s.reg = registry.NewV2(c, registry.Htpasswd)
 	s.reg.WaitReady(c)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *testing.T) {
+func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		out, err := s.d.Cmd("logout", privateRegistryURL)
 		assert.NilError(c, err, out)
@@ -449,7 +529,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *testing.T) {
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerRegistryAuthTokenSuite struct {
@@ -462,12 +542,12 @@ func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *testing.T) {
+func (s *DockerRegistryAuthTokenSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *testing.T) {
+func (s *DockerRegistryAuthTokenSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		out, err := s.d.Cmd("logout", privateRegistryURL)
 		assert.NilError(c, err, out)
@@ -476,7 +556,7 @@ func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *testing.T) {
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *testing.T, tokenURL string) {
@@ -496,20 +576,20 @@ func (s *DockerDaemonSuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerDaemonSuite) SetUpTest(c *testing.T) {
+func (s *DockerDaemonSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerDaemonSuite) TearDownTest(c *testing.T) {
+func (s *DockerDaemonSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
-func (s *DockerDaemonSuite) TearDownSuite(c *testing.T) {
+func (s *DockerDaemonSuite) TearDownSuite(ctx context.Context, c *testing.T) {
 	filepath.Walk(testdaemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
 		if err != nil {
 			// ignore errors here
@@ -542,11 +622,11 @@ func (s *DockerSwarmSuite) OnTimeout(c *testing.T) {
 	}
 }
 
-func (s *DockerSwarmSuite) SetUpTest(c *testing.T) {
+func (s *DockerSwarmSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
 }
 
-func (s *DockerSwarmSuite) AddDaemon(c *testing.T, joinSwarm, manager bool) *daemon.Daemon {
+func (s *DockerSwarmSuite) AddDaemon(ctx context.Context, c *testing.T, joinSwarm, manager bool) *daemon.Daemon {
 	c.Helper()
 	d := daemon.New(c, dockerBinary, dockerdBinary,
 		testdaemon.WithEnvironment(testEnv.Execution),
@@ -554,12 +634,12 @@ func (s *DockerSwarmSuite) AddDaemon(c *testing.T, joinSwarm, manager bool) *dae
 	)
 	if joinSwarm {
 		if len(s.daemons) > 0 {
-			d.StartAndSwarmJoin(c, s.daemons[0].Daemon, manager)
+			d.StartAndSwarmJoin(ctx, c, s.daemons[0].Daemon, manager)
 		} else {
-			d.StartAndSwarmInit(c)
+			d.StartAndSwarmInit(ctx, c)
 		}
 	} else {
-		d.StartNodeWithBusybox(c)
+		d.StartNodeWithBusybox(ctx, c)
 	}
 
 	s.daemonsLock.Lock()
@@ -570,7 +650,7 @@ func (s *DockerSwarmSuite) AddDaemon(c *testing.T, joinSwarm, manager bool) *dae
 	return d
 }
 
-func (s *DockerSwarmSuite) TearDownTest(c *testing.T) {
+func (s *DockerSwarmSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux)
 	s.daemonsLock.Lock()
 	for _, d := range s.daemons {
@@ -582,7 +662,7 @@ func (s *DockerSwarmSuite) TearDownTest(c *testing.T) {
 	s.daemons = nil
 	s.portIndex = 0
 	s.daemonsLock.Unlock()
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerPluginSuite struct {
@@ -602,26 +682,26 @@ func (ps *DockerPluginSuite) getPluginRepoWithTag() string {
 	return ps.getPluginRepo() + ":" + "latest"
 }
 
-func (ps *DockerPluginSuite) SetUpSuite(c *testing.T) {
+func (ps *DockerPluginSuite) SetUpSuite(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting)
 	ps.registry = registry.NewV2(c)
 	ps.registry.WaitReady(c)
 
-	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
 	defer cancel()
 
 	err := plugin.CreateInRegistry(ctx, ps.getPluginRepo(), nil)
 	assert.NilError(c, err, "failed to create plugin")
 }
 
-func (ps *DockerPluginSuite) TearDownSuite(c *testing.T) {
+func (ps *DockerPluginSuite) TearDownSuite(ctx context.Context, c *testing.T) {
 	if ps.registry != nil {
 		ps.registry.Close()
 	}
 }
 
-func (ps *DockerPluginSuite) TearDownTest(c *testing.T) {
-	ps.ds.TearDownTest(c)
+func (ps *DockerPluginSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	ps.ds.TearDownTest(ctx, c)
 }
 
 func (ps *DockerPluginSuite) OnTimeout(c *testing.T) {
@@ -1,6 +1,7 @@
 package daemon // import "github.com/docker/docker/integration-cli/daemon"
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -79,14 +80,16 @@ func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
 
 // CheckActiveContainerCount returns the number of active containers
 // FIXME(vdemeester) should re-use ActivateContainers in some way
-func (d *Daemon) CheckActiveContainerCount(t *testing.T) (interface{}, string) {
-	t.Helper()
-	out, err := d.Cmd("ps", "-q")
-	assert.NilError(t, err)
-	if len(strings.TrimSpace(out)) == 0 {
-		return 0, ""
+func (d *Daemon) CheckActiveContainerCount(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		t.Helper()
+		out, err := d.Cmd("ps", "-q")
+		assert.NilError(t, err)
+		if len(strings.TrimSpace(out)) == 0 {
+			return 0, ""
+		}
+		return len(strings.Split(strings.TrimSpace(out), "\n")), fmt.Sprintf("output: %q", out)
 	}
-	return len(strings.Split(strings.TrimSpace(out), "\n")), fmt.Sprintf("output: %q", out)
 }
 
 // WaitRun waits for a container to be running for 10s
@@ -15,9 +15,9 @@ import (
 
 // CheckServiceTasksInState returns the number of tasks with a matching state,
 // and optional message substring.
-func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceTasksInState(ctx context.Context, service string, state swarm.TaskState, message string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		tasks := d.GetServiceTasks(c, service)
+		tasks := d.GetServiceTasks(ctx, c, service)
 		var count int
 		for _, task := range tasks {
 			if task.Status.State == state {
@@ -32,9 +32,9 @@ func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState,
 
 // CheckServiceTasksInStateWithError returns the number of tasks with a matching state,
 // and optional message substring.
-func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.TaskState, errorMessage string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceTasksInStateWithError(ctx context.Context, service string, state swarm.TaskState, errorMessage string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		tasks := d.GetServiceTasks(c, service)
+		tasks := d.GetServiceTasks(ctx, c, service)
 		var count int
 		for _, task := range tasks {
 			if task.Status.State == state {
@@ -48,14 +48,14 @@ func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.T
 }
 
 // CheckServiceRunningTasks returns the number of running tasks for the specified service
-func (d *Daemon) CheckServiceRunningTasks(service string) func(*testing.T) (interface{}, string) {
-	return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "")
+func (d *Daemon) CheckServiceRunningTasks(ctx context.Context, service string) func(*testing.T) (interface{}, string) {
+	return d.CheckServiceTasksInState(ctx, service, swarm.TaskStateRunning, "")
 }
 
 // CheckServiceUpdateState returns the current update state for the specified service
-func (d *Daemon) CheckServiceUpdateState(service string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceUpdateState(ctx context.Context, service string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		service := d.GetService(c, service)
+		service := d.GetService(ctx, c, service)
 		if service.UpdateStatus == nil {
 			return "", ""
 		}
@@ -64,10 +64,10 @@ func (d *Daemon) CheckServiceUpdateState(service string) func(*testing.T) (inter
 }
 
 // CheckPluginRunning returns the runtime state of the plugin
-func (d *Daemon) CheckPluginRunning(plugin string) func(c *testing.T) (interface{}, string) {
+func (d *Daemon) CheckPluginRunning(ctx context.Context, plugin string) func(c *testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
 		apiclient := d.NewClientT(c)
-		resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin)
+		resp, _, err := apiclient.PluginInspectWithRaw(ctx, plugin)
 		if errdefs.IsNotFound(err) {
 			return false, fmt.Sprintf("%v", err)
 		}
@@ -77,10 +77,10 @@ func (d *Daemon) CheckPluginRunning(plugin string) func(c *testing.T) (interface
 }
 
 // CheckPluginImage returns the runtime state of the plugin
-func (d *Daemon) CheckPluginImage(plugin string) func(c *testing.T) (interface{}, string) {
+func (d *Daemon) CheckPluginImage(ctx context.Context, plugin string) func(c *testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
 		apiclient := d.NewClientT(c)
-		resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin)
+		resp, _, err := apiclient.PluginInspectWithRaw(ctx, plugin)
 		if errdefs.IsNotFound(err) {
 			return false, fmt.Sprintf("%v", err)
 		}
@@ -90,94 +90,106 @@ func (d *Daemon) CheckPluginImage(plugin string) func(c *testing.T) (interface{}
 }
 
 // CheckServiceTasks returns the number of tasks for the specified service
-func (d *Daemon) CheckServiceTasks(service string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceTasks(ctx context.Context, service string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		tasks := d.GetServiceTasks(c, service)
+		tasks := d.GetServiceTasks(ctx, c, service)
 		return len(tasks), ""
 	}
 }
 
 // CheckRunningTaskNetworks returns the number of times each network is referenced from a task.
-func (d *Daemon) CheckRunningTaskNetworks(c *testing.T) (interface{}, string) {
-	cli := d.NewClientT(c)
-	defer cli.Close()
+func (d *Daemon) CheckRunningTaskNetworks(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		cli := d.NewClientT(t)
+		defer cli.Close()
 
-	tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{
-		Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
-	})
-	assert.NilError(c, err)
+		tasks, err := cli.TaskList(ctx, types.TaskListOptions{
+			Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
+		})
+		assert.NilError(t, err)
 
-	result := make(map[string]int)
-	for _, task := range tasks {
-		for _, network := range task.Spec.Networks {
-			result[network.Target]++
+		result := make(map[string]int)
+		for _, task := range tasks {
+			for _, network := range task.Spec.Networks {
+				result[network.Target]++
+			}
 		}
+		return result, ""
 	}
-	return result, ""
 }
 
 // CheckRunningTaskImages returns the times each image is running as a task.
-func (d *Daemon) CheckRunningTaskImages(c *testing.T) (interface{}, string) {
-	cli := d.NewClientT(c)
-	defer cli.Close()
+func (d *Daemon) CheckRunningTaskImages(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		cli := d.NewClientT(t)
+		defer cli.Close()
 
-	tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{
-		Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
-	})
-	assert.NilError(c, err)
+		tasks, err := cli.TaskList(ctx, types.TaskListOptions{
+			Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
+		})
+		assert.NilError(t, err)
 
-	result := make(map[string]int)
-	for _, task := range tasks {
-		if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil {
-			result[task.Spec.ContainerSpec.Image]++
+		result := make(map[string]int)
+		for _, task := range tasks {
+			if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil {
+				result[task.Spec.ContainerSpec.Image]++
+			}
 		}
+		return result, ""
 	}
-	return result, ""
 }
 
 // CheckNodeReadyCount returns the number of ready node on the swarm
-func (d *Daemon) CheckNodeReadyCount(c *testing.T) (interface{}, string) {
-	nodes := d.ListNodes(c)
-	var readyCount int
-	for _, node := range nodes {
-		if node.Status.State == swarm.NodeStateReady {
-			readyCount++
+func (d *Daemon) CheckNodeReadyCount(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		nodes := d.ListNodes(ctx, t)
+		var readyCount int
+		for _, node := range nodes {
+			if node.Status.State == swarm.NodeStateReady {
+				readyCount++
+			}
 		}
+		return readyCount, ""
 	}
-	return readyCount, ""
 }
 
 // CheckLocalNodeState returns the current swarm node state
-func (d *Daemon) CheckLocalNodeState(c *testing.T) (interface{}, string) {
-	info := d.SwarmInfo(c)
-	return info.LocalNodeState, ""
+func (d *Daemon) CheckLocalNodeState(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		info := d.SwarmInfo(ctx, t)
+		return info.LocalNodeState, ""
+	}
 }
 
 // CheckControlAvailable returns the current swarm control available
-func (d *Daemon) CheckControlAvailable(c *testing.T) (interface{}, string) {
-	info := d.SwarmInfo(c)
-	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
-	return info.ControlAvailable, ""
+func (d *Daemon) CheckControlAvailable(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		info := d.SwarmInfo(ctx, t)
+		assert.Equal(t, info.LocalNodeState, swarm.LocalNodeStateActive)
+		return info.ControlAvailable, ""
+	}
 }
 
 // CheckLeader returns whether there is a leader on the swarm or not
-func (d *Daemon) CheckLeader(c *testing.T) (interface{}, string) {
-	cli := d.NewClientT(c)
-	defer cli.Close()
+func (d *Daemon) CheckLeader(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		cli := d.NewClientT(t)
+		defer cli.Close()
 
-	errList := "could not get node list"
+		errList := "could not get node list"
 
-	ls, err := cli.NodeList(context.Background(), types.NodeListOptions{})
-	if err != nil {
-		return err, errList
-	}
-
-	for _, node := range ls {
-		if node.ManagerStatus != nil && node.ManagerStatus.Leader {
-			return nil, ""
+		ls, err := cli.NodeList(ctx, types.NodeListOptions{})
+		if err != nil {
+			return err, errList
+		}
+
+		for _, node := range ls {
+			if node.ManagerStatus != nil && node.ManagerStatus.Leader {
+				return nil, ""
+			}
 		}
+		return fmt.Errorf("no leader"), "could not find leader"
 	}
-	return fmt.Errorf("no leader"), "could not find leader"
 }
 
 // CmdRetryOutOfSequence tries the specified command against the current daemon
@@ -3,7 +3,6 @@ package main
 import (
 	"bufio"
 	"bytes"
-	"context"
 	"io"
 	"net"
 	"net/http"
@@ -14,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"github.com/docker/go-connections/sockets"
 	"github.com/pkg/errors"
@@ -75,7 +75,8 @@ func (s *DockerAPISuite) TestGetContainersAttachWebsocket(c *testing.T) {
 
 // regression gh14320
 func (s *DockerAPISuite) TestPostContainersAttachContainerNotFound(c *testing.T) {
-	resp, _, err := request.Post("/containers/doesnotexist/attach")
+	ctx := testutil.GetContext(c)
+	resp, _, err := request.Post(ctx, "/containers/doesnotexist/attach")
 	assert.NilError(c, err)
 	// connection will shutdown, err should be "persistent connection closed"
 	assert.Equal(c, resp.StatusCode, http.StatusNotFound)
@@ -86,7 +87,8 @@ func (s *DockerAPISuite) TestPostContainersAttachContainerNotFound(c *testing.T)
 }
 
 func (s *DockerAPISuite) TestGetContainersWsAttachContainerNotFound(c *testing.T) {
-	res, body, err := request.Get("/containers/doesnotexist/attach/ws")
+	ctx := testutil.GetContext(c)
+	res, body, err := request.Get(ctx, "/containers/doesnotexist/attach/ws")
 	assert.Equal(c, res.StatusCode, http.StatusNotFound)
 	assert.NilError(c, err)
 	b, err := request.ReadBody(body)
@@ -190,7 +192,7 @@ func (s *DockerAPISuite) TestPostContainersAttach(c *testing.T) {
 		Logs: false,
 	}
 
	resp, err := apiClient.ContainerAttach(context.Background(), cid, attachOpts)
-	resp, err := apiClient.ContainerAttach(context.Background(), cid, attachOpts)
+	resp, err := apiClient.ContainerAttach(testutil.GetContext(c), cid, attachOpts)
 	assert.NilError(c, err)
 	mediaType, b := resp.MediaType()
 	assert.Check(c, b)
@@ -199,7 +201,7 @@ func (s *DockerAPISuite) TestPostContainersAttach(c *testing.T) {
 
 	// Make sure we do see "hello" if Logs is true
 	attachOpts.Logs = true
-	resp, err = apiClient.ContainerAttach(context.Background(), cid, attachOpts)
+	resp, err = apiClient.ContainerAttach(testutil.GetContext(c), cid, attachOpts)
 	assert.NilError(c, err)
 
 	defer resp.Conn.Close()
@@ -3,7 +3,6 @@ package main
 import (
 	"archive/tar"
 	"bytes"
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -13,6 +12,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/fakecontext"
 	"github.com/docker/docker/testutil/fakegit"
 	"github.com/docker/docker/testutil/fakestorage"
@@ -23,6 +23,7 @@ import (
 
 func (s *DockerAPISuite) TestBuildAPIDockerFileRemote(c *testing.T) {
 	testRequires(c, NotUserNamespace)
+	ctx := testutil.GetContext(c)
 
 	// -xdev is required because sysfs can cause EPERM
 	testD := `FROM busybox
@@ -31,7 +32,7 @@ RUN find /tmp/`
 	server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"testD": testD}))
 	defer server.Close()
 
-	res, body, err := request.Post("/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON)
+	res, body, err := request.Post(ctx, "/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -46,6 +47,8 @@ RUN find /tmp/`
 }
 
 func (s *DockerAPISuite) TestBuildAPIRemoteTarballContext(c *testing.T) {
+	ctx := testutil.GetContext(c)
+
 	buffer := new(bytes.Buffer)
 	tw := tar.NewWriter(buffer)
 	defer tw.Close()
@@ -66,7 +69,7 @@ func (s *DockerAPISuite) TestBuildAPIRemoteTarballContext(c *testing.T) {
 	}))
 	defer server.Close()
 
-	res, b, err := request.Post("/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar"))
+	res, b, err := request.Post(ctx, "/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 	b.Close()
@@ -113,8 +116,9 @@ RUN echo 'right'
 	}))
 	defer server.Close()
 
+	ctx := testutil.GetContext(c)
 	url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar"
-	res, body, err := request.Post(url, request.ContentType("application/tar"))
+	res, body, err := request.Post(ctx, url, request.ContentType("application/tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -133,7 +137,8 @@ RUN echo from dockerfile`,
 	}, false)
 	defer git.Close()
 
-	res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON)
+	ctx := testutil.GetContext(c)
+	res, body, err := request.Post(ctx, "/build?remote="+git.RepoURL, request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -153,8 +158,9 @@ RUN echo from Dockerfile`,
 	}, false)
 	defer git.Close()
 
+	ctx := testutil.GetContext(c)
 	// Make sure it tries to 'dockerfile' query param value
-	res, body, err := request.Post("/build?dockerfile=baz&remote="+git.RepoURL, request.JSON)
+	res, body, err := request.Post(ctx, "/build?dockerfile=baz&remote="+git.RepoURL, request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -175,8 +181,10 @@ RUN echo from dockerfile`,
 	}, false)
 	defer git.Close()
 
+	ctx := testutil.GetContext(c)
+
 	// Make sure it tries to 'dockerfile' query param value
-	res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON)
+	res, body, err := request.Post(ctx, "/build?remote="+git.RepoURL, request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -218,7 +226,9 @@ func (s *DockerAPISuite) TestBuildAPIUnnormalizedTarPaths(c *testing.T) {
 
 	assert.NilError(c, tw.Close(), "failed to close tar archive")
 
-	res, body, err := request.Post("/build", request.RawContent(io.NopCloser(buffer)), request.ContentType("application/x-tar"))
+	ctx := testutil.GetContext(c)
+
+	res, body, err := request.Post(ctx, "/build", request.RawContent(io.NopCloser(buffer)), request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -248,15 +258,17 @@ func (s *DockerAPISuite) TestBuildOnBuildWithCopy(c *testing.T) {
 
 	FROM onbuildbase
 	`
-	ctx := fakecontext.New(c, "",
+	bCtx := fakecontext.New(c, "",
 		fakecontext.WithDockerfile(dockerfile),
 		fakecontext.WithFile("file", "some content"),
 	)
-	defer ctx.Close()
+	defer bCtx.Close()
 
+	ctx := testutil.GetContext(c)
 	res, body, err := request.Post(
+		ctx,
 		"/build",
-		request.RawContent(ctx.AsTarReader(c)),
+		request.RawContent(bCtx.AsTarReader(c)),
 		request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
@@ -268,14 +280,16 @@ func (s *DockerAPISuite) TestBuildOnBuildWithCopy(c *testing.T) {
 
 func (s *DockerAPISuite) TestBuildOnBuildCache(c *testing.T) {
 	build := func(dockerfile string) []byte {
-		ctx := fakecontext.New(c, "",
+		bCtx := fakecontext.New(c, "",
 			fakecontext.WithDockerfile(dockerfile),
 		)
-		defer ctx.Close()
+		defer bCtx.Close()
 
+		ctx := testutil.GetContext(c)
 		res, body, err := request.Post(
+			ctx,
 			"/build",
-			request.RawContent(ctx.AsTarReader(c)),
+			request.RawContent(bCtx.AsTarReader(c)),
 			request.ContentType("application/x-tar"))
 		assert.NilError(c, err)
 		assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode))
@@ -301,11 +315,12 @@ func (s *DockerAPISuite) TestBuildOnBuildCache(c *testing.T) {
 	parentID, childID := imageIDs[0], imageIDs[1]
 
 	client := testEnv.APIClient()
+	ctx := testutil.GetContext(c)
 
 	// check parentID is correct
 	// Parent is graphdriver-only
 	if !testEnv.UsingSnapshotter() {
-		image, _, err := client.ImageInspectWithRaw(context.Background(), childID)
+		image, _, err := client.ImageInspectWithRaw(ctx, childID)
 		assert.NilError(c, err)
 
 		assert.Check(c, is.Equal(parentID, image.Parent))
@@ -317,10 +332,11 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *testing.T) {
 
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	// tag the image to upload it to the private registry
-	err := client.ImageTag(context.TODO(), "busybox", repoName)
+	ctx := testutil.GetContext(c)
+	err := client.ImageTag(ctx, "busybox", repoName)
 	assert.Check(c, err)
 	// push the image to the registry
-	rc, err := client.ImagePush(context.TODO(), repoName, types.ImagePushOptions{RegistryAuth: "{}"})
+	rc, err := client.ImagePush(ctx, repoName, types.ImagePushOptions{RegistryAuth: "{}"})
 	assert.Check(c, err)
 	_, err = io.Copy(io.Discard, rc)
 	assert.Check(c, err)
@ -332,14 +348,15 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *testing.T) {
|
|||
COPY --from=foo /abc /
|
||||
`, repoName, repoName)
|
||||
|
||||
ctx := fakecontext.New(c, "",
|
||||
bCtx := fakecontext.New(c, "",
|
||||
fakecontext.WithDockerfile(dockerfile),
|
||||
)
|
||||
defer ctx.Close()
|
||||
defer bCtx.Close()
|
||||
|
||||
res, body, err := request.Post(
|
||||
ctx,
|
||||
"/build?pull=1",
|
||||
request.RawContent(ctx.AsTarReader(c)),
|
||||
request.RawContent(bCtx.AsTarReader(c)),
|
||||
request.ContentType("application/x-tar"))
|
||||
assert.NilError(c, err)
|
||||
assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode))
|
||||
|
@ -376,14 +393,16 @@ func (s *DockerAPISuite) TestBuildAddRemoteNoDecompress(c *testing.T) {
|
|||
RUN [ -f test.tar ]
|
||||
`, server.URL())
|
||||
|
||||
ctx := fakecontext.New(c, "",
|
||||
bCtx := fakecontext.New(c, "",
|
||||
fakecontext.WithDockerfile(dockerfile),
|
||||
)
|
||||
defer ctx.Close()
|
||||
defer bCtx.Close()
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
res, body, err := request.Post(
|
||||
ctx,
|
||||
"/build",
|
||||
request.RawContent(ctx.AsTarReader(c)),
|
||||
request.RawContent(bCtx.AsTarReader(c)),
|
||||
request.ContentType("application/x-tar"))
|
||||
assert.NilError(c, err)
|
||||
assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode))
|
||||
|
@ -405,15 +424,17 @@ func (s *DockerAPISuite) TestBuildChownOnCopy(c *testing.T) {
|
|||
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'test1:test2' ]
|
||||
RUN [ $(ls -nl / | grep new_dir | awk '{print $3":"$4}') = '1001:1002' ]
|
||||
`
|
||||
ctx := fakecontext.New(c, "",
|
||||
bCtx := fakecontext.New(c, "",
|
||||
fakecontext.WithDockerfile(dockerfile),
|
||||
fakecontext.WithFile("test_file1", "some test content"),
|
||||
)
|
||||
defer ctx.Close()
|
||||
defer bCtx.Close()
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
res, body, err := request.Post(
|
||||
ctx,
|
||||
"/build",
|
||||
request.RawContent(ctx.AsTarReader(c)),
|
||||
request.RawContent(bCtx.AsTarReader(c)),
|
||||
request.ContentType("application/x-tar"))
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, res.StatusCode, http.StatusOK)
|
||||
|
@ -434,9 +455,10 @@ COPY file /file`
|
|||
fakecontext.WithDockerfile(dockerfile),
|
||||
fakecontext.WithFile("file", "bar"))
|
||||
|
||||
build := func(ctx *fakecontext.Fake) string {
|
||||
res, body, err := request.Post("/build",
|
||||
request.RawContent(ctx.AsTarReader(c)),
|
||||
ctx := testutil.GetContext(c)
|
||||
build := func(bCtx *fakecontext.Fake) string {
|
||||
res, body, err := request.Post(ctx, "/build",
|
||||
request.RawContent(bCtx.AsTarReader(c)),
|
||||
request.ContentType("application/x-tar"))
|
||||
|
||||
assert.NilError(c, err)
|
||||
|
@ -474,9 +496,10 @@ ADD file /file`
|
|||
fakecontext.WithDockerfile(dockerfile),
|
||||
fakecontext.WithFile("file", "bar"))
|
||||
|
||||
build := func(ctx *fakecontext.Fake) string {
|
||||
res, body, err := request.Post("/build",
|
||||
request.RawContent(ctx.AsTarReader(c)),
|
||||
ctx := testutil.GetContext(c)
|
||||
build := func(bCtx *fakecontext.Fake) string {
|
||||
res, body, err := request.Post(ctx, "/build",
|
||||
request.RawContent(bCtx.AsTarReader(c)),
|
||||
request.ContentType("application/x-tar"))
|
||||
|
||||
assert.NilError(c, err)
|
||||
|
@ -508,14 +531,16 @@ func (s *DockerAPISuite) TestBuildScratchCopy(c *testing.T) {
|
|||
dockerfile := `FROM scratch
|
||||
ADD Dockerfile /
|
||||
ENV foo bar`
|
||||
ctx := fakecontext.New(c, "",
|
||||
bCtx := fakecontext.New(c, "",
|
||||
fakecontext.WithDockerfile(dockerfile),
|
||||
)
|
||||
defer ctx.Close()
|
||||
defer bCtx.Close()
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
res, body, err := request.Post(
|
||||
ctx,
|
||||
"/build",
|
||||
request.RawContent(ctx.AsTarReader(c)),
|
||||
request.RawContent(bCtx.AsTarReader(c)),
|
||||
request.ContentType("application/x-tar"))
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, res.StatusCode, http.StatusOK)
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/docker/testutil/fakecontext"
|
||||
"github.com/docker/docker/testutil/request"
|
||||
"gotest.tools/v3/assert"
|
||||
|
@ -24,7 +25,7 @@ func (s *DockerAPISuite) TestBuildWithRecycleBin(c *testing.T) {
|
|||
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
|
||||
defer ctx.Close()
|
||||
|
||||
res, body, err := request.Post(
|
||||
res, body, err := request.Post(testutil.GetContext(c),
|
||||
"/build",
|
||||
request.RawContent(ctx.AsTarReader(c)),
|
||||
request.ContentType("application/x-tar"))
|
||||
|
|
|
@@ -27,6 +27,7 @@ import (
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"github.com/docker/docker/volume"
"github.com/docker/go-connections/nat"

@@ -47,7 +48,8 @@ func (s *DockerAPISuite) TestContainerAPIGetAll(c *testing.T) {
options := types.ContainerListOptions{
All: true,
}
containers, err := apiClient.ContainerList(context.Background(), options)
ctx := testutil.GetContext(c)
containers, err := apiClient.ContainerList(ctx, options)
assert.NilError(c, err)
assert.Equal(c, len(containers), startCount+1)
actual := containers[0].Names[0]

@@ -66,7 +68,8 @@ func (s *DockerAPISuite) TestContainerAPIGetJSONNoFieldsOmitted(c *testing.T) {
options := types.ContainerListOptions{
All: true,
}
containers, err := apiClient.ContainerList(context.Background(), options)
ctx := testutil.GetContext(c)
containers, err := apiClient.ContainerList(ctx, options)
assert.NilError(c, err)
assert.Equal(c, len(containers), startCount+1)
actual := fmt.Sprintf("%+v", containers[0])

@@ -105,7 +108,7 @@ func (s *DockerAPISuite) TestContainerAPIGetExport(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

body, err := apiClient.ContainerExport(context.Background(), name)
body, err := apiClient.ContainerExport(testutil.GetContext(c), name)
assert.NilError(c, err)
defer body.Close()
found := false

@@ -132,7 +135,7 @@ func (s *DockerAPISuite) TestContainerAPIGetChanges(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

changes, err := apiClient.ContainerDiff(context.Background(), name)
changes, err := apiClient.ContainerDiff(testutil.GetContext(c), name)
assert.NilError(c, err)

// Check the changelog for removal of /etc/passwd

@@ -160,7 +163,7 @@ func (s *DockerAPISuite) TestGetContainerStats(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

stats, err := apiClient.ContainerStats(context.Background(), name, true)
stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, true)
assert.NilError(c, err)
bc <- b{stats, err}
}()

@@ -194,7 +197,7 @@ func (s *DockerAPISuite) TestGetContainerStatsRmRunning(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

stats, err := apiClient.ContainerStats(context.Background(), id, true)
stats, err := apiClient.ContainerStats(testutil.GetContext(c), id, true)
assert.NilError(c, err)
defer stats.Body.Close()

@@ -265,7 +268,7 @@ func (s *DockerAPISuite) TestGetContainerStatsStream(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

stats, err := apiClient.ContainerStats(context.Background(), name, true)
stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, true)
assert.NilError(c, err)
bc <- b{stats, err}
}()

@@ -307,7 +310,7 @@ func (s *DockerAPISuite) TestGetContainerStatsNoStream(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

stats, err := apiClient.ContainerStats(context.Background(), name, false)
stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, false)
assert.NilError(c, err)
bc <- b{stats, err}
}()

@@ -344,7 +347,7 @@ func (s *DockerAPISuite) TestGetStoppedContainerStats(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

resp, err := apiClient.ContainerStats(context.Background(), name, false)
resp, err := apiClient.ContainerStats(testutil.GetContext(c), name, false)
assert.NilError(c, err)
defer resp.Body.Close()
chResp <- err

@@ -373,7 +376,7 @@ func (s *DockerAPISuite) TestContainerAPIPause(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerPause(context.Background(), ContainerID)
err = apiClient.ContainerPause(testutil.GetContext(c), ContainerID)
assert.NilError(c, err)

pausedContainers := getPaused(c)

@@ -382,7 +385,7 @@ func (s *DockerAPISuite) TestContainerAPIPause(c *testing.T) {
c.Fatalf("there should be one paused container and not %d", len(pausedContainers))
}

err = apiClient.ContainerUnpause(context.Background(), ContainerID)
err = apiClient.ContainerUnpause(testutil.GetContext(c), ContainerID)
assert.NilError(c, err)

pausedContainers = getPaused(c)

@@ -400,7 +403,7 @@ func (s *DockerAPISuite) TestContainerAPITop(c *testing.T) {
defer apiClient.Close()

// sort by comm[andline] to make sure order stays the same in case of PID rollover
top, err := apiClient.ContainerTop(context.Background(), id, []string{"aux", "--sort=comm"})
top, err := apiClient.ContainerTop(testutil.GetContext(c), id, []string{"aux", "--sort=comm"})
assert.NilError(c, err)
assert.Equal(c, len(top.Titles), 11, fmt.Sprintf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles))

@@ -422,7 +425,7 @@ func (s *DockerAPISuite) TestContainerAPITopWindows(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

top, err := apiClient.ContainerTop(context.Background(), id, nil)
top, err := apiClient.ContainerTop(testutil.GetContext(c), id, nil)
assert.NilError(c, err)
assert.Equal(c, len(top.Titles), 4, "expected 4 titles, found %d: %v", len(top.Titles), top.Titles)

@@ -455,7 +458,7 @@ func (s *DockerAPISuite) TestContainerAPICommit(c *testing.T) {
Reference: "testcontainerapicommit:testtag",
}

img, err := apiClient.ContainerCommit(context.Background(), cName, options)
img, err := apiClient.ContainerCommit(testutil.GetContext(c), cName, options)
assert.NilError(c, err)

cmd := inspectField(c, img.ID, "Config.Cmd")

@@ -482,7 +485,7 @@ func (s *DockerAPISuite) TestContainerAPICommitWithLabelInConfig(c *testing.T) {
Config: &config,
}

img, err := apiClient.ContainerCommit(context.Background(), cName, options)
img, err := apiClient.ContainerCommit(testutil.GetContext(c), cName, options)
assert.NilError(c, err)

label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1")
@@ -522,7 +525,7 @@ func (s *DockerAPISuite) TestContainerAPIBadPort(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
assert.ErrorContains(c, err, `invalid port specification: "aa80"`)
}

@@ -536,7 +539,7 @@ func (s *DockerAPISuite) TestContainerAPICreate(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)

out, _ := dockerCmd(c, "start", "-a", ctr.ID)

@@ -548,7 +551,7 @@ func (s *DockerAPISuite) TestContainerAPICreateEmptyConfig(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &container.Config{}, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &container.Config{}, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")

assert.ErrorContains(c, err, "no command specified")
}

@@ -571,7 +574,7 @@ func (s *DockerAPISuite) TestContainerAPICreateMultipleNetworksConfig(c *testing
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &networkingConfig, nil, "")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &networkingConfig, nil, "")
msg := err.Error()
// network name order in error message is not deterministic
assert.Assert(c, strings.Contains(msg, "container cannot be connected to network endpoints"))

@@ -606,10 +609,10 @@ func UtilCreateNetworkMode(c *testing.T, networkMode container.NetworkMode) {
assert.NilError(c, err)
defer apiClient.Close()

ctr, err := apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)

containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
assert.NilError(c, err)

assert.Equal(c, containerJSON.HostConfig.NetworkMode, networkMode, "Mismatched NetworkMode")

@@ -633,10 +636,10 @@ func (s *DockerAPISuite) TestContainerAPICreateWithCpuSharesCpuset(c *testing.T)
assert.NilError(c, err)
defer apiClient.Close()

ctr, err := apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)

containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
assert.NilError(c, err)

out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares")

@@ -654,7 +657,7 @@ func (s *DockerAPISuite) TestContainerAPIVerifyHeader(c *testing.T) {
create := func(ct string) (*http.Response, io.ReadCloser, error) {
jsonData := bytes.NewBuffer(nil)
assert.Assert(c, json.NewEncoder(jsonData).Encode(config) == nil)
return request.Post("/containers/create", request.RawContent(io.NopCloser(jsonData)), request.ContentType(ct))
return request.Post(testutil.GetContext(c), "/containers/create", request.RawContent(io.NopCloser(jsonData)), request.ContentType(ct))
}

// Try with no content-type

@@ -700,7 +703,7 @@ func (s *DockerAPISuite) TestContainerAPIInvalidPortSyntax(c *testing.T) {
}
}`

res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)

@@ -724,7 +727,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *testi
}
}`

res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)

@@ -748,7 +751,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyRetryMismatch(c *testing.T
}
}`

res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)

@@ -772,7 +775,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *test
}
}`

res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)

@@ -796,7 +799,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *testi
}
}`

res, _, err := request.Post("/containers/create", request.RawString(config), request.JSON)
res, _, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusCreated)
}

@@ -827,7 +830,7 @@ func (s *DockerAPISuite) TestContainerAPIPostCreateNull(c *testing.T) {
"NetworkDisabled":false,
"OnBuild":null}`

res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusCreated)

@@ -858,7 +861,7 @@ func (s *DockerAPISuite) TestCreateWithTooLowMemoryLimit(c *testing.T) {
"Memory": 524287
}`

res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
b, err2 := request.ReadBody(body)
assert.Assert(c, err2 == nil)
@@ -881,7 +884,7 @@ func (s *DockerAPISuite) TestContainerAPIRename(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRename(context.Background(), containerID, newName)
err = apiClient.ContainerRename(testutil.GetContext(c), containerID, newName)
assert.NilError(c, err)

name := inspectField(c, containerID, "Name")

@@ -896,7 +899,7 @@ func (s *DockerAPISuite) TestContainerAPIKill(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerKill(context.Background(), name, "SIGKILL")
err = apiClient.ContainerKill(testutil.GetContext(c), name, "SIGKILL")
assert.NilError(c, err)

state := inspectField(c, name, "State.Running")

@@ -911,7 +914,7 @@ func (s *DockerAPISuite) TestContainerAPIRestart(c *testing.T) {
defer apiClient.Close()

timeout := 1
err = apiClient.ContainerRestart(context.Background(), name, container.StopOptions{Timeout: &timeout})
err = apiClient.ContainerRestart(testutil.GetContext(c), name, container.StopOptions{Timeout: &timeout})
assert.NilError(c, err)

assert.Assert(c, waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) == nil)

@@ -927,7 +930,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartNotimeoutParam(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRestart(context.Background(), name, container.StopOptions{})
err = apiClient.ContainerRestart(testutil.GetContext(c), name, container.StopOptions{})
assert.NilError(c, err)

assert.Assert(c, waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) == nil)

@@ -945,15 +948,15 @@ func (s *DockerAPISuite) TestContainerAPIStart(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
assert.NilError(c, err)

err = apiClient.ContainerStart(context.Background(), name, types.ContainerStartOptions{})
err = apiClient.ContainerStart(testutil.GetContext(c), name, types.ContainerStartOptions{})
assert.NilError(c, err)

// second call to start should give 304
// maybe add ContainerStartWithRaw to test it
err = apiClient.ContainerStart(context.Background(), name, types.ContainerStartOptions{})
err = apiClient.ContainerStart(testutil.GetContext(c), name, types.ContainerStartOptions{})
assert.NilError(c, err)

// TODO(tibor): figure out why this doesn't work on windows

@@ -968,7 +971,7 @@ func (s *DockerAPISuite) TestContainerAPIStop(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerStop(context.Background(), name, container.StopOptions{
err = apiClient.ContainerStop(testutil.GetContext(c), name, container.StopOptions{
Timeout: &timeout,
})
assert.NilError(c, err)

@@ -976,7 +979,7 @@ func (s *DockerAPISuite) TestContainerAPIStop(c *testing.T) {

// second call to start should give 304
// maybe add ContainerStartWithRaw to test it
err = apiClient.ContainerStop(context.Background(), name, container.StopOptions{
err = apiClient.ContainerStop(testutil.GetContext(c), name, container.StopOptions{
Timeout: &timeout,
})
assert.NilError(c, err)

@@ -995,7 +998,7 @@ func (s *DockerAPISuite) TestContainerAPIWait(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

waitResC, errC := apiClient.ContainerWait(context.Background(), name, "")
waitResC, errC := apiClient.ContainerWait(testutil.GetContext(c), name, "")

select {
case err = <-errC:

@@ -1013,7 +1016,7 @@ func (s *DockerAPISuite) TestContainerAPICopyNotExistsAnyMore(c *testing.T) {
Resource: "/test.txt",
}
// no copy in client/
res, _, err := request.Post("/containers/"+name+"/copy", request.JSONBody(postData))
res, _, err := request.Post(testutil.GetContext(c), "/containers/"+name+"/copy", request.JSONBody(postData))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNotFound)
}

@@ -1027,7 +1030,7 @@ func (s *DockerAPISuite) TestContainerAPICopyPre124(c *testing.T) {
Resource: "/test.txt",
}

res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
res, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusOK)

@@ -1057,7 +1060,7 @@ func (s *DockerAPISuite) TestContainerAPICopyResourcePathEmptyPre124(c *testing.
Resource: "",
}

res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
res, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
assert.NilError(c, err)
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)

@@ -1078,7 +1081,7 @@ func (s *DockerAPISuite) TestContainerAPICopyResourcePathNotFoundPre124(c *testi
Resource: "/notexist",
}

res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
res, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
assert.NilError(c, err)
if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusInternalServerError)

@@ -1096,7 +1099,7 @@ func (s *DockerAPISuite) TestContainerAPICopyContainerNotFoundPr124(c *testing.T
Resource: "/something",
}

res, _, err := request.Post("/v1.23/containers/notexists/copy", request.JSONBody(postData))
res, _, err := request.Post(testutil.GetContext(c), "/v1.23/containers/notexists/copy", request.JSONBody(postData))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNotFound)
}

@@ -1113,7 +1116,7 @@ func (s *DockerAPISuite) TestContainerAPIDelete(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{})
err = apiClient.ContainerRemove(testutil.GetContext(c), id, types.ContainerRemoveOptions{})
assert.NilError(c, err)
}

@@ -1122,7 +1125,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteNotExist(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRemove(context.Background(), "doesnotexist", types.ContainerRemoveOptions{})
err = apiClient.ContainerRemove(testutil.GetContext(c), "doesnotexist", types.ContainerRemoveOptions{})
assert.ErrorContains(c, err, "No such container: doesnotexist")
}
@@ -1139,7 +1142,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteForce(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRemove(context.Background(), id, removeOptions)
err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
assert.NilError(c, err)
}

@@ -1167,7 +1170,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteRemoveLinks(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRemove(context.Background(), "tlink2/tlink1", removeOptions)
err = apiClient.ContainerRemove(testutil.GetContext(c), "tlink2/tlink1", removeOptions)
assert.NilError(c, err)

linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links")

@@ -1201,7 +1204,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteRemoveVolume(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRemove(context.Background(), id, removeOptions)
err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
assert.NilError(c, err)

_, err = os.Stat(source)

@@ -1216,7 +1219,7 @@ func (s *DockerAPISuite) TestContainerAPIChunkedEncoding(c *testing.T) {
"OpenStdin": true,
}

resp, _, err := request.Post("/containers/create", request.JSONBody(config), request.With(func(req *http.Request) error {
resp, _, err := request.Post(testutil.GetContext(c), "/containers/create", request.JSONBody(config), request.With(func(req *http.Request) error {
// This is a cheat to make the http request do chunked encoding
// Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite
// https://golang.org/src/pkg/net/http/request.go?s=11980:12172

@@ -1238,7 +1241,7 @@ func (s *DockerAPISuite) TestContainerAPIPostContainerStop(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerStop(context.Background(), containerID, container.StopOptions{})
err = apiClient.ContainerStop(testutil.GetContext(c), containerID, container.StopOptions{})
assert.NilError(c, err)
assert.Assert(c, waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second) == nil)
}

@@ -1255,7 +1258,7 @@ func (s *DockerAPISuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
assert.NilError(c, err)
out, _ := dockerCmd(c, "start", "-a", "echotest")
assert.Equal(c, strings.TrimSpace(out), "hello world")

@@ -1265,7 +1268,7 @@ func (s *DockerAPISuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c
Entrypoint string
Cmd []string
}{"busybox", "echo", []string{"hello", "world"}}
_, _, err = request.Post("/containers/create?name=echotest2", request.JSONBody(config2))
_, _, err = request.Post(testutil.GetContext(c), "/containers/create?name=echotest2", request.JSONBody(config2))
assert.NilError(c, err)
out, _ = dockerCmd(c, "start", "-a", "echotest2")
assert.Equal(c, strings.TrimSpace(out), "hello world")

@@ -1282,7 +1285,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCmd(c *testing
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
assert.NilError(c, err)
out, _ := dockerCmd(c, "start", "-a", "echotest")
assert.Equal(c, strings.TrimSpace(out), "hello world")

@@ -1292,7 +1295,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCmd(c *testing
Entrypoint string
Cmd string
}{"busybox", "echo", "hello world"}
_, _, err = request.Post("/containers/create?name=echotest2", request.JSONBody(config2))
_, _, err = request.Post(testutil.GetContext(c), "/containers/create?name=echotest2", request.JSONBody(config2))
assert.NilError(c, err)
out, _ = dockerCmd(c, "start", "-a", "echotest2")
assert.Equal(c, strings.TrimSpace(out), "hello world")

@@ -1309,7 +1312,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *
CapAdd string
CapDrop string
}{"busybox", "NET_ADMIN", "cap_sys_admin"}
res, _, err := request.Post("/containers/create?name=capaddtest0", request.JSONBody(config))
res, _, err := request.Post(testutil.GetContext(c), "/containers/create?name=capaddtest0", request.JSONBody(config))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusCreated)

@@ -1325,7 +1328,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config2, &hostConfig, &network.NetworkingConfig{}, nil, "capaddtest1")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config2, &hostConfig, &network.NetworkingConfig{}, nil, "capaddtest1")
assert.NilError(c, err)
}

@@ -1339,7 +1342,7 @@ func (s *DockerAPISuite) TestContainerAPICreateNoHostConfig118(c *testing.T) {
apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("v1.18"))
assert.NilError(c, err)

_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)
}

@@ -1368,7 +1371,7 @@ func (s *DockerAPISuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRoot
apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("v1.20"))
assert.NilError(c, err)

err = apiClient.CopyToContainer(context.Background(), cID, "/vol2/symlinkToAbsDir", nil, types.CopyToContainerOptions{})
err = apiClient.CopyToContainer(testutil.GetContext(c), cID, "/vol2/symlinkToAbsDir", nil, types.CopyToContainerOptions{})
assert.ErrorContains(c, err, "container rootfs is marked read-only")
}

@@ -1390,7 +1393,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithWrongCpusetValues(c *testin
}
name := "wrong-cpuset-cpus"

_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig1, &network.NetworkingConfig{}, nil, name)
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig1, &network.NetworkingConfig{}, nil, name)
expected := "Invalid value 1-42,, for cpuset cpus"
assert.ErrorContains(c, err, expected)

@@ -1400,7 +1403,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithWrongCpusetValues(c *testin
},
}
name = "wrong-cpuset-mems"
_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig2, &network.NetworkingConfig{}, nil, name)
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig2, &network.NetworkingConfig{}, nil, name)
expected = "Invalid value 42-3,1-- for cpuset mems"
assert.ErrorContains(c, err, expected)
}
@@ -1419,7 +1422,7 @@ func (s *DockerAPISuite) TestPostContainersCreateShmSizeNegative(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
assert.ErrorContains(c, err, "SHM size can not be less than 0")
}

@@ -1436,10 +1439,10 @@ func (s *DockerAPISuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *tes
assert.NilError(c, err)
defer apiClient.Close()

ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)

containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
assert.NilError(c, err)

assert.Equal(c, containerJSON.HostConfig.ShmSize, dconfig.DefaultShmSize)

@@ -1463,10 +1466,10 @@ func (s *DockerAPISuite) TestPostContainersCreateShmSizeOmitted(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)

containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
assert.NilError(c, err)

assert.Equal(c, containerJSON.HostConfig.ShmSize, int64(67108864))

@@ -1494,10 +1497,10 @@ func (s *DockerAPISuite) TestPostContainersCreateWithShmSize(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

ctr, err := apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)

containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
assert.NilError(c, err)

assert.Equal(c, containerJSON.HostConfig.ShmSize, int64(1073741824))

@@ -1520,10 +1523,10 @@ func (s *DockerAPISuite) TestPostContainersCreateMemorySwappinessHostConfigOmitt
assert.NilError(c, err)
defer apiClient.Close()

ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
assert.NilError(c, err)

containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
assert.NilError(c, err)

if versions.LessThan(testEnv.DaemonAPIVersion(), "1.31") {

@@ -1551,7 +1554,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *
defer apiClient.Close()

name := "oomscoreadj-over"
_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)

expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]"
assert.ErrorContains(c, err, expected)

@@ -1561,7 +1564,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *
}

name = "oomscoreadj-low"
_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)

expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]"
assert.ErrorContains(c, err, expected)

@@ -1573,7 +1576,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteWithEmptyName(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

err = apiClient.ContainerRemove(context.Background(), "", types.ContainerRemoveOptions{})
err = apiClient.ContainerRemove(testutil.GetContext(c), "", types.ContainerRemoveOptions{})
assert.Check(c, errdefs.IsNotFound(err))
}

@@ -1593,10 +1596,10 @@ func (s *DockerAPISuite) TestContainerAPIStatsWithNetworkDisabled(c *testing.T)
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
assert.NilError(c, err)

err = apiClient.ContainerStart(context.Background(), name, types.ContainerStartOptions{})
err = apiClient.ContainerStart(testutil.GetContext(c), name, types.ContainerStartOptions{})
assert.NilError(c, err)

assert.Assert(c, waitRun(name) == nil)

@@ -1607,7 +1610,7 @@ func (s *DockerAPISuite) TestContainerAPIStatsWithNetworkDisabled(c *testing.T)
}
bc := make(chan b, 1)
go func() {
stats, err := apiClient.ContainerStats(context.Background(), name, false)
stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, false)
bc <- b{stats, err}
}()

@@ -1931,7 +1934,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsValidation(c *testing.T) {
for i, x := range cases {
x := x
c.Run(fmt.Sprintf("case %d", i), func(c *testing.T) {
_, err = apiClient.ContainerCreate(context.Background(), &x.config, &x.hostConfig, &network.NetworkingConfig{}, nil, "")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &x.config, &x.hostConfig, &network.NetworkingConfig{}, nil, "")
if len(x.msg) > 0 {
assert.ErrorContains(c, err, x.msg, "%v", cases[i].config)
} else {

@@ -1964,7 +1967,7 @@ func (s *DockerAPISuite) TestContainerAPICreateMountsBindRead(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "test")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "test")
assert.NilError(c, err)

out, _ := dockerCmd(c, "start", "-a", "test")

@@ -2099,7 +2102,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
}...)
}

ctx := context.Background()
ctx := testutil.GetContext(c)
apiclient := testEnv.APIClient()
for i, x := range cases {
x := x

@@ -2138,7 +2141,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {

err = apiclient.ContainerStart(ctx, ctr.ID, types.ContainerStartOptions{})
assert.NilError(c, err)
poll.WaitOn(c, containerExit(apiclient, ctr.ID), poll.WithDelay(time.Second))
poll.WaitOn(c, containerExit(ctx, apiclient, ctr.ID), poll.WithDelay(time.Second))

err = apiclient.ContainerRemove(ctx, ctr.ID, types.ContainerRemoveOptions{
RemoveVolumes: true,
@@ -2164,9 +2167,9 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
}
}

func containerExit(apiclient client.APIClient, name string) func(poll.LogT) poll.Result {
func containerExit(ctx context.Context, apiclient client.APIClient, name string) func(poll.LogT) poll.Result {
return func(logT poll.LogT) poll.Result {
ctr, err := apiclient.ContainerInspect(context.Background(), name)
ctr, err := apiclient.ContainerInspect(ctx, name)
if err != nil {
return poll.Error(err)
}

@@ -2219,7 +2222,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsTmpfs(c *testing.T) {
Mounts: []mount.Mount{x.cfg},
}

_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, cName)
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, cName)
assert.NilError(c, err)
out, _ := dockerCmd(c, "start", "-a", cName)
for _, option := range x.expectedOptions {

@@ -2233,7 +2236,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsTmpfs(c *testing.T) {
// gets killed (with SIGKILL) by the kill API, that the restart policy is cancelled.
func (s *DockerAPISuite) TestContainerKillCustomStopSignal(c *testing.T) {
id := strings.TrimSpace(runSleepingContainer(c, "--stop-signal=SIGTERM", "--restart=always"))
res, _, err := request.Post("/containers/" + id + "/kill")
res, _, err := request.Post(testutil.GetContext(c), "/containers/"+id+"/kill")
assert.NilError(c, err)
defer res.Body.Close()
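Throughout this file, context.Background() at the call sites becomes testutil.GetContext(c). Presumably the helper hands back a per-test context that already carries the test's tracing span, so every API call a test makes is recorded under that test's trace and is cancelled when the test ends. The sketch below only illustrates the idea; the actual testutil implementation may cache the context per test, add baggage, or hook into the suite differently.

package testsketch

import (
	"context"
	"testing"

	"go.opentelemetry.io/otel"
)

// getContext returns a context carrying a span named after the running test
// and ends that span when the test finishes, so client calls made with this
// context are grouped under the test in the resulting trace.
func getContext(t *testing.T) context.Context {
	ctx, span := otel.Tracer("integration-cli").Start(context.Background(), t.Name())
	t.Cleanup(func() { span.End() })
	return ctx
}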
@@ -3,7 +3,6 @@
package main

import (
"context"
"fmt"
"io"
"math/rand"

@@ -14,6 +13,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/testutil"
"github.com/pkg/errors"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"

@@ -48,7 +48,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsBindNamedPipe(c *testing.T
cmd := fmt.Sprintf("echo %s > %s", text, containerPipeName)
name := "test-bind-npipe"

ctx := context.Background()
ctx := testutil.GetContext(c)
client := testEnv.APIClient()
_, err = client.ContainerCreate(ctx,
&container.Config{
@@ -11,6 +11,7 @@ import (
"testing"

"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"github.com/pkg/errors"
"gotest.tools/v3/assert"

@@ -22,7 +23,7 @@ func (s *DockerAPISuite) TestExecResizeAPIHeightWidthNoInt(c *testing.T) {
cleanedContainerID := strings.TrimSpace(out)

endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar"
res, _, err := request.Post(endpoint)
res, _, err := request.Post(testutil.GetContext(c), endpoint)
assert.NilError(c, err)
if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusInternalServerError)

@@ -42,7 +43,7 @@ func (s *DockerAPISuite) TestExecResizeImmediatelyAfterExecStart(c *testing.T) {
"Cmd": []string{"/bin/sh"},
}
uri := fmt.Sprintf("/containers/%s/exec", name)
res, body, err := request.Post(uri, request.JSONBody(data))
res, body, err := request.Post(testutil.GetContext(c), uri, request.JSONBody(data))
if err != nil {
return err
}

@@ -71,7 +72,7 @@ func (s *DockerAPISuite) TestExecResizeImmediatelyAfterExecStart(c *testing.T) {
}
defer wc.Close()

_, rc, err := request.Post(fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), request.ContentType("text/plain"))
_, rc, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), request.ContentType("text/plain"))
if err != nil {
// It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned.
if err == io.ErrUnexpectedEOF {
@@ -16,6 +16,7 @@ import (
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"

@@ -27,7 +28,7 @@ func (s *DockerAPISuite) TestExecAPICreateNoCmd(c *testing.T) {
name := "exec_test"
dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")

res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": nil}))
res, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": nil}))
assert.NilError(c, err)
if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusInternalServerError)

@@ -48,7 +49,7 @@ func (s *DockerAPISuite) TestExecAPICreateNoValidContentType(c *testing.T) {
c.Fatalf("Can not encode data to json %s", err)
}

res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.RawContent(io.NopCloser(jsonData)), request.ContentType("test/plain"))
res, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/containers/%s/exec", name), request.RawContent(io.NopCloser(jsonData)), request.ContentType("test/plain"))
assert.NilError(c, err)
if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusInternalServerError)

@@ -75,7 +76,7 @@ func (s *DockerAPISuite) TestExecAPICreateContainerPaused(c *testing.T) {
config := types.ExecConfig{
Cmd: []string{"true"},
}
_, err = apiClient.ContainerExecCreate(context.Background(), name, config)
_, err = apiClient.ContainerExecCreate(testutil.GetContext(c), name, config)
assert.ErrorContains(c, err, "Container "+name+" is paused, unpause the container before exec", "Expected message when creating exec command with Container %s is paused", name)
}

@@ -87,7 +88,7 @@ func (s *DockerAPISuite) TestExecAPIStart(c *testing.T) {
startExec(c, id, http.StatusOK)

var execJSON struct{ PID int }
inspectExec(c, id, &execJSON)
inspectExec(testutil.GetContext(c), c, id, &execJSON)
assert.Assert(c, execJSON.PID > 1)

id = createExec(c, "test")

@@ -111,7 +112,7 @@ func (s *DockerAPISuite) TestExecAPIStartEnsureHeaders(c *testing.T) {
dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top")

id := createExec(c, "test")
resp, _, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
resp, _, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
assert.NilError(c, err)
assert.Assert(c, resp.Header.Get("Server") != "")
}

@@ -121,7 +122,7 @@ func (s *DockerAPISuite) TestExecAPIStartBackwardsCompatible(c *testing.T) {
runSleepingContainer(c, "-d", "--name", "test")
id := createExec(c, "test")

resp, body, err := request.Post(fmt.Sprintf("/v1.20/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.ContentType("text/plain"))
resp, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/v1.20/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.ContentType("text/plain"))
assert.NilError(c, err)

b, err := request.ReadBody(body)

@@ -135,7 +136,7 @@ func (s *DockerAPISuite) TestExecAPIStartMultipleTimesError(c *testing.T) {
runSleepingContainer(c, "-d", "--name", "test")
execID := createExec(c, "test")
startExec(c, execID, http.StatusOK)
waitForExec(c, execID)
waitForExec(testutil.GetContext(c), c, execID)

startExec(c, execID, http.StatusConflict)
}

@@ -145,6 +146,8 @@ func (s *DockerAPISuite) TestExecAPIStartWithDetach(c *testing.T) {
name := "foo"
runSleepingContainer(c, "-d", "-t", "--name", name)

ctx := testutil.GetContext(c)

config := types.ExecConfig{
Cmd: []string{"true"},
AttachStderr: true,

@@ -154,17 +157,17 @@ func (s *DockerAPISuite) TestExecAPIStartWithDetach(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

createResp, err := apiClient.ContainerExecCreate(context.Background(), name, config)
createResp, err := apiClient.ContainerExecCreate(ctx, name, config)
assert.NilError(c, err)

_, body, err := request.Post(fmt.Sprintf("/exec/%s/start", createResp.ID), request.RawString(`{"Detach": true}`), request.JSON)
_, body, err := request.Post(ctx, fmt.Sprintf("/exec/%s/start", createResp.ID), request.RawString(`{"Detach": true}`), request.JSON)
assert.NilError(c, err)

b, err := request.ReadBody(body)
comment := fmt.Sprintf("response body: %s", b)
assert.NilError(c, err, comment)

resp, _, err := request.Get("/_ping")
resp, _, err := request.Get(ctx, "/_ping")
assert.NilError(c, err)
if resp.StatusCode != http.StatusOK {
c.Fatal("daemon is down, it should alive")

@@ -179,10 +182,11 @@ func (s *DockerAPISuite) TestExecAPIStartValidCommand(c *testing.T) {
id := createExecCmd(c, name, "true")
startExec(c, id, http.StatusOK)

waitForExec(c, id)
ctx := testutil.GetContext(c)
waitForExec(ctx, c, id)

var inspectJSON struct{ ExecIDs []string }
inspectContainer(c, name, &inspectJSON)
inspectContainer(ctx, c, name, &inspectJSON)

assert.Assert(c, inspectJSON.ExecIDs == nil)
}

@@ -198,10 +202,11 @@ func (s *DockerAPISuite) TestExecAPIStartInvalidCommand(c *testing.T) {
} else {
startExec(c, id, http.StatusBadRequest)
}
waitForExec(c, id)
ctx := testutil.GetContext(c)
waitForExec(ctx, c, id)

var inspectJSON struct{ ExecIDs []string }
inspectContainer(c, name, &inspectJSON)
inspectContainer(ctx, c, name, &inspectJSON)

assert.Assert(c, inspectJSON.ExecIDs == nil)
}

@@ -229,13 +234,15 @@ func (s *DockerAPISuite) TestExecStateCleanup(c *testing.T) {

id := createExecCmd(c, name, "ls")
startExec(c, id, http.StatusOK)
waitForExec(c, id)

ctx := testutil.GetContext(c)
waitForExec(ctx, c, id)

poll.WaitOn(c, pollCheck(c, checkReadDir, checker.Equals(len(fi))), poll.WithTimeout(5*time.Second))

id = createExecCmd(c, name, "invalid")
startExec(c, id, http.StatusBadRequest)
waitForExec(c, id)
waitForExec(ctx, c, id)

poll.WaitOn(c, pollCheck(c, checkReadDir, checker.Equals(len(fi))), poll.WithTimeout(5*time.Second))

@@ -250,7 +257,7 @@ func createExec(c *testing.T, name string) string {
}

func createExecCmd(c *testing.T, name string, cmd string) string {
_, reader, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}}))
_, reader, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}}))
assert.NilError(c, err)
b, err := io.ReadAll(reader)
assert.NilError(c, err)

@@ -263,7 +270,7 @@ func createExecCmd(c *testing.T, name string, cmd string) string {
}

func startExec(c *testing.T, id string, code int) {
resp, body, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
resp, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
assert.NilError(c, err)

b, err := request.ReadBody(body)

@@ -271,8 +278,8 @@ func startExec(c *testing.T, id string, code int) {
assert.Equal(c, resp.StatusCode, code, "response body: %s", b)
}

func inspectExec(c *testing.T, id string, out interface{}) {
resp, body, err := request.Get(fmt.Sprintf("/exec/%s/json", id))
func inspectExec(ctx context.Context, c *testing.T, id string, out interface{}) {
resp, body, err := request.Get(ctx, fmt.Sprintf("/exec/%s/json", id))
assert.NilError(c, err)
defer body.Close()
assert.Equal(c, resp.StatusCode, http.StatusOK)

@@ -280,7 +287,7 @@ func inspectExec(c *testing.T, id string, out interface{}) {
assert.NilError(c, err)
}

func waitForExec(c *testing.T, id string) {
func waitForExec(ctx context.Context, c *testing.T, id string) {
timeout := time.After(60 * time.Second)
var execJSON struct{ Running bool }
for {

@@ -290,15 +297,15 @@ func waitForExec(c *testing.T, id string) {
default:
}

inspectExec(c, id, &execJSON)
inspectExec(ctx, c, id, &execJSON)
if !execJSON.Running {
break
}
}
}

func inspectContainer(c *testing.T, id string, out interface{}) {
resp, body, err := request.Get("/containers/" + id + "/json")
func inspectContainer(ctx context.Context, c *testing.T, id string, out interface{}) {
resp, body, err := request.Get(ctx, "/containers/"+id+"/json")
assert.NilError(c, err)
defer body.Close()
assert.Equal(c, resp.StatusCode, http.StatusOK)
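The helpers above (inspectExec, waitForExec, inspectContainer) now take the context from the caller instead of minting context.Background() internally, which keeps their requests inside the calling test's trace and lets them be cancelled with it. If a polling helper also wanted to stop as soon as its context is done, a loop along the lines below would work; this is an illustrative pattern under that assumption, not the shape of the helpers in this file.

package testsketch

import (
	"context"
	"errors"
	"time"
)

// waitUntil polls cond until it reports done, the timeout elapses, or ctx is
// cancelled, whichever comes first.
func waitUntil(ctx context.Context, timeout time.Duration, cond func(context.Context) bool) error {
	deadline := time.After(timeout)
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-deadline:
			return errors.New("condition not met before timeout")
		default:
		}
		if cond(ctx) {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
}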
@@ -1,7 +1,6 @@
package main

import (
"context"
"net/http"
"net/http/httptest"
"strings"

@@ -11,6 +10,7 @@ import (
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"gotest.tools/v3/assert"
)

@@ -20,14 +20,15 @@ func (s *DockerAPISuite) TestAPIImagesSaveAndLoad(c *testing.T) {
buildImageSuccessfully(c, "saveandload", build.WithDockerfile("FROM busybox\nENV FOO bar"))
id := getIDByName(c, "saveandload")

res, body, err := request.Get("/images/" + id + "/get")
ctx := testutil.GetContext(c)
res, body, err := request.Get(ctx, "/images/"+id+"/get")
assert.NilError(c, err)
defer body.Close()
assert.Equal(c, res.StatusCode, http.StatusOK)

dockerCmd(c, "rmi", id)

res, loadBody, err := request.Post("/images/load", request.RawContent(body), request.ContentType("application/x-tar"))
res, loadBody, err := request.Post(ctx, "/images/load", request.RawContent(body), request.ContentType("application/x-tar"))
assert.NilError(c, err)
defer loadBody.Close()
assert.Equal(c, res.StatusCode, http.StatusOK)

@@ -50,13 +51,13 @@ func (s *DockerAPISuite) TestAPIImagesDelete(c *testing.T) {

dockerCmd(c, "tag", name, "test:tag1")

_, err = apiClient.ImageRemove(context.Background(), id, types.ImageRemoveOptions{})
_, err = apiClient.ImageRemove(testutil.GetContext(c), id, types.ImageRemoveOptions{})
assert.ErrorContains(c, err, "unable to delete")

_, err = apiClient.ImageRemove(context.Background(), "test:noexist", types.ImageRemoveOptions{})
_, err = apiClient.ImageRemove(testutil.GetContext(c), "test:noexist", types.ImageRemoveOptions{})
assert.ErrorContains(c, err, "No such image")

_, err = apiClient.ImageRemove(context.Background(), "test:tag1", types.ImageRemoveOptions{})
_, err = apiClient.ImageRemove(testutil.GetContext(c), "test:tag1", types.ImageRemoveOptions{})
assert.NilError(c, err)
}

@@ -72,7 +73,7 @@ func (s *DockerAPISuite) TestAPIImagesHistory(c *testing.T) {
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar"))
id := getIDByName(c, name)

historydata, err := apiClient.ImageHistory(context.Background(), id)
historydata, err := apiClient.ImageHistory(testutil.GetContext(c), id)
assert.NilError(c, err)

assert.Assert(c, len(historydata) != 0)

@@ -102,8 +103,9 @@ func (s *DockerAPISuite) TestAPIImagesImportBadSrc(c *testing.T) {
{http.StatusInternalServerError, "%2Fdata%2Ffile.tar"},
}

ctx := testutil.GetContext(c)
for _, te := range tt {
res, _, err := request.Post(strings.Join([]string{"/images/create?fromSrc=", te.fromSrc}, ""), request.JSON)
res, _, err := request.Post(ctx, strings.Join([]string{"/images/create?fromSrc=", te.fromSrc}, ""), request.JSON)
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, te.statusExp)
assert.Equal(c, res.Header.Get("Content-Type"), "application/json")

@@ -114,7 +116,7 @@ func (s *DockerAPISuite) TestAPIImagesImportBadSrc(c *testing.T) {
func (s *DockerAPISuite) TestAPIImagesSearchJSONContentType(c *testing.T) {
testRequires(c, Network)

res, b, err := request.Get("/images/search?term=test", request.JSON)
res, b, err := request.Get(testutil.GetContext(c), "/images/search?term=test", request.JSON)
assert.NilError(c, err)
b.Close()
assert.Equal(c, res.StatusCode, http.StatusOK)

@@ -127,7 +129,7 @@ func (s *DockerAPISuite) TestAPIImagesSizeCompatibility(c *testing.T) {
apiclient := testEnv.APIClient()
defer apiclient.Close()

images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{})
images, err := apiclient.ImageList(testutil.GetContext(c), types.ImageListOptions{})
assert.NilError(c, err)
assert.Assert(c, len(images) != 0)
for _, image := range images {

@@ -138,7 +140,7 @@ func (s *DockerAPISuite) TestAPIImagesSizeCompatibility(c *testing.T) {
assert.NilError(c, err)
defer apiclient.Close()

v124Images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{})
v124Images, err := apiclient.ImageList(testutil.GetContext(c), types.ImageListOptions{})
assert.NilError(c, err)
assert.Assert(c, len(v124Images) != 0)
for _, image := range v124Images {
@@ -1,7 +1,6 @@
package main

import (
"context"
"encoding/json"
"strings"
"testing"

@@ -9,6 +8,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions/v1p20"
"github.com/docker/docker/client"
"github.com/docker/docker/testutil"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)

@@ -111,7 +111,7 @@ func (s *DockerAPISuite) TestInspectAPIImageResponse(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()

imageJSON, _, err := apiClient.ImageInspectWithRaw(context.Background(), "busybox")
imageJSON, _, err := apiClient.ImageInspectWithRaw(testutil.GetContext(c), "busybox")
assert.NilError(c, err)

assert.Check(c, len(imageJSON.RepoTags) == 2)
@@ -3,7 +3,6 @@ package main
 import (
 	"bufio"
 	"bytes"
-	"context"
 	"fmt"
 	"io"
 	"net/http"
@@ -15,6 +14,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
@@ -30,7 +30,7 @@ func (s *DockerAPISuite) TestLogsAPIWithStdout(c *testing.T) {
 	}
 
 	chLog := make(chan logOut, 1)
-	res, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id))
+	res, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -62,7 +62,7 @@ func (s *DockerAPISuite) TestLogsAPINoStdoutNorStderr(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerLogs(context.Background(), name, types.ContainerLogsOptions{})
+	_, err = apiClient.ContainerLogs(testutil.GetContext(c), name, types.ContainerLogsOptions{})
 	assert.ErrorContains(c, err, "Bad parameters: you must choose at least one stream")
 }
 
@@ -72,7 +72,7 @@ func (s *DockerAPISuite) TestLogsAPIFollowEmptyOutput(c *testing.T) {
 	t0 := time.Now()
 	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10")
 
-	_, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
+	_, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
 	t1 := time.Now()
 	assert.NilError(c, err)
 	body.Close()
@@ -84,7 +84,7 @@ func (s *DockerAPISuite) TestLogsAPIFollowEmptyOutput(c *testing.T) {
 
 func (s *DockerAPISuite) TestLogsAPIContainerNotFound(c *testing.T) {
 	name := "nonExistentContainer"
-	resp, _, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
+	resp, _, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
 	assert.NilError(c, err)
 	assert.Equal(c, resp.StatusCode, http.StatusNotFound)
 }
@@ -106,7 +106,7 @@ func (s *DockerAPISuite) TestLogsAPIUntilFutureFollow(c *testing.T) {
 	}
 
 	cfg := types.ContainerLogsOptions{Until: until.Format(time.RFC3339Nano), Follow: true, ShowStdout: true, Timestamps: true}
-	reader, err := client.ContainerLogs(context.Background(), name, cfg)
+	reader, err := client.ContainerLogs(testutil.GetContext(c), name, cfg)
 	assert.NilError(c, err)
 
 	type logOut struct {
@@ -168,7 +168,7 @@ func (s *DockerAPISuite) TestLogsAPIUntil(c *testing.T) {
 	}
 
 	extractBody := func(c *testing.T, cfg types.ContainerLogsOptions) []string {
-		reader, err := client.ContainerLogs(context.Background(), name, cfg)
+		reader, err := client.ContainerLogs(testutil.GetContext(c), name, cfg)
 		assert.NilError(c, err)
 
 		actualStdout := new(bytes.Buffer)
@@ -205,7 +205,7 @@ func (s *DockerAPISuite) TestLogsAPIUntilDefaultValue(c *testing.T) {
 	}
 
 	extractBody := func(c *testing.T, cfg types.ContainerLogsOptions) []string {
-		reader, err := client.ContainerLogs(context.Background(), name, cfg)
+		reader, err := client.ContainerLogs(testutil.GetContext(c), name, cfg)
 		assert.NilError(c, err)
 
 		actualStdout := new(bytes.Buffer)

@@ -13,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
@@ -268,7 +269,7 @@ func createDeletePredefinedNetwork(c *testing.T, name string) {
 }
 
 func isNetworkAvailable(c *testing.T, name string) bool {
-	resp, body, err := request.Get("/networks")
+	resp, body, err := request.Get(testutil.GetContext(c), "/networks")
 	assert.NilError(c, err)
 	defer resp.Body.Close()
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
@@ -291,7 +292,7 @@ func getNetworkIDByName(c *testing.T, name string) string {
 	v := url.Values{}
 	v.Set("filters", filterJSON)
 
-	resp, body, err := request.Get("/networks?" + v.Encode())
+	resp, body, err := request.Get(testutil.GetContext(c), "/networks?"+v.Encode())
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.NilError(c, err)
 
@@ -311,7 +312,7 @@ func getNetworkIDByName(c *testing.T, name string) string {
 }
 
 func getNetworkResource(c *testing.T, id string) *types.NetworkResource {
-	_, obj, err := request.Get("/networks/" + id)
+	_, obj, err := request.Get(testutil.GetContext(c), "/networks/"+id)
 	assert.NilError(c, err)
 
 	nr := types.NetworkResource{}
@@ -322,7 +323,7 @@ func getNetworkResource(c *testing.T, id string) *types.NetworkResource {
 }
 
 func createNetwork(c *testing.T, config types.NetworkCreateRequest, expectedStatusCode int) string {
-	resp, body, err := request.Post("/networks/create", request.JSONBody(config))
+	resp, body, err := request.Post(testutil.GetContext(c), "/networks/create", request.JSONBody(config))
 	assert.NilError(c, err)
 	defer resp.Body.Close()
 
@@ -347,7 +348,7 @@ func connectNetwork(c *testing.T, nid, cid string) {
 		Container: cid,
 	}
 
-	resp, _, err := request.Post("/networks/"+nid+"/connect", request.JSONBody(config))
+	resp, _, err := request.Post(testutil.GetContext(c), "/networks/"+nid+"/connect", request.JSONBody(config))
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.NilError(c, err)
 }
@@ -357,13 +358,13 @@ func disconnectNetwork(c *testing.T, nid, cid string) {
 		Container: cid,
 	}
 
-	resp, _, err := request.Post("/networks/"+nid+"/disconnect", request.JSONBody(config))
+	resp, _, err := request.Post(testutil.GetContext(c), "/networks/"+nid+"/disconnect", request.JSONBody(config))
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.NilError(c, err)
 }
 
 func deleteNetwork(c *testing.T, id string, shouldSucceed bool) {
-	resp, _, err := request.Delete("/networks/" + id)
+	resp, _, err := request.Delete(testutil.GetContext(c), "/networks/"+id)
 	assert.NilError(c, err)
 	defer resp.Body.Close()
 	if !shouldSucceed {

@@ -1,7 +1,6 @@
 package main
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"net/http"
@@ -17,6 +16,7 @@ import (
 	"github.com/docker/docker/api/types/system"
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/client"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/skip"
@@ -30,7 +30,7 @@ func (s *DockerAPISuite) TestAPIStatsNoStreamGetCpu(c *testing.T) {
 
 	id := strings.TrimSpace(out)
 	assert.NilError(c, waitRun(id))
-	resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id))
+	resp, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/stats?stream=false", id))
 	assert.NilError(c, err)
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.Equal(c, resp.Header.Get("Content-Type"), "application/json")
@@ -70,7 +70,7 @@ func (s *DockerAPISuite) TestAPIStatsStoppedContainerInGoroutines(c *testing.T)
 	id := strings.TrimSpace(out)
 
 	getGoRoutines := func() int {
-		_, body, err := request.Get("/info")
+		_, body, err := request.Get(testutil.GetContext(c), "/info")
 		assert.NilError(c, err)
 		info := system.Info{}
 		err = json.NewDecoder(body).Decode(&info)
@@ -81,7 +81,7 @@ func (s *DockerAPISuite) TestAPIStatsStoppedContainerInGoroutines(c *testing.T)
 
 	// When the HTTP connection is closed, the number of goroutines should not increase.
 	routines := getGoRoutines()
-	_, body, err := request.Get("/containers/" + id + "/stats")
+	_, body, err := request.Get(testutil.GetContext(c), "/containers/"+id+"/stats")
 	assert.NilError(c, err)
 	body.Close()
 
@@ -194,7 +194,7 @@ func (s *DockerAPISuite) TestAPIStatsNetworkStatsVersioning(c *testing.T) {
 func getNetworkStats(c *testing.T, id string) map[string]types.NetworkStats {
 	var st *types.StatsJSON
 
-	_, body, err := request.Get("/containers/" + id + "/stats?stream=false")
+	_, body, err := request.Get(testutil.GetContext(c), "/containers/"+id+"/stats?stream=false")
 	assert.NilError(c, err)
 
 	err = json.NewDecoder(body).Decode(&st)
@@ -211,7 +211,7 @@ func getNetworkStats(c *testing.T, id string) map[string]types.NetworkStats {
 func getVersionedStats(c *testing.T, id string, apiVersion string) map[string]interface{} {
 	stats := make(map[string]interface{})
 
-	_, body, err := request.Get("/" + apiVersion + "/containers/" + id + "/stats?stream=false")
+	_, body, err := request.Get(testutil.GetContext(c), "/"+apiVersion+"/containers/"+id+"/stats?stream=false")
 	assert.NilError(c, err)
 	defer body.Close()
 
@@ -269,9 +269,9 @@ func (s *DockerAPISuite) TestAPIStatsContainerNotFound(c *testing.T) {
 
 	expected := "No such container: nonexistent"
 
-	_, err = apiClient.ContainerStats(context.Background(), "nonexistent", true)
+	_, err = apiClient.ContainerStats(testutil.GetContext(c), "nonexistent", true)
 	assert.ErrorContains(c, err, expected)
-	_, err = apiClient.ContainerStats(context.Background(), "nonexistent", false)
+	_, err = apiClient.ContainerStats(testutil.GetContext(c), "nonexistent", false)
 	assert.ErrorContains(c, err, expected)
 }
 
@@ -288,7 +288,7 @@ func (s *DockerAPISuite) TestAPIStatsNoStreamConnectedContainers(c *testing.T) {
 
 	ch := make(chan error, 1)
 	go func() {
-		resp, body, err := request.Get("/containers/" + id2 + "/stats?stream=false")
+		resp, body, err := request.Get(testutil.GetContext(c), "/containers/"+id2+"/stats?stream=false")
 		defer body.Close()
 		if err != nil {
			ch <- err

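Alongside the client-level changes, these hunks show the testutil/request helpers (request.Get, request.Post, request.Delete) gaining a context as their first argument. Passing the per-test context down to the raw HTTP helpers is what lets the daemon stitch each request into the test's trace. As a hedged sketch only, and assuming propagation goes through the standard OpenTelemetry propagator rather than however the real helper wires it, a context-first GET helper could look like:

// Illustrative sketch; the real helper in testutil/request has more options.
package requestsketch

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

// get issues a GET against the daemon API, carrying the caller's trace context.
func get(ctx context.Context, host, path string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, host+path, nil)
	if err != nil {
		return nil, err
	}
	// Inject the current span context (traceparent header) so the daemon can
	// attach its server-side spans to the test's trace.
	otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header))
	return http.DefaultClient.Do(req)
}
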
@@ -10,16 +10,18 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/poll"
 )
 
 func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	d3 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	d3 := s.AddDaemon(ctx, c, true, false)
 
-	nodes := d1.ListNodes(c)
+	nodes := d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 3, fmt.Sprintf("nodes: %#v", nodes))
 
 loop0:
@@ -34,34 +36,39 @@ loop0:
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
 
-	nodes := d.ListNodes(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
-	d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
+	nodes := d.ListNodes(ctx, c)
+
+	d.UpdateNode(ctx, c, nodes[0].ID, func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
-	n := d.GetNode(c, nodes[0].ID)
+	n := d.GetNode(ctx, c, nodes[0].ID)
 	assert.Equal(c, n.Spec.Availability, swarm.NodeAvailabilityPause)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *testing.T) {
 	testRequires(c, Network)
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	_ = s.AddDaemon(c, true, false)
-
-	nodes := d1.ListNodes(c)
+	ctx := testutil.GetContext(c)
+
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	_ = s.AddDaemon(ctx, c, true, false)
+
+	nodes := d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 3, fmt.Sprintf("nodes: %#v", nodes))
 
 	// Getting the info so we can take the NodeID
-	d2Info := d2.SwarmInfo(c)
+	d2Info := d2.SwarmInfo(ctx, c)
 
 	// forceful removal of d2 should work
-	d1.RemoveNode(c, d2Info.NodeID, true)
+	d1.RemoveNode(ctx, c, d2Info.NodeID, true)
 
-	nodes = d1.ListNodes(c)
+	nodes = d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 2, fmt.Sprintf("nodes: %#v", nodes))
 
 	// Restart the node that was removed
@@ -71,57 +78,58 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *testing.T) {
 	time.Sleep(1 * time.Second)
 
 	// Make sure the node didn't rejoin
-	nodes = d1.ListNodes(c)
+	nodes = d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 2, fmt.Sprintf("nodes: %#v", nodes))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
 
 	time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
 
 	// start a service, expect balanced distribution
 	instances := 2
-	id := d1.CreateService(c, simpleTestService, setInstances(instances))
+	id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// drain d2, all containers should move to d1
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityDrain
 	})
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// set d2 back to active
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityActive
 	})
 
 	instances = 1
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
 
 	instances = 2
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
 
 	// drained node first so we don't get any old containers
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
 
-	d2ContainerCount := len(d2.ActiveContainers(c))
+	d2ContainerCount := len(d2.ActiveContainers(testutil.GetContext(c), c))
 
 	// set d2 to paused, scale service up, only d1 gets new tasks
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
 	instances = 4
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances-d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances-d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
 }

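A second shape change shows up in the swarm hunks: helpers such as CheckActiveContainerCount, CheckNodeReadyCount, and CheckRunningTaskImages used to be handed to pollCheck as method values, while the new code calls them with the per-test context and passes the result, i.e. the helpers become factories that return the actual check function. A hedged sketch of that factory pattern, mirroring the call sites above but not the concrete declarations in testutil/daemon, which may differ:

// Illustrative sketch of a context-bound poll check factory.
package checksketch

import (
	"context"
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// checkFunc matches what pollCheck expects: a value plus an explanatory comment.
type checkFunc func(t *testing.T) (interface{}, string)

// activeContainerCount returns a checkFunc bound to ctx and one daemon's client,
// so each poll iteration is traced under the calling test's span.
func activeContainerCount(ctx context.Context, apiClient client.APIClient) checkFunc {
	return func(t *testing.T) (interface{}, string) {
		containers, err := apiClient.ContainerList(ctx, types.ContainerListOptions{})
		if err != nil {
			return 0, err.Error()
		}
		return len(containers), "number of active containers"
	}
}
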
@@ -3,7 +3,6 @@
 package main
 
 import (
-	"context"
 	"fmt"
 	"strconv"
 	"strings"
@@ -16,6 +15,7 @@ import (
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	"golang.org/x/sys/unix"
 	"gotest.tools/v3/assert"
@@ -33,20 +33,21 @@ func setPortConfig(portConfig []swarm.PortConfig) testdaemon.ServiceConstructor
 }
 
 func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	// Create a service with a port mapping of 8080:8081.
 	portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}}
-	serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	serviceID := d.CreateService(ctx, c, simpleTestService, setInstances(1), setPortConfig(portConfig))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Update the service: changed the port mapping from 8080:8081 to 8082:8083.
 	updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}}
-	remoteService := d.GetService(c, serviceID)
-	d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig))
+	remoteService := d.GetService(ctx, c, serviceID)
+	d.UpdateService(ctx, c, remoteService, setPortConfig(updatedPortConfig))
 
 	// Inspect the service and verify port mapping.
-	updatedService := d.GetService(c, serviceID)
+	updatedService := d.GetService(ctx, c, serviceID)
 	assert.Assert(c, updatedService.Spec.EndpointSpec != nil)
 	assert.Equal(c, len(updatedService.Spec.EndpointSpec.Ports), 1)
 	assert.Equal(c, updatedService.Spec.EndpointSpec.Ports[0].TargetPort, uint32(8083))
@@ -54,19 +55,21 @@ func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
-	services := d.ListServices(c)
+	services := d.ListServices(ctx, c)
 	assert.Assert(c, services != nil)
 	assert.Assert(c, len(services) == 0, "services: %#v", services)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	instances := 2
-	id := d.CreateService(c, simpleTestService, setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	id := d.CreateService(ctx, c, simpleTestService, setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	client := d.NewClientT(c)
 	defer client.Close()
@@ -74,79 +77,82 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *testing.T) {
 	options := types.ServiceInspectOptions{InsertDefaults: true}
 
 	// insertDefaults inserts UpdateConfig when service is fetched by ID
-	resp, _, err := client.ServiceInspectWithRaw(context.Background(), id, options)
+	resp, _, err := client.ServiceInspectWithRaw(ctx, id, options)
 	out := fmt.Sprintf("%+v", resp)
 	assert.NilError(c, err)
 	assert.Assert(c, strings.Contains(out, "UpdateConfig"))
 
 	// insertDefaults inserts UpdateConfig when service is fetched by ID
-	resp, _, err = client.ServiceInspectWithRaw(context.Background(), "top", options)
+	resp, _, err = client.ServiceInspectWithRaw(ctx, "top", options)
 	out = fmt.Sprintf("%+v", resp)
 	assert.NilError(c, err)
 	assert.Assert(c, strings.Contains(out, "UpdateConfig"))
 
-	service := d.GetService(c, id)
+	service := d.GetService(ctx, c, id)
 	instances = 5
-	d.UpdateService(c, service, setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	d.UpdateService(ctx, c, service, setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d.RemoveService(c, service.ID)
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	d.RemoveService(ctx, c, service.ID)
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	d3 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	d3 := s.AddDaemon(ctx, c, true, false)
 
 	time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
 
 	instances := 9
-	id := d1.CreateService(c, simpleTestService, setInstances(instances))
+	id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// reconciliation on d2 node down
 	d2.Stop(c)
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// test downscaling
 	instances = 5
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	d3 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	d3 := s.AddDaemon(ctx, c, true, false)
 
-	d1.CreateService(c, simpleTestService, setGlobalMode)
+	d1.CreateService(ctx, c, simpleTestService, setGlobalMode)
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d4 := s.AddDaemon(c, true, false)
-	d5 := s.AddDaemon(c, true, false)
+	d4 := s.AddDaemon(ctx, c, true, false)
+	d5 := s.AddDaemon(ctx, c, true, false)
 
-	poll.WaitOn(c, pollCheck(c, d4.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d5.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d4.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d5.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
 
 	// service image at start
 	image1 := "busybox:latest"
@@ -163,23 +169,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
 	instances := 5
 	parallelism := 2
 	rollbackParallelism := 3
-	id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, serviceForUpdate, setInstances(instances))
 
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// issue service update
-	service := daemons[0].GetService(c, id)
-	daemons[0].UpdateService(c, service, setImage(image2))
+	service := daemons[0].GetService(ctx, c, id)
+	daemons[0].UpdateService(ctx, c, service, setImage(image2))
 
 	// first batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 3nd batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Roll back to the previous version. This uses the CLI because
 	// rollback used to be a client-side operation.
@@ -187,14 +193,15 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
 	assert.NilError(c, err, out)
 
 	// first batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	// service image at start
 	image1 := "busybox:latest"
@@ -213,12 +220,12 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
 	instances := 5
 	parallelism := 2
 	rollbackParallelism := 3
-	id := d.CreateService(c, serviceForUpdate, setInstances(instances), setUpdateOrder(swarm.UpdateOrderStartFirst), setRollbackOrder(swarm.UpdateOrderStartFirst))
+	id := d.CreateService(ctx, c, serviceForUpdate, setInstances(instances), setUpdateOrder(swarm.UpdateOrderStartFirst), setRollbackOrder(swarm.UpdateOrderStartFirst))
 
 	checkStartingTasks := func(expected int) []swarm.Task {
 		var startingTasks []swarm.Task
 		poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-			tasks := d.GetServiceTasks(c, id)
+			tasks := d.GetServiceTasks(ctx, c, id)
 			startingTasks = nil
 			for _, t := range tasks {
 				if t.Status.State == swarm.TaskStateStarting {
@@ -239,47 +246,47 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
 	}
 
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// issue service update
-	service := d.GetService(c, id)
-	d.UpdateService(c, service, setImage(image2))
+	service := d.GetService(ctx, c, id)
+	d.UpdateService(ctx, c, service, setImage(image2))
 
 	// first batch
 
 	// The old tasks should be running, and the new ones should be starting.
 	startingTasks := checkStartingTasks(parallelism)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// make it healthy
 	makeTasksHealthy(startingTasks)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
 
 	// The old tasks should be running, and the new ones should be starting.
 	startingTasks = checkStartingTasks(parallelism)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// make it healthy
 	makeTasksHealthy(startingTasks)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 3nd batch
 
 	// The old tasks should be running, and the new ones should be starting.
 	startingTasks = checkStartingTasks(1)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// make it healthy
 	makeTasksHealthy(startingTasks)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Roll back to the previous version. This uses the CLI because
 	// rollback is a client-side operation.
@@ -287,20 +294,21 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
 	assert.NilError(c, err, out)
 
 	// first batch
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
 
 	// service image at start
 	image1 := "busybox:latest"
@@ -309,18 +317,18 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
 
 	// create service
 	instances := 5
-	id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, serviceForUpdate, setInstances(instances))
 
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// issue service update
-	service := daemons[0].GetService(c, id)
-	daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1))
+	service := daemons[0].GetService(ctx, c, id)
+	daemons[0].UpdateService(ctx, c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1))
 
 	// should update 2 tasks and then pause
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceUpdateState(id), checker.Equals(swarm.UpdateStatePaused)), poll.WithTimeout(defaultReconciliationTimeout))
-	v, _ := daemons[0].CheckServiceRunningTasks(id)(c)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceUpdateState(ctx, id), checker.Equals(swarm.UpdateStatePaused)), poll.WithTimeout(defaultReconciliationTimeout))
+	v, _ := daemons[0].CheckServiceRunningTasks(ctx, id)(c)
 	assert.Assert(c, v == instances-2)
 
 	// Roll back to the previous version. This uses the CLI because
@@ -328,80 +336,82 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
 	out, err := daemons[0].Cmd("service", "update", "--detach", "--rollback", id)
 	assert.NilError(c, err, out)
 
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
 
 	// create service
 	constraints := []string{"node.role==worker"}
 	instances := 3
-	id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 	// validate tasks are running on worker nodes
-	tasks := daemons[0].GetServiceTasks(c, id)
+	tasks := daemons[0].GetServiceTasks(ctx, c, id)
 	for _, task := range tasks {
-		node := daemons[0].GetNode(c, task.NodeID)
+		node := daemons[0].GetNode(ctx, c, task.NodeID)
 		assert.Equal(c, node.Spec.Role, swarm.NodeRoleWorker)
 	}
 	// remove service
-	daemons[0].RemoveService(c, id)
+	daemons[0].RemoveService(ctx, c, id)
 
 	// create service
 	constraints = []string{"node.role!=worker"}
-	id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	tasks = daemons[0].GetServiceTasks(c, id)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	// validate tasks are running on manager nodes
 	for _, task := range tasks {
-		node := daemons[0].GetNode(c, task.NodeID)
+		node := daemons[0].GetNode(ctx, c, task.NodeID)
 		assert.Equal(c, node.Spec.Role, swarm.NodeRoleManager)
 	}
 	// remove service
-	daemons[0].RemoveService(c, id)
+	daemons[0].RemoveService(ctx, c, id)
 
 	// create service
 	constraints = []string{"node.role==nosuchrole"}
-	id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks created
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 	// let scheduler try
 	time.Sleep(250 * time.Millisecond)
 	// validate tasks are not assigned to any node
-	tasks = daemons[0].GetServiceTasks(c, id)
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	for _, task := range tasks {
 		assert.Equal(c, task.NodeID, "")
 	}
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
-	nodes := daemons[0].ListNodes(c)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	nodes := daemons[0].ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), nodeCount)
 
 	// add labels to nodes
-	daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
+	daemons[0].UpdateNode(ctx, c, nodes[0].ID, func(n *swarm.Node) {
 		n.Spec.Annotations.Labels = map[string]string{
 			"security": "high",
 		}
 	})
 	for i := 1; i < nodeCount; i++ {
-		daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) {
+		daemons[0].UpdateNode(ctx, c, nodes[i].ID, func(n *swarm.Node) {
 			n.Spec.Annotations.Labels = map[string]string{
 				"security": "low",
 			}
@ -411,92 +421,94 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
|
|||
// create service
|
||||
instances := 3
|
||||
constraints := []string{"node.labels.security==high"}
|
||||
id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
id := daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks ready
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks := daemons[0].GetServiceTasks(c, id)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks := daemons[0].GetServiceTasks(ctx, c, id)
|
||||
// validate all tasks are running on nodes[0]
|
||||
for _, task := range tasks {
|
||||
assert.Assert(c, task.NodeID == nodes[0].ID)
|
||||
}
|
||||
// remove service
|
||||
daemons[0].RemoveService(c, id)
|
||||
daemons[0].RemoveService(ctx, c, id)
|
||||
|
||||
// create service
|
||||
constraints = []string{"node.labels.security!=high"}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks ready
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks = daemons[0].GetServiceTasks(ctx, c, id)
|
||||
// validate all tasks are NOT running on nodes[0]
|
||||
for _, task := range tasks {
|
||||
assert.Assert(c, task.NodeID != nodes[0].ID)
|
||||
}
|
||||
// remove service
|
||||
daemons[0].RemoveService(c, id)
|
||||
daemons[0].RemoveService(ctx, c, id)
|
||||
|
||||
constraints = []string{"node.labels.security==medium"}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks created
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// let scheduler try
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
tasks = daemons[0].GetServiceTasks(ctx, c, id)
|
||||
// validate tasks are not assigned
|
||||
for _, task := range tasks {
|
||||
assert.Assert(c, task.NodeID == "")
|
||||
}
|
||||
// remove service
|
||||
daemons[0].RemoveService(c, id)
|
||||
daemons[0].RemoveService(ctx, c, id)
|
||||
|
||||
// multiple constraints
|
||||
constraints = []string{
|
||||
"node.labels.security==high",
|
||||
fmt.Sprintf("node.id==%s", nodes[1].ID),
|
||||
}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks created
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// let scheduler try
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
tasks = daemons[0].GetServiceTasks(ctx, c, id)
|
||||
// validate tasks are not assigned
|
||||
for _, task := range tasks {
|
||||
assert.Assert(c, task.NodeID == "")
|
||||
}
|
||||
// make nodes[1] fulfills the constraints
|
||||
daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) {
|
||||
daemons[0].UpdateNode(ctx, c, nodes[1].ID, func(n *swarm.Node) {
|
||||
n.Spec.Annotations.Labels = map[string]string{
|
||||
"security": "high",
|
||||
}
|
||||
})
|
||||
// wait for tasks ready
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks = daemons[0].GetServiceTasks(ctx, c, id)
|
||||
for _, task := range tasks {
|
||||
assert.Assert(c, task.NodeID == nodes[1].ID)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
const nodeCount = 3
|
||||
var daemons [nodeCount]*daemon.Daemon
|
||||
for i := 0; i < nodeCount; i++ {
|
||||
daemons[i] = s.AddDaemon(c, true, i == 0)
|
||||
daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
|
||||
}
|
||||
// wait for nodes ready
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
|
||||
nodes := daemons[0].ListNodes(c)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
|
||||
nodes := daemons[0].ListNodes(ctx, c)
|
||||
assert.Equal(c, len(nodes), nodeCount)
|
||||
|
||||
// add labels to nodes
|
||||
daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
|
||||
daemons[0].UpdateNode(ctx, c, nodes[0].ID, func(n *swarm.Node) {
|
||||
n.Spec.Annotations.Labels = map[string]string{
|
||||
"rack": "a",
|
||||
}
|
||||
})
|
||||
for i := 1; i < nodeCount; i++ {
|
||||
daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) {
|
||||
daemons[0].UpdateNode(ctx, c, nodes[i].ID, func(n *swarm.Node) {
|
||||
n.Spec.Annotations.Labels = map[string]string{
|
||||
"rack": "b",
|
||||
}
|
||||
|
@ -506,10 +518,10 @@ func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
|
|||
// create service
|
||||
instances := 4
|
||||
prefs := []swarm.PlacementPreference{{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.rack"}}}
|
||||
id := daemons[0].CreateService(c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances))
|
||||
id := daemons[0].CreateService(ctx, c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances))
|
||||
// wait for tasks ready
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks := daemons[0].GetServiceTasks(c, id)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks := daemons[0].GetServiceTasks(ctx, c, id)
|
||||
// validate all tasks are running on nodes[0]
|
||||
tasksOnNode := make(map[string]int)
|
||||
for _, task := range tasks {
|
||||
|
@ -523,22 +535,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
|
|||
func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon)
|
||||
testRequires(c, DaemonIsLinux)
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, false)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, false)
|
||||
|
||||
time.Sleep(1 * time.Second) // make sure all daemons are ready to accept
|
||||
|
||||
instances := 9
|
||||
d1.CreateService(c, simpleTestService, setInstances(instances))
|
||||
d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
getContainers := func() map[string]*daemon.Daemon {
|
||||
m := make(map[string]*daemon.Daemon)
|
||||
for _, d := range []*daemon.Daemon{d1, d2, d3} {
|
||||
for _, id := range d.ActiveContainers(c) {
|
||||
for _, id := range d.ActiveContainers(testutil.GetContext(c), c) {
|
||||
m[id] = d
|
||||
}
|
||||
}
|
||||
|
@ -555,7 +568,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *testing.T) {
|
|||
_, err := containers[toRemove].Cmd("stop", toRemove)
|
||||
assert.NilError(c, err)
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
containers2 := getContainers()
|
||||
assert.Assert(c, len(containers2) == instances)
|
||||
|
@ -581,7 +594,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *testing.T) {
|
|||
|
||||
time.Sleep(time.Second) // give some time to handle the signal
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
containers2 = getContainers()
|
||||
assert.Assert(c, len(containers2) == instances)
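The poll calls above switch from passing method values (d1.CheckActiveContainerCount) to calling the helper with a context first (d1.CheckActiveContainerCount(ctx)), which implies the helpers now return a check closure bound to that context. Below is an illustrative sketch of that shape, including a reducedCheck that sums results across daemons; the names mirror the test helpers, but the bodies and exact signatures are assumptions.

package example

import (
	"context"
	"strings"
	"testing"
)

// checkFunc mirrors the shape pollCheck expects in these tests.
type checkFunc func(c *testing.T) (interface{}, string)

type fakeDaemon struct{}

// activeContainerCount stands in for a context-aware API call; in the real
// helpers the context carries the test's trace span.
func (d *fakeDaemon) activeContainerCount(ctx context.Context) int { return 0 }

// CheckActiveContainerCount matches the d.CheckActiveContainerCount(ctx)
// call style: it binds ctx and returns the closure that poll.WaitOn drives.
func (d *fakeDaemon) CheckActiveContainerCount(ctx context.Context) checkFunc {
	return func(c *testing.T) (interface{}, string) {
		return d.activeContainerCount(ctx), "active container count"
	}
}

// reducedCheck runs every bound check and reduces the results, e.g. with a
// sum, so a single poll can wait on a cluster-wide container count.
func reducedCheck(reduce func(...interface{}) interface{}, checks ...checkFunc) checkFunc {
	return func(c *testing.T) (interface{}, string) {
		var vals []interface{}
		var comments []string
		for _, chk := range checks {
			v, comment := chk(c)
			vals = append(vals, v)
			if comment != "" {
				comments = append(comments, comment)
			}
		}
		return reduce(vals...), strings.Join(comments, ", ")
	}
}

func sumAsIntegers(vals ...interface{}) interface{} {
	var sum int
	for _, v := range vals {
		sum += v.(int)
	}
	return sum
}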
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/testutil"
|
||||
testdaemon "github.com/docker/docker/testutil/daemon"
|
||||
"github.com/docker/docker/testutil/request"
|
||||
"github.com/moby/swarmkit/v2/ca"
|
||||
|
@ -35,32 +36,33 @@ import (
|
|||
var defaultReconciliationTimeout = 30 * time.Second
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
// todo: should find a better way to verify that components are running than /info
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
info := d1.SwarmInfo(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
info := d1.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, true)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
assert.Equal(c, info.Cluster.RootRotationInProgress, false)
|
||||
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
info = d2.SwarmInfo(c)
|
||||
d2 := s.AddDaemon(ctx, c, true, false)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, false)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
|
||||
// Leaving cluster
|
||||
assert.NilError(c, d2.SwarmLeave(c, false))
|
||||
assert.NilError(c, d2.SwarmLeave(ctx, c, false))
|
||||
|
||||
info = d2.SwarmInfo(c)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, false)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
d2.SwarmJoin(c, swarm.JoinRequest{
|
||||
d2.SwarmJoin(ctx, c, swarm.JoinRequest{
|
||||
ListenAddr: d1.SwarmListenAddr(),
|
||||
JoinToken: d1.JoinTokens(c).Worker,
|
||||
RemoteAddrs: []string{d1.SwarmListenAddr()},
|
||||
})
|
||||
|
||||
info = d2.SwarmInfo(c)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, false)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
|
||||
|
@ -71,96 +73,98 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) {
|
|||
d1.StartNode(c)
|
||||
d2.StartNode(c)
|
||||
|
||||
info = d1.SwarmInfo(c)
|
||||
info = d1.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, true)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
|
||||
info = d2.SwarmInfo(c)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, false)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, false, false)
|
||||
d1.SwarmInit(c, swarm.InitRequest{})
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, false, false)
|
||||
d1.SwarmInit(ctx, c, swarm.InitRequest{})
|
||||
|
||||
// todo: error message differs depending if some components of token are valid
|
||||
|
||||
d2 := s.AddDaemon(c, false, false)
|
||||
d2 := s.AddDaemon(ctx, c, false, false)
|
||||
c2 := d2.NewClientT(c)
|
||||
err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
|
||||
err := c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
|
||||
ListenAddr: d2.SwarmListenAddr(),
|
||||
RemoteAddrs: []string{d1.SwarmListenAddr()},
|
||||
})
|
||||
assert.ErrorContains(c, err, "join token is necessary")
|
||||
info := d2.SwarmInfo(c)
|
||||
info := d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
|
||||
err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
|
||||
ListenAddr: d2.SwarmListenAddr(),
|
||||
JoinToken: "foobaz",
|
||||
RemoteAddrs: []string{d1.SwarmListenAddr()},
|
||||
})
|
||||
assert.ErrorContains(c, err, "invalid join token")
|
||||
info = d2.SwarmInfo(c)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
workerToken := d1.JoinTokens(c).Worker
|
||||
|
||||
d2.SwarmJoin(c, swarm.JoinRequest{
|
||||
d2.SwarmJoin(ctx, c, swarm.JoinRequest{
|
||||
ListenAddr: d2.SwarmListenAddr(),
|
||||
JoinToken: workerToken,
|
||||
RemoteAddrs: []string{d1.SwarmListenAddr()},
|
||||
})
|
||||
info = d2.SwarmInfo(c)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
assert.NilError(c, d2.SwarmLeave(c, false))
|
||||
info = d2.SwarmInfo(c)
|
||||
assert.NilError(c, d2.SwarmLeave(ctx, c, false))
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
// change tokens
|
||||
d1.RotateTokens(c)
|
||||
|
||||
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
|
||||
err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
|
||||
ListenAddr: d2.SwarmListenAddr(),
|
||||
JoinToken: workerToken,
|
||||
RemoteAddrs: []string{d1.SwarmListenAddr()},
|
||||
})
|
||||
assert.ErrorContains(c, err, "join token is necessary")
|
||||
info = d2.SwarmInfo(c)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
workerToken = d1.JoinTokens(c).Worker
|
||||
|
||||
d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
|
||||
info = d2.SwarmInfo(c)
|
||||
d2.SwarmJoin(ctx, c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
assert.NilError(c, d2.SwarmLeave(c, false))
|
||||
info = d2.SwarmInfo(c)
|
||||
assert.NilError(c, d2.SwarmLeave(ctx, c, false))
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
// change spec, don't change tokens
|
||||
d1.UpdateSwarm(c, func(s *swarm.Spec) {})
|
||||
|
||||
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
|
||||
err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
|
||||
ListenAddr: d2.SwarmListenAddr(),
|
||||
RemoteAddrs: []string{d1.SwarmListenAddr()},
|
||||
})
|
||||
assert.ErrorContains(c, err, "join token is necessary")
|
||||
info = d2.SwarmInfo(c)
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
|
||||
info = d2.SwarmInfo(c)
|
||||
d2.SwarmJoin(ctx, c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
assert.NilError(c, d2.SwarmLeave(c, false))
|
||||
info = d2.SwarmInfo(c)
|
||||
assert.NilError(c, d2.SwarmLeave(ctx, c, false))
|
||||
info = d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
}
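Joins and other API calls in this test go through clients created with d.NewClientT(c) and are now given the per-test context. One plausible way to make such a client propagate traces, shown purely as an assumption rather than what NewClientT actually does, is to wrap its HTTP transport with otelhttp:

package example

import (
	"net/http"

	"github.com/docker/docker/client"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newTracedClient builds an API client whose transport starts a client span
// per request and injects trace headers. The host is whatever daemon the
// test targets (a TCP endpoint in this simplified sketch).
func newTracedClient(host string) (*client.Client, error) {
	httpClient := &http.Client{
		// otelhttp wraps the base transport; each call made with a context
		// carrying the test span becomes a child span of that test.
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}
	return client.NewClientWithOpts(
		client.WithHost(host),
		client.WithHTTPClient(httpClient),
		client.WithAPIVersionNegotiation(),
	)
}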
|
||||
|
||||
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, false, false)
|
||||
d1.SwarmInit(c, swarm.InitRequest{})
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, false, false)
|
||||
d1.SwarmInit(ctx, c, swarm.InitRequest{})
|
||||
d1.UpdateSwarm(c, func(s *swarm.Spec) {
|
||||
s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
|
||||
{
|
||||
|
@ -174,20 +178,21 @@ func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) {
|
|||
},
|
||||
}
|
||||
})
|
||||
info := d1.SwarmInfo(c)
|
||||
info := d1.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, len(info.Cluster.Spec.CAConfig.ExternalCAs), 2)
|
||||
assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, "")
|
||||
assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, "cacert")
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, false, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, false, false)
|
||||
splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
|
||||
splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
|
||||
replacementToken := strings.Join(splitToken, "-")
|
||||
c2 := d2.NewClientT(c)
|
||||
err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
|
||||
err := c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
|
||||
ListenAddr: d2.SwarmListenAddr(),
|
||||
JoinToken: replacementToken,
|
||||
RemoteAddrs: []string{d1.SwarmListenAddr()},
|
||||
|
@ -196,25 +201,26 @@ func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, false, false)
|
||||
d1.SwarmInit(c, swarm.InitRequest{})
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, false, false)
|
||||
d1.SwarmInit(ctx, c, swarm.InitRequest{})
|
||||
d2 := s.AddDaemon(ctx, c, true, false)
|
||||
|
||||
info := d2.SwarmInfo(c)
|
||||
info := d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, false)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
|
||||
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
|
||||
d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
|
||||
n.Spec.Role = swarm.NodeRoleManager
|
||||
})
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable(ctx), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
|
||||
d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
|
||||
n.Spec.Role = swarm.NodeRoleWorker
|
||||
})
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.False()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable(ctx), checker.False()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// Wait for the role to change to worker in the cert. This is partially
|
||||
// done because it's something worth testing in its own right, and
|
||||
|
@ -235,10 +241,10 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
|
|||
}, checker.Equals("swarm-worker")), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// Demoting last node should fail
|
||||
node := d1.GetNode(c, d1.NodeID())
|
||||
node := d1.GetNode(ctx, c, d1.NodeID())
|
||||
node.Spec.Role = swarm.NodeRoleWorker
|
||||
url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
|
||||
res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
|
||||
res, body, err := request.Post(testutil.GetContext(c), url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
|
||||
assert.NilError(c, err)
|
||||
b, err := request.ReadBody(body)
|
||||
assert.NilError(c, err)
|
||||
|
@ -253,44 +259,46 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
|
|||
if !strings.Contains(string(b), "last manager of the swarm") {
|
||||
assert.Assert(c, strings.Contains(string(b), "this would result in a loss of quorum"))
|
||||
}
|
||||
info = d1.SwarmInfo(c)
|
||||
info = d1.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
assert.Equal(c, info.ControlAvailable, true)
|
||||
|
||||
// Promote already demoted node
|
||||
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
|
||||
d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
|
||||
n.Spec.Role = swarm.NodeRoleManager
|
||||
})
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable(ctx), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
// add three managers, one of these is leader
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// start a service by hitting each of the 3 managers
|
||||
d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
|
||||
d1.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
|
||||
s.Spec.Name = "test1"
|
||||
})
|
||||
d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
|
||||
d2.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
|
||||
s.Spec.Name = "test2"
|
||||
})
|
||||
d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
|
||||
d3.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
|
||||
s.Spec.Name = "test3"
|
||||
})
|
||||
|
||||
// 3 services should be started now, because the requests were proxied to leader
|
||||
// query each node and make sure it returns 3 services
|
||||
for _, d := range []*daemon.Daemon{d1, d2, d3} {
|
||||
services := d.ListServices(c)
|
||||
services := d.ListServices(ctx, c)
|
||||
assert.Equal(c, len(services), 3)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
if runtime.GOARCH == "s390x" {
|
||||
c.Skip("Disabled on s390x")
|
||||
}
|
||||
|
@ -299,14 +307,14 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
|
|||
}
|
||||
|
||||
// Create 3 nodes
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// assert that the first node we made is the leader, and the other two are followers
|
||||
assert.Equal(c, d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, true)
|
||||
assert.Equal(c, d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, false)
|
||||
assert.Equal(c, d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, false)
|
||||
assert.Equal(c, d1.GetNode(ctx, c, d1.NodeID()).ManagerStatus.Leader, true)
|
||||
assert.Equal(c, d1.GetNode(ctx, c, d2.NodeID()).ManagerStatus.Leader, false)
|
||||
assert.Equal(c, d1.GetNode(ctx, c, d3.NodeID()).ManagerStatus.Leader, false)
|
||||
|
||||
d1.Stop(c)
|
||||
|
||||
|
@ -321,7 +329,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
|
|||
leader = nil
|
||||
followers = nil
|
||||
for _, d := range nodes {
|
||||
n := d.GetNode(c, d.NodeID(), func(err error) bool {
|
||||
n := d.GetNode(ctx, c, d.NodeID(), func(err error) bool {
|
||||
if strings.Contains(err.Error(), context.DeadlineExceeded.Error()) || strings.Contains(err.Error(), "swarm does not have a leader") {
|
||||
lastErr = err
|
||||
return true
|
||||
|
@ -372,6 +380,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
if runtime.GOARCH == "s390x" {
|
||||
c.Skip("Disabled on s390x")
|
||||
}
|
||||
|
@ -379,18 +388,18 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
|
|||
c.Skip("Disabled on ppc64le")
|
||||
}
|
||||
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
d1.CreateService(c, simpleTestService)
|
||||
d1.CreateService(ctx, c, simpleTestService)
|
||||
|
||||
d2.Stop(c)
|
||||
|
||||
// make sure there is a leader
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckLeader(ctx), checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
|
||||
d1.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
|
||||
s.Spec.Name = "top1"
|
||||
})
|
||||
|
||||
|
@ -404,36 +413,37 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
|
|||
|
||||
// d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
|
||||
_, err := cli.ServiceCreate(testutil.GetContext(c), service.Spec, types.ServiceCreateOptions{})
|
||||
return err.Error(), ""
|
||||
}, checker.Contains("Make sure more than half of the managers are online.")), poll.WithTimeout(defaultReconciliationTimeout*2))
|
||||
|
||||
d2.StartNode(c)
|
||||
|
||||
// make sure there is a leader
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckLeader(ctx), checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
|
||||
d1.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
|
||||
s.Spec.Name = "top3"
|
||||
})
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
instances := 2
|
||||
d.CreateService(c, simpleTestService, setInstances(instances))
|
||||
d.CreateService(ctx, c, simpleTestService, setInstances(instances))
|
||||
|
||||
id, err := d.Cmd("run", "-d", "busybox", "top")
|
||||
assert.NilError(c, err, id)
|
||||
id = strings.TrimSpace(id)
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
assert.ErrorContains(c, d.SwarmLeave(c, false), "")
|
||||
assert.NilError(c, d.SwarmLeave(c, true))
|
||||
assert.ErrorContains(c, d.SwarmLeave(ctx, c, false), "")
|
||||
assert.NilError(c, d.SwarmLeave(ctx, c, true))
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
id2, err := d.Cmd("ps", "-q")
|
||||
assert.NilError(c, err, id2)
|
||||
|
@ -443,26 +453,28 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) {
|
|||
// #23629
|
||||
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) {
|
||||
testRequires(c, Network)
|
||||
s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, false, false)
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, false, false)
|
||||
|
||||
id, err := d2.Cmd("run", "-d", "busybox", "top")
|
||||
assert.NilError(c, err, id)
|
||||
id = strings.TrimSpace(id)
|
||||
|
||||
c2 := d2.NewClientT(c)
|
||||
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
|
||||
err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
|
||||
ListenAddr: d2.SwarmListenAddr(),
|
||||
RemoteAddrs: []string{"123.123.123.123:1234"},
|
||||
})
|
||||
assert.ErrorContains(c, err, "Timeout was reached")
|
||||
|
||||
info := d2.SwarmInfo(c)
|
||||
info := d2.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStatePending)
|
||||
|
||||
assert.NilError(c, d2.SwarmLeave(c, true))
|
||||
assert.NilError(c, d2.SwarmLeave(ctx, c, true))
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
id2, err := d2.Cmd("ps", "-q")
|
||||
assert.NilError(c, err, id2)
|
||||
|
@ -472,61 +484,65 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) {
|
|||
// #23705
|
||||
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *testing.T) {
|
||||
testRequires(c, Network)
|
||||
d := s.AddDaemon(c, false, false)
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, false, false)
|
||||
client := d.NewClientT(c)
|
||||
err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
|
||||
err := client.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
|
||||
ListenAddr: d.SwarmListenAddr(),
|
||||
RemoteAddrs: []string{"123.123.123.123:1234"},
|
||||
})
|
||||
assert.ErrorContains(c, err, "Timeout was reached")
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState(ctx), checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d.RestartNode(c)
|
||||
|
||||
info := d.SwarmInfo(c)
|
||||
info := d.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
instances := 2
|
||||
id := d1.CreateService(c, simpleTestService, setInstances(instances))
|
||||
id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
|
||||
|
||||
d1.GetService(c, id)
|
||||
d1.GetService(ctx, c, id)
|
||||
d1.RestartNode(c)
|
||||
d1.GetService(c, id)
|
||||
d1.GetService(ctx, c, id)
|
||||
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d2.GetService(c, id)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d2.GetService(ctx, c, id)
|
||||
d2.RestartNode(c)
|
||||
d2.GetService(c, id)
|
||||
d2.GetService(ctx, c, id)
|
||||
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
d3.GetService(c, id)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
d3.GetService(ctx, c, id)
|
||||
d3.RestartNode(c)
|
||||
d3.GetService(c, id)
|
||||
d3.GetService(ctx, c, id)
|
||||
|
||||
err := d3.Kill()
|
||||
assert.NilError(c, err)
|
||||
time.Sleep(1 * time.Second) // time to handle signal
|
||||
d3.StartNode(c)
|
||||
d3.GetService(c, id)
|
||||
d3.GetService(ctx, c, id)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
instances := 2
|
||||
id := d.CreateService(c, simpleTestService, setInstances(instances))
|
||||
id := d.CreateService(ctx, c, simpleTestService, setInstances(instances))
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
containers := d.ActiveContainers(c)
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
containers := d.ActiveContainers(ctx, c)
|
||||
instances = 4
|
||||
d.UpdateService(c, d.GetService(c, id), setInstances(instances))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
containers2 := d.ActiveContainers(c)
|
||||
d.UpdateService(ctx, c, d.GetService(ctx, c, id), setInstances(instances))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
containers2 := d.ActiveContainers(ctx, c)
|
||||
|
||||
loop0:
|
||||
for _, c1 := range containers {
|
||||
|
@ -540,11 +556,12 @@ loop0:
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) {
|
||||
d := s.AddDaemon(c, false, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, false, false)
|
||||
req := swarm.InitRequest{
|
||||
ListenAddr: "",
|
||||
}
|
||||
res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
|
||||
res, _, err := request.Post(testutil.GetContext(c), "/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
|
||||
|
||||
|
@ -552,44 +569,45 @@ func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) {
|
|||
ListenAddr: "0.0.0.0:2377",
|
||||
RemoteAddrs: []string{""},
|
||||
}
|
||||
res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
|
||||
res, _, err = request.Post(testutil.GetContext(c), "/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
instances := 2
|
||||
id := d1.CreateService(c, simpleTestService, setInstances(instances))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// drain d2, all containers should move to d1
|
||||
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
|
||||
d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
|
||||
n.Spec.Availability = swarm.NodeAvailabilityDrain
|
||||
})
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d2.Stop(c)
|
||||
|
||||
d1.SwarmInit(c, swarm.InitRequest{
|
||||
d1.SwarmInit(ctx, c, swarm.InitRequest{
|
||||
ForceNewCluster: true,
|
||||
Spec: swarm.Spec{},
|
||||
})
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
info := d3.SwarmInfo(c)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
info := d3.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, true)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
|
||||
instances = 4
|
||||
d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))
|
||||
d3.UpdateService(ctx, c, d3.GetService(ctx, c, id), setInstances(instances))
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
}
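The raw request helpers (request.Post, request.Get) now take a context as their first argument. The short sketch below shows why that matters: with a context in hand, the helper can inject W3C trace headers before the call reaches the daemon, so server-side spans join the test's trace. doRequest is illustrative only, not the real testutil/request API, and it assumes a propagator has already been registered by the tracing setup.

package example

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

// doRequest builds a request bound to ctx and injects the current trace
// context into its headers before sending it.
func doRequest(ctx context.Context, method, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, http.NoBody)
	if err != nil {
		return nil, err
	}
	otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header))
	return http.DefaultClient.Do(req)
}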
|
||||
|
||||
func simpleTestService(s *swarm.Service) {
|
||||
|
@ -731,13 +749,14 @@ func setGlobalMode(s *swarm.Service) {
|
|||
func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerCount int) {
|
||||
var totalMCount, totalWCount int
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
for _, d := range cl {
|
||||
var info swarm.Info
|
||||
|
||||
// check info in a poll.WaitOn(), because if the cluster doesn't have a leader, `info` will return an error
|
||||
checkInfo := func(c *testing.T) (interface{}, string) {
|
||||
client := d.NewClientT(c)
|
||||
daemonInfo, err := client.Info(context.Background())
|
||||
daemonInfo, err := client.Info(ctx)
|
||||
info = daemonInfo.Swarm
|
||||
return err, "cluster not ready in time"
|
||||
}
|
||||
|
@ -751,12 +770,12 @@ func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerC
|
|||
totalMCount++
|
||||
var mCount, wCount int
|
||||
|
||||
for _, n := range d.ListNodes(c) {
|
||||
for _, n := range d.ListNodes(ctx, c) {
|
||||
waitReady := func(c *testing.T) (interface{}, string) {
|
||||
if n.Status.State == swarm.NodeStateReady {
|
||||
return true, ""
|
||||
}
|
||||
nn := d.GetNode(c, n.ID)
|
||||
nn := d.GetNode(ctx, c, n.ID)
|
||||
n = *nn
|
||||
return n.Status.State == swarm.NodeStateReady, fmt.Sprintf("state of node %s, reported by %s", n.ID, d.NodeID())
|
||||
}
|
||||
|
@ -766,7 +785,7 @@ func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerC
|
|||
if n.Spec.Availability == swarm.NodeAvailabilityActive {
|
||||
return true, ""
|
||||
}
|
||||
nn := d.GetNode(c, n.ID)
|
||||
nn := d.GetNode(ctx, c, n.ID)
|
||||
n = *nn
|
||||
return n.Spec.Availability == swarm.NodeAvailabilityActive, fmt.Sprintf("availability of node %s, reported by %s", n.ID, d.NodeID())
|
||||
}
|
||||
|
@ -792,20 +811,21 @@ func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerC
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
mCount, wCount := 5, 1
|
||||
|
||||
var nodes []*daemon.Daemon
|
||||
for i := 0; i < mCount; i++ {
|
||||
manager := s.AddDaemon(c, true, true)
|
||||
info := manager.SwarmInfo(c)
|
||||
manager := s.AddDaemon(ctx, c, true, true)
|
||||
info := manager.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, true)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
nodes = append(nodes, manager)
|
||||
}
|
||||
|
||||
for i := 0; i < wCount; i++ {
|
||||
worker := s.AddDaemon(c, true, false)
|
||||
info := worker.SwarmInfo(c)
|
||||
worker := s.AddDaemon(ctx, c, true, false)
|
||||
info := worker.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.ControlAvailable, false)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
nodes = append(nodes, worker)
|
||||
|
@ -857,38 +877,41 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
instances := 2
|
||||
id := d.CreateService(c, simpleTestService, setInstances(instances))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
id := d.CreateService(ctx, c, simpleTestService, setInstances(instances))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
service := d.GetService(c, id)
|
||||
service := d.GetService(ctx, c, id)
|
||||
instances = 5
|
||||
|
||||
setInstances(instances)(service)
|
||||
cli := d.NewClientT(c)
|
||||
defer cli.Close()
|
||||
_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
|
||||
_, err := cli.ServiceUpdate(ctx, service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
|
||||
assert.NilError(c, err)
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
}
|
||||
|
||||
// Unlocking an unlocked swarm results in an error
|
||||
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"})
|
||||
assert.ErrorContains(c, err, "swarm is not locked")
|
||||
}
|
||||
|
||||
// #29885
|
||||
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
|
||||
assert.NilError(c, err)
|
||||
defer ln.Close()
|
||||
d := s.AddDaemon(c, false, false)
|
||||
d := s.AddDaemon(ctx, c, false, false)
|
||||
client := d.NewClientT(c)
|
||||
_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
|
||||
_, err = client.SwarmInit(testutil.GetContext(c), swarm.InitRequest{
|
||||
ListenAddr: d.SwarmListenAddr(),
|
||||
})
|
||||
assert.ErrorContains(c, err, "address already in use")
|
||||
|
@ -898,7 +921,8 @@ func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) {
|
|||
// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
|
||||
// This test makes sure the fixes correctly output scopes instead.
|
||||
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
cli := d.NewClientT(c)
|
||||
defer cli.Close()
|
||||
|
||||
|
@ -909,19 +933,19 @@ func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
|
|||
|
||||
networkCreate.Driver = "bridge"
|
||||
|
||||
n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
|
||||
n1, err := cli.NetworkCreate(testutil.GetContext(c), name, networkCreate)
|
||||
assert.NilError(c, err)
|
||||
|
||||
networkCreate.Driver = "overlay"
|
||||
|
||||
n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
|
||||
n2, err := cli.NetworkCreate(testutil.GetContext(c), name, networkCreate)
|
||||
assert.NilError(c, err)
|
||||
|
||||
r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
|
||||
r1, err := cli.NetworkInspect(testutil.GetContext(c), n1.ID, types.NetworkInspectOptions{})
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, r1.Scope, "local")
|
||||
|
||||
r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
|
||||
r2, err := cli.NetworkInspect(testutil.GetContext(c), n2.ID, types.NetworkInspectOptions{})
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, r2.Scope, "swarm")
|
||||
}
|
||||
|
@ -930,13 +954,14 @@ func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
|
|||
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) {
|
||||
// Issue #36386 can be a independent one, which is worth further investigation.
// Issue #36386 can be an independent one, which is worth further investigation.
|
||||
c.Skip("Root cause of Issue #36386 is needed")
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
instances := 1
|
||||
d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
|
||||
d.CreateService(ctx, c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
|
||||
if s.Spec.TaskTemplate.ContainerSpec == nil {
|
||||
s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
|
||||
}
|
||||
|
@ -946,19 +971,20 @@ func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) {
|
|||
}
|
||||
})
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
containers := d.ActiveContainers(c)
|
||||
containers := d.ActiveContainers(testutil.GetContext(c), c)
|
||||
|
||||
out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
|
||||
assert.NilError(c, err, out)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
|
||||
m := s.AddDaemon(c, true, true)
|
||||
w := s.AddDaemon(c, true, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
m := s.AddDaemon(ctx, c, true, true)
|
||||
w := s.AddDaemon(ctx, c, true, false)
|
||||
|
||||
info := m.SwarmInfo(c)
|
||||
info := m.SwarmInfo(ctx, c)
|
||||
|
||||
currentTrustRoot := info.Cluster.TLSInfo.TrustRoot
|
||||
|
||||
|
@ -984,7 +1010,7 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
|
|||
// poll to make sure update succeeds
|
||||
var clusterTLSInfo swarm.TLSInfo
|
||||
for j := 0; j < 18; j++ {
|
||||
info := m.SwarmInfo(c)
|
||||
info := m.SwarmInfo(ctx, c)
|
||||
|
||||
// the desired CA cert and key is always redacted
|
||||
assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCAKey, "")
|
||||
|
@ -1006,8 +1032,8 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
|
|||
// could take another second or two for the nodes to trust the new roots after they've all gotten
|
||||
// new TLS certificates
|
||||
for j := 0; j < 18; j++ {
|
||||
mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
|
||||
wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo
|
||||
mInfo := m.GetNode(ctx, c, m.NodeID()).Description.TLSInfo
|
||||
wInfo := m.GetNode(ctx, c, w.NodeID()).Description.TLSInfo
|
||||
|
||||
if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
|
||||
break
|
||||
|
@ -1017,17 +1043,17 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
|
|||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
|
||||
assert.DeepEqual(c, m.GetNode(c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
|
||||
assert.DeepEqual(c, m.GetNode(c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
|
||||
assert.DeepEqual(c, m.GetNode(ctx, c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
|
||||
assert.DeepEqual(c, m.GetNode(ctx, c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
|
||||
currentTrustRoot = clusterTLSInfo.TrustRoot
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "test-scoped-network"
|
||||
ctx := context.Background()
|
||||
apiclient := d.NewClientT(c)
|
||||
|
||||
resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
@ -11,6 +12,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/docker/testutil/request"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
@ -19,8 +21,8 @@ type DockerAPISuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerAPISuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerAPISuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
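The TearDownTest hooks across these suites gain a context parameter. The sketch below shows the assumed runner-side shape: the same per-test context is handed to the test body and to the teardown, so any cleanup API calls are traced under the test's span. This is a simplified stand-in for the internal suite package, not its actual code.

package example

import (
	"context"
	"testing"
)

// tearDownTester is the hook shape the suites above now implement.
type tearDownTester interface {
	TearDownTest(ctx context.Context, t *testing.T)
}

// runTest passes one context to both the body and the teardown so both ends
// of the test are recorded in the same trace.
func runTest(ctx context.Context, t *testing.T, suite interface{}, body func(context.Context, *testing.T)) {
	if td, ok := suite.(tearDownTester); ok {
		defer td.TearDownTest(ctx, t)
	}
	body(ctx, t)
}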
|
||||
|
||||
func (s *DockerAPISuite) OnTimeout(c *testing.T) {
|
||||
|
@ -28,13 +30,13 @@ func (s *DockerAPISuite) OnTimeout(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerAPISuite) TestAPIOptionsRoute(c *testing.T) {
|
||||
resp, _, err := request.Do("/", request.Method(http.MethodOptions))
|
||||
resp, _, err := request.Do(testutil.GetContext(c), "/", request.Method(http.MethodOptions))
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, resp.StatusCode, http.StatusOK)
|
||||
}
|
||||
|
||||
func (s *DockerAPISuite) TestAPIGetEnabledCORS(c *testing.T) {
|
||||
res, body, err := request.Get("/version")
|
||||
res, body, err := request.Get(testutil.GetContext(c), "/version")
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, res.StatusCode, http.StatusOK)
|
||||
body.Close()
|
||||
|
@ -59,7 +61,7 @@ func (s *DockerAPISuite) TestAPIClientVersionOldNotSupported(c *testing.T) {
|
|||
v[1] = strconv.Itoa(vMinInt)
|
||||
version := strings.Join(v, ".")
|
||||
|
||||
resp, body, err := request.Get("/v" + version + "/version")
|
||||
resp, body, err := request.Get(testutil.GetContext(c), "/v"+version+"/version")
|
||||
assert.NilError(c, err)
|
||||
defer body.Close()
|
||||
assert.Equal(c, resp.StatusCode, http.StatusBadRequest)
|
||||
|
@ -70,7 +72,7 @@ func (s *DockerAPISuite) TestAPIClientVersionOldNotSupported(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerAPISuite) TestAPIErrorJSON(c *testing.T) {
|
||||
httpResp, body, err := request.Post("/containers/create", request.JSONBody(struct{}{}))
|
||||
httpResp, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.JSONBody(struct{}{}))
|
||||
assert.NilError(c, err)
|
||||
if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
|
||||
assert.Equal(c, httpResp.StatusCode, http.StatusInternalServerError)
|
||||
|
@ -87,7 +89,7 @@ func (s *DockerAPISuite) TestAPIErrorPlainText(c *testing.T) {
|
|||
// Windows requires API 1.25 or later. This test is validating a behaviour which was present
|
||||
// in v1.23, but changed in 1.24, hence not applicable on Windows. See apiVersionSupportsJSONErrors
|
||||
testRequires(c, DaemonIsLinux)
|
||||
httpResp, body, err := request.Post("/v1.23/containers/create", request.JSONBody(struct{}{}))
|
||||
httpResp, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/create", request.JSONBody(struct{}{}))
|
||||
assert.NilError(c, err)
|
||||
if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
|
||||
assert.Equal(c, httpResp.StatusCode, http.StatusInternalServerError)
|
||||
|
@ -102,7 +104,7 @@ func (s *DockerAPISuite) TestAPIErrorPlainText(c *testing.T) {
|
|||
|
||||
func (s *DockerAPISuite) TestAPIErrorNotFoundJSON(c *testing.T) {
|
||||
// 404 is a different code path to normal errors, so test separately
|
||||
httpResp, body, err := request.Get("/notfound", request.JSON)
|
||||
httpResp, body, err := request.Get(testutil.GetContext(c), "/notfound", request.JSON)
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, httpResp.StatusCode, http.StatusNotFound)
|
||||
assert.Assert(c, strings.Contains(httpResp.Header.Get("Content-Type"), "application/json"))
|
||||
|
@ -112,7 +114,7 @@ func (s *DockerAPISuite) TestAPIErrorNotFoundJSON(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerAPISuite) TestAPIErrorNotFoundPlainText(c *testing.T) {
|
||||
httpResp, body, err := request.Get("/v1.23/notfound", request.JSON)
|
||||
httpResp, body, err := request.Get(testutil.GetContext(c), "/v1.23/notfound", request.JSON)
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, httpResp.StatusCode, http.StatusNotFound)
|
||||
assert.Assert(c, strings.Contains(httpResp.Header.Get("Content-Type"), "text/plain"))
|
||||
|
|
|
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
|
@ -22,8 +23,8 @@ type DockerCLIAttachSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIAttachSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIAttachSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIAttachSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -3,6 +3,7 @@ package main
|
|||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
@ -34,8 +35,8 @@ type DockerCLIBuildSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIBuildSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIBuildSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIBuildSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -14,8 +15,8 @@ type DockerCLICommitSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLICommitSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLICommitSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLICommitSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
@ -30,8 +31,8 @@ type DockerCLICpSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLICpSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLICpSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLICpSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
@ -21,8 +22,8 @@ type DockerCLICreateSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLICreateSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLICreateSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLICreateSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/libnetwork/iptables"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/testutil"
|
||||
testdaemon "github.com/docker/docker/testutil/daemon"
|
||||
"github.com/moby/sys/mount"
|
||||
"golang.org/x/sys/unix"
|
||||
|
@ -54,7 +55,7 @@ func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
cli.Docker(
|
||||
cli.Args("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"),
|
||||
|
@ -88,7 +89,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *testi
|
|||
}
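s.d.StartWithBusybox now takes the test context first. A rough sketch of the likely motivation follows, with a stand-in Daemon type rather than the real testutil/daemon one: the daemon start plus the busybox image load can run under a child span of the test, so slow starts show up in the trace.

package example

import (
	"context"
	"testing"

	"go.opentelemetry.io/otel"
)

// Daemon is a stand-in for the test daemon wrapper; the methods below are
// placeholders, not the real implementations.
type Daemon struct{}

func (d *Daemon) start(t *testing.T, args ...string) { /* start dockerd for the test */ }

func (d *Daemon) loadBusybox(ctx context.Context, t *testing.T) { /* load frozen busybox with ctx */ }

// StartWithBusybox wraps the start sequence in a child span of the test's
// context so daemon startup time is visible in the trace.
func (d *Daemon) StartWithBusybox(ctx context.Context, t *testing.T, args ...string) {
	ctx, span := otel.Tracer("").Start(ctx, "daemon.StartWithBusybox")
	defer span.End()

	d.start(t, args...)
	d.loadBusybox(ctx, t)
}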
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil {
|
||||
c.Fatal(err, out)
|
||||
|
@ -111,7 +112,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *testing.T) {
|
|||
|
||||
// #11008
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top")
|
||||
assert.NilError(c, err, "run top1: %v", out)
|
||||
|
@ -169,7 +170,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false")
|
||||
assert.NilError(c, err, "run top1: %v", out)
|
||||
|
@ -221,7 +222,7 @@ func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *testing
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
|
||||
c.Fatalf("Could not run top: %s, %v", out, err)
|
||||
|
@ -239,7 +240,7 @@ func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil {
|
||||
c.Fatalf("Could not run top: %s, %v", out, err)
|
||||
|
@ -288,7 +289,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *testing.T) {
|
|||
setupV6(c)
|
||||
defer teardownV6(c)
|
||||
|
||||
s.d.StartWithBusybox(c, "--ipv6")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6")
|
||||
|
||||
iface, err := net.InterfaceByName("docker0")
|
||||
if err != nil {
|
||||
|
@ -348,7 +349,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *testing.T) {
|
|||
// ipv6 enabled
|
||||
deleteInterface(c, "docker0")
|
||||
|
||||
s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100")
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name=ipv6test", "busybox:latest", "top")
|
||||
assert.NilError(c, err, "Could not run container: %s, %v", out, err)
|
||||
|
@ -375,7 +376,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *testing.T) {
|
|||
// ipv6 enabled
|
||||
deleteInterface(c, "docker0")
|
||||
|
||||
s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:1::/64")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6", "--fixed-cidr-v6=2001:db8:1::/64")
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -391,7 +392,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6HostMode(c *testing.T) {
|
|||
testRequires(c, testEnv.IsLocalDaemon)
|
||||
deleteInterface(c, "docker0")
|
||||
|
||||
s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64")
|
||||
out, err := s.d.Cmd("run", "-d", "--name=hostcnt", "--network=host", "busybox:latest", "top")
|
||||
assert.NilError(c, err, "Could not run container: %s, %v", out, err)
|
||||
|
||||
|
@ -467,7 +468,7 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *testing.T) {
|
|||
cmdArgs = append(cmdArgs, "--tls=false", "--host", "tcp://"+net.JoinHostPort(l.daemon, l.port))
|
||||
}
|
||||
|
||||
s.d.StartWithBusybox(c, cmdArgs...)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, cmdArgs...)
|
||||
|
||||
for _, l := range listeningPorts {
|
||||
output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", l.client, l.port), "busybox", "true")
|
||||
|
@ -514,7 +515,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *testing.T) {
|
|||
createInterface(c, "bridge", bridgeName, bridgeIP)
|
||||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
d.StartWithBusybox(c, "--bridge", bridgeName)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName)
|
||||
|
||||
ipTablesSearchString := bridgeIPNet.String()
|
||||
icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{
|
||||
|
@ -532,7 +533,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *testing.T) {
|
|||
func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *testing.T) {
|
||||
// start with bridge none
|
||||
d := s.d
|
||||
d.StartWithBusybox(c, "--bridge", "none")
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", "none")
|
||||
defer d.Restart(c)
|
||||
|
||||
// verify docker0 iface is not there
|
||||
|
@ -577,7 +578,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *testing.T) {
|
|||
bridgeIP := "192.169.1.1/24"
|
||||
ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
|
||||
|
||||
d.StartWithBusybox(c, "--bip", bridgeIP)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIP)
|
||||
defer d.Restart(c)
|
||||
|
||||
ifconfigSearchString := ip.String()
|
||||
|
@ -633,7 +634,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *testing.T) {
|
|||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"}
|
||||
d.StartWithBusybox(c, args...)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, args...)
|
||||
defer d.Restart(c)
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
|
@ -658,7 +659,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *testing.T) {
|
|||
createInterface(c, "bridge", bridgeName, bridgeIP)
|
||||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
d.StartWithBusybox(c, "--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24")
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24")
|
||||
defer s.d.Restart(c)
|
||||
|
||||
out, err := d.Cmd("run", "-d", "--name", "bb", "busybox", "top")
|
||||
|
@ -687,7 +688,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *testi
|
|||
createInterface(c, "bridge", bridgeName, bridgeIP)
|
||||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
d.StartWithBusybox(c, "--bridge", bridgeName, "--fixed-cidr", bridgeIP)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--fixed-cidr", bridgeIP)
|
||||
defer s.d.Restart(c)
|
||||
|
||||
out, err := d.Cmd("run", "-d", "busybox", "top")
|
||||
|
@ -705,7 +706,7 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *testing.T) {
|
|||
bridgeIP := "192.169.1.1"
|
||||
bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)
|
||||
|
||||
d.StartWithBusybox(c, "--bip", bridgeIPNet)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIPNet)
|
||||
defer d.Restart(c)
|
||||
|
||||
expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP)
|
||||
|
@ -725,7 +726,7 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *testing.T) {
|
|||
bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)
|
||||
gatewayIP := "192.169.1.254"
|
||||
|
||||
d.StartWithBusybox(c, "--bip", bridgeIPNet, "--default-gateway", gatewayIP)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIPNet, "--default-gateway", gatewayIP)
|
||||
defer d.Restart(c)
|
||||
|
||||
expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP)
|
||||
|
@ -740,7 +741,7 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainer
|
|||
deleteInterface(c, defaultNetworkBridge)
|
||||
|
||||
// Program a custom default gateway outside of the container subnet, daemon should accept it and start
|
||||
s.d.StartWithBusybox(c, "--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254")
|
||||
|
||||
deleteInterface(c, defaultNetworkBridge)
|
||||
s.d.Restart(c)
|
||||
|
@ -756,7 +757,7 @@ func (s *DockerDaemonSuite) TestDaemonIP(c *testing.T) {
|
|||
ipStr := "192.170.1.1/24"
|
||||
ip, _, _ := net.ParseCIDR(ipStr)
|
||||
args := []string{"--ip", ip.String()}
|
||||
d.StartWithBusybox(c, args...)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, args...)
|
||||
defer d.Restart(c)
|
||||
|
||||
out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
|
||||
|
@ -791,7 +792,7 @@ func (s *DockerDaemonSuite) TestDaemonICCPing(c *testing.T) {
|
|||
createInterface(c, "bridge", bridgeName, bridgeIP)
|
||||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false")
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--icc=false")
|
||||
defer d.Restart(c)
|
||||
|
||||
result := icmd.RunCommand("iptables", "-nvL", "FORWARD")
|
||||
|
@ -829,7 +830,7 @@ func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *testing.T) {
|
|||
createInterface(c, "bridge", bridgeName, bridgeIP)
|
||||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false")
|
||||
d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--icc=false")
|
||||
defer d.Restart(c)
|
||||
|
||||
result := icmd.RunCommand("iptables", "-nvL", "FORWARD")
|
||||
|
@ -855,7 +856,7 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *tes
|
|||
createInterface(c, "bridge", bridgeName, bridgeIP)
|
||||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
s.d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--icc=false")
|
||||
defer s.d.Restart(c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top")
|
||||
|
@ -883,7 +884,7 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *tes
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024")
|
||||
|
||||
out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -u)")
|
||||
if err != nil {
|
||||
|
@ -929,7 +930,7 @@ func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *testing.T) {
|
|||
|
||||
// #11315
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil {
|
||||
c.Fatal(err, out)
|
||||
|
@ -947,7 +948,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -985,7 +986,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline")
|
||||
if err != nil {
|
||||
|
@ -1002,7 +1003,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *testing.T)
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--log-driver=none")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=none")
|
||||
|
||||
out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline")
|
||||
if err != nil {
|
||||
|
@ -1019,7 +1020,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--log-driver=none")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=none")
|
||||
|
||||
out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline")
|
||||
if err != nil {
|
||||
|
@ -1059,7 +1060,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--log-driver=none")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=none")
|
||||
|
||||
out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1071,7 +1072,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonLoggingDriverShouldBeIgnoredForBuild(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--log-driver=splunk")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=splunk")
|
||||
|
||||
result := cli.BuildCmd(c, "busyboxs", cli.Daemon(s.d),
|
||||
build.WithDockerfile(`
|
||||
|
@ -1107,7 +1108,7 @@ func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat")
|
||||
if err != nil {
|
||||
|
@ -1173,7 +1174,7 @@ func (s *DockerDaemonSuite) TestHTTPSRun(c *testing.T) {
|
|||
testDaemonHTTPSAddr = "tcp://localhost:4271"
|
||||
)
|
||||
|
||||
s.d.StartWithBusybox(c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
|
||||
"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr)
|
||||
|
||||
args := []string{
|
||||
|
@ -1283,7 +1284,7 @@ func pingContainers(c *testing.T, d *daemon.Daemon, expectFailure bool) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
socket := filepath.Join(s.d.Folder, "docker.sock")
|
||||
|
||||
|
@ -1296,7 +1297,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *testing.T) {
|
|||
// A subsequent daemon restart should clean up said mounts.
|
||||
func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *testing.T) {
|
||||
d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
d.StartWithBusybox(c)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := d.Cmd("run", "-d", "busybox", "top")
|
||||
assert.NilError(c, err, "Output: %s", out)
|
||||
|
@ -1334,7 +1335,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *test
|
|||
// os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts.
|
||||
func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *testing.T) {
|
||||
d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
d.StartWithBusybox(c)
|
||||
d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := d.Cmd("run", "-d", "busybox", "top")
|
||||
assert.NilError(c, err, "Output: %s", out)
|
||||
|
@ -1352,7 +1353,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *testing.T)
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *testing.T) {
|
||||
s.d.StartWithBusybox(t)
|
||||
s.d.StartWithBusybox(testutil.GetContext(t), t)
|
||||
if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil {
|
||||
t.Fatal(out, err)
|
||||
}
|
||||
|
@ -1365,7 +1366,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *testing.T)
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top")
|
||||
if err != nil {
|
||||
c.Fatal(out, err)
|
||||
|
@ -1426,7 +1427,7 @@ func teardownV6(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1449,7 +1450,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlway
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--log-opt=max-size=1k")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-opt=max-size=1k")
|
||||
name := "logtest"
|
||||
out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top")
|
||||
assert.NilError(c, err, "Output: %s, err: %v", out, err)
|
||||
|
@ -1465,7 +1466,7 @@ func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil {
|
||||
c.Fatal(err, out)
|
||||
}
|
||||
|
@ -1500,7 +1501,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1594,7 +1595,7 @@ func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *testing.T
|
|||
bridgeIP := "192.169.1.1"
|
||||
bridgeRange := bridgeIP + "/30"
|
||||
|
||||
s.d.StartWithBusybox(c, "--bip", bridgeRange)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeRange)
|
||||
defer s.d.Restart(c)
|
||||
|
||||
var cont int
|
||||
|
@ -1642,7 +1643,7 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *testing.T) {
|
|||
|
||||
// Test daemon restart with container links + auto restart
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
var parent1Args []string
|
||||
var parent2Args []string
|
||||
|
@ -1705,7 +1706,7 @@ func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *testing.T) {
|
|||
cgroupParent := "test"
|
||||
name := "cgroup-test"
|
||||
|
||||
s.d.StartWithBusybox(c, "--cgroup-parent", cgroupParent)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--cgroup-parent", cgroupParent)
|
||||
defer s.d.Restart(c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup")
|
||||
|
@ -1728,7 +1729,7 @@ func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *testing.T) {
|
|||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux) // Windows does not support links
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1751,7 +1752,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *testing.T) {
|
|||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux) // Windows does not support links
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("create", "--name=test", "busybox")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1799,7 +1800,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *testing.T) {
|
|||
// TestDaemonRestartWithKilledRunningContainer requires live restore of running containers
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *testing.T) {
|
||||
testRequires(t, DaemonIsLinux)
|
||||
s.d.StartWithBusybox(t)
|
||||
s.d.StartWithBusybox(testutil.GetContext(t), t)
|
||||
|
||||
cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top")
|
||||
defer s.d.Stop(t)
|
||||
|
@ -1848,7 +1849,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *testi
|
|||
// them now, should remove the mounts.
|
||||
func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
s.d.StartWithBusybox(c, "--live-restore")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore")
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "busybox", "top")
|
||||
assert.NilError(c, err, "Output: %s", out)
|
||||
|
@ -1895,7 +1896,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *testing.T) {
|
|||
// TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers.
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *testing.T) {
|
||||
testRequires(t, DaemonIsLinux)
|
||||
s.d.StartWithBusybox(t, "--live-restore")
|
||||
s.d.StartWithBusybox(testutil.GetContext(t), t, "--live-restore")
|
||||
|
||||
cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top")
|
||||
defer s.d.Stop(t)
|
||||
|
@ -1952,7 +1953,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *tes
|
|||
// this ensures that the old, pre gh#16032 functionality continues on
|
||||
func (s *DockerDaemonSuite) TestRunLinksChanged(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux) // Windows does not support links
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -2045,7 +2046,7 @@ func (s *DockerDaemonSuite) TestDaemonDebugLog(c *testing.T) {
|
|||
|
||||
// Test for #21956
|
||||
func (s *DockerDaemonSuite) TestDaemonLogOptions(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514")
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -2165,7 +2166,7 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *test
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "-b=none", "--iptables=false")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "-b=none", "--iptables=false")
|
||||
|
||||
result := cli.BuildCmd(c, "busyboxs", cli.Daemon(s.d),
|
||||
build.WithDockerfile(`
|
||||
|
@ -2182,7 +2183,7 @@ func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *testing.T)
|
|||
func (s *DockerDaemonSuite) TestDaemonDNSFlagsInHostMode(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, DaemonIsLinux)
|
||||
|
||||
s.d.StartWithBusybox(c, "--dns", "1.2.3.4", "--dns-search", "example.com", "--dns-opt", "timeout:3")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--dns", "1.2.3.4", "--dns-search", "example.com", "--dns-opt", "timeout:3")
|
||||
|
||||
expectedOutput := "nameserver 1.2.3.4"
|
||||
out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf")
|
||||
|
@ -2216,7 +2217,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *testing.T) {
|
|||
}
|
||||
`
|
||||
os.WriteFile(configName, []byte(config), 0o644)
|
||||
s.d.StartWithBusybox(c, "--config-file", configName)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--config-file", configName)
|
||||
|
||||
// Run with default runtime
|
||||
out, err := s.d.Cmd("run", "--rm", "busybox", "ls")
|
||||
|
@ -2307,7 +2308,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
|
||||
// Run with default runtime
|
||||
out, err := s.d.Cmd("run", "--rm", "busybox", "ls")
|
||||
|
@ -2327,7 +2328,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
|
|||
assert.Assert(c, is.Contains(out, "/usr/local/bin/vm-manager: no such file or directory"))
|
||||
// Start a daemon without any extra runtimes
|
||||
s.d.Stop(c)
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
// Run with default runtime
|
||||
out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
|
||||
|
@ -2350,7 +2351,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
|
|||
assert.Assert(c, is.Contains(string(content), `runtime name 'runc' is reserved`))
|
||||
// Check that we can select a default runtime
|
||||
s.d.Stop(c)
|
||||
s.d.StartWithBusybox(c, "--default-runtime=vm", "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--default-runtime=vm", "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
|
||||
out, err = s.d.Cmd("run", "--rm", "busybox", "ls")
|
||||
assert.ErrorContains(c, err, "", out)
|
||||
|
@ -2361,7 +2362,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
// top1 will exist after daemon restarts
|
||||
out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top")
|
||||
|
@ -2384,7 +2385,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *testing.
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c)
|
||||
|
||||
containerName := "error-values"
|
||||
// Make a container with both a non 0 exit code and an error message
|
||||
|
@ -2436,7 +2437,7 @@ func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *testing.T) {
|
|||
assert.NilError(c, cmd.Run())
|
||||
|
||||
// custom one
|
||||
s.d.StartWithBusybox(c, "--userland-proxy-path", newProxyPath)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--userland-proxy-path", newProxyPath)
|
||||
out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true")
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
|
@ -2456,7 +2457,7 @@ func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *testing.T) {
|
|||
// Test case for #22471
|
||||
func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon)
|
||||
s.d.StartWithBusybox(c, "--shutdown-timeout=3")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--shutdown-timeout=3")
|
||||
|
||||
_, err := s.d.Cmd("run", "-d", "busybox", "top")
|
||||
assert.NilError(c, err)
|
||||
|
@ -2511,7 +2512,7 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *testing.T
|
|||
// Test case for 29342
|
||||
func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
s.d.StartWithBusybox(c, "--live-restore")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore")
|
||||
|
||||
out, err := s.d.Cmd("run", "--init", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && touch /adduser_end && exec top")
|
||||
assert.NilError(c, err, "Output: %s", out)
|
||||
|
@ -2539,7 +2540,7 @@ func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *testing.T) {
|
|||
|
||||
func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, overlayFSSupported, testEnv.IsLocalDaemon)
|
||||
s.d.StartWithBusybox(c, "--live-restore", "--storage-driver", "overlay2")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore", "--storage-driver", "overlay2")
|
||||
out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top")
|
||||
assert.NilError(c, err, "Output: %s", out)
|
||||
|
||||
|
@ -2572,7 +2573,7 @@ func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *testing.T) {
|
|||
// #29598
|
||||
func (s *DockerDaemonSuite) TestRestartPolicyWithLiveRestore(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
s.d.StartWithBusybox(c, "--live-restore")
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore")
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top")
|
||||
assert.NilError(c, err, "Output: %s", out)
|
||||
|
@ -2633,7 +2634,7 @@ func (s *DockerDaemonSuite) TestShmSize(c *testing.T) {
|
|||
size := 67108864 * 2
|
||||
pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024))
|
||||
|
||||
s.d.StartWithBusybox(c, "--default-shm-size", fmt.Sprintf("%v", size))
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--default-shm-size", fmt.Sprintf("%v", size))
|
||||
|
||||
name := "shm1"
|
||||
out, err := s.d.Cmd("run", "--name", name, "busybox", "mount")
|
||||
|
@ -2657,7 +2658,7 @@ func (s *DockerDaemonSuite) TestShmSizeReload(c *testing.T) {
|
|||
assert.Assert(c, os.WriteFile(configFile, configData, 0o666) == nil, "could not write temp file for config reload")
|
||||
pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024))
|
||||
|
||||
s.d.StartWithBusybox(c, "--config-file", configFile)
|
||||
s.d.StartWithBusybox(testutil.GetContext(c), c, "--config-file", configFile)
|
||||
|
||||
name := "shm1"
|
||||
out, err := s.d.Cmd("run", "--name", name, "busybox", "mount")
|
||||
|
@ -2749,7 +2750,7 @@ func (s *DockerDaemonSuite) TestFailedPluginRemove(c *testing.T) {
|
|||
d.Start(c)
|
||||
cli := d.NewClientT(c)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
|
||||
ctx, cancel := context.WithTimeout(testutil.GetContext(c), 300*time.Second)
|
||||
defer cancel()
|
||||
|
||||
name := "test-plugin-rm-fail"
|
||||
|
@ -2762,7 +2763,7 @@ func (s *DockerDaemonSuite) TestFailedPluginRemove(c *testing.T) {
|
|||
defer out.Close()
|
||||
io.Copy(io.Discard, out)
|
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
|
||||
ctx, cancel = context.WithTimeout(testutil.GetContext(c), 30*time.Second)
|
||||
defer cancel()
|
||||
p, _, err := cli.PluginInspectWithRaw(ctx, name)
|
||||
assert.NilError(c, err)
|
||||
|
@ -2772,7 +2773,7 @@ func (s *DockerDaemonSuite) TestFailedPluginRemove(c *testing.T) {
|
|||
assert.NilError(c, os.Remove(configPath))
|
||||
|
||||
d.Restart(c)
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
|
||||
ctx, cancel = context.WithTimeout(testutil.GetContext(c), 30*time.Second)
|
||||
defer cancel()
|
||||
_, err = cli.Ping(ctx)
|
||||
assert.NilError(c, err)
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
eventstestutils "github.com/docker/docker/daemon/events/testutils"
|
||||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/icmd"
|
||||
|
@ -28,8 +29,8 @@ type DockerCLIEventSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIEventSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIEventSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIEventSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -462,7 +463,7 @@ func (s *DockerCLIEventSuite) TestEventsResize(c *testing.T) {
|
|||
Height: 80,
|
||||
Width: 24,
|
||||
}
|
||||
err = apiClient.ContainerResize(context.Background(), cID, options)
|
||||
err = apiClient.ContainerResize(testutil.GetContext(c), cID, options)
|
||||
assert.NilError(c, err)
|
||||
|
||||
dockerCmd(c, "stop", cID)
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/icmd"
|
||||
|
@ -26,8 +27,8 @@ type DockerCLIExecSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIExecSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIExecSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIExecSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -90,8 +91,9 @@ func (s *DockerCLIExecSuite) TestExecAfterContainerRestart(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
// TODO Windows CI: DockerDaemonSuite doesn't run on Windows, and requires a little work to get this ported.
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top")
|
||||
assert.NilError(c, err, "Could not run top: %s", out)
|
||||
|
@ -363,7 +365,7 @@ func (s *DockerCLIExecSuite) TestExecInspectID(c *testing.T) {
|
|||
assert.NilError(c, err)
|
||||
defer apiClient.Close()
|
||||
|
||||
_, err = apiClient.ContainerExecInspect(context.Background(), execID)
|
||||
_, err = apiClient.ContainerExecInspect(testutil.GetContext(c), execID)
|
||||
assert.NilError(c, err)
|
||||
|
||||
// Now delete the container and then an 'inspect' on the exec should
|
||||
|
@ -371,7 +373,7 @@ func (s *DockerCLIExecSuite) TestExecInspectID(c *testing.T) {
|
|||
out, ec := dockerCmd(c, "rm", "-f", id)
|
||||
assert.Equal(c, ec, 0, "error removing container: %s", out)
|
||||
|
||||
_, err = apiClient.ContainerExecInspect(context.Background(), execID)
|
||||
_, err = apiClient.ContainerExecInspect(testutil.GetContext(c), execID)
|
||||
assert.ErrorContains(c, err, "No such exec instance")
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -18,6 +19,7 @@ import (
|
|||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/pkg/plugins"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/testutil"
|
||||
testdaemon "github.com/docker/docker/testutil/daemon"
|
||||
"github.com/docker/docker/volume"
|
||||
"gotest.tools/v3/assert"
|
||||
|
@ -43,20 +45,20 @@ type DockerExternalVolumeSuite struct {
|
|||
*volumePlugin
|
||||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) SetUpTest(c *testing.T) {
|
||||
func (s *DockerExternalVolumeSuite) SetUpTest(ctx context.Context, c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon)
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
s.ec = &eventCounter{}
|
||||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TearDownTest(c *testing.T) {
|
||||
func (s *DockerExternalVolumeSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
if s.d != nil {
|
||||
s.d.Stop(c)
|
||||
s.ds.TearDownTest(c)
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) SetUpSuite(c *testing.T) {
|
||||
func (s *DockerExternalVolumeSuite) SetUpSuite(ctx context.Context, c *testing.T) {
|
||||
s.volumePlugin = newVolumePlugin(c, volumePluginName)
|
||||
}
|
||||
|
||||
|
@ -267,7 +269,7 @@ func newVolumePlugin(c *testing.T, name string) *volumePlugin {
|
|||
return s
|
||||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TearDownSuite(c *testing.T) {
|
||||
func (s *DockerExternalVolumeSuite) TearDownSuite(ctx context.Context, c *testing.T) {
|
||||
s.volumePlugin.Close()
|
||||
|
||||
err := os.RemoveAll("/etc/docker/plugins")
|
||||
|
@ -286,7 +288,8 @@ func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *testing
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -307,7 +310,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *testing.T)
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -320,7 +324,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *testing.T
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -339,7 +344,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *testi
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -396,7 +402,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
driverName := "test-external-volume-driver-retry"
|
||||
|
||||
errchan := make(chan error, 1)
|
||||
|
@ -522,7 +529,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *testing
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -545,11 +553,12 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *test
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
driverName := stringid.GenerateRandomID()
|
||||
p := newVolumePlugin(c, driverName)
|
||||
defer p.Close()
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -593,7 +602,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *t
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount")
|
||||
|
||||
out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true")
|
||||
|
@ -603,7 +613,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c
|
|||
}
|
||||
|
||||
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnCp(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name=test")
|
||||
|
||||
out, _ := s.d.Cmd("run", "-d", "--name=test", "-v", "test:/foo", "busybox", "/bin/sh", "-c", "touch /test && top")
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -16,8 +17,8 @@ type DockerCLIHealthSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIHealthSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIHealthSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIHealthSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
@ -16,8 +17,8 @@ type DockerCLIHistorySuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIHistorySuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIHistorySuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIHistorySuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -21,8 +22,8 @@ type DockerCLIImagesSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIImagesSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIImagesSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIImagesSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -3,6 +3,7 @@ package main
|
|||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
|
@ -18,8 +19,8 @@ type DockerCLIImportSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIImportSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIImportSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIImportSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
@ -13,8 +14,8 @@ type DockerCLIInfoSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIInfoSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIInfoSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIInfoSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -3,11 +3,11 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
@ -21,7 +21,7 @@ func (s *DockerCLIInfoSuite) TestInfoSecurityOptions(c *testing.T) {
|
|||
apiClient, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
defer apiClient.Close()
|
||||
info, err := apiClient.Info(context.Background())
|
||||
info, err := apiClient.Info(testutil.GetContext(c))
|
||||
assert.NilError(c, err)
|
||||
|
||||
if Apparmor() {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
@ -19,8 +20,8 @@ type DockerCLIInspectSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIInspectSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIInspectSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIInspectSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
@ -17,8 +18,8 @@ type DockerCLILinksSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLILinksSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLILinksSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLILinksSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -13,8 +14,8 @@ type DockerCLILoginSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLILoginSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLILoginSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLILoginSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -9,11 +9,13 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
workingDir, err := os.Getwd()
|
||||
assert.NilError(c, err)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
|
@ -11,6 +12,9 @@ import (
|
|||
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/testutil"
|
||||
testdaemon "github.com/docker/docker/testutil/daemon"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
)
|
||||
|
@ -19,8 +23,8 @@ type DockerCLILogsSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLILogsSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLILogsSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLILogsSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -282,24 +286,39 @@ func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
|
|||
}
|
||||
|
||||
func (s *DockerCLILogsSuite) TestLogsFollowGoroutinesWithStdout(c *testing.T) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done")
|
||||
id := strings.TrimSpace(out)
|
||||
assert.NilError(c, waitRun(id))
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
c.Parallel()
|
||||
|
||||
nroutines, err := getGoroutineNumber()
|
||||
ctx := testutil.GetContext(c)
|
||||
d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
|
||||
defer func() {
|
||||
d.Stop(c)
|
||||
d.Cleanup(c)
|
||||
}()
|
||||
d.StartWithBusybox(ctx, c, "--iptables=false")
|
||||
|
||||
out, err := d.Cmd("run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done")
|
||||
assert.NilError(c, err)
|
||||
cmd := exec.Command(dockerBinary, "logs", "-f", id)
|
||||
|
||||
id := strings.TrimSpace(out)
|
||||
assert.NilError(c, d.WaitRun(id))
|
||||
|
||||
client := d.NewClientT(c)
|
||||
nroutines := waitForStableGourtineCount(ctx, c, client)
|
||||
|
||||
cmd := d.Command("logs", "-f", id)
|
||||
r, w := io.Pipe()
|
||||
defer r.Close()
|
||||
defer w.Close()
|
||||
|
||||
cmd.Stdout = w
|
||||
assert.NilError(c, cmd.Start())
|
||||
defer cmd.Process.Kill()
|
||||
res := icmd.StartCmd(cmd)
|
||||
assert.NilError(c, res.Error)
|
||||
defer res.Cmd.Process.Kill()
|
||||
|
||||
finished := make(chan error)
|
||||
go func() {
|
||||
finished <- cmd.Wait()
|
||||
finished <- res.Cmd.Wait()
|
||||
}()
|
||||
|
||||
// Make sure pipe is written to
|
||||
|
@ -314,35 +333,52 @@ func (s *DockerCLILogsSuite) TestLogsFollowGoroutinesWithStdout(c *testing.T) {
|
|||
// Check read from pipe succeeded
|
||||
assert.NilError(c, <-chErr)
|
||||
|
||||
assert.NilError(c, cmd.Process.Kill())
|
||||
assert.NilError(c, res.Cmd.Process.Kill())
|
||||
<-finished
|
||||
|
||||
// NGoroutines is not updated right away, so we need to wait before failing
|
||||
assert.NilError(c, waitForGoroutines(nroutines))
|
||||
waitForGoroutines(ctx, c, client, nroutines)
|
||||
}
|
||||
|
||||
func (s *DockerCLILogsSuite) TestLogsFollowGoroutinesNoOutput(c *testing.T) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done")
|
||||
id := strings.TrimSpace(out)
|
||||
assert.NilError(c, waitRun(id))
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
c.Parallel()
|
||||
|
||||
nroutines, err := getGoroutineNumber()
|
||||
d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
|
||||
defer func() {
|
||||
d.Stop(c)
|
||||
d.Cleanup(c)
|
||||
}()
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
d.StartWithBusybox(ctx, c, "--iptables=false")
|
||||
|
||||
out, err := d.Cmd("run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done")
|
||||
assert.NilError(c, err)
|
||||
cmd := exec.Command(dockerBinary, "logs", "-f", id)
|
||||
assert.NilError(c, cmd.Start())
|
||||
id := strings.TrimSpace(out)
|
||||
assert.NilError(c, d.WaitRun(id))
|
||||
|
||||
client := d.NewClientT(c)
|
||||
nroutines := waitForStableGourtineCount(ctx, c, client)
|
||||
assert.NilError(c, err)
|
||||
|
||||
cmd := d.Command("logs", "-f", id)
|
||||
res := icmd.StartCmd(cmd)
|
||||
assert.NilError(c, res.Error)
|
||||
|
||||
finished := make(chan error)
|
||||
go func() {
|
||||
finished <- cmd.Wait()
|
||||
finished <- res.Cmd.Wait()
|
||||
}()
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
assert.NilError(c, cmd.Process.Kill())
|
||||
assert.NilError(c, res.Cmd.Process.Kill())
|
||||
|
||||
<-finished
|
||||
|
||||
// NGoroutines is not updated right away, so we need to wait before failing
|
||||
assert.NilError(c, waitForGoroutines(nroutines))
|
||||
waitForGoroutines(ctx, c, client, nroutines)
|
||||
}
|
||||
|
||||
func (s *DockerCLILogsSuite) TestLogsCLIContainerNotFound(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -18,8 +19,8 @@ type DockerCLINetmodeSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLINetmodeSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLINetmodeSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLINetmodeSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
|
@ -11,8 +12,8 @@ type DockerCLINetworkSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLINetworkSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLINetworkSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLINetworkSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
|
@ -25,6 +26,7 @@ import (
|
|||
"github.com/docker/docker/pkg/plugins"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/testutil"
|
||||
testdaemon "github.com/docker/docker/testutil/daemon"
|
||||
"github.com/vishvananda/netlink"
|
||||
"golang.org/x/sys/unix"
|
||||
|
@ -39,18 +41,18 @@ const (
|
|||
|
||||
var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest
|
||||
|
||||
func (s *DockerNetworkSuite) SetUpTest(c *testing.T) {
|
||||
func (s *DockerNetworkSuite) SetUpTest(ctx context.Context, c *testing.T) {
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
}
|
||||
|
||||
func (s *DockerNetworkSuite) TearDownTest(c *testing.T) {
|
||||
func (s *DockerNetworkSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
if s.d != nil {
|
||||
s.d.Stop(c)
|
||||
s.ds.TearDownTest(c)
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerNetworkSuite) SetUpSuite(c *testing.T) {
|
||||
func (s *DockerNetworkSuite) SetUpSuite(ctx context.Context, c *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
s.server = httptest.NewServer(mux)
|
||||
assert.Assert(c, s.server != nil, "Failed to start an HTTP Server")
|
||||
|
@ -210,7 +212,7 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
|
|||
assert.NilError(c, err)
|
||||
}
|
||||
|
||||
func (s *DockerNetworkSuite) TearDownSuite(c *testing.T) {
|
||||
func (s *DockerNetworkSuite) TearDownSuite(ctx context.Context, c *testing.T) {
|
||||
if s.server == nil {
|
||||
return
|
||||
}
|
||||
|
@ -306,7 +308,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *testing.T) {
|
||||
testRequires(c, OnlyDefaultNetworks)
|
||||
testRequires(c, func() bool { return OnlyDefaultNetworks(testutil.GetContext(c)) })
|
||||
|
||||
testNet := "testnet1"
|
||||
testLabel := "foo"
|
||||
testValue := "bar"
|
||||
|
@ -786,6 +789,8 @@ func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
// On default bridge network built-in service discovery should not happen
|
||||
hostsFile := "/etc/hosts"
|
||||
bridgeName := "external-bridge"
|
||||
|
@ -793,7 +798,7 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *
|
|||
createInterface(c, "bridge", bridgeName, bridgeIP)
|
||||
defer deleteInterface(c, bridgeName)
|
||||
|
||||
s.d.StartWithBusybox(c, "--bridge", bridgeName)
|
||||
s.d.StartWithBusybox(ctx, c, "--bridge", bridgeName)
|
||||
defer s.d.Restart(c)
|
||||
|
||||
// run two containers and store first container's etc/hosts content
|
||||
|
@ -944,6 +949,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *testing.T) {
|
|||
|
||||
func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
|
||||
|
||||
ctx := testutil.GetContext(c)
|
||||
dnd := "dnd"
|
||||
did := "did"
|
||||
|
||||
|
@ -951,7 +958,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *testing
|
|||
server := httptest.NewServer(mux)
|
||||
setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did)
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
_, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1")
|
||||
assert.NilError(c, err)
|
||||
|
||||
|
@ -1051,10 +1058,11 @@ func verifyContainerIsConnectedToNetworks(c *testing.T, d *daemon.Daemon, cName
|
|||
|
||||
func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon)
|
||||
ctx := testutil.GetContext(c)
|
||||
cName := "bb"
|
||||
nwList := []string{"nw1", "nw2", "nw3"}
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
connectContainerToNetworks(c, s.d, cName, nwList)
|
||||
verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList)
|
||||
|
@ -1070,10 +1078,11 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRest
|
|||
|
||||
func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon)
|
||||
ctx := testutil.GetContext(c)
|
||||
cName := "cc"
|
||||
nwList := []string{"nw1", "nw2", "nw3"}
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
connectContainerToNetworks(c, s.d, cName, nwList)
|
||||
verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList)
|
||||
|
@ -1097,7 +1106,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *testing.T) {
|
|||
|
||||
func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
// Run a few containers on host network
|
||||
for i := 0; i < 10; i++ {
|
||||
|
@ -1620,7 +1630,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *t
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *testing.T) {
|
||||
s.d.StartWithBusybox(t, "--live-restore")
|
||||
ctx := testutil.GetContext(t)
|
||||
s.d.StartWithBusybox(ctx, t, "--live-restore")
|
||||
defer s.d.Stop(t)
|
||||
oldCon := "old"
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
|
@ -13,8 +14,8 @@ type DockerCLIPluginLogDriverSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIPluginLogDriverSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIPluginLogDriverSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPluginLogDriverSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -51,7 +52,7 @@ func (s *DockerCLIPluginLogDriverSuite) TestPluginLogDriverInfoList(c *testing.T
|
|||
assert.NilError(c, err)
|
||||
defer apiClient.Close()
|
||||
|
||||
info, err := apiClient.Info(context.Background())
|
||||
info, err := apiClient.Info(testutil.GetContext(c))
|
||||
assert.NilError(c, err)
|
||||
|
||||
drivers := strings.Join(info.Plugins.Log, " ")
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/docker/testutil/fixtures/plugin"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
|
@ -34,8 +35,8 @@ type DockerCLIPluginsSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIPluginsSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIPluginsSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPluginsSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -162,7 +163,7 @@ func (ps *DockerPluginSuite) TestPluginSet(c *testing.T) {
|
|||
client := testEnv.APIClient()
|
||||
|
||||
name := "test"
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
initialValue := "0"
|
||||
|
@ -207,7 +208,7 @@ func (ps *DockerPluginSuite) TestPluginSet(c *testing.T) {
|
|||
|
||||
func (ps *DockerPluginSuite) TestPluginInstallArgs(c *testing.T) {
|
||||
pName := path.Join(ps.registryHost(), "plugin", "testplugininstallwithargs")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
plugin.CreateInRegistry(ctx, pName, nil, func(cfg *plugin.Config) {
|
||||
|
@ -345,7 +346,7 @@ func (ps *DockerPluginSuite) TestPluginIDPrefix(c *testing.T) {
|
|||
name := "test"
|
||||
client := testEnv.APIClient()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
|
||||
initialValue := "0"
|
||||
err := plugin.Create(ctx, client, name, func(cfg *plugin.Config) {
|
||||
cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}}
|
||||
|
@ -406,7 +407,7 @@ func (ps *DockerPluginSuite) TestPluginListDefaultFormat(c *testing.T) {
|
|||
name := "test:latest"
|
||||
client := testEnv.APIClient()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
|
||||
defer cancel()
|
||||
err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) {
|
||||
cfg.Description = "test plugin"
|
||||
|
|
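The plugin tests now derive their 60-second timeouts from testutil.GetContext(c) rather than context.Background(), so the deadline is layered on top of the trace-carrying test context. A hedged sketch of what such a lookup might look like — the registry map and fallback below are assumptions, not the real testutil implementation:

package example

import (
	"context"
	"sync"
	"testing"
	"time"
)

var (
	mu       sync.Mutex
	testCtxs = map[string]context.Context{} // contexts registered per test name
)

// getTestContext is a stand-in for testutil.GetContext: return the context
// registered for this test (which would carry its trace span), or fall back
// to context.Background().
func getTestContext(t *testing.T) context.Context {
	mu.Lock()
	defer mu.Unlock()
	if ctx, ok := testCtxs[t.Name()]; ok {
		return ctx
	}
	return context.Background()
}

func TestTimeoutFromTestContext(t *testing.T) {
	// Layer the deadline on top of the test context instead of a fresh root,
	// mirroring the context.WithTimeout(testutil.GetContext(c), ...) pattern.
	ctx, cancel := context.WithTimeout(getTestContext(t), 60*time.Second)
	defer cancel()
	_ = ctx
}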
|
@ -9,15 +9,17 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
type DockerCLIPortSuite struct {
|
||||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIPortSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIPortSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPortSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -26,21 +28,19 @@ func (s *DockerCLIPortSuite) OnTimeout(c *testing.T) {
|
|||
|
||||
func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
// one port
|
||||
out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top")
|
||||
firstID := strings.TrimSpace(out)
|
||||
|
||||
out, _ = dockerCmd(c, "port", firstID, "80")
|
||||
|
||||
err := assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
|
||||
|
||||
out, _ = dockerCmd(c, "port", firstID)
|
||||
|
||||
err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876", "80/tcp -> [::]:9876"})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876", "80/tcp -> [::]:9876"})
|
||||
|
||||
dockerCmd(c, "rm", "-f", firstID)
|
||||
|
||||
|
@ -54,13 +54,11 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
|
||||
out, _ = dockerCmd(c, "port", ID, "80")
|
||||
|
||||
err = assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
|
||||
|
||||
out, _ = dockerCmd(c, "port", ID)
|
||||
|
||||
err = assertPortList(c, out, []string{
|
||||
assertPortList(c, out, []string{
|
||||
"80/tcp -> 0.0.0.0:9876",
|
||||
"80/tcp -> [::]:9876",
|
||||
"81/tcp -> 0.0.0.0:9877",
|
||||
|
@ -68,8 +66,6 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
"82/tcp -> 0.0.0.0:9878",
|
||||
"82/tcp -> [::]:9878",
|
||||
})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
|
||||
dockerCmd(c, "rm", "-f", ID)
|
||||
|
||||
|
@ -84,13 +80,11 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
|
||||
out, _ = dockerCmd(c, "port", ID, "80")
|
||||
|
||||
err = assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876", "0.0.0.0:9999", "[::]:9999"})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876", "0.0.0.0:9999", "[::]:9999"})
|
||||
|
||||
out, _ = dockerCmd(c, "port", ID)
|
||||
|
||||
err = assertPortList(c, out, []string{
|
||||
assertPortList(c, out, []string{
|
||||
"80/tcp -> 0.0.0.0:9876",
|
||||
"80/tcp -> 0.0.0.0:9999",
|
||||
"80/tcp -> [::]:9876",
|
||||
|
@ -100,8 +94,6 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
"82/tcp -> 0.0.0.0:9878",
|
||||
"82/tcp -> [::]:9878",
|
||||
})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
dockerCmd(c, "rm", "-f", ID)
|
||||
|
||||
testRange := func() {
|
||||
|
@ -113,16 +105,14 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
|
||||
out, _ = dockerCmd(c, "port", IDs[i])
|
||||
|
||||
err = assertPortList(c, out, []string{
|
||||
assertPortList(c, out, []string{
|
||||
fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i),
|
||||
fmt.Sprintf("80/tcp -> [::]:%d", 9090+i),
|
||||
})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
}
|
||||
|
||||
// test port range exhaustion
|
||||
out, _, err = dockerCmdWithError("run", "-d", "-p", "9090-9092:80", "busybox", "top")
|
||||
out, _, err := dockerCmdWithError("run", "-d", "-p", "9090-9092:80", "busybox", "top")
|
||||
// Exhausted port range did not return an error
|
||||
assert.Assert(c, err != nil, "out: %s", out)
|
||||
|
||||
|
@ -136,7 +126,7 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
|
||||
// test invalid port ranges
|
||||
for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} {
|
||||
out, _, err = dockerCmdWithError("run", "-d", "-p", invalidRange, "busybox", "top")
|
||||
out, _, err := dockerCmdWithError("run", "-d", "-p", invalidRange, "busybox", "top")
|
||||
// Port range should have returned an error
|
||||
assert.Assert(c, err != nil, "out: %s", out)
|
||||
}
|
||||
|
@ -147,7 +137,7 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
|
||||
out, _ = dockerCmd(c, "port", ID)
|
||||
|
||||
err = assertPortList(c, out, []string{
|
||||
assertPortList(c, out, []string{
|
||||
"80/tcp -> 0.0.0.0:9800",
|
||||
"80/tcp -> [::]:9800",
|
||||
"81/tcp -> 0.0.0.0:9801",
|
||||
|
@ -157,8 +147,6 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
"83/tcp -> 0.0.0.0:9803",
|
||||
"83/tcp -> [::]:9803",
|
||||
})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
dockerCmd(c, "rm", "-f", ID)
|
||||
|
||||
// test mixing protocols in same port range
|
||||
|
@ -168,18 +156,15 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
|
|||
out, _ = dockerCmd(c, "port", ID)
|
||||
|
||||
// Running this test multiple times causes the TCP port to increment.
|
||||
err = assertPortRange(ID, []int{8000, 8080}, []int{8000, 8080})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
assertPortRange(ctx, ID, []int{8000, 8080}, []int{8000, 8080})
|
||||
dockerCmd(c, "rm", "-f", ID)
|
||||
}
|
||||
|
||||
func assertPortList(c *testing.T, out string, expected []string) error {
|
||||
func assertPortList(c *testing.T, out string, expected []string) {
|
||||
c.Helper()
|
||||
lines := strings.Split(strings.Trim(out, "\n "), "\n")
|
||||
if len(lines) != len(expected) {
|
||||
return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected))
|
||||
}
|
||||
assert.Assert(c, is.Len(lines, len(expected)), "expected: %s", strings.Join(expected, ", "))
|
||||
|
||||
sort.Strings(lines)
|
||||
sort.Strings(expected)
|
||||
|
||||
|
@ -196,17 +181,13 @@ func assertPortList(c *testing.T, out string, expected []string) error {
|
|||
if lines[i] == expected[i] {
|
||||
continue
|
||||
}
|
||||
if lines[i] != oldFormat(expected[i]) {
|
||||
return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|")
|
||||
}
|
||||
assert.Equal(c, lines[i], oldFormat(expected[i]))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func assertPortRange(id string, expectedTCP, expectedUDP []int) error {
|
||||
func assertPortRange(ctx context.Context, id string, expectedTCP, expectedUDP []int) error {
|
||||
client := testEnv.APIClient()
|
||||
inspect, err := client.ContainerInspect(context.TODO(), id)
|
||||
inspect, err := client.ContainerInspect(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
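assertPortList is converted from an error-returning helper into a test helper that asserts inline, and assertPortRange now takes the caller's context for its ContainerInspect call. A small sketch of the same "return error" to "assert with t.Helper()" refactor applied to a generic helper; the names here are illustrative, not the actual moby helpers:

package example

import (
	"sort"
	"strings"
	"testing"

	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

// assertLines marks itself as a helper so failures point at the caller, and
// asserts directly instead of handing an error back for the caller to check.
func assertLines(t *testing.T, out string, expected []string) {
	t.Helper()
	lines := strings.Split(strings.Trim(out, "\n "), "\n")
	assert.Assert(t, is.Len(lines, len(expected)), "expected: %s", strings.Join(expected, ", "))

	sort.Strings(lines)
	sort.Strings(expected)
	for i := range lines {
		assert.Equal(t, lines[i], expected[i])
	}
}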
@ -331,17 +312,15 @@ func (s *DockerCLIPortSuite) TestPortHostBinding(c *testing.T) {
|
|||
|
||||
out, _ = dockerCmd(c, "port", firstID, "80")
|
||||
|
||||
err := assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
|
||||
// Port list is not correct
|
||||
assert.NilError(c, err)
|
||||
assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
|
||||
|
||||
dockerCmd(c, "run", "--net=host", "busybox", "nc", "localhost", "9876")
|
||||
|
||||
dockerCmd(c, "rm", "-f", firstID)
|
||||
|
||||
out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876")
|
||||
out, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876")
|
||||
// Port is still bound after the Container is removed
|
||||
assert.Assert(c, err != nil, "out: %s", out)
|
||||
assert.Assert(c, err != nil, out)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPortSuite) TestPortExposeHostBinding(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -13,8 +14,8 @@ type DockerCLIProxySuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIProxySuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIProxySuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIProxySuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
@ -14,13 +15,14 @@ import (
|
|||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
"gotest.tools/v3/poll"
|
||||
)
|
||||
|
||||
func (s *DockerCLIPruneSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIPruneSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPruneSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -49,7 +51,8 @@ func pruneNetworkAndVerify(c *testing.T, d *daemon.Daemon, kept, pruned []string
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestPruneNetwork(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
_, err := d.Cmd("network", "create", "n1") // used by container (testprune)
|
||||
assert.NilError(c, err)
|
||||
_, err = d.Cmd("network", "create", "n2")
|
||||
|
@ -72,7 +75,7 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *testing.T) {
|
|||
"busybox", "top")
|
||||
assert.NilError(c, err)
|
||||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(replicas+1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(replicas+1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// prune and verify
|
||||
pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"})
|
||||
|
@ -82,13 +85,14 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *testing.T) {
|
|||
assert.NilError(c, err)
|
||||
_, err = d.Cmd("service", "rm", serviceName)
|
||||
assert.NilError(c, err)
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"})
|
||||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestPruneImageDangling(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
result := cli.BuildCmd(c, "test", cli.Daemon(s.d),
|
||||
build.WithDockerfile(`FROM busybox
|
||||
|
@ -258,7 +262,8 @@ func (s *DockerCLIPruneSuite) TestPruneNetworkLabel(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestPruneImageLabel(c *testing.T) {
|
||||
s.d.StartWithBusybox(c)
|
||||
ctx := testutil.GetContext(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
result := cli.BuildCmd(c, "test1", cli.Daemon(s.d),
|
||||
build.WithDockerfile(`FROM busybox
|
||||
|
|
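Note how d.CheckActiveContainerCount stops being passed as a method value and is instead called with ctx inside pollCheck, which implies it now returns the poll callback rather than being one: a closure over the context, so the container listing it performs carries the test's trace context. A sketch of that shape — the daemon type and its inner call are stand-ins, not testutil/daemon:

package example

import (
	"context"
	"testing"
)

// checkFunc matches the callback shape the integration-cli pollCheck helper
// expects: the observed value plus a comment for the poller.
type checkFunc func(*testing.T) (interface{}, string)

type fakeDaemon struct {
	activeContainers func(ctx context.Context) int // stand-in for an API call
}

// CheckActiveContainerCount returns a poll check bound to ctx, so the listing
// done on each poll iteration runs under the test's context.
func (d *fakeDaemon) CheckActiveContainerCount(ctx context.Context) checkFunc {
	return func(t *testing.T) (interface{}, string) {
		return d.activeContainers(ctx), "active containers"
	}
}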
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -22,8 +23,8 @@ type DockerCLIPsSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIPsSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIPsSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPsSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
@ -17,8 +18,8 @@ type DockerCLIPullSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIPullSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIPullSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPullSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -2,6 +2,7 @@ package main
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
@ -21,8 +22,8 @@ type DockerCLIPushSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIPushSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIPushSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIPushSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/docker/testutil/registry"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
@ -71,6 +72,7 @@ func registerUserAgentHandler(reg *registry.Mock, result *string) {
|
|||
// a registry, the registry should see a User-Agent string of the form
|
||||
// [docker engine UA] UpstreamClientSTREAM-CLIENT([client UA])
|
||||
func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
var ua string
|
||||
|
||||
reg, err := registry.NewMock(c)
|
||||
|
@ -80,7 +82,7 @@ func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *testing.T) {
|
|||
registerUserAgentHandler(reg, &ua)
|
||||
repoName := fmt.Sprintf("%s/busybox", reg.URL())
|
||||
|
||||
s.d.StartWithBusybox(c, "--insecure-registry", reg.URL())
|
||||
s.d.StartWithBusybox(ctx, c, "--insecure-registry", reg.URL())
|
||||
|
||||
tmp, err := os.MkdirTemp("", "integration-cli-")
|
||||
assert.NilError(c, err)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -18,8 +19,8 @@ type DockerCLIRestartSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIRestartSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIRestartSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIRestartSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -17,8 +18,8 @@ type DockerCLIRmiSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIRmiSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIRmiSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIRmiSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -25,10 +25,12 @@ import (
|
|||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/libnetwork/resolvconf"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/testutil"
|
||||
testdaemon "github.com/docker/docker/testutil/daemon"
|
||||
"github.com/docker/docker/testutil/fakecontext"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/moby/sys/mountinfo"
|
||||
|
@ -42,8 +44,8 @@ type DockerCLIRunSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLIRunSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLIRunSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLIRunSuite) OnTimeout(c *testing.T) {
|
||||
|
@ -3795,7 +3797,7 @@ func (s *DockerCLIRunSuite) TestRunNamedVolumesFromNotRemoved(c *testing.T) {
|
|||
assert.NilError(c, err)
|
||||
defer apiClient.Close()
|
||||
|
||||
container, err := apiClient.ContainerInspect(context.Background(), strings.TrimSpace(cid))
|
||||
container, err := apiClient.ContainerInspect(testutil.GetContext(c), strings.TrimSpace(cid))
|
||||
assert.NilError(c, err)
|
||||
var vname string
|
||||
for _, v := range container.Mounts {
|
||||
|
@ -3816,19 +3818,40 @@ func (s *DockerCLIRunSuite) TestRunNamedVolumesFromNotRemoved(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunAttachFailedNoLeak(c *testing.T) {
|
||||
nroutines, err := getGoroutineNumber()
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
|
||||
defer func() {
|
||||
if c.Failed() {
|
||||
d.Daemon.DumpStackAndQuit()
|
||||
} else {
|
||||
d.Stop(c)
|
||||
}
|
||||
d.Cleanup(c)
|
||||
}()
|
||||
d.StartWithBusybox(ctx, c)
|
||||
|
||||
// Run a dummy container to ensure all goroutines are up and running before we get a count
|
||||
_, err := d.Cmd("run", "--rm", "busybox", "true")
|
||||
assert.NilError(c, err)
|
||||
|
||||
runSleepingContainer(c, "--name=test", "-p", "8000:8000")
|
||||
client := d.NewClientT(c)
|
||||
|
||||
nroutines := waitForStableGourtineCount(ctx, c, client)
|
||||
|
||||
out, err := d.Cmd(append([]string{"run", "-d", "--name=test", "-p", "8000:8000", "busybox"}, sleepCommandForDaemonPlatform()...)...)
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
// Wait until container is fully up and running
|
||||
assert.Assert(c, waitRun("test") == nil)
|
||||
assert.NilError(c, d.WaitRun("test"))
|
||||
|
||||
out, err = d.Cmd("run", "--name=fail", "-p", "8000:8000", "busybox", "true")
|
||||
|
||||
out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true")
|
||||
// We will need the following `inspect` to diagnose the issue if test fails (#21247)
|
||||
out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test")
|
||||
out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail")
|
||||
out1, err1 := d.Cmd("inspect", "--format", "{{json .State}}", "test")
|
||||
out2, err2 := d.Cmd("inspect", "--format", "{{json .State}}", "fail")
|
||||
assert.Assert(c, err != nil, "Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)
|
||||
|
||||
// check for windows error as well
|
||||
// TODO Windows Post TP5. Fix the error message string
|
||||
outLowerCase := strings.ToLower(out)
|
||||
|
@ -3837,10 +3860,12 @@ func (s *DockerCLIRunSuite) TestRunAttachFailedNoLeak(c *testing.T) {
|
|||
strings.Contains(outLowerCase, "the specified port already exists") ||
|
||||
strings.Contains(outLowerCase, "hns failed with error : failed to create endpoint") ||
|
||||
strings.Contains(outLowerCase, "hns failed with error : the object already exists"), fmt.Sprintf("Output: %s", out))
|
||||
dockerCmd(c, "rm", "-f", "test")
|
||||
|
||||
out, err = d.Cmd("rm", "-f", "test")
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
// NGoroutines is not updated right away, so we need to wait before failing
|
||||
assert.Assert(c, waitForGoroutines(nroutines) == nil)
|
||||
waitForGoroutines(ctx, c, client, nroutines)
|
||||
}
|
||||
|
||||
// Test for one character directory name case (#20122)
|
||||
|
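The goroutine-leak test above now runs against a private daemon started with OTEL_SDK_DISABLED=1, since the tracing SDK keeps background goroutines alive and would skew a before/after count, and the count itself is read through the API under the test context. A hedged sketch of counting daemon goroutines via /info and waiting for them to settle; the helper bodies here are assumptions that mirror, but are not, the helpers used above:

package example

import (
	"context"
	"testing"
	"time"

	"github.com/docker/docker/client"
	"gotest.tools/v3/poll"
)

// daemonGoroutines reads the daemon's goroutine count from the /info endpoint.
func daemonGoroutines(ctx context.Context, apiClient client.APIClient) (int, error) {
	info, err := apiClient.Info(ctx)
	if err != nil {
		return 0, err
	}
	return info.NGoroutines, nil
}

// waitForGoroutinesBelow polls until the count drops back to the baseline;
// NGoroutines is not updated immediately after a container exits.
func waitForGoroutinesBelow(ctx context.Context, t *testing.T, apiClient client.APIClient, baseline int) {
	t.Helper()
	poll.WaitOn(t, func(poll.LogT) poll.Result {
		n, err := daemonGoroutines(ctx, apiClient)
		if err != nil {
			return poll.Error(err)
		}
		if n > baseline {
			return poll.Continue("%d goroutines, want <= %d", n, baseline)
		}
		return poll.Success()
	}, poll.WithTimeout(30*time.Second))
}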
@ -3993,35 +4018,44 @@ exec "$@"`,
|
|||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *testing.T) {
|
||||
s.d.StartWithBusybox(c, "--debug", "--default-ulimit=nofile=65535")
|
||||
ctx := testutil.GetContext(c)
|
||||
d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
|
||||
defer func() {
|
||||
d.Stop(c)
|
||||
d.Cleanup(c)
|
||||
}()
|
||||
d.StartWithBusybox(ctx, c, "--debug", "--default-ulimit=nofile=65535")
|
||||
|
||||
name := "test-A"
|
||||
_, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top")
|
||||
_, err := d.Cmd("run", "--name", name, "-d", "busybox", "top")
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(c, s.d.WaitRun(name))
|
||||
assert.NilError(c, d.WaitRun(name))
|
||||
|
||||
out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
|
||||
out, err := d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
|
||||
assert.NilError(c, err)
|
||||
assert.Assert(c, strings.Contains(out, "[nofile=65535:65535]"))
|
||||
name = "test-B"
|
||||
_, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top")
|
||||
_, err = d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top")
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(c, s.d.WaitRun(name))
|
||||
assert.NilError(c, d.WaitRun(name))
|
||||
|
||||
out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
|
||||
out, err = d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
|
||||
assert.NilError(c, err)
|
||||
assert.Assert(c, strings.Contains(out, "[nofile=42:42]"))
|
||||
}
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunStoppedLoggingDriverNoLeak(c *testing.T) {
|
||||
nroutines, err := getGoroutineNumber()
|
||||
client := testEnv.APIClient()
|
||||
ctx := testutil.GetContext(c)
|
||||
nroutines, err := getGoroutineNumber(ctx, client)
|
||||
assert.NilError(c, err)
|
||||
|
||||
out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true")
|
||||
assert.ErrorContains(c, err, "")
|
||||
assert.Assert(c, strings.Contains(out, "failed to initialize logging driver"), "error should be about logging driver, got output %s", out)
|
||||
|
||||
// NGoroutines is not updated right away, so we need to wait before failing
|
||||
assert.Assert(c, waitForGoroutines(nroutines) == nil)
|
||||
waitForGoroutines(ctx, c, client, nroutines)
|
||||
}
|
||||
|
||||
// Handles error conditions for --credentialspec. Validating E2E success cases
|
||||
|
|
|
@ -4,7 +4,6 @@ package main
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
@ -24,6 +23,7 @@ import (
|
|||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/pkg/sysinfo"
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/moby/sys/mount"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
|
@ -993,7 +993,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyUnshareUserns(c *testing.T)
|
|||
// with the default seccomp profile exits with operation not permitted.
|
||||
func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyCloneUserns(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, seccompEnabled)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
icmd.RunCommand(dockerBinary, "run", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
|
||||
ExitCode: 1,
|
||||
|
@ -1005,7 +1005,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyCloneUserns(c *testing.T) {
|
|||
// 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns.
|
||||
func (s *DockerCLIRunSuite) TestRunSeccompUnconfinedCloneUserns(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace, unprivilegedUsernsClone)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// make sure running w privileged is ok
|
||||
icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined",
|
||||
|
@ -1018,7 +1018,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompUnconfinedCloneUserns(c *testing.T) {
|
|||
// allows creating a userns.
|
||||
func (s *DockerCLIRunSuite) TestRunSeccompAllowPrivCloneUserns(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// make sure running w privileged is ok
|
||||
icmd.RunCommand(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
|
||||
|
@ -1030,7 +1030,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompAllowPrivCloneUserns(c *testing.T) {
|
|||
// with the default seccomp profile.
|
||||
func (s *DockerCLIRunSuite) TestRunSeccompProfileAllow32Bit(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, IsAmd64)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test").Assert(c, icmd.Success)
|
||||
}
|
||||
|
@ -1045,7 +1045,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompAllowSetrlimit(c *testing.T) {
|
|||
|
||||
func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileAcct(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, NotUserNamespace)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test")
|
||||
if err == nil || !strings.Contains(out, "Operation not permitted") {
|
||||
|
@ -1075,7 +1075,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileAcct(c *testing.T) {
|
|||
|
||||
func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileNS(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, NotUserNamespace)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0")
|
||||
if err == nil || !strings.Contains(out, "Operation not permitted") {
|
||||
|
@ -1112,7 +1112,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileNS(c *testing.T) {
|
|||
// effective uid transitions on executing setuid binaries.
|
||||
func (s *DockerCLIRunSuite) TestRunNoNewPrivSetuid(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
|
||||
ensureNNPTest(c)
|
||||
ensureNNPTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that running a setuid binary results in no effective uid transition
|
||||
icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges=true", "--user", "1000",
|
||||
|
@ -1125,7 +1125,7 @@ func (s *DockerCLIRunSuite) TestRunNoNewPrivSetuid(c *testing.T) {
|
|||
// effective uid transitions on executing setuid binaries.
|
||||
func (s *DockerCLIRunSuite) TestLegacyRunNoNewPrivSetuid(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
|
||||
ensureNNPTest(c)
|
||||
ensureNNPTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that running a setuid binary results in no effective uid transition
|
||||
icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000",
|
||||
|
@ -1136,7 +1136,7 @@ func (s *DockerCLIRunSuite) TestLegacyRunNoNewPrivSetuid(c *testing.T) {
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChown(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_CHOWN
|
||||
dockerCmd(c, "run", "busybox", "chown", "100", "/tmp")
|
||||
|
@ -1154,7 +1154,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChown(c *testing.T) {
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_DAC_OVERRIDE
|
||||
dockerCmd(c, "run", "busybox", "sh", "-c", "echo test > /etc/passwd")
|
||||
|
@ -1167,7 +1167,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *testin
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesFowner(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_FOWNER
|
||||
dockerCmd(c, "run", "busybox", "chmod", "777", "/etc/passwd")
|
||||
|
@ -1183,7 +1183,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesFowner(c *testing.T)
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesSetuid(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_SETUID
|
||||
dockerCmd(c, "run", "syscall-test", "setuid-test")
|
||||
|
@ -1201,7 +1201,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesSetuid(c *testing.T)
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesSetgid(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_SETGID
|
||||
dockerCmd(c, "run", "syscall-test", "setgid-test")
|
||||
|
@ -1229,7 +1229,7 @@ func sysctlExists(s string) bool {
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_NET_BIND_SERVICE
|
||||
dockerCmd(c, "run", "syscall-test", "socket-test")
|
||||
|
@ -1258,7 +1258,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *tes
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_NET_RAW
|
||||
dockerCmd(c, "run", "syscall-test", "raw-test")
|
||||
|
@ -1276,7 +1276,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *testing.T)
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChroot(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_SYS_CHROOT
|
||||
dockerCmd(c, "run", "busybox", "chroot", "/", "/bin/true")
|
||||
|
@ -1294,7 +1294,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChroot(c *testing.T)
|
|||
|
||||
func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesMknod(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
|
||||
ensureSyscallTest(c)
|
||||
ensureSyscallTest(testutil.GetContext(c), c)
|
||||
|
||||
// test that a root user has default capability CAP_MKNOD
|
||||
dockerCmd(c, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2")
|
||||
|
@ -1428,8 +1428,9 @@ func (s *DockerCLIRunSuite) TestRunUserDeviceAllowed(c *testing.T) {
|
|||
|
||||
func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *testing.T) {
|
||||
testRequires(c, seccompEnabled)
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
jsonData := `{
|
||||
"defaultAction": "SCMP_ACT_ALLOW",
|
||||
|
@ -1453,8 +1454,9 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *testing.T) {
|
|||
|
||||
func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *testing.T) {
|
||||
testRequires(c, seccompEnabled)
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
jsonData := `{
|
||||
"defaultAction": "SCMP_ACT_ALLOW",
|
||||
|
@ -1479,8 +1481,9 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *testing.T) {
|
|||
|
||||
func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *testing.T) {
|
||||
testRequires(c, seccompEnabled)
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
jsonData := `{
|
||||
"archMap": [
|
||||
|
@ -1516,8 +1519,9 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *testing.T) {
|
|||
|
||||
func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *testing.T) {
|
||||
testRequires(c, seccompEnabled)
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
s.d.StartWithBusybox(c)
|
||||
s.d.StartWithBusybox(ctx, c)
|
||||
|
||||
// 1) verify I can run containers with the Docker default shipped profile which allows chmod
|
||||
_, err := s.d.Cmd("run", "busybox", "chmod", "777", ".")
|
||||
|
@ -1560,7 +1564,7 @@ func (s *DockerCLIRunSuite) TestRunWithNanoCPUs(c *testing.T) {
|
|||
|
||||
clt, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
inspect, err := clt.ContainerInspect(context.Background(), "test")
|
||||
inspect, err := clt.ContainerInspect(testutil.GetContext(c), "test")
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(500000000))
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
@ -17,8 +18,8 @@ type DockerCLISaveLoadSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLISaveLoadSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLISaveLoadSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLISaveLoadSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
|
||||
"github.com/creack/pty"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
)
|
||||
|
@ -90,7 +91,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveAndLoadWithProgressBar(c *testing.T) {
|
|||
func (s *DockerCLISaveLoadSuite) TestLoadNoStdinFail(c *testing.T) {
|
||||
pty, tty, err := pty.Open()
|
||||
assert.NilError(c, err)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
ctx, cancel := context.WithTimeout(testutil.GetContext(c), 5*time.Second)
|
||||
defer cancel()
|
||||
cmd := exec.CommandContext(ctx, dockerBinary, "load")
|
||||
cmd.Stdin = tty
|
||||
|
|
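TestLoadNoStdinFail above now builds its 5-second timeout on top of the test context before handing it to exec.CommandContext, so the spawned CLI is killed when either the timeout fires or the test's own context is cancelled. A minimal, self-contained illustration of the same pattern, assuming a POSIX sleep binary stands in for the docker CLI and context.Background() for testutil.GetContext:

package example

import (
	"context"
	"os/exec"
	"testing"
	"time"
)

func TestCommandInheritsTestContext(t *testing.T) {
	// The parent would normally be the trace-carrying test context.
	parent := context.Background()
	ctx, cancel := context.WithTimeout(parent, 5*time.Second)
	defer cancel()

	// The child process is killed when ctx expires or is cancelled.
	cmd := exec.CommandContext(ctx, "sleep", "1")
	if err := cmd.Run(); err != nil {
		t.Fatalf("sleep failed: %v", err)
	}
}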
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -12,8 +13,8 @@ type DockerCLISearchSuite struct {
|
|||
ds *DockerSuite
|
||||
}
|
||||
|
||||
func (s *DockerCLISearchSuite) TearDownTest(c *testing.T) {
|
||||
s.ds.TearDownTest(c)
|
||||
func (s *DockerCLISearchSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
s.ds.TearDownTest(ctx, c)
|
||||
}
|
||||
|
||||
func (s *DockerCLISearchSuite) OnTimeout(c *testing.T) {
|
||||
|
|
|
@ -13,26 +13,28 @@ import (
|
|||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/poll"
|
||||
)
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
id := strings.TrimSpace(out)
|
||||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, id)
|
||||
tasks = d.GetServiceTasks(ctx, c, id)
|
||||
return len(tasks) > 0, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
task := tasks[0]
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
if task.NodeID == "" || task.Status.ContainerStatus == nil {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
}
|
||||
return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
@ -66,7 +68,8 @@ func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
serviceName := "test-service-secret"
|
||||
testName := "test_secret"
|
||||
|
@ -100,7 +103,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
testPaths := map[string]string{
|
||||
"app": "/etc/secret",
|
||||
|
@ -139,14 +143,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *testi
|
|||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, serviceName)
|
||||
tasks = d.GetServiceTasks(ctx, c, serviceName)
|
||||
return len(tasks) > 0, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
task := tasks[0]
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
if task.NodeID == "" || task.Status.ContainerStatus == nil {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
}
|
||||
return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
@ -166,7 +170,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *testi
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
id := d.CreateSecret(c, swarm.SecretSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
|
@ -189,14 +194,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *testing
|
|||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, serviceName)
|
||||
tasks = d.GetServiceTasks(ctx, c, serviceName)
|
||||
return len(tasks) > 0, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
task := tasks[0]
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
if task.NodeID == "" || task.Status.ContainerStatus == nil {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
}
|
||||
return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
@ -214,7 +219,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *testing
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
serviceName := "test-service-config"
|
||||
testName := "test_config"
|
||||
|
@ -248,7 +254,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
testPaths := map[string]string{
|
||||
"app": "/etc/config",
|
||||
|
@ -286,14 +293,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *testi
|
|||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, serviceName)
|
||||
tasks = d.GetServiceTasks(ctx, c, serviceName)
|
||||
return len(tasks) > 0, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
task := tasks[0]
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
if task.NodeID == "" || task.Status.ContainerStatus == nil {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
}
|
||||
return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
@ -313,7 +320,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *testi
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
id := d.CreateConfig(c, swarm.ConfigSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
|
@ -336,14 +344,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *testing
|
|||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, serviceName)
|
||||
tasks = d.GetServiceTasks(ctx, c, serviceName)
|
||||
return len(tasks) > 0, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
task := tasks[0]
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
if task.NodeID == "" || task.Status.ContainerStatus == nil {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
}
|
||||
return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
@ -361,21 +369,22 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *testing
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; exec tail -f /dev/null")
|
||||
assert.NilError(c, err, out)
|
||||
id := strings.TrimSpace(out)
|
||||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, id)
|
||||
tasks = d.GetServiceTasks(ctx, c, id)
|
||||
return len(tasks) > 0, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
task := tasks[0]
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
if task.NodeID == "" || task.Status.ContainerStatus == nil {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
}
|
||||
return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
@ -414,7 +423,8 @@ func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
out, err := d.Cmd("network", "create", "--scope=swarm", "test_swarm_br")
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
|
@ -424,14 +434,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *testing.T) {
|
|||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, id)
|
||||
tasks = d.GetServiceTasks(ctx, c, id)
|
||||
return len(tasks) > 0, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
task := tasks[0]
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
if task.NodeID == "" || task.Status.ContainerStatus == nil {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
}
|
||||
return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
|
||||
}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
|
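The swarm helpers GetServiceTasks and GetTask gain a leading context parameter, and the polling loops above re-fetch the task under that same context until the scheduler has assigned it a node and a container status. A compact sketch of that wait loop against a hypothetical task getter (the getter type is an assumption standing in for the daemon helper):

package example

import (
	"context"
	"testing"
	"time"

	"github.com/docker/docker/api/types/swarm"
	"gotest.tools/v3/poll"
)

// taskGetter stands in for the daemon helper that now takes ctx first,
// mirroring d.GetTask(ctx, c, id) in the tests above.
type taskGetter func(ctx context.Context, t *testing.T, id string) swarm.Task

// waitTaskScheduled keeps re-fetching the task under the test context until it
// has both a node assignment and a container status.
func waitTaskScheduled(ctx context.Context, t *testing.T, get taskGetter, task swarm.Task) swarm.Task {
	t.Helper()
	poll.WaitOn(t, func(poll.LogT) poll.Result {
		if task.NodeID == "" || task.Status.ContainerStatus == nil {
			task = get(ctx, t, task.ID)
			return poll.Continue("task %s not scheduled yet", task.ID)
		}
		return poll.Success()
	}, poll.WithTimeout(30*time.Second))
	return task
}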
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
"gotest.tools/v3/poll"
|
||||
|
@ -22,7 +23,8 @@ import (
|
|||
func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
|
||||
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// build image with health-check
|
||||
imageName := "testhealth"
|
||||
|
@ -41,7 +43,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
|
|||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, id)
|
||||
tasks = d.GetServiceTasks(ctx, c, id)
|
||||
return tasks, ""
|
||||
}, checker.HasLen(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -49,7 +51,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
|
|||
|
||||
// wait for task to start
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
return task.Status.State, ""
|
||||
}, checker.Equals(swarm.TaskStateRunning)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -71,7 +73,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
|
|||
|
||||
// Task should be terminated
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
return task.Status.State, ""
|
||||
}, checker.Equals(swarm.TaskStateFailed)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -85,7 +87,8 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
|
|||
func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
|
||||
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// service started from this image won't pass health check
|
||||
imageName := "testhealth"
|
||||
|
@ -103,7 +106,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
|
|||
|
||||
var tasks []swarm.Task
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks = d.GetServiceTasks(c, id)
|
||||
tasks = d.GetServiceTasks(ctx, c, id)
|
||||
return tasks, ""
|
||||
}, checker.HasLen(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -111,7 +114,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
|
|||
|
||||
// wait for task to start
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
return task.Status.State, ""
|
||||
}, checker.Equals(swarm.TaskStateStarting)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -125,7 +128,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
|
|||
}, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// task should be blocked at starting status
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
assert.Equal(c, task.Status.State, swarm.TaskStateStarting)
|
||||
|
||||
// make it healthy
|
||||
|
@ -133,7 +136,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
|
|||
|
||||
// Task should be at running status
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
task = d.GetTask(c, task.ID)
|
||||
task = d.GetTask(ctx, c, task.ID)
|
||||
return task.Status.State, ""
|
||||
}, checker.Equals(swarm.TaskStateRunning)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
"gotest.tools/v3/poll"
|
||||
|
@ -24,7 +25,8 @@ type logMessage struct {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogs(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// we have multiple services here for detecting the goroutine issue #28915
|
||||
services := map[string]string{
|
||||
|
@ -41,7 +43,7 @@ func (s *DockerSwarmSuite) TestServiceLogs(c *testing.T) {
|
|||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c,
|
||||
d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{"busybox:latest": len(services)})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{"busybox:latest": len(services)})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
for name, message := range services {
|
||||
out, err := d.Cmd("service", "logs", name)
|
||||
|
@ -69,7 +71,8 @@ func countLogLines(d *daemon.Daemon, name string) func(*testing.T) (interface{},
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServiceLogsCompleteness"
|
||||
|
||||
|
@ -79,7 +82,7 @@ func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *testing.T) {
|
|||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// and make sure we have all the log lines
|
||||
poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(6)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -96,7 +99,8 @@ func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsTail(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServiceLogsTail"
|
||||
|
||||
|
@ -106,7 +110,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTail(c *testing.T) {
|
|||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(6)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("service", "logs", "--tail=2", name)
|
||||
|
@ -120,15 +124,16 @@ func (s *DockerSwarmSuite) TestServiceLogsTail(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsSince(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
// See DockerSuite.TestLogsSince, which is where this comes from
|
||||
d := s.AddDaemon(c, true, true)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServiceLogsSince"
|
||||
|
||||
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do usleep 100000; echo log$i; done; exec tail -f /dev/null")
|
||||
assert.NilError(c, err)
|
||||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// wait a sec for the logs to come in
|
||||
poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(3)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -155,7 +160,8 @@ func (s *DockerSwarmSuite) TestServiceLogsSince(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsFollow(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServiceLogsFollow"
|
||||
|
||||
|
@ -164,7 +170,7 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *testing.T) {
|
|||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
args := []string{"service", "logs", "-f", name}
|
||||
cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...)
|
||||
|
@ -207,7 +213,8 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServicelogsTaskLogs"
|
||||
replicas := 2
|
||||
|
@ -233,7 +240,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *testing.T) {
|
|||
result.Assert(c, icmd.Expected{Out: id})
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(replicas)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(replicas)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(6*replicas)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// get the task ids
|
||||
|
@ -260,7 +267,8 @@ func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsTTY(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServiceLogsTTY"
|
||||
|
||||
|
@ -286,7 +294,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTTY(c *testing.T) {
|
|||
result.Assert(c, icmd.Expected{Out: id})
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// and make sure we have all the log lines
|
||||
poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -298,7 +306,8 @@ func (s *DockerSwarmSuite) TestServiceLogsTTY(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServiceLogsNoHangDeletedContainer"
|
||||
|
||||
|
@ -320,7 +329,7 @@ func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *testing.T) {
|
|||
assert.Assert(c, id != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// and make sure we have all the log lines
|
||||
poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
@ -345,7 +354,8 @@ func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestServiceLogsDetails(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "TestServiceLogsDetails"
|
||||
|
||||
|
@ -371,7 +381,7 @@ func (s *DockerSwarmSuite) TestServiceLogsDetails(c *testing.T) {
|
|||
assert.Assert(c, id != "")
|
||||
|
||||
// make sure task has been deployed
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// and make sure we have all the log lines
|
||||
poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
|
|
|
@@ -7,11 +7,13 @@ import (
 	"strings"
 	"testing"

+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 )

 func (s *DockerSwarmSuite) TestServiceScale(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)

 	service1Name := "TestService1"
 	service1Args := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", service1Name, "busybox"}, sleepCommandForDaemonPlatform()...)
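Every converted test now starts from `testutil.GetContext(c)` rather than `context.Background()`. The real helper lives in `testutil` and is not shown in this diff; below is a plausible, stdlib-only sketch of how such a helper could associate a per-test context (one carrying the test's tracing span) with a `*testing.T`. The map-based registry is an assumption for illustration only:

```go
package testctx

import (
	"context"
	"sync"
	"testing"
)

var (
	mu       sync.Mutex
	contexts = map[testing.TB]context.Context{}
)

// SetContext registers the context to use for a given test, typically one
// that already carries the test's tracing span. The entry is dropped when
// the test finishes.
func SetContext(t testing.TB, ctx context.Context) {
	mu.Lock()
	contexts[t] = ctx
	mu.Unlock()
	t.Cleanup(func() {
		mu.Lock()
		delete(contexts, t)
		mu.Unlock()
	})
}

// GetContext returns the context registered for t, falling back to
// context.Background() when nothing was registered.
func GetContext(t testing.TB) context.Context {
	mu.Lock()
	defer mu.Unlock()
	if ctx, ok := contexts[t]; ok {
		return ctx
	}
	return context.Background()
}
```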
|
@@ -1,6 +1,7 @@
 package main

 import (
+	"context"
 	"fmt"
 	"io"
 	"log"
@@ -18,8 +19,8 @@ type DockerCLISNISuite struct {
 	ds *DockerSuite
 }

-func (s *DockerCLISNISuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLISNISuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }

 func (s *DockerCLISNISuite) OnTimeout(c *testing.T) {
|
@@ -1,6 +1,7 @@
 package main

 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -15,8 +16,8 @@ type DockerCLIStartSuite struct {
 	ds *DockerSuite
 }

-func (s *DockerCLIStartSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIStartSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }

 func (s *DockerCLIStartSuite) OnTimeout(c *testing.T) {
|
@@ -2,6 +2,7 @@ package main

 import (
 	"bufio"
+	"context"
 	"os/exec"
 	"regexp"
 	"strings"
@@ -17,8 +18,8 @@ type DockerCLIStatsSuite struct {
 	ds *DockerSuite
 }

-func (s *DockerCLIStatsSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIStatsSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }

 func (s *DockerCLIStatsSuite) OnTimeout(c *testing.T) {
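The suite diffs above (and the similar ones later in this commit) all make the same change: `TearDownTest` now receives a `context.Context` ahead of the `*testing.T`, so cleanup work can run under the test's span. A hedged sketch of how a suite runner might drive hooks with that signature — the interface and wrapper below are illustrative, not the actual `internal/test/suite` code:

```go
package suiterunner

import (
	"context"
	"testing"
)

// tearDowner is the hook shape the converted suites now implement.
type tearDowner interface {
	TearDownTest(ctx context.Context, t *testing.T)
}

// run invokes a single test and, if the suite implements the hook, defers
// teardown with the same context so teardown API calls stay in the trace.
func run(ctx context.Context, t *testing.T, s interface{}, test func(context.Context, *testing.T)) {
	if td, ok := s.(tearDowner); ok {
		defer td.TearDownTest(ctx, t)
	}
	test(ctx, t)
}
```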
|
@@ -27,6 +27,7 @@ import (
 	"github.com/docker/docker/libnetwork/ipamapi"
 	remoteipam "github.com/docker/docker/libnetwork/ipams/remote/api"
 	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/docker/testutil"
 	"github.com/moby/swarmkit/v2/ca/keyutils"
 	"github.com/vishvananda/netlink"
 	"gotest.tools/v3/assert"
@@ -36,7 +37,8 @@ import (
 )

 func (s *DockerSwarmSuite) TestSwarmUpdate(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)

 	getSpec := func() swarm.Spec {
 		sw := d.GetSwarm(c)
@@ -84,7 +86,8 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *testing.T) {
 }

 func (s *DockerSwarmSuite) TestSwarmInit(c *testing.T) {
-	d := s.AddDaemon(c, false, false)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, false, false)

 	getSpec := func() swarm.Spec {
 		sw := d.GetSwarm(c)
|
@@ -118,7 +121,7 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *testing.T) {
 	assert.Equal(c, spec.CAConfig.ExternalCAs[0].CACert, "")
 	assert.Equal(c, spec.CAConfig.ExternalCAs[1].CACert, string(expected))

-	assert.Assert(c, d.SwarmLeave(c, true) == nil)
+	assert.Assert(c, d.SwarmLeave(ctx, c, true) == nil)
 	cli.Docker(cli.Args("swarm", "init"), cli.Daemon(d)).Assert(c, icmd.Success)

 	spec = getSpec()
@@ -128,10 +131,11 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *testing.T) {

 func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *testing.T) {
 	testRequires(c, IPv6)
-	d1 := s.AddDaemon(c, false, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, false, false)
 	cli.Docker(cli.Args("swarm", "init", "--listen-add", "::1"), cli.Daemon(d1)).Assert(c, icmd.Success)

-	d2 := s.AddDaemon(c, false, false)
+	d2 := s.AddDaemon(ctx, c, false, false)
 	cli.Docker(cli.Args("swarm", "join", "::1"), cli.Daemon(d2)).Assert(c, icmd.Success)

 	out := cli.Docker(cli.Args("info"), cli.Daemon(d2)).Assert(c, icmd.Success).Combined()
@@ -139,16 +143,18 @@ func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *testing.T) {
 }

 func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *testing.T) {
-	d := s.AddDaemon(c, false, false)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, false, false)
 	out, err := d.Cmd("swarm", "init", "--advertise-addr", "0.0.0.0")
 	assert.ErrorContains(c, err, "")
 	assert.Assert(c, strings.Contains(out, "advertise address must be a non-zero IP address"))
 }

 func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	// init swarm mode and stop a daemon
-	d := s.AddDaemon(c, true, true)
-	info := d.SwarmInfo(c)
+	d := s.AddDaemon(ctx, c, true, true)
+	info := d.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 	d.Stop(c)

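Threading `ctx` into helpers such as `d.SwarmInfo(ctx, c)` only pays off if the API client propagates the span carried in that context over the wire. A minimal illustration of the general mechanism using the standard otelhttp instrumentation — this is not the moby client wiring, just the idea the test changes rely on:

```go
package traceclient

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newTracedClient wraps the default transport so outgoing requests carry
// W3C trace headers for whatever span is active in the request context.
func newTracedClient() *http.Client {
	return &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
}

// get issues a request bound to the caller's context; if ctx carries a test
// span, the server side can record its handling as a child of that span.
func get(ctx context.Context, c *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	return c.Do(req)
}
```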
|
@ -163,7 +169,8 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
hostname, err := d.Cmd("node", "inspect", "--format", "{{.Description.Hostname}}", "self")
|
||||
assert.Assert(c, err == nil, hostname)
|
||||
|
||||
|
@@ -171,9 +178,9 @@ func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *testing.T) {
 	assert.NilError(c, err, out)

 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

-	containers := d.ActiveContainers(c)
+	containers := d.ActiveContainers(testutil.GetContext(c), c)
 	out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0])
 	assert.NilError(c, err, out)
 	assert.Equal(c, strings.Split(out, "\n")[0], "test-1-"+strings.Split(hostname, "\n")[0], "hostname with templating invalid")
|
@ -181,7 +188,8 @@ func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *testing.T) {
|
|||
|
||||
// Test case for #24270
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name1 := "redis-cluster-md5"
|
||||
name2 := "redis-cluster"
|
||||
|
@ -220,7 +228,8 @@ func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("node", "inspect", "--format", "{{ .Description.Hostname }}", "self")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -238,7 +247,8 @@ func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "redis-cluster-md5"
|
||||
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top")
|
||||
|
@ -246,7 +256,7 @@ func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *testing.T) {
|
|||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(3)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(3)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
filter := "name=redis-cluster"
|
||||
|
||||
|
@ -264,7 +274,8 @@ func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *testing.T) {
|
|||
|
||||
// Test case for #25375
|
||||
func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "top"
|
||||
// this first command does not have to be retried because service creates
|
||||
|
@ -290,7 +301,8 @@ func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "top"
|
||||
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top")
|
||||
|
@ -298,7 +310,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *testing.T) {
|
|||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("ps", "-q")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -312,7 +324,8 @@ func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -334,7 +347,8 @@ func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -359,7 +373,8 @@ func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "testnet")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -387,7 +402,8 @@ func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestOverlayAttachable(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "ovnet")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -409,7 +425,8 @@ func (s *DockerSwarmSuite) TestOverlayAttachable(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Create an attachable swarm network
|
||||
nwName := "attovl"
|
||||
|
@ -421,7 +438,7 @@ func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// Leave the swarm
|
||||
assert.Assert(c, d.SwarmLeave(c, true) == nil)
|
||||
assert.Assert(c, d.SwarmLeave(ctx, c, true) == nil)
|
||||
|
||||
// Check the container is disconnected
|
||||
out, err = d.Cmd("inspect", "c1", "--format", "{{.NetworkSettings.Networks."+nwName+"}}")
|
||||
|
@ -435,7 +452,8 @@ func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestOverlayAttachableReleaseResourcesOnFailure(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Create attachable network
|
||||
out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "--subnet", "10.10.9.0/24", "ovnet")
|
||||
|
@ -459,7 +477,8 @@ func (s *DockerSwarmSuite) TestOverlayAttachableReleaseResourcesOnFailure(c *tes
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Ingress network can be removed
|
||||
removeNetwork := func(name string) *icmd.Result {
|
||||
|
@ -510,7 +529,8 @@ func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmCreateServiceWithNoIngressNetwork(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Remove ingress network
|
||||
result := cli.Docker(
|
||||
|
@ -529,7 +549,8 @@ func (s *DockerSwarmSuite) TestSwarmCreateServiceWithNoIngressNetwork(c *testing
|
|||
// Test case for #24108, also the case from:
|
||||
// https://github.com/docker/docker/pull/24620#issuecomment-233715656
|
||||
func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "redis-cluster-md5"
|
||||
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top")
|
||||
|
@ -582,7 +603,8 @@ func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Create a bare container
|
||||
out, err := d.Cmd("run", "-d", "--name=bare-container", "busybox", "top")
|
||||
|
@ -595,7 +617,7 @@ func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *testing.T) {
|
|||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckServiceRunningTasks(name), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckServiceRunningTasks(ctx, name), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// Filter non-tasks
|
||||
out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false")
|
||||
|
@ -785,6 +807,7 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
mux := http.NewServeMux()
|
||||
s.server = httptest.NewServer(mux)
|
||||
assert.Assert(c, s.server != nil) // check that HTTP server has started
|
||||
|
@ -795,7 +818,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *testing.T) {
|
|||
assert.NilError(c, err)
|
||||
}()
|
||||
|
||||
d := s.AddDaemon(c, true, true)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("network", "create", "-d", globalNetworkPlugin, "foo")
|
||||
assert.ErrorContains(c, err, "", out)
|
||||
|
@ -804,7 +827,8 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *testing.T) {
|
|||
|
||||
// Test case for #24712
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
path := filepath.Join(d.Folder, "env.txt")
|
||||
err := os.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0o644)
|
||||
|
@ -822,7 +846,8 @@ func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "top"
|
||||
|
||||
|
@ -834,7 +859,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// Make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// We need to get the container id.
|
||||
out, err = d.Cmd("ps", "-q", "--no-trunc")
|
||||
|
@ -848,7 +873,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *testing.T) {
|
|||
out, err = d.Cmd("service", "rm", name)
|
||||
assert.NilError(c, err, out)
|
||||
// Make sure container has been destroyed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// With --tty
|
||||
expectedOutput = "TTY"
|
||||
|
@ -856,7 +881,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// Make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// We need to get the container id.
|
||||
out, err = d.Cmd("ps", "-q", "--no-trunc")
|
||||
|
@ -869,7 +894,8 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Create a service
|
||||
name := "top"
|
||||
|
@ -877,7 +903,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// Make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name)
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -892,7 +918,8 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceNetworkUpdate(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
result := icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "foo"))
|
||||
result.Assert(c, icmd.Success)
|
||||
|
@@ -912,23 +939,24 @@ func (s *DockerSwarmSuite) TestSwarmServiceNetworkUpdate(c *testing.T) {
 	result.Assert(c, icmd.Success)

 	// Make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskNetworks, checker.DeepEquals(map[string]int{fooNetwork: 1, barNetwork: 1})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskNetworks(ctx), checker.DeepEquals(map[string]int{fooNetwork: 1, barNetwork: 1})), poll.WithTimeout(defaultReconciliationTimeout))

 	// Remove a network
 	result = icmd.RunCmd(d.Command("service", "update", "--detach", "--network-rm", "foo", name))
 	result.Assert(c, icmd.Success)

-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskNetworks, checker.DeepEquals(map[string]int{barNetwork: 1})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskNetworks(ctx), checker.DeepEquals(map[string]int{barNetwork: 1})), poll.WithTimeout(defaultReconciliationTimeout))

 	// Add a network
 	result = icmd.RunCmd(d.Command("service", "update", "--detach", "--network-add", "baz", name))
 	result.Assert(c, icmd.Success)

-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskNetworks, checker.DeepEquals(map[string]int{barNetwork: 1, bazNetwork: 1})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskNetworks(ctx), checker.DeepEquals(map[string]int{barNetwork: 1, bazNetwork: 1})), poll.WithTimeout(defaultReconciliationTimeout))
 }

 func (s *DockerSwarmSuite) TestDNSConfig(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)

 	// Create a service
 	name := "top"
|
@ -936,7 +964,7 @@ func (s *DockerSwarmSuite) TestDNSConfig(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// Make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// We need to get the container id.
|
||||
out, err = d.Cmd("ps", "-a", "-q", "--no-trunc")
|
||||
|
@ -955,7 +983,8 @@ func (s *DockerSwarmSuite) TestDNSConfig(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Create a service
|
||||
name := "top"
|
||||
|
@ -963,7 +992,7 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// Make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("service", "update", "--detach", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name)
|
||||
assert.NilError(c, err, out)
|
||||
|
@@ -974,7 +1003,8 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *testing.T) {
 }

 func getNodeStatus(c *testing.T, d *daemon.Daemon) swarm.LocalNodeState {
-	info := d.SwarmInfo(c)
+	ctx := testutil.GetContext(c)
+	info := d.SwarmInfo(ctx, c)
 	return info.LocalNodeState
 }

@@ -994,24 +1024,25 @@ func checkKeyIsEncrypted(d *daemon.Daemon) func(*testing.T) (interface{}, string
 	}
 }

-func checkSwarmLockedToUnlocked(c *testing.T, d *daemon.Daemon) {
+func checkSwarmLockedToUnlocked(ctx context.Context, c *testing.T, d *daemon.Daemon) {
 	// Wait for the PEM file to become unencrypted
 	poll.WaitOn(c, pollCheck(c, checkKeyIsEncrypted(d), checker.Equals(false)), poll.WithTimeout(defaultReconciliationTimeout))

 	d.RestartNode(c)
-	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
+	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState(ctx), checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
 }

-func checkSwarmUnlockedToLocked(c *testing.T, d *daemon.Daemon) {
+func checkSwarmUnlockedToLocked(ctx context.Context, c *testing.T, d *daemon.Daemon) {
 	// Wait for the PEM file to become encrypted
 	poll.WaitOn(c, pollCheck(c, checkKeyIsEncrypted(d), checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))

 	d.RestartNode(c)
-	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStateLocked)), poll.WithTimeout(time.Second))
+	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState(ctx), checker.Equals(swarm.LocalNodeStateLocked)), poll.WithTimeout(time.Second))
 }

 func (s *DockerSwarmSuite) TestUnlockEngineAndUnlockedSwarm(c *testing.T) {
-	d := s.AddDaemon(c, false, false)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, false, false)

 	// unlocking a normal engine should return an error - it does not even ask for the key
 	cmd := d.Command("swarm", "unlock")
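`checkSwarmLockedToUnlocked` and its counterpart now receive the context and build checks such as `d.CheckLocalNodeState(ctx)` before handing them to `pollCheck`. The `pollCheck` adapter itself is not shown in this hunk; below is a rough stand-in for how such a check could plug into `gotest.tools/v3/poll`. The adapter is an assumption about its shape, not the integration-cli implementation:

```go
package pollsketch

import (
	"testing"

	"gotest.tools/v3/poll"
)

// checkFunc matches what the context-aware daemon helpers return,
// e.g. d.CheckLocalNodeState(ctx).
type checkFunc func(t *testing.T) (interface{}, string)

// pollCheckSketch adapts a checkFunc plus an expected value into a
// poll.Check. Comparison with == keeps the sketch simple and assumes
// comparable values.
func pollCheckSketch(t *testing.T, f checkFunc, want interface{}) poll.Check {
	return func(poll.LogT) poll.Result {
		got, comment := f(t)
		if got == want {
			return poll.Success()
		}
		return poll.Continue("waiting for %v, got %v (%s)", want, got, comment)
	}
}
```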
|
@ -1037,7 +1068,8 @@ func (s *DockerSwarmSuite) TestUnlockEngineAndUnlockedSwarm(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmInitLocked(c *testing.T) {
|
||||
d := s.AddDaemon(c, false, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, false, false)
|
||||
|
||||
outs, err := d.Cmd("swarm", "init", "--autolock")
|
||||
assert.Assert(c, err == nil, outs)
|
||||
|
@ -1070,7 +1102,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *testing.T) {
|
|||
outs, err = d.Cmd("swarm", "update", "--autolock=false")
|
||||
assert.Assert(c, err == nil, outs)
|
||||
|
||||
checkSwarmLockedToUnlocked(c, d)
|
||||
checkSwarmLockedToUnlocked(ctx, c, d)
|
||||
|
||||
outs, err = d.Cmd("node", "ls")
|
||||
assert.Assert(c, err == nil, outs)
|
||||
|
@ -1078,7 +1110,8 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *testing.T) {
|
||||
d := s.AddDaemon(c, false, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, false, false)
|
||||
|
||||
outs, err := d.Cmd("swarm", "init", "--autolock")
|
||||
assert.Assert(c, err == nil, outs)
|
||||
|
@ -1086,7 +1119,7 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *testing.T) {
|
|||
// It starts off locked
|
||||
d.RestartNode(c)
|
||||
|
||||
info := d.SwarmInfo(c)
|
||||
info := d.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateLocked)
|
||||
|
||||
outs, _ = d.Cmd("node", "ls")
|
||||
|
@ -1098,20 +1131,21 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *testing.T) {
|
|||
outs, err = d.Cmd("swarm", "leave", "--force")
|
||||
assert.Assert(c, err == nil, outs)
|
||||
|
||||
info = d.SwarmInfo(c)
|
||||
info = d.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
|
||||
|
||||
outs, err = d.Cmd("swarm", "init")
|
||||
assert.Assert(c, err == nil, outs)
|
||||
|
||||
info = d.SwarmInfo(c)
|
||||
info = d.SwarmInfo(ctx, c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// they start off unlocked
|
||||
d2.RestartNode(c)
|
||||
|
@ -1127,7 +1161,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *testing.T) {
|
|||
|
||||
// The ones that got the cluster update should be set to locked
|
||||
for _, d := range []*daemon.Daemon{d1, d3} {
|
||||
checkSwarmUnlockedToLocked(c, d)
|
||||
checkSwarmUnlockedToLocked(ctx, c, d)
|
||||
|
||||
cmd := d.Command("swarm", "unlock")
|
||||
cmd.Stdin = bytes.NewBufferString(unlockKey)
|
||||
|
@ -1140,7 +1174,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *testing.T) {
|
|||
assert.Equal(c, getNodeStatus(c, d2), swarm.LocalNodeStateActive)
|
||||
|
||||
// d2 is now set to lock
|
||||
checkSwarmUnlockedToLocked(c, d2)
|
||||
checkSwarmUnlockedToLocked(ctx, c, d2)
|
||||
|
||||
// leave it locked, and set the cluster to no longer autolock
|
||||
outs, err = d1.Cmd("swarm", "update", "--autolock=false")
|
||||
|
@ -1148,7 +1182,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *testing.T) {
|
|||
|
||||
// the ones that got the update are now set to unlocked
|
||||
for _, d := range []*daemon.Daemon{d1, d3} {
|
||||
checkSwarmLockedToUnlocked(c, d)
|
||||
checkSwarmLockedToUnlocked(ctx, c, d)
|
||||
}
|
||||
|
||||
// d2 still locked
|
||||
|
@ -1161,16 +1195,17 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *testing.T) {
|
|||
assert.Equal(c, getNodeStatus(c, d2), swarm.LocalNodeStateActive)
|
||||
|
||||
// once it's caught up, d2 is set to not be locked
|
||||
checkSwarmLockedToUnlocked(c, d2)
|
||||
checkSwarmLockedToUnlocked(ctx, c, d2)
|
||||
|
||||
// managers who join now are never set to locked in the first place
|
||||
d4 := s.AddDaemon(c, true, true)
|
||||
d4 := s.AddDaemon(ctx, c, true, true)
|
||||
d4.RestartNode(c)
|
||||
assert.Equal(c, getNodeStatus(c, d4), swarm.LocalNodeStateActive)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// enable autolock
|
||||
outs, err := d1.Cmd("swarm", "update", "--autolock")
|
||||
|
@ -1178,20 +1213,20 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *testing.T) {
|
|||
unlockKey := getUnlockKey(d1, c, outs)
|
||||
|
||||
// joined workers start off unlocked
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
d2 := s.AddDaemon(ctx, c, true, false)
|
||||
d2.RestartNode(c)
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckLocalNodeState(ctx), checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
|
||||
|
||||
// promote worker
|
||||
outs, err = d1.Cmd("node", "promote", d2.NodeID())
|
||||
assert.NilError(c, err)
|
||||
assert.Assert(c, strings.Contains(outs, "promoted to a manager in the swarm"), outs)
|
||||
// join new manager node
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// both new nodes are locked
|
||||
for _, d := range []*daemon.Daemon{d2, d3} {
|
||||
checkSwarmUnlockedToLocked(c, d)
|
||||
checkSwarmUnlockedToLocked(ctx, c, d)
|
||||
|
||||
cmd := d.Command("swarm", "unlock")
|
||||
cmd.Stdin = bytes.NewBufferString(unlockKey)
|
||||
|
@ -1208,7 +1243,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *testing.T) {
|
|||
// to be replaced, then the node still has the manager TLS key which is still locked
|
||||
// (because we never want a manager TLS key to be on disk unencrypted if the cluster
|
||||
// is set to autolock)
|
||||
poll.WaitOn(c, pollCheck(c, d3.CheckControlAvailable, checker.False()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d3.CheckControlAvailable(ctx), checker.False()), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
certBytes, err := os.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
|
||||
if err != nil {
|
||||
|
@ -1223,11 +1258,12 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *testing.T) {
|
|||
|
||||
// by now, it should *never* be locked on restart
|
||||
d3.RestartNode(c)
|
||||
poll.WaitOn(c, pollCheck(c, d3.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
|
||||
poll.WaitOn(c, pollCheck(c, d3.CheckLocalNodeState(ctx), checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
outs, err := d.Cmd("swarm", "update", "--autolock")
|
||||
assert.Assert(c, err == nil, "out: %v", outs)
|
||||
|
@ -1311,10 +1347,11 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
|
|||
if runtime.GOARCH == "ppc64le" {
|
||||
c.Skip("Disabled on ppc64le")
|
||||
}
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
d1 := s.AddDaemon(c, true, true) // leader - don't restart this one, we don't want leader election delays
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
d1 := s.AddDaemon(ctx, c, true, true) // leader - don't restart this one, we don't want leader election delays
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
outs, err := d1.Cmd("swarm", "update", "--autolock")
|
||||
assert.Assert(c, err == nil, outs)
|
||||
|
@ -1396,7 +1433,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
// set to lock
|
||||
|
@ -1405,7 +1443,7 @@ func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *testing.T) {
|
|||
assert.Assert(c, strings.Contains(outs, "docker swarm unlock"), outs)
|
||||
unlockKey := getUnlockKey(d, c, outs)
|
||||
|
||||
checkSwarmUnlockedToLocked(c, d)
|
||||
checkSwarmUnlockedToLocked(ctx, c, d)
|
||||
|
||||
cmd := d.Command("swarm", "unlock")
|
||||
cmd.Stdin = bytes.NewBufferString(unlockKey)
|
||||
|
@ -1416,12 +1454,13 @@ func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *testing.T) {
|
|||
outs, err = d.Cmd("swarm", "update", "--autolock=false")
|
||||
assert.Assert(c, err == nil, "out: %v", outs)
|
||||
|
||||
checkSwarmLockedToUnlocked(c, d)
|
||||
checkSwarmLockedToUnlocked(ctx, c, d)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestExtraHosts(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// Create a service
|
||||
name := "top"
|
||||
|
@ -1429,7 +1468,7 @@ func (s *DockerSwarmSuite) TestExtraHosts(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// Make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// We need to get the container id.
|
||||
out, err = d.Cmd("ps", "-a", "-q", "--no-trunc")
|
||||
|
@ -1444,9 +1483,10 @@ func (s *DockerSwarmSuite) TestExtraHosts(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
d3 := s.AddDaemon(c, true, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, false)
|
||||
d3 := s.AddDaemon(ctx, c, true, false)
|
||||
|
||||
// Manager Addresses will always show Node 1's address
|
||||
expectedOutput := fmt.Sprintf("127.0.0.1:%d", d1.SwarmPort)
|
||||
|
@ -1465,7 +1505,8 @@ func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1480,7 +1521,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1491,7 +1532,8 @@ func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *testing.T) {
|
|||
// Test case for issue #27866, which did not allow NW name that is the prefix of a swarm NW ID.
|
||||
// e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1".
|
||||
func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
out, err := d.Cmd("network", "inspect", "-f", "{{.Id}}", "ingress")
|
||||
assert.NilError(c, err, "out: %v", out)
|
||||
ingressID := strings.TrimSpace(out)
|
||||
|
@ -1514,7 +1556,8 @@ func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *testing.T) {
|
|||
// Note that it is to ok have multiple networks with the same name if the operations are done
|
||||
// in parallel. (#18864)
|
||||
func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
drivers := []string{"bridge", "overlay"}
|
||||
for i, driver1 := range drivers {
|
||||
for _, driver2 := range drivers {
|
||||
|
@ -1533,14 +1576,15 @@ func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
id := strings.TrimSpace(out)
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// Total len = 4, with 2 dynamic ports and 2 non-dynamic ports
|
||||
// Dynamic ports are likely to be 30000 and 30001 but doesn't matter
|
||||
|
@ -1552,7 +1596,8 @@ func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("node", "ls")
|
||||
assert.NilError(c, err)
|
||||
|
@ -1563,7 +1608,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *testing.T) {
|
|||
|
||||
token := strings.TrimSpace(out)
|
||||
|
||||
d1 := s.AddDaemon(c, false, false)
|
||||
d1 := s.AddDaemon(ctx, c, false, false)
|
||||
|
||||
out, err = d1.Cmd("swarm", "join", "--availability=drain", "--token", token, d.SwarmListenAddr())
|
||||
assert.NilError(c, err)
|
||||
|
@ -1578,7 +1623,8 @@ func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmInitWithDrain(c *testing.T) {
|
||||
d := s.AddDaemon(c, false, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, false, false)
|
||||
|
||||
out, err := d.Cmd("swarm", "init", "--availability", "drain")
|
||||
assert.NilError(c, err, "out: %v", out)
|
||||
|
@ -1590,27 +1636,29 @@ func (s *DockerSwarmSuite) TestSwarmInitWithDrain(c *testing.T) {
|
|||
|
||||
func (s *DockerSwarmSuite) TestSwarmReadonlyRootfs(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, UserNamespaceROMount)
|
||||
ctx := testutil.GetContext(c)
|
||||
|
||||
d := s.AddDaemon(c, true, true)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top", "--read-only", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.ReadOnly }}", "top")
|
||||
assert.NilError(c, err, out)
|
||||
assert.Equal(c, strings.TrimSpace(out), "true")
|
||||
|
||||
containers := d.ActiveContainers(c)
|
||||
containers := d.ActiveContainers(testutil.GetContext(c), c)
|
||||
out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.HostConfig.ReadonlyRootfs}}", containers[0])
|
||||
assert.NilError(c, err, out)
|
||||
assert.Equal(c, strings.TrimSpace(out), "true")
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
name := "foo"
|
||||
options := types.NetworkCreate{
|
||||
|
@ -1621,7 +1669,7 @@ func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *testing.T) {
|
|||
cli := d.NewClientT(c)
|
||||
defer cli.Close()
|
||||
|
||||
n1, err := cli.NetworkCreate(context.Background(), name, options)
|
||||
n1, err := cli.NetworkCreate(testutil.GetContext(c), name, options)
|
||||
assert.NilError(c, err)
|
||||
|
||||
// Full ID always works
|
||||
|
@ -1634,7 +1682,7 @@ func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *testing.T) {
|
|||
assert.NilError(c, err, out)
|
||||
assert.Equal(c, strings.TrimSpace(out), n1.ID)
|
||||
|
||||
n2, err := cli.NetworkCreate(context.Background(), name, options)
|
||||
n2, err := cli.NetworkCreate(testutil.GetContext(c), name, options)
|
||||
assert.NilError(c, err)
|
||||
// Full ID always works
|
||||
out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID)
|
||||
|
@ -1655,7 +1703,7 @@ func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *testing.T) {
|
|||
// Duplicates with name but with different driver
|
||||
options.Driver = "overlay"
|
||||
|
||||
n2, err = cli.NetworkCreate(context.Background(), name, options)
|
||||
n2, err = cli.NetworkCreate(testutil.GetContext(c), name, options)
|
||||
assert.NilError(c, err)
|
||||
|
||||
// Full ID always works
|
||||
|
@ -1674,21 +1722,22 @@ func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmStopSignal(c *testing.T) {
|
||||
ctx := testutil.GetContext(c)
|
||||
testRequires(c, DaemonIsLinux, UserNamespaceROMount)
|
||||
|
||||
d := s.AddDaemon(c, true, true)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top")
|
||||
assert.NilError(c, err, out)
|
||||
assert.Equal(c, strings.TrimSpace(out), "SIGHUP")
|
||||
|
||||
containers := d.ActiveContainers(c)
|
||||
containers := d.ActiveContainers(testutil.GetContext(c), c)
|
||||
out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.StopSignal}}", containers[0])
|
||||
assert.NilError(c, err, out)
|
||||
assert.Equal(c, strings.TrimSpace(out), "SIGHUP")
|
||||
|
@ -1702,7 +1751,8 @@ func (s *DockerSwarmSuite) TestSwarmStopSignal(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top1", "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1713,7 +1763,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *testing.T) {
|
|||
assert.Assert(c, strings.TrimSpace(out) != "")
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("service", "ls")
|
||||
assert.NilError(c, err, out)
|
||||
|
@ -1732,7 +1782,8 @@ func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedDataPathAddr(c *testing.T) {
|
||||
d := s.AddDaemon(c, false, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, false, false)
|
||||
|
||||
out, err := d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0")
|
||||
assert.ErrorContains(c, err, "")
|
||||
|
@ -1743,7 +1794,8 @@ func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedDataPathAddr(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
out, err := d.Cmd("swarm", "join-token", "-q", "worker")
|
||||
assert.NilError(c, err)
|
||||
|
@ -1752,7 +1804,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *testing.T) {
|
|||
token := strings.TrimSpace(out)
|
||||
|
||||
// Verify that back to back join/leave does not cause panics
|
||||
d1 := s.AddDaemon(c, false, false)
|
||||
d1 := s.AddDaemon(ctx, c, false, false)
|
||||
for i := 0; i < 10; i++ {
|
||||
out, err = d1.Cmd("swarm", "join", "--token", token, d.SwarmListenAddr())
|
||||
assert.NilError(c, err)
|
||||
|
@ -1793,9 +1845,10 @@ func waitForEvent(c *testing.T, d *daemon.Daemon, since string, filter string, e
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsSource(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, false)
|
||||
|
||||
// create a network
|
||||
out, err := d1.Cmd("network", "create", "--attachable", "-d", "overlay", "foo")
|
||||
|
@ -1813,7 +1866,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsSource(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsScope(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// create a service
|
||||
out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top")
|
||||
|
@ -1833,7 +1887,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsScope(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsType(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// create a service
|
||||
out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top")
|
||||
|
@ -1855,7 +1910,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsType(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsService(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// create a service
|
||||
out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top")
|
||||
|
@ -1892,9 +1948,10 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsService(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsNode(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
s.AddDaemon(c, true, true)
|
||||
d3 := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
s.AddDaemon(ctx, c, true, true)
|
||||
d3 := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
d3ID := d3.NodeID()
|
||||
waitForEvent(c, d1, "0", "-f scope=swarm", "node create "+d3ID, defaultRetryCount)
|
||||
|
@ -1921,7 +1978,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsNode(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsNetwork(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
// create a network
|
||||
out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo")
|
||||
|
@ -1940,7 +1998,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsNetwork(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsSecret(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
testName := "test_secret"
|
||||
id := d.CreateSecret(c, swarm.SecretSpec{
|
||||
|
@ -1960,7 +2019,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsSecret(c *testing.T) {
|
|||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmClusterEventsConfig(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
ctx := testutil.GetContext(c)
|
||||
d := s.AddDaemon(ctx, c, true, true)
|
||||
|
||||
testName := "test_config"
|
||||
id := d.CreateConfig(c, swarm.ConfigSpec{
|
||||
|
|
|
@@ -10,18 +10,20 @@ import (

 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/poll"
 )

 func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)

 	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top")
 	assert.NilError(c, err, out)

 	// Make sure task stays pending before plugin is available
-	poll.WaitOn(c, pollCheck(c, d.CheckServiceTasksInStateWithError("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckServiceTasksInStateWithError(ctx, "top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

 	plugin := newVolumePlugin(c, "customvolumedriver")
 	defer plugin.Close()
|
@ -35,7 +37,7 @@ func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
|
|||
// this long delay.
|
||||
|
||||
// make sure task has been deployed.
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
out, err = d.Cmd("ps", "-q")
|
||||
assert.NilError(c, err)
|
||||
|
@ -58,8 +60,9 @@ func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
|
|||
// Test network plugin filter in swarm
|
||||
func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
|
||||
testRequires(c, IsAmd64)
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
ctx := testutil.GetContext(c)
|
||||
d1 := s.AddDaemon(ctx, c, true, true)
|
||||
d2 := s.AddDaemon(ctx, c, true, false)
|
||||
|
||||
// install plugin on d1 and d2
|
||||
pluginName := "aragunathan/global-net-plugin:latest"
|
||||
|
@ -81,7 +84,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
|
|||
assert.NilError(c, err)
|
||||
|
||||
// wait for tasks ready
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// remove service
|
||||
_, err = d1.Cmd("service", "rm", serviceName)
|
||||
|
@ -89,7 +92,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
|
|||
|
||||
// wait to ensure all containers have exited before removing the plugin. Else there's a
|
||||
// possibility of container exits erroring out due to plugins being unavailable.
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// disable plugin on worker
|
||||
_, err = d2.Cmd("plugin", "disable", "-f", pluginName)
|
||||
|
@ -102,5 +105,5 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
|
|||
_, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top")
|
||||
assert.NilError(c, err)
|
||||
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image: 1})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image: 1})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
package main

import (
"context"
"strings"
"testing"

@ -12,8 +13,8 @@ type DockerCLITopSuite struct {
ds *DockerSuite
}

func (s *DockerCLITopSuite) TearDownTest(c *testing.T) {
s.ds.TearDownTest(c)
func (s *DockerCLITopSuite) TearDownTest(ctx context.Context, c *testing.T) {
s.ds.TearDownTest(ctx, c)
}

func (s *DockerCLITopSuite) OnTimeout(c *testing.T) {
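Note: the integration-cli suites now receive a context in their lifecycle hooks. A sketch of the updated hook shape, assuming the suite runner passes the per-test context through as in the hunks above (the suite name below is hypothetical; only TearDownTest is shown, the other hooks follow the same pattern):

type DockerCLIExampleSuite struct {
	ds *DockerSuite
}

// TearDownTest now takes the context created for the test so that cleanup
// work is attached to the same trace as the test body.
func (s *DockerCLIExampleSuite) TearDownTest(ctx context.Context, c *testing.T) {
	s.ds.TearDownTest(ctx, c)
}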
@ -14,12 +14,13 @@ import (
"github.com/creack/pty"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"gotest.tools/v3/assert"
)

func (s *DockerCLIUpdateSuite) TearDownTest(c *testing.T) {
s.ds.TearDownTest(c)
func (s *DockerCLIUpdateSuite) TearDownTest(ctx context.Context, c *testing.T) {
s.ds.TearDownTest(ctx, c)
}

func (s *DockerCLIUpdateSuite) OnTimeout(c *testing.T) {

@ -180,7 +181,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateStats(c *testing.T) {
assert.NilError(c, waitRun(name))

getMemLimit := func(id string) uint64 {
resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id))
resp, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/stats?stream=false", id))
assert.NilError(c, err)
assert.Equal(c, resp.Header.Get("Content-Type"), "application/json")

@ -255,7 +256,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateWithNanoCPUs(c *testing.T) {

clt, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(c, err)
inspect, err := clt.ContainerInspect(context.Background(), "top")
inspect, err := clt.ContainerInspect(testutil.GetContext(c), "top")
assert.NilError(c, err)
assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(500000000))

@ -269,7 +270,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateWithNanoCPUs(c *testing.T) {
assert.Assert(c, strings.Contains(out, "Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set"))

dockerCmd(c, "update", "--cpus", "0.8", "top")
inspect, err = clt.ContainerInspect(context.Background(), "top")
inspect, err = clt.ContainerInspect(testutil.GetContext(c), "top")
assert.NilError(c, err)
assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(800000000))
@ -14,6 +14,7 @@ import (
"testing"

"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/testutil"
"gotest.tools/v3/assert"
)

@ -23,7 +24,8 @@ import (
func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *testing.T) {
testRequires(c, UserNamespaceInKernel)

s.d.StartWithBusybox(c, "--userns-remap", "default")
ctx := testutil.GetContext(c)
s.d.StartWithBusybox(ctx, c, "--userns-remap", "default")

tmpDir, err := os.MkdirTemp("", "userns")
assert.NilError(c, err)
@ -14,6 +14,7 @@ import (
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/testutil"
"gotest.tools/v3/assert"
"gotest.tools/v3/icmd"
)

@ -22,8 +23,8 @@ type DockerCLIVolumeSuite struct {
ds *DockerSuite
}

func (s *DockerCLIVolumeSuite) TearDownTest(c *testing.T) {
s.ds.TearDownTest(c)
func (s *DockerCLIVolumeSuite) TearDownTest(ctx context.Context, c *testing.T) {
s.ds.TearDownTest(ctx, c)
}

func (s *DockerCLIVolumeSuite) OnTimeout(c *testing.T) {

@ -589,7 +590,7 @@ func (s *DockerCLIVolumeSuite) TestDuplicateMountpointsForVolumesFromAndMounts(c
},
},
}
_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "app")
_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "app")

assert.NilError(c, err)
@ -9,6 +9,7 @@ import (
"testing"

"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"

@ -24,7 +25,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartHostConfig(c *testing.T)
config := map[string]interface{}{
"Binds": []string{"/aa:/bb"},
}
res, body, err := request.Post("/containers/"+name+"/start", request.JSONBody(config))
res, body, err := request.Post(testutil.GetContext(c), "/containers/"+name+"/start", request.JSONBody(config))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {

@ -50,7 +51,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartVolumeBinds(c *testing.T
"Volumes": map[string]struct{}{path: {}},
}

res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusCreated)

@ -58,7 +59,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartVolumeBinds(c *testing.T
config = map[string]interface{}{
"Binds": []string{bindPath + ":" + path},
}
res, _, err = request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
res, _, err = request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNoContent)

@ -77,7 +78,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *testin
"Volumes": map[string]struct{}{"/tmp": {}},
}

res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusCreated)

@ -87,7 +88,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *testin
config = map[string]interface{}{
"Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"},
}
res, body, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
res, body, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
assert.NilError(c, err)

buf, err := request.ReadBody(body)

@ -115,14 +116,14 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartVolumesFrom(c *testing.T
"Volumes": map[string]struct{}{volPath: {}},
}

res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusCreated)

config = map[string]interface{}{
"VolumesFrom": []string{volName},
}
res, _, err = request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
res, _, err = request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNoContent)

@ -145,7 +146,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainerBindNormalVolume(c *testing.
dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox")

bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}}
res, _, err := request.Post(formatV123StartAPIURL("/containers/two/start"), request.JSONBody(bindSpec))
res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/two/start"), request.JSONBody(bindSpec))
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNoContent)

@ -166,7 +167,7 @@ func (s *DockerAPISuite) TestDeprecatedStartWithTooLowMemoryLimit(c *testing.T)
"Memory": 524287
}`

res, body, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
res, body, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
assert.NilError(c, err)
b, err := request.ReadBody(body)
assert.NilError(c, err)

@ -189,7 +190,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainersStartWithoutLinksInHostConf
hc := inspectFieldJSON(c, name, "HostConfig")
config := `{"HostConfig":` + hc + `}`

res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNoContent)
b.Close()

@ -207,7 +208,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(
hc := inspectFieldJSON(c, name, "HostConfig")
config := `{"HostConfig":` + hc + `}`

res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNoContent)
b.Close()

@ -227,7 +228,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainersStartWithLinksInHostConfigI
hc := inspectFieldJSON(c, name, "HostConfig")
config := `{"HostConfig":` + hc + `}`

res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNoContent)
b.Close()

@ -241,7 +242,7 @@ func (s *DockerAPISuite) TestDeprecatedStartWithNilDNS(c *testing.T) {

config := `{"HostConfig": {"Dns": null}}`

res, b, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusNoContent)
b.Close()
@ -6,6 +6,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"gotest.tools/v3/assert"
)

@ -22,7 +23,7 @@ func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c
"NetworkMode": netName,
},
}
_, _, err := request.Post(formatV123StartAPIURL("/containers/"+conName+"/start"), request.JSONBody(config))
_, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+conName+"/start"), request.JSONBody(config))
assert.NilError(c, err)
assert.NilError(c, waitRun(conName))
networks := inspectField(c, conName, "NetworkSettings.Networks")
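Note: throughout the deprecated-API tests above, the request helpers now take a context as their first argument so the raw HTTP calls inherit the test's trace. A minimal sketch of that usage under the same assumption (the endpoint and test name here are illustrative only):

func (s *DockerAPISuite) TestDeprecatedStartSketch(c *testing.T) {
	config := map[string]interface{}{"Binds": []string{"/aa:/bb"}}
	// Passing testutil.GetContext(c) lets the request carry the test's
	// tracing context when it hits the daemon's API endpoint.
	res, _, err := request.Post(testutil.GetContext(c), "/containers/example/start", request.JSONBody(config))
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusNoContent)
}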
@ -1,6 +1,7 @@
package main

import (
"context"
"os/exec"
"strings"
"testing"

@ -30,31 +31,31 @@ func newDockerHubPullSuite() *DockerHubPullSuite {
}

// SetUpSuite starts the suite daemon.
func (s *DockerHubPullSuite) SetUpSuite(c *testing.T) {
func (s *DockerHubPullSuite) SetUpSuite(ctx context.Context, c *testing.T) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
s.d.Start(c)
}

// TearDownSuite stops the suite daemon.
func (s *DockerHubPullSuite) TearDownSuite(c *testing.T) {
func (s *DockerHubPullSuite) TearDownSuite(ctx context.Context, c *testing.T) {
if s.d != nil {
s.d.Stop(c)
}
}

// SetUpTest declares that all tests of this suite require network.
func (s *DockerHubPullSuite) SetUpTest(c *testing.T) {
func (s *DockerHubPullSuite) SetUpTest(ctx context.Context, c *testing.T) {
testRequires(c, Network)
}

// TearDownTest removes all images from the suite daemon.
func (s *DockerHubPullSuite) TearDownTest(c *testing.T) {
func (s *DockerHubPullSuite) TearDownTest(ctx context.Context, c *testing.T) {
out := s.Cmd(c, "images", "-aq")
images := strings.Split(out, "\n")
images = append([]string{"rmi", "-f"}, images...)
s.d.Cmd(images...)
s.ds.TearDownTest(c)
s.ds.TearDownTest(ctx, c)
}

// Cmd executes a command against the suite daemon and returns the combined
@ -18,6 +18,7 @@ import (
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/daemon"
"github.com/docker/docker/testutil"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
"gotest.tools/v3/icmd"

@ -249,7 +250,7 @@ func daemonTime(c *testing.T) time.Time {
assert.NilError(c, err)
defer apiClient.Close()

info, err := apiClient.Info(context.Background())
info, err := apiClient.Info(testutil.GetContext(c))
assert.NilError(c, err)

dt, err := time.Parse(time.RFC3339Nano, info.SystemTime)

@ -327,7 +328,7 @@ func getInspectBody(c *testing.T, version, id string) []byte {
apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion(version))
assert.NilError(c, err)
defer apiClient.Close()
_, body, err := apiClient.ContainerInspectWithRaw(context.Background(), id, false)
_, body, err := apiClient.ContainerInspectWithRaw(testutil.GetContext(c), id, false)
assert.NilError(c, err)
return body
}

@ -356,45 +357,71 @@ func minimalBaseImage() string {
return testEnv.PlatformDefaults.BaseImage
}

func getGoroutineNumber() (int, error) {
apiClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return 0, err
}
defer apiClient.Close()

info, err := apiClient.Info(context.Background())
func getGoroutineNumber(ctx context.Context, apiClient client.APIClient) (int, error) {
info, err := apiClient.Info(ctx)
if err != nil {
return 0, err
}
return info.NGoroutines, nil
}

func waitForGoroutines(expected int) error {
t := time.After(30 * time.Second)
for {
select {
case <-t:
n, err := getGoroutineNumber()
if err != nil {
return err
}
if n > expected {
return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n)
}
default:
n, err := getGoroutineNumber()
if err != nil {
return err
}
if n <= expected {
return nil
}
time.Sleep(200 * time.Millisecond)
func waitForStableGourtineCount(ctx context.Context, t poll.TestingT, apiClient client.APIClient) int {
var out int
poll.WaitOn(t, stableGoroutineCount(ctx, apiClient, &out), poll.WithTimeout(30*time.Second))
return out
}

func stableGoroutineCount(ctx context.Context, apiClient client.APIClient, count *int) poll.Check {
var (
numStable int
nRoutines int
)

return func(t poll.LogT) poll.Result {
n, err := getGoroutineNumber(ctx, apiClient)
if err != nil {
return poll.Error(err)
}

last := nRoutines

if nRoutines == n {
numStable++
} else {
numStable = 0
nRoutines = n
}

if numStable > 3 {
*count = n
return poll.Success()
}
return poll.Continue("goroutine count is not stable: last %d, current %d, stable iters: %d", last, n, numStable)
}
}

func checkGoroutineCount(ctx context.Context, apiClient client.APIClient, expected int) poll.Check {
first := true
return func(t poll.LogT) poll.Result {
n, err := getGoroutineNumber(ctx, apiClient)
if err != nil {
return poll.Error(err)
}
if n > expected {
if first {
t.Log("Waiting for goroutines to stabilize")
first = false
}
return poll.Continue("exepcted %d goroutines, got %d", expected, n)
}
return poll.Success()
}
}

func waitForGoroutines(ctx context.Context, t poll.TestingT, apiClient client.APIClient, expected int) {
poll.WaitOn(t, checkGoroutineCount(ctx, apiClient, expected), poll.WithDelay(500*time.Millisecond), poll.WithTimeout(30*time.Second))
}

// getErrorMessage returns the error message from an error API response
func getErrorMessage(c *testing.T, body []byte) string {
c.Helper()
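Note: the goroutine-leak helpers above now poll through an explicit client and context instead of constructing their own client per call. A sketch of how a test might use them, assuming an API client from testEnv is available (the test name is hypothetical; the helper names, including waitForStableGourtineCount, are spelled exactly as they appear in the change):

func TestNoGoroutineLeakSketch(t *testing.T) {
	ctx := testutil.GetContext(t)
	apiClient := testEnv.APIClient()

	// Record a stable baseline before exercising the daemon...
	before := waitForStableGourtineCount(ctx, t, apiClient)

	// ... run the workload under test here ...

	// ...then wait for the daemon to drop back to the baseline count.
	waitForGoroutines(ctx, t, apiClient, before)
}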
@ -1,6 +1,7 @@
package environment // import "github.com/docker/docker/integration-cli/environment"

import (
"context"
"os"
"os/exec"

@ -29,8 +30,8 @@ func (e *Execution) DockerBinary() string {
}

// New returns details about the testing environment
func New() (*Execution, error) {
env, err := environment.New()
func New(ctx context.Context) (*Execution, error) {
env, err := environment.New(ctx)
if err != nil {
return nil, err
}
@ -1,6 +1,7 @@
package main

import (
"context"
"fmt"
"os"
"os/exec"

@ -13,7 +14,7 @@ import (
"gotest.tools/v3/assert"
)

func ensureSyscallTest(c *testing.T) {
func ensureSyscallTest(ctx context.Context, c *testing.T) {
defer testEnv.ProtectImage(c, "syscall-test:latest")

// If the image already exists, there's nothing left to do.

@ -24,7 +25,7 @@ func ensureSyscallTest(c *testing.T) {
// if no match, must build in docker, which is significantly slower
// (slower mostly because of the vfs graphdriver)
if testEnv.DaemonInfo.OSType != runtime.GOOS {
ensureSyscallTestBuild(c)
ensureSyscallTestBuild(ctx, c)
return
}

@ -63,8 +64,8 @@ func ensureSyscallTest(c *testing.T) {
dockerCmd(c, buildArgs...)
}

func ensureSyscallTestBuild(c *testing.T) {
err := load.FrozenImagesLinux(testEnv.APIClient(), "debian:bullseye-slim")
func ensureSyscallTestBuild(ctx context.Context, c *testing.T) {
err := load.FrozenImagesLinux(ctx, testEnv.APIClient(), "debian:bullseye-slim")
assert.NilError(c, err)

var buildArgs []string

@ -76,7 +77,7 @@ func ensureSyscallTestBuild(c *testing.T) {
dockerCmd(c, buildArgs...)
}

func ensureNNPTest(c *testing.T) {
func ensureNNPTest(ctx context.Context, c *testing.T) {
defer testEnv.ProtectImage(c, "nnp-test:latest")

// If the image already exists, there's nothing left to do.

@ -87,7 +88,7 @@ func ensureNNPTest(c *testing.T) {
// if no match, must build in docker, which is significantly slower
// (slower mostly because of the vfs graphdriver)
if testEnv.DaemonInfo.OSType != runtime.GOOS {
ensureNNPTestBuild(c)
ensureNNPTestBuild(ctx, c)
return
}

@ -118,8 +119,8 @@ func ensureNNPTest(c *testing.T) {
dockerCmd(c, buildArgs...)
}

func ensureNNPTestBuild(c *testing.T) {
err := load.FrozenImagesLinux(testEnv.APIClient(), "debian:bullseye-slim")
func ensureNNPTestBuild(ctx context.Context, c *testing.T) {
err := load.FrozenImagesLinux(ctx, testEnv.APIClient(), "debian:bullseye-slim")
assert.NilError(c, err)

var buildArgs []string
@ -33,12 +33,12 @@ func MinimumAPIVersion(version string) func() bool {
}
}

func OnlyDefaultNetworks() bool {
func OnlyDefaultNetworks(ctx context.Context) bool {
apiClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return false
}
networks, err := apiClient.NetworkList(context.TODO(), types.NetworkListOptions{})
networks, err := apiClient.NetworkList(ctx, types.NetworkListOptions{})
if err != nil || len(networks) > 0 {
return false
}
@ -11,8 +11,11 @@ import (
"github.com/docker/docker/pkg/sysinfo"
)

// SysInfo stores information about which features a kernel supports.
var SysInfo *sysinfo.SysInfo
var sysInfo *sysinfo.SysInfo

func setupLocalInfo() {
sysInfo = sysinfo.New()
}

func cpuCfsPeriod() bool {
return testEnv.DaemonInfo.CPUCfsPeriod

@ -31,7 +34,7 @@ func oomControl() bool {
}

func pidsLimit() bool {
return SysInfo.PidsLimit
return sysInfo.PidsLimit
}

func memoryLimitSupport() bool {

@ -39,7 +42,7 @@ func memoryLimitSupport() bool {
}

func memoryReservationSupport() bool {
return SysInfo.MemoryReservation
return sysInfo.MemoryReservation
}

func swapMemorySupport() bool {

@ -47,11 +50,11 @@ func swapMemorySupport() bool {
}

func memorySwappinessSupport() bool {
return testEnv.IsLocalDaemon() && SysInfo.MemorySwappiness
return testEnv.IsLocalDaemon() && sysInfo.MemorySwappiness
}

func blkioWeight() bool {
return testEnv.IsLocalDaemon() && SysInfo.BlkioWeight
return testEnv.IsLocalDaemon() && sysInfo.BlkioWeight
}

func cgroupCpuset() bool {

@ -59,11 +62,11 @@ func cgroupCpuset() bool {
}

func seccompEnabled() bool {
return SysInfo.Seccomp
return sysInfo.Seccomp
}

func bridgeNfIptables() bool {
return !SysInfo.BridgeNFCallIPTablesDisabled
return !sysInfo.BridgeNFCallIPTablesDisabled
}

func unprivilegedUsernsClone() bool {

@ -79,9 +82,3 @@ func overlayFSSupported() bool {
}
return bytes.Contains(out, []byte("overlay\n"))
}

func init() {
if testEnv.IsLocalDaemon() {
SysInfo = sysinfo.New()
}
}
integration-cli/requirements_windows_test.go (new file, 4 lines)
@ -0,0 +1,4 @@
package main

func setupLocalInfo() {
}
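Note: the exported SysInfo variable and its init() are replaced by an unexported sysInfo that setupLocalInfo populates, with this Windows file providing a no-op so the requirement helpers compile on every platform. A sketch of the Linux side of that idea, assuming setupLocalInfo is invoked once from the test entry point before any requirement check runs (that call site is an assumption, not shown in these hunks):

// Linux build of the requirements helpers (sketch).
var sysInfo *sysinfo.SysInfo

func setupLocalInfo() {
	// Populate kernel-feature info on demand instead of in an init()
	// that ran even when the daemon under test was remote.
	sysInfo = sysinfo.New()
}

func pidsLimit() bool {
	return sysInfo.PidsLimit
}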
@ -10,6 +10,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/integration/internal/requirement"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/daemon"
"github.com/docker/docker/testutil/fakecontext"
"gotest.tools/v3/assert"

@ -38,16 +39,15 @@ func getCgroupFromBuildOutput(buildOutput io.Reader) (string, error) {

// Runs a docker build against a daemon with the given cgroup namespace default value.
// Returns the container cgroup and daemon cgroup.
func testBuildWithCgroupNs(t *testing.T, daemonNsMode string) (string, string) {
func testBuildWithCgroupNs(ctx context.Context, t *testing.T, daemonNsMode string) (string, string) {
d := daemon.New(t, daemon.WithDefaultCgroupNamespaceMode(daemonNsMode))
d.StartWithBusybox(t)
d.StartWithBusybox(ctx, t)
defer d.Stop(t)

dockerfile := `
FROM busybox
RUN readlink /proc/self/ns/cgroup
`
ctx := context.Background()
source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile))
defer source.Close()

@ -74,9 +74,11 @@ func TestCgroupNamespacesBuild(t *testing.T) {
skip.If(t, testEnv.IsRemoteDaemon())
skip.If(t, !requirement.CgroupNamespacesEnabled())

ctx := testutil.StartSpan(baseContext, t)

// When the daemon defaults to private cgroup namespaces, containers launched
// should be in their own private cgroup namespace by default
containerCgroup, daemonCgroup := testBuildWithCgroupNs(t, "private")
containerCgroup, daemonCgroup := testBuildWithCgroupNs(ctx, t, "private")
assert.Assert(t, daemonCgroup != containerCgroup)
}

@ -85,8 +87,10 @@ func TestCgroupNamespacesBuildDaemonHostMode(t *testing.T) {
skip.If(t, testEnv.IsRemoteDaemon())
skip.If(t, !requirement.CgroupNamespacesEnabled())

ctx := testutil.StartSpan(baseContext, t)

// When the daemon defaults to host cgroup namespaces, containers
// launched should not be inside their own cgroup namespaces
containerCgroup, daemonCgroup := testBuildWithCgroupNs(t, "host")
containerCgroup, daemonCgroup := testBuildWithCgroupNs(ctx, t, "host")
assert.Assert(t, daemonCgroup == containerCgroup)
}
@ -11,6 +11,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
dclient "github.com/docker/docker/client"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/fakecontext"
"github.com/docker/docker/testutil/request"
"github.com/moby/buildkit/session"

@ -26,6 +27,8 @@ func TestBuildWithSession(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.39"), "experimental in older versions")

ctx := testutil.StartSpan(baseContext, t)

client := testEnv.APIClient()

dockerfile := `

@ -39,7 +42,7 @@ func TestBuildWithSession(t *testing.T) {
)
defer fctx.Close()

out := testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile)
out := testBuildWithSession(ctx, t, client, client.DaemonHost(), fctx.Dir, dockerfile)
assert.Check(t, is.Contains(out, "some content"))

fctx.Add("second", "contentcontent")

@ -49,25 +52,25 @@ func TestBuildWithSession(t *testing.T) {
RUN cat /second
`

out = testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile)
out = testBuildWithSession(ctx, t, client, client.DaemonHost(), fctx.Dir, dockerfile)
assert.Check(t, is.Equal(strings.Count(out, "Using cache"), 2))
assert.Check(t, is.Contains(out, "contentcontent"))

du, err := client.DiskUsage(context.TODO(), types.DiskUsageOptions{})
du, err := client.DiskUsage(ctx, types.DiskUsageOptions{})
assert.Check(t, err)
assert.Check(t, du.BuilderSize > 10)

out = testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile)
out = testBuildWithSession(ctx, t, client, client.DaemonHost(), fctx.Dir, dockerfile)
assert.Check(t, is.Equal(strings.Count(out, "Using cache"), 4))

du2, err := client.DiskUsage(context.TODO(), types.DiskUsageOptions{})
du2, err := client.DiskUsage(ctx, types.DiskUsageOptions{})
assert.Check(t, err)
assert.Check(t, is.Equal(du.BuilderSize, du2.BuilderSize))

// rebuild with regular tar, confirm cache still applies
fctx.Add("Dockerfile", dockerfile)
// FIXME(vdemeester) use sock here
res, body, err := request.Do(
res, body, err := request.Do(ctx,
"/build",
request.Host(client.DaemonHost()),
request.Method(http.MethodPost),

@ -81,17 +84,16 @@ func TestBuildWithSession(t *testing.T) {
assert.Check(t, is.Contains(string(outBytes), "Successfully built"))
assert.Check(t, is.Equal(strings.Count(string(outBytes), "Using cache"), 4))

_, err = client.BuildCachePrune(context.TODO(), types.BuildCachePruneOptions{All: true})
_, err = client.BuildCachePrune(ctx, types.BuildCachePruneOptions{All: true})
assert.Check(t, err)

du, err = client.DiskUsage(context.TODO(), types.DiskUsageOptions{})
du, err = client.DiskUsage(ctx, types.DiskUsageOptions{})
assert.Check(t, err)
assert.Check(t, is.Equal(du.BuilderSize, int64(0)))
}

//nolint:unused // false positive: linter detects this as "unused"
func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost string, dir, dockerfile string) (outStr string) {
ctx := context.Background()
func testBuildWithSession(ctx context.Context, t *testing.T, client dclient.APIClient, daemonHost string, dir, dockerfile string) (outStr string) {
sess, err := session.NewSession(ctx, "foo1", "foo")
assert.Check(t, err)

@ -110,7 +112,7 @@ func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost str

g.Go(func() error {
// FIXME use sock here
res, body, err := request.Do(
res, body, err := request.Do(ctx,
"/build?remote=client-session&session="+sess.ID(),
request.Host(daemonHost),
request.Method(http.MethodPost),
@ -2,7 +2,6 @@ package build

import (
"bytes"
"context"
"io"
"strings"
"testing"

@ -11,6 +10,7 @@ import (
dclient "github.com/docker/docker/client"
"github.com/docker/docker/integration/internal/container"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/daemon"
"github.com/docker/docker/testutil/fakecontext"
"gotest.tools/v3/assert"

@ -21,12 +21,14 @@ import (
func TestBuildSquashParent(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")

ctx := testutil.StartSpan(baseContext, t)

var client dclient.APIClient
if !testEnv.DaemonInfo.ExperimentalBuild {
skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")

d := daemon.New(t, daemon.WithExperimental())
d.StartWithBusybox(t)
d.StartWithBusybox(ctx, t)
defer d.Stop(t)
client = d.NewClientT(t)
} else {

@ -43,7 +45,6 @@ func TestBuildSquashParent(t *testing.T) {
`

// build and get the ID that we can use later for history comparison
ctx := context.Background()
source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile))
defer source.Close()
@ -3,7 +3,6 @@ package build // import "github.com/docker/docker/integration/build"
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"io"
"os"

@ -15,6 +14,7 @@ import (
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/fakecontext"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"

@ -22,7 +22,7 @@ import (
)

func TestBuildWithRemoveAndForceRemove(t *testing.T) {
t.Cleanup(setupTest(t))
ctx := setupTest(t)

cases := []struct {
name string

@ -88,11 +88,11 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) {
}

client := testEnv.APIClient()
ctx := context.Background()
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
t.Parallel()
ctx := testutil.StartSpan(ctx, t)
dockerfile := []byte(c.dockerfile)

buff := bytes.NewBuffer(nil)

@ -143,7 +143,7 @@ func buildContainerIdsFilter(buildOutput io.Reader) (filters.Args, error) {
// GUID path (\\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\newdir\hello}),
// which currently isn't supported by Golang.
func TestBuildMultiStageCopy(t *testing.T) {
ctx := context.Background()
ctx := testutil.StartSpan(baseContext, t)

dockerfile, err := os.ReadFile("testdata/Dockerfile." + t.Name())
assert.NilError(t, err)

@ -201,7 +201,7 @@ func TestBuildMultiStageParentConfig(t *testing.T) {
FROM stage0
WORKDIR sub2
`
ctx := context.Background()
ctx := testutil.StartSpan(baseContext, t)
source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile))
defer source.Close()

@ -249,7 +249,7 @@ func TestBuildLabelWithTargets(t *testing.T) {
LABEL label-b=inline-b
`

ctx := context.Background()
ctx := testutil.StartSpan(baseContext, t)
source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile))
defer source.Close()

@ -314,7 +314,7 @@ func TestBuildWithEmptyLayers(t *testing.T) {
COPY 2/ /target/
COPY 3/ /target/
`
ctx := context.Background()
ctx := testutil.StartSpan(baseContext, t)
source := fakecontext.New(t, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFile("1/a", "asdf"),

@ -340,7 +340,8 @@ func TestBuildWithEmptyLayers(t *testing.T) {
// #35652
func TestBuildMultiStageOnBuild(t *testing.T) {
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.33"), "broken in earlier versions")
defer setupTest(t)()
ctx := setupTest(t)

// test both metadata and layer based commands as they may be implemented differently
dockerfile := `FROM busybox AS stage1
ONBUILD RUN echo 'foo' >somefile

@ -353,7 +354,6 @@ RUN cat somefile
FROM stage1
RUN cat somefile`

ctx := context.Background()
source := fakecontext.New(t, "",
fakecontext.WithDockerfile(dockerfile))
defer source.Close()

@ -378,7 +378,7 @@ RUN cat somefile`
assert.NilError(t, err)
assert.Assert(t, is.Equal(3, len(imageIDs)))

image, _, err := apiclient.ImageInspectWithRaw(context.Background(), imageIDs[2])
image, _, err := apiclient.ImageInspectWithRaw(ctx, imageIDs[2])
assert.NilError(t, err)
assert.Check(t, is.Contains(image.Config.Env, "bar=baz"))
}

@ -388,8 +388,7 @@ func TestBuildUncleanTarFilenames(t *testing.T) {
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.37"), "broken in earlier versions")
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")

ctx := context.TODO()
defer setupTest(t)()
ctx := setupTest(t)

dockerfile := `FROM scratch
COPY foo /

@ -447,8 +446,7 @@ COPY bar /`
// #35641
func TestBuildMultiStageLayerLeak(t *testing.T) {
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.37"), "broken in earlier versions")
ctx := context.TODO()
defer setupTest(t)()
ctx := setupTest(t)

// all commands need to match until COPY
dockerfile := `FROM busybox

@ -487,8 +485,7 @@ RUN [ ! -f foo ]
// #37581
// #40444 (Windows Containers only)
func TestBuildWithHugeFile(t *testing.T) {
ctx := context.TODO()
defer setupTest(t)()
ctx := setupTest(t)

dockerfile := `FROM busybox
`

@ -527,8 +524,7 @@ RUN for g in $(seq 0 8); do dd if=/dev/urandom of=rnd bs=1K count=1 seek=$((1024
func TestBuildWCOWSandboxSize(t *testing.T) {
t.Skip("FLAKY_TEST that needs to be fixed; see https://github.com/moby/moby/issues/42743")
skip.If(t, testEnv.DaemonInfo.OSType != "windows", "only Windows has sandbox size control")
ctx := context.TODO()
defer setupTest(t)()
ctx := setupTest(t)

dockerfile := `FROM busybox AS intermediate
WORKDIR C:\\stuff

@ -576,8 +572,7 @@ COPY --from=intermediate C:\\stuff C:\\stuff

func TestBuildWithEmptyDockerfile(t *testing.T) {
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "broken in earlier versions")
ctx := context.TODO()
t.Cleanup(setupTest(t))
ctx := setupTest(t)

tests := []struct {
name string

@ -634,7 +629,7 @@ func TestBuildPreserveOwnership(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "broken in earlier versions")

ctx := context.Background()
ctx := testutil.StartSpan(baseContext, t)

dockerfile, err := os.ReadFile("testdata/Dockerfile." + t.Name())
assert.NilError(t, err)

@ -646,6 +641,8 @@ func TestBuildPreserveOwnership(t *testing.T) {

for _, target := range []string{"copy_from", "copy_from_chowned"} {
t.Run(target, func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)

resp, err := apiclient.ImageBuild(
ctx,
source.AsTarReader(t),

@ -671,8 +668,7 @@ func TestBuildPreserveOwnership(t *testing.T) {
func TestBuildPlatformInvalid(t *testing.T) {
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "experimental in older versions")

ctx := context.Background()
defer setupTest(t)()
ctx := setupTest(t)

dockerfile := `FROM busybox
`
@ -3,7 +3,6 @@ package build // import "github.com/docker/docker/integration/build"
import (
"bufio"
"bytes"
"context"
"io"
"os"
"strings"

@ -13,6 +12,7 @@ import (
"github.com/docker/docker/integration/internal/container"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/daemon"
"github.com/docker/docker/testutil/fakecontext"
"github.com/docker/docker/testutil/fixtures/load"

@ -30,6 +30,8 @@ func TestBuildUserNamespaceValidateCapabilitiesAreV2(t *testing.T) {
skip.If(t, !testEnv.IsUserNamespaceInKernel())
skip.If(t, testEnv.IsRootless())

ctx := testutil.StartSpan(baseContext, t)

const imageTag = "capabilities:1.0"

tmp, err := os.MkdirTemp("", "integration-")

@ -38,11 +40,10 @@ func TestBuildUserNamespaceValidateCapabilitiesAreV2(t *testing.T) {

dUserRemap := daemon.New(t)
dUserRemap.Start(t, "--userns-remap", "default")
ctx := context.Background()
clientUserRemap := dUserRemap.NewClientT(t)
defer clientUserRemap.Close()

err = load.FrozenImagesLinux(clientUserRemap, "debian:bullseye-slim")
err = load.FrozenImagesLinux(ctx, clientUserRemap, "debian:bullseye-slim")
assert.NilError(t, err)

dUserRemapRunning := true
@ -1,33 +1,56 @@
package build // import "github.com/docker/docker/integration/build"

import (
"fmt"
"context"
"os"
"testing"

"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/environment"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/codes"
)

var testEnv *environment.Execution
var (
testEnv *environment.Execution
baseContext context.Context
)

func TestMain(m *testing.M) {
shutdown := testutil.ConfigureTracing()

ctx, span := otel.Tracer("").Start(context.Background(), "integration/build/TestMain")
baseContext = ctx

var err error
testEnv, err = environment.New()
testEnv, err = environment.New(ctx)
if err != nil {
fmt.Println(err)
os.Exit(1)
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}
err = environment.EnsureFrozenImagesLinux(testEnv)
err = environment.EnsureFrozenImagesLinux(ctx, testEnv)
if err != nil {
fmt.Println(err)
os.Exit(1)
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}

testEnv.Print()
os.Exit(m.Run())
code := m.Run()
if code != 0 {
span.SetStatus(codes.Error, "m.Run() exited with non-zero code")
}
span.End()
shutdown(ctx)
os.Exit(code)
}

func setupTest(t *testing.T) func() {
environment.ProtectAll(t, testEnv)
return func() { testEnv.Clean(t) }
func setupTest(t *testing.T) context.Context {
ctx := testutil.StartSpan(baseContext, t)
environment.ProtectAll(ctx, t, testEnv)
t.Cleanup(func() { testEnv.Clean(ctx, t) })
return ctx
}
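Note: each integration package's TestMain now follows the same shape: configure the tracing exporter, open a root span for the package, and hand every test a child span via setupTest. A condensed sketch of that pattern as it appears in these hunks (the span name below is a placeholder, and error handling is trimmed for brevity):

func TestMain(m *testing.M) {
	shutdown := testutil.ConfigureTracing()
	ctx, span := otel.Tracer("").Start(context.Background(), "integration/example/TestMain")
	baseContext = ctx

	code := m.Run() // each test calls setupTest(t) to get a child span of ctx
	if code != 0 {
		span.SetStatus(codes.Error, "m.Run() exited with non-zero code")
	}
	span.End()
	shutdown(ctx)
	os.Exit(code)
}

func setupTest(t *testing.T) context.Context {
	ctx := testutil.StartSpan(baseContext, t)
	environment.ProtectAll(ctx, t, testEnv)
	t.Cleanup(func() { testEnv.Clean(ctx, t) })
	return ctx
}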
@ -2,7 +2,6 @@ package capabilities

import (
"bytes"
"context"
"io"
"strings"
"testing"

@ -11,6 +10,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/integration/internal/container"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/fakecontext"

"gotest.tools/v3/assert"

@ -18,7 +18,7 @@ import (
)

func TestNoNewPrivileges(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)

withFileCapability := `
FROM debian:bullseye-slim

@ -35,7 +35,6 @@ func TestNoNewPrivileges(t *testing.T) {
client := testEnv.APIClient()

// Build image
ctx := context.TODO()
resp, err := client.ImageBuild(ctx,
source.AsTarReader(t),
types.ImageBuildOptions{

@ -72,6 +71,8 @@ func TestNoNewPrivileges(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)

// Run the container with the image
opts := append(tc.opts,
container.WithImage(imageTag),
@ -1,33 +1,56 @@
package capabilities

import (
"fmt"
"context"
"os"
"testing"

"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/environment"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/codes"
)

var testEnv *environment.Execution
var (
testEnv *environment.Execution
baseContext context.Context
)

func TestMain(m *testing.M) {
shutdown := testutil.ConfigureTracing()

ctx, span := otel.Tracer("").Start(context.Background(), "integration/capabilities/TestMain")
baseContext = ctx

var err error
testEnv, err = environment.New()
testEnv, err = environment.New(ctx)
if err != nil {
fmt.Println(err)
os.Exit(1)
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}
err = environment.EnsureFrozenImagesLinux(testEnv)
err = environment.EnsureFrozenImagesLinux(ctx, testEnv)
if err != nil {
fmt.Println(err)
os.Exit(1)
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}

testEnv.Print()
os.Exit(m.Run())
code := m.Run()
if code != 0 {
span.SetStatus(codes.Error, "m.Run() exited with non-zero code")
}
span.End()
shutdown(ctx)
os.Exit(code)
}

func setupTest(t *testing.T) func() {
environment.ProtectAll(t, testEnv)
return func() { testEnv.Clean(t) }
func setupTest(t *testing.T) context.Context {
ctx := testutil.StartSpan(baseContext, t)
environment.ProtectAll(ctx, t, testEnv)
t.Cleanup(func() { testEnv.Clean(ctx, t) })
return ctx
}
@ -15,6 +15,7 @@ import (
"github.com/docker/docker/errdefs"
"github.com/docker/docker/integration/internal/swarm"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/testutil"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/poll"

@ -24,14 +25,13 @@ import (
func TestConfigInspect(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")

defer setupTest(t)()
d := swarm.NewSwarm(t, testEnv)
ctx := setupTest(t)

d := swarm.NewSwarm(ctx, t, testEnv)
defer d.Stop(t)
c := d.NewClientT(t)
defer c.Close()

ctx := context.Background()

testName := t.Name()
configID := createConfig(ctx, t, c, testName, []byte("TESTINGDATA"), nil)

@ -48,12 +48,12 @@ func TestConfigInspect(t *testing.T) {
func TestConfigList(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")

defer setupTest(t)()
d := swarm.NewSwarm(t, testEnv)
ctx := setupTest(t)

d := swarm.NewSwarm(ctx, t, testEnv)
defer d.Stop(t)
c := d.NewClientT(t)
defer c.Close()
ctx := context.Background()

// This test case is ported from the original TestConfigsEmptyList
configs, err := c.ConfigList(ctx, types.ConfigListOptions{})

@ -76,39 +76,46 @@ func TestConfigList(t *testing.T) {
assert.Check(t, is.DeepEqual(configNamesFromList(entries), testNames))

testCases := []struct {
desc string
filters filters.Args
expected []string
}{
// test filter by name `config ls --filter name=xxx`
{
desc: "test filter by name",
filters: filters.NewArgs(filters.Arg("name", testName0)),
expected: []string{testName0},
},
// test filter by id `config ls --filter id=xxx`
{
desc: "test filter by id",
filters: filters.NewArgs(filters.Arg("id", config1ID)),
expected: []string{testName1},
},
// test filter by label `config ls --filter label=xxx`
{
desc: "test filter by label key only",
filters: filters.NewArgs(filters.Arg("label", "type")),
expected: testNames,
},
{
desc: "test filter by label key=value " + testName0,
filters: filters.NewArgs(filters.Arg("label", "type=test")),
expected: []string{testName0},
},
{
desc: "test filter by label key=value " + testName1,
filters: filters.NewArgs(filters.Arg("label", "type=production")),
expected: []string{testName1},
},
}
for _, tc := range testCases {
entries, err = c.ConfigList(ctx, types.ConfigListOptions{
Filters: tc.filters,
tc := tc
t.Run(tc.desc, func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
entries, err = c.ConfigList(ctx, types.ConfigListOptions{
Filters: tc.filters,
})
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(configNamesFromList(entries), tc.expected))
})
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(configNamesFromList(entries), tc.expected))
}
}

@ -128,12 +135,11 @@ func createConfig(ctx context.Context, t *testing.T, client client.APIClient, na
func TestConfigsCreateAndDelete(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")

defer setupTest(t)()
d := swarm.NewSwarm(t, testEnv)
ctx := setupTest(t)
d := swarm.NewSwarm(ctx, t, testEnv)
defer d.Stop(t)
c := d.NewClientT(t)
defer c.Close()
ctx := context.Background()

testName := "test_config-" + t.Name()
configID := createConfig(ctx, t, c, testName, []byte("TESTINGDATA"), nil)

@ -166,12 +172,12 @@ func TestConfigsCreateAndDelete(t *testing.T) {
func TestConfigsUpdate(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")

defer setupTest(t)()
d := swarm.NewSwarm(t, testEnv)
ctx := setupTest(t)

d := swarm.NewSwarm(ctx, t, testEnv)
defer d.Stop(t)
c := d.NewClientT(t)
defer c.Close()
ctx := context.Background()

testName := "test_config-" + t.Name()
configID := createConfig(ctx, t, c, testName, []byte("TESTINGDATA"), nil)

@ -217,11 +223,12 @@ func TestConfigsUpdate(t *testing.T) {

func TestTemplatedConfig(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
d := swarm.NewSwarm(t, testEnv)
ctx := testutil.StartSpan(baseContext, t)

d := swarm.NewSwarm(ctx, t, testEnv)
defer d.Stop(t)
c := d.NewClientT(t)
defer c.Close()
ctx := context.Background()

referencedSecretName := "referencedsecret-" + t.Name()
referencedSecretSpec := swarmtypes.SecretSpec{

@ -261,7 +268,7 @@ func TestTemplatedConfig(t *testing.T) {
assert.Check(t, err)

serviceName := "svc_" + t.Name()
serviceID := swarm.CreateService(t, d,
serviceID := swarm.CreateService(ctx, t, d,
swarm.ServiceWithConfig(
&swarmtypes.ConfigReference{
File: &swarmtypes.ConfigReferenceFileTarget{

@ -301,12 +308,12 @@ func TestTemplatedConfig(t *testing.T) {
swarm.ServiceWithName(serviceName),
)

poll.WaitOn(t, swarm.RunningTasksCount(c, serviceID, 1), swarm.ServicePoll, poll.WithTimeout(1*time.Minute))
poll.WaitOn(t, swarm.RunningTasksCount(ctx, c, serviceID, 1), swarm.ServicePoll, poll.WithTimeout(1*time.Minute))

tasks := swarm.GetRunningTasks(t, c, serviceID)
tasks := swarm.GetRunningTasks(ctx, t, c, serviceID)
assert.Assert(t, len(tasks) > 0, "no running tasks found for service %s", serviceID)

attach := swarm.ExecTask(t, d, tasks[0], types.ExecConfig{
attach := swarm.ExecTask(ctx, t, d, tasks[0], types.ExecConfig{
Cmd: []string{"/bin/cat", "/templated_config"},
AttachStdout: true,
AttachStderr: true,

@ -317,7 +324,7 @@ func TestTemplatedConfig(t *testing.T) {
"this is a config\n"
assertAttachedStream(t, attach, expect)

attach = swarm.ExecTask(t, d, tasks[0], types.ExecConfig{
attach = swarm.ExecTask(ctx, t, d, tasks[0], types.ExecConfig{
Cmd: []string{"mount"},
AttachStdout: true,
AttachStderr: true,

@ -329,14 +336,13 @@ func TestTemplatedConfig(t *testing.T) {
func TestConfigCreateResolve(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType != "linux")

defer setupTest(t)()
d := swarm.NewSwarm(t, testEnv)
ctx := setupTest(t)

d := swarm.NewSwarm(ctx, t, testEnv)
defer d.Stop(t)
c := d.NewClientT(t)
defer c.Close()

ctx := context.Background()

configName := "test_config_" + t.Name()
configID := createConfig(ctx, t, c, configName, []byte("foo"), nil)
@@ -1,33 +1,55 @@
package config // import "github.com/docker/docker/integration/config"
import (
"fmt"
"context"
"os"
"testing"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/environment"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/codes"
)
var testEnv *environment.Execution
var (
testEnv *environment.Execution
baseContext context.Context
)
func TestMain(m *testing.M) {
shutdown := testutil.ConfigureTracing()
ctx, span := otel.Tracer("").Start(context.Background(), "integration/config/TestMain")
baseContext = ctx
var err error
testEnv, err = environment.New()
testEnv, err = environment.New(ctx)
if err != nil {
fmt.Println(err)
os.Exit(1)
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}
err = environment.EnsureFrozenImagesLinux(testEnv)
err = environment.EnsureFrozenImagesLinux(ctx, testEnv)
if err != nil {
fmt.Println(err)
os.Exit(1)
span.SetStatus(codes.Error, err.Error())
span.End()
shutdown(ctx)
panic(err)
}
testEnv.Print()
code := m.Run()
if code != 0 {
span.SetStatus(codes.Error, "m.Run() exited with non-zero code")
}
span.End()
shutdown(ctx)
os.Exit(m.Run())
}
func setupTest(t *testing.T) func() {
environment.ProtectAll(t, testEnv)
return func() { testEnv.Clean(t) }
func setupTest(t *testing.T) context.Context {
ctx := testutil.StartSpan(baseContext, t)
environment.ProtectAll(ctx, t, testEnv)
t.Cleanup(func() { testEnv.Clean(ctx, t) })
return ctx
}
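The new TestMain wires tracing around the whole package run: testutil.ConfigureTracing() installs an exporter, a root span named integration/config/TestMain is opened, and its context is stored in baseContext so per-test spans can hang off it. The following is a minimal, self-contained sketch of that kind of wiring with a plain OTLP/HTTP exporter, assuming the standard opentelemetry-go SDK packages; the real ConfigureTracing helper may differ in detail:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// configureTracing (sketch): export spans over OTLP/HTTP and return a
// shutdown function that flushes any buffered spans.
func configureTracing(ctx context.Context) func(context.Context) {
	exp, err := otlptracehttp.New(ctx) // endpoint taken from OTEL_EXPORTER_OTLP_* env vars
	if err != nil {
		log.Fatal(err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	otel.SetTracerProvider(tp)
	return func(ctx context.Context) {
		if err := tp.Shutdown(ctx); err != nil {
			log.Print(err)
		}
	}
}

func main() {
	ctx := context.Background()
	shutdown := configureTracing(ctx)
	defer shutdown(ctx)

	_, span := otel.Tracer("").Start(ctx, "example/TestMain")
	// ... the test binary's work would run here ...
	span.End()
}

The OTLP exporter reads its endpoint and protocol from the standard OTEL_EXPORTER_OTLP_* environment variables, and the SDK derives the service name from OTEL_SERVICE_NAME, so no code changes are needed to point the spans at a different collector.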
@@ -1,19 +1,19 @@
package container // import "github.com/docker/docker/integration/container"
import (
"context"
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/testutil"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestAttach(t *testing.T) {
t.Cleanup(setupTest(t))
apiClient := testEnv.APIClient()
ctx := setupTest(t)
client := testEnv.APIClient()
tests := []struct {
doc string

@@ -34,7 +34,9 @@ func TestAttach(t *testing.T) {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
t.Parallel()
resp, err := apiClient.ContainerCreate(context.Background(),
ctx := testutil.StartSpan(ctx, t)
resp, err := client.ContainerCreate(ctx,
&container.Config{
Image: "busybox",
Cmd: []string{"echo", "hello"},

@@ -46,7 +48,7 @@ func TestAttach(t *testing.T) {
"",
)
assert.NilError(t, err)
attach, err := apiClient.ContainerAttach(context.Background(), resp.ID, types.ContainerAttachOptions{
attach, err := client.ContainerAttach(ctx, resp.ID, types.ContainerAttachOptions{
Stdout: true,
Stderr: true,
})
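TestAttach shows the subtest pattern used throughout this package: the parent test gets its context from setupTest, and each t.Run re-derives ctx with testutil.StartSpan(ctx, t) so parallel subtests get their own child spans. A standalone sketch of that shape, written against the OpenTelemetry API directly (the names here are illustrative, not the moby helpers):

package example

import (
	"context"
	"testing"

	"go.opentelemetry.io/otel"
)

func TestParallelSubtestSpans(t *testing.T) {
	ctx, span := otel.Tracer("").Start(context.Background(), t.Name())
	// t.Cleanup rather than defer: parallel subtests finish after this
	// function returns, and the parent span must outlive them.
	t.Cleanup(func() { span.End() })

	for _, doc := range []string{"stdout only", "stderr only"} {
		doc := doc
		t.Run(doc, func(t *testing.T) {
			t.Parallel()
			_, sub := otel.Tracer("").Start(ctx, t.Name()) // child of the test's span
			defer sub.End()
			// subtest body goes here
		})
	}
}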
@@ -2,7 +2,6 @@ package container // import "github.com/docker/docker/integration/container"
import (
"bytes"
"context"
"encoding/json"
"io"
"os"

@@ -14,6 +13,7 @@ import (
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/integration/internal/container"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/daemon"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"

@@ -24,15 +24,16 @@ func TestCreateWithCDIDevices(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType != "linux", "CDI devices are only supported on Linux")
skip.If(t, testEnv.IsRemoteDaemon, "cannot run cdi tests with a remote daemon")
ctx := testutil.StartSpan(baseContext, t)
cwd, err := os.Getwd()
assert.NilError(t, err)
d := daemon.New(t, daemon.WithExperimental())
d.StartWithBusybox(t, "--cdi-spec-dir="+filepath.Join(cwd, "testdata", "cdi"))
d.StartWithBusybox(ctx, t, "--cdi-spec-dir="+filepath.Join(cwd, "testdata", "cdi"))
defer d.Stop(t)
apiClient := d.NewClientT(t)
ctx := context.Background()
id := container.Run(ctx, t, apiClient,
container.WithCmd("/bin/sh", "-c", "env"),
container.WithCDIDevices("vendor1.com/device=foo"),
@@ -21,9 +21,8 @@ import (
)
//nolint:unused // false positive: linter detects this as "unused"
func containerExec(t *testing.T, client client.APIClient, cID string, cmd []string) {
func containerExec(ctx context.Context, t *testing.T, client client.APIClient, cID string, cmd []string) {
t.Logf("Exec: %s", cmd)
ctx := context.Background()
r, err := container.Exec(ctx, client, cID, cmd)
assert.NilError(t, err)
t.Log(r.Combined())

@@ -35,13 +34,12 @@ func TestCheckpoint(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild)
defer setupTest(t)()
ctx := setupTest(t)
stdoutStderr, err := exec.Command("criu", "check").CombinedOutput()
t.Logf("%s", stdoutStderr)
assert.NilError(t, err)
ctx := context.Background()
apiClient := request.NewAPIClient(t)
t.Log("Start a container")

@@ -101,7 +99,7 @@ func TestCheckpoint(t *testing.T) {
assert.Equal(t, checkpoints[0].Name, "test")
// Create a test file on a tmpfs mount.
containerExec(t, apiClient, cID, []string{"touch", "/tmp/test-file"})
containerExec(ctx, t, apiClient, cID, []string{"touch", "/tmp/test-file"})
// Do a second checkpoint
t.Log("Do a checkpoint and stop the container")

@@ -144,7 +142,7 @@ func TestCheckpoint(t *testing.T) {
assert.Check(t, is.Equal(true, inspect.State.Running))
// Check that the test file has been restored.
containerExec(t, apiClient, cID, []string{"test", "-f", "/tmp/test-file"})
containerExec(ctx, t, apiClient, cID, []string{"test", "-f", "/tmp/test-file"})
for _, id := range []string{"test", "test2"} {
err = apiClient.CheckpointDelete(ctx, cID, checkpoint.DeleteOptions{
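The containerExec change above illustrates the convention applied to the helpers generally: accept the caller's context as the first parameter instead of creating context.Background() inside the helper, so the test's span and any cancellation flow through every layer. In generic form (hypothetical names, purely for illustration):

package example

import (
	"context"
	"errors"
)

// Before (sketch): the helper broke the trace by minting its own root context.
func doWorkDetached(name string) error {
	ctx := context.Background() // the caller's span and deadline are lost here
	return work(ctx, name)
}

// After (sketch): the caller's context, carrying the test span, is threaded through.
func doWork(ctx context.Context, name string) error {
	return work(ctx, name)
}

func work(ctx context.Context, name string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	if name == "" {
		return errors.New("name required")
	}
	return nil
}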
@@ -5,6 +5,7 @@ import (
"runtime"
"testing"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/request"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"

@@ -13,7 +14,7 @@ import (
// TestContainerInvalidJSON tests that POST endpoints that expect a body return
// the correct error when sending invalid JSON requests.
func TestContainerInvalidJSON(t *testing.T) {
t.Cleanup(setupTest(t))
ctx := setupTest(t)
// POST endpoints that accept / expect a JSON body;
endpoints := []string{

@@ -39,7 +40,8 @@ func TestContainerInvalidJSON(t *testing.T) {
t.Parallel()
t.Run("invalid content type", func(t *testing.T) {
res, body, err := request.Post(ep, request.RawString("{}"), request.ContentType("text/plain"))
ctx := testutil.StartSpan(ctx, t)
res, body, err := request.Post(ctx, ep, request.RawString("{}"), request.ContentType("text/plain"))
assert.NilError(t, err)
assert.Check(t, is.Equal(res.StatusCode, http.StatusBadRequest))

@@ -49,7 +51,8 @@ func TestContainerInvalidJSON(t *testing.T) {
})
t.Run("invalid JSON", func(t *testing.T) {
res, body, err := request.Post(ep, request.RawString("{invalid json"), request.JSON)
ctx := testutil.StartSpan(ctx, t)
res, body, err := request.Post(ctx, ep, request.RawString("{invalid json"), request.JSON)
assert.NilError(t, err)
assert.Check(t, is.Equal(res.StatusCode, http.StatusBadRequest))

@@ -59,7 +62,8 @@ func TestContainerInvalidJSON(t *testing.T) {
})
t.Run("extra content after JSON", func(t *testing.T) {
res, body, err := request.Post(ep, request.RawString(`{} trailing content`), request.JSON)
ctx := testutil.StartSpan(ctx, t)
res, body, err := request.Post(ctx, ep, request.RawString(`{} trailing content`), request.JSON)
assert.NilError(t, err)
assert.Check(t, is.Equal(res.StatusCode, http.StatusBadRequest))

@@ -69,10 +73,11 @@ func TestContainerInvalidJSON(t *testing.T) {
})
t.Run("empty body", func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
// empty body should not produce an 500 internal server error, or
// any 5XX error (this is assuming the request does not produce
// an internal server error for another reason, but it shouldn't)
res, _, err := request.Post(ep, request.RawString(``), request.JSON)
res, _, err := request.Post(ctx, ep, request.RawString(``), request.JSON)
assert.NilError(t, err)
assert.Check(t, res.StatusCode < http.StatusInternalServerError)
})
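Passing ctx into request.Post is what lets the trace cross the API boundary: an instrumented HTTP transport injects the traceparent header from the span carried in ctx, so daemon-side spans join the test's trace. Below is a self-contained sketch of that mechanism using the otelhttp contrib package; it shows the general OpenTelemetry pattern and is not necessarily how the moby request helper is implemented:

package example

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func init() {
	// Register the W3C trace-context propagator so the transport has something to inject.
	otel.SetTextMapPropagator(propagation.TraceContext{})
}

// post (sketch): the otelhttp transport injects traceparent/tracestate headers
// taken from the span carried in ctx before sending the request.
func post(ctx context.Context, url string) (*http.Response, error) {
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, http.NoBody)
	if err != nil {
		return nil, err
	}
	return client.Do(req)
}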
@@ -3,7 +3,6 @@ package container // import "github.com/docker/docker/integration/container"
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"io"
"os"

@@ -22,9 +21,8 @@ import (
)
func TestCopyFromContainerPathDoesNotExist(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)
ctx := context.Background()
apiClient := testEnv.APIClient()
cid := container.Create(ctx, t, apiClient)

@@ -34,9 +32,8 @@ func TestCopyFromContainerPathDoesNotExist(t *testing.T) {
}
func TestCopyFromContainerPathIsNotDir(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)
ctx := context.Background()
apiClient := testEnv.APIClient()
cid := container.Create(ctx, t, apiClient)

@@ -51,9 +48,8 @@ func TestCopyFromContainerPathIsNotDir(t *testing.T) {
}
func TestCopyToContainerPathDoesNotExist(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)
ctx := context.Background()
apiClient := testEnv.APIClient()
cid := container.Create(ctx, t, apiClient)

@@ -63,9 +59,8 @@ func TestCopyToContainerPathDoesNotExist(t *testing.T) {
}
func TestCopyEmptyFile(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)
ctx := context.Background()
apiClient := testEnv.APIClient()
cid := container.Create(ctx, t, apiClient)

@@ -120,9 +115,8 @@ func makeEmptyArchive(t *testing.T) (string, io.ReadCloser) {
}
func TestCopyToContainerPathIsNotDir(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)
ctx := context.Background()
apiClient := testEnv.APIClient()
cid := container.Create(ctx, t, apiClient)

@@ -136,9 +130,8 @@ func TestCopyToContainerPathIsNotDir(t *testing.T) {
func TestCopyFromContainer(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
defer setupTest(t)()
ctx := setupTest(t)
ctx := context.Background()
apiClient := testEnv.APIClient()
dir, err := os.MkdirTemp("", t.Name())
@@ -16,6 +16,7 @@ import (
"github.com/docker/docker/errdefs"
ctr "github.com/docker/docker/integration/internal/container"
"github.com/docker/docker/oci"
"github.com/docker/docker/testutil"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"

@@ -24,8 +25,8 @@ import (
)
func TestCreateFailsWhenIdentifierDoesNotExist(t *testing.T) {
t.Cleanup(setupTest(t))
apiClient := testEnv.APIClient()
ctx := setupTest(t)
client := testEnv.APIClient()
testCases := []struct {
doc string

@@ -53,7 +54,8 @@ func TestCreateFailsWhenIdentifierDoesNotExist(t *testing.T) {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
t.Parallel()
_, err := apiClient.ContainerCreate(context.Background(),
ctx := testutil.StartSpan(ctx, t)
_, err := client.ContainerCreate(ctx,
&container.Config{Image: tc.image},
&container.HostConfig{},
&network.NetworkingConfig{},

@@ -71,10 +73,10 @@ func TestCreateFailsWhenIdentifierDoesNotExist(t *testing.T) {
// "non exists" (404).
func TestCreateLinkToNonExistingContainer(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "legacy links are not supported on windows")
defer setupTest(t)()
apiClient := testEnv.APIClient()
ctx := setupTest(t)
c := testEnv.APIClient()
_, err := apiClient.ContainerCreate(context.Background(),
_, err := c.ContainerCreate(ctx,
&container.Config{
Image: "busybox",
},

@@ -90,8 +92,8 @@ func TestCreateLinkToNonExistingContainer(t *testing.T) {
}
func TestCreateWithInvalidEnv(t *testing.T) {
t.Cleanup(setupTest(t))
apiClient := testEnv.APIClient()
ctx := setupTest(t)
client := testEnv.APIClient()
testCases := []struct {
env string

@@ -115,7 +117,8 @@ func TestCreateWithInvalidEnv(t *testing.T) {
tc := tc
t.Run(strconv.Itoa(index), func(t *testing.T) {
t.Parallel()
_, err := apiClient.ContainerCreate(context.Background(),
ctx := testutil.StartSpan(ctx, t)
_, err := client.ContainerCreate(ctx,
&container.Config{
Image: "busybox",
Env: []string{tc.env},

@@ -134,9 +137,9 @@ func TestCreateWithInvalidEnv(t *testing.T) {
// Test case for #30166 (target was not validated)
func TestCreateTmpfsMountsTarget(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
ctx := setupTest(t)
defer setupTest(t)()
apiClient := testEnv.APIClient()
client := testEnv.APIClient()
testCases := []struct {
target string

@@ -161,7 +164,7 @@ func TestCreateTmpfsMountsTarget(t *testing.T) {
}
for _, tc := range testCases {
_, err := apiClient.ContainerCreate(context.Background(),
_, err := client.ContainerCreate(ctx,
&container.Config{
Image: "busybox",
},

@@ -180,9 +183,8 @@ func TestCreateTmpfsMountsTarget(t *testing.T) {
func TestCreateWithCustomMaskedPaths(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType != "linux")
defer setupTest(t)()
ctx := setupTest(t)
apiClient := testEnv.APIClient()
ctx := context.Background()
testCases := []struct {
maskedPaths []string

@@ -224,6 +226,8 @@ func TestCreateWithCustomMaskedPaths(t *testing.T) {
assert.DeepEqual(t, expected, mps)
}
// TODO: This should be using subtests
for i, tc := range testCases {
name := fmt.Sprintf("create-masked-paths-%d", i)
config := container.Config{

@@ -236,7 +240,7 @@ func TestCreateWithCustomMaskedPaths(t *testing.T) {
}
// Create the container.
c, err := apiClient.ContainerCreate(context.Background(),
c, err := apiClient.ContainerCreate(ctx,
&config,
&hc,
&network.NetworkingConfig{},

@@ -260,9 +264,8 @@ func TestCreateWithCustomMaskedPaths(t *testing.T) {
func TestCreateWithCustomReadonlyPaths(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType != "linux")
defer setupTest(t)()
ctx := setupTest(t)
apiClient := testEnv.APIClient()
ctx := context.Background()
testCases := []struct {
readonlyPaths []string

@@ -315,7 +318,7 @@ func TestCreateWithCustomReadonlyPaths(t *testing.T) {
}
// Create the container.
c, err := apiClient.ContainerCreate(context.Background(),
c, err := apiClient.ContainerCreate(ctx,
&config,
&hc,
&network.NetworkingConfig{},

@@ -337,9 +340,8 @@ func TestCreateWithCustomReadonlyPaths(t *testing.T) {
}
func TestCreateWithInvalidHealthcheckParams(t *testing.T) {
t.Cleanup(setupTest(t))
ctx := setupTest(t)
apiClient := testEnv.APIClient()
ctx := context.Background()
testCases := []struct {
doc string

@@ -391,6 +393,7 @@ func TestCreateWithInvalidHealthcheckParams(t *testing.T) {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
t.Parallel()
ctx := testutil.StartSpan(ctx, t)
cfg := container.Config{
Image: "busybox",
Healthcheck: &container.HealthConfig{

@@ -420,9 +423,8 @@ func TestCreateWithInvalidHealthcheckParams(t *testing.T) {
// https://github.com/moby/moby/issues/40446
func TestCreateTmpfsOverrideAnonymousVolume(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "windows does not support tmpfs")
defer setupTest(t)()
ctx := setupTest(t)
apiClient := testEnv.APIClient()
ctx := context.Background()
id := ctr.Create(ctx, t, apiClient,
ctr.WithVolume("/foo"),

@@ -466,15 +468,15 @@ func TestCreateTmpfsOverrideAnonymousVolume(t *testing.T) {
// Test that if the referenced image platform does not match the requested platform on container create that we get an
// error.
func TestCreateDifferentPlatform(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)
apiClient := testEnv.APIClient()
ctx := context.Background()
img, _, err := apiClient.ImageInspectWithRaw(ctx, "busybox:latest")
assert.NilError(t, err)
assert.Assert(t, img.Architecture != "")
t.Run("different os", func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
p := ocispec.Platform{
OS: img.Os + "DifferentOS",
Architecture: img.Architecture,

@@ -484,6 +486,7 @@ func TestCreateDifferentPlatform(t *testing.T) {
assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
})
t.Run("different cpu arch", func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
p := ocispec.Platform{
OS: img.Os,
Architecture: img.Architecture + "DifferentArch",

@@ -495,11 +498,11 @@ func TestCreateDifferentPlatform(t *testing.T) {
}
func TestCreateVolumesFromNonExistingContainer(t *testing.T) {
defer setupTest(t)()
apiClient := testEnv.APIClient()
ctx := setupTest(t)
cli := testEnv.APIClient()
_, err := apiClient.ContainerCreate(
context.Background(),
_, err := cli.ContainerCreate(
ctx,
&container.Config{Image: "busybox"},
&container.HostConfig{VolumesFrom: []string{"nosuchcontainer"}},
nil,

@@ -512,14 +515,14 @@ func TestCreateVolumesFromNonExistingContainer(t *testing.T) {
// Test that we can create a container from an image that is for a different platform even if a platform was not specified
// This is for the regression detailed here: https://github.com/moby/moby/issues/41552
func TestCreatePlatformSpecificImageNoPlatform(t *testing.T) {
defer setupTest(t)()
ctx := setupTest(t)
skip.If(t, testEnv.DaemonInfo.Architecture == "arm", "test only makes sense to run on non-arm systems")
skip.If(t, testEnv.DaemonInfo.OSType != "linux", "test image is only available on linux")
apiClient := testEnv.APIClient()
cli := testEnv.APIClient()
_, err := apiClient.ContainerCreate(
context.Background(),
_, err := cli.ContainerCreate(
ctx,
&container.Config{Image: "arm32v7/hello-world"},
&container.HostConfig{},
nil,

@@ -532,9 +535,8 @@ func TestCreatePlatformSpecificImageNoPlatform(t *testing.T) {
func TestCreateInvalidHostConfig(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
t.Cleanup(setupTest(t))
ctx := setupTest(t)
apiClient := testEnv.APIClient()
ctx := context.Background()
testCases := []struct {
doc string

@@ -572,6 +574,7 @@ func TestCreateInvalidHostConfig(t *testing.T) {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
t.Parallel()
ctx := testutil.StartSpan(ctx, t)
cfg := container.Config{
Image: "busybox",
}
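Because every one of these create tests now receives a span-carrying ctx, helpers and test bodies can annotate the test's span directly, which helps when a table-driven test fails on one case out of many. A small sketch against the standard API (the attribute names are made up for illustration):

package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// annotateCase (sketch): pull the current span out of ctx and record which
// table-driven case is running; a no-op span is returned if tracing is off.
func annotateCase(ctx context.Context, doc string) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(attribute.String("testcase.doc", doc))
	span.AddEvent("running case " + doc)
}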
@@ -13,6 +13,7 @@ import (
containertypes "github.com/docker/docker/api/types/container"
realcontainer "github.com/docker/docker/container"
"github.com/docker/docker/integration/internal/container"
"github.com/docker/docker/testutil"
"github.com/docker/docker/testutil/daemon"
"golang.org/x/sys/unix"
"gotest.tools/v3/assert"

@@ -37,14 +38,14 @@ func TestContainerStartOnDaemonRestart(t *testing.T) {
skip.If(t, testEnv.IsRootless)
t.Parallel()
ctx := testutil.StartSpan(baseContext, t)
d := daemon.New(t)
d.StartWithBusybox(t, "--iptables=false")
d.StartWithBusybox(ctx, t, "--iptables=false")
defer d.Stop(t)
c := d.NewClientT(t)
ctx := context.Background()
cID := container.Create(ctx, t, c)
defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})

@@ -91,12 +92,13 @@ func TestDaemonRestartIpcMode(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
t.Parallel()
ctx := testutil.StartSpan(baseContext, t)
d := daemon.New(t)
d.StartWithBusybox(t, "--iptables=false", "--default-ipc-mode=private")
d.StartWithBusybox(ctx, t, "--iptables=false", "--default-ipc-mode=private")
defer d.Stop(t)
c := d.NewClientT(t)
ctx := context.Background()
// check the container is created with private ipc mode as per daemon default
cID := container.Run(ctx, t, c,

@@ -137,12 +139,13 @@ func TestDaemonHostGatewayIP(t *testing.T) {
skip.If(t, testEnv.IsRootless, "rootless mode has different view of network")
t.Parallel()
ctx := testutil.StartSpan(baseContext, t)
// Verify the IP in /etc/hosts is same as host-gateway-ip
d := daemon.New(t)
// Verify the IP in /etc/hosts is same as the default bridge's IP
d.StartWithBusybox(t, "--iptables=false")
d.StartWithBusybox(ctx, t, "--iptables=false")
c := d.NewClientT(t)
ctx := context.Background()
cID := container.Run(ctx, t, c,
container.WithExtraHost("host.docker.internal:host-gateway"),
)

@@ -157,7 +160,7 @@ func TestDaemonHostGatewayIP(t *testing.T) {
d.Stop(t)
// Verify the IP in /etc/hosts is same as host-gateway-ip
d.StartWithBusybox(t, "--iptables=false", "--host-gateway-ip=6.7.8.9")
d.StartWithBusybox(ctx, t, "--iptables=false", "--host-gateway-ip=6.7.8.9")
cID = container.Run(ctx, t, c,
container.WithExtraHost("host.docker.internal:host-gateway"),
)

@@ -187,13 +190,14 @@ func TestRestartDaemonWithRestartingContainer(t *testing.T) {
t.Parallel()
ctx := testutil.StartSpan(baseContext, t)
d := daemon.New(t)
defer d.Cleanup(t)
d.StartWithBusybox(t, "--iptables=false")
d.StartWithBusybox(ctx, t, "--iptables=false")
defer d.Stop(t)
ctx := context.Background()
apiClient := d.NewClientT(t)
// Just create the container, no need to start it to be started.

@@ -232,13 +236,14 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
t.Parallel()
ctx := testutil.StartSpan(baseContext, t)
d := daemon.New(t)
defer d.Cleanup(t)
d.StartWithBusybox(t, "--iptables=false")
d.StartWithBusybox(ctx, t, "--iptables=false")
defer d.Stop(t)
ctx := context.Background()
apiClient := d.NewClientT(t)
// Just create the containers, no need to start them.

@@ -259,6 +264,7 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
d.Start(t, "--iptables=false")
t.Run("RestartPolicy=none", func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
inspect, err := apiClient.ContainerInspect(ctx, noPolicy)

@@ -272,6 +278,7 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
})
t.Run("RestartPolicy=on-failure", func(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
inspect, err := apiClient.ContainerInspect(ctx, onFailure)
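In the RestartPolicy subtests above, the traced context is additionally wrapped with context.WithTimeout. The two compose: a single ctx both bounds the API call in time and keeps it attached to the test's trace. A minimal sketch (the helper name is illustrative):

package example

import (
	"context"
	"testing"
	"time"

	"go.opentelemetry.io/otel"
)

func TestTracedCallWithTimeout(t *testing.T) {
	ctx, span := otel.Tracer("").Start(context.Background(), t.Name())
	defer span.End()

	// Layer a per-call deadline on top of the traced context; any client call
	// made with ctx now carries both the deadline and the trace context.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	if err := pingDaemon(ctx); err != nil {
		t.Fatal(err)
	}
}

// pingDaemon stands in for a real API call such as ContainerInspect.
func pingDaemon(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}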