Browse Source

Merge pull request #45652 from cpuguy83/otel

Add otel tracing
Bjorn Neergaard 1 year ago
parent
commit
ce4e325504
100 changed files with 1988 additions and 1385 deletions
  1. 9 0
      .github/actions/setup-runner/action.yml
  2. 12 0
      .github/actions/setup-tracing/action.yml
  3. 6 0
      .github/workflows/test.yml
  4. 4 1
      Makefile
  5. 5 26
      api/server/router/grpc/grpc.go
  6. 4 2
      api/server/server.go
  7. 23 10
      builder/builder-next/controller.go
  8. 48 16
      client/client.go
  9. 13 1
      client/client_mock_test.go
  10. 3 3
      client/client_test.go
  11. 30 1
      client/hijack.go
  12. 10 0
      client/options.go
  13. 41 0
      cmd/dockerd/daemon.go
  14. 16 3
      daemon/daemon.go
  15. 6 2
      daemon/id.go
  16. 2 0
      hack/make/.integration-test-helpers
  17. 3 2
      integration-cli/benchmark_test.go
  18. 223 143
      integration-cli/check_test.go
  19. 10 7
      integration-cli/daemon/daemon.go
  20. 80 68
      integration-cli/daemon/daemon_swarm.go
  21. 7 5
      integration-cli/docker_api_attach_test.go
  22. 60 35
      integration-cli/docker_api_build_test.go
  23. 2 1
      integration-cli/docker_api_build_windows_test.go
  24. 89 86
      integration-cli/docker_api_containers_test.go
  25. 2 2
      integration-cli/docker_api_containers_windows_test.go
  26. 4 3
      integration-cli/docker_api_exec_resize_test.go
  27. 31 24
      integration-cli/docker_api_exec_test.go
  28. 13 11
      integration-cli/docker_api_images_test.go
  29. 2 2
      integration-cli/docker_api_inspect_test.go
  30. 8 8
      integration-cli/docker_api_logs_test.go
  31. 8 7
      integration-cli/docker_api_network_test.go
  32. 9 9
      integration-cli/docker_api_stats_test.go
  33. 45 37
      integration-cli/docker_api_swarm_node_test.go
  34. 142 129
      integration-cli/docker_api_swarm_service_test.go
  35. 187 161
      integration-cli/docker_api_swarm_test.go
  36. 11 9
      integration-cli/docker_api_test.go
  37. 3 2
      integration-cli/docker_cli_attach_test.go
  38. 3 2
      integration-cli/docker_cli_build_test.go
  39. 3 2
      integration-cli/docker_cli_commit_test.go
  40. 3 2
      integration-cli/docker_cli_cp_test.go
  41. 3 2
      integration-cli/docker_cli_create_test.go
  42. 72 71
      integration-cli/docker_cli_daemon_test.go
  43. 4 3
      integration-cli/docker_cli_events_test.go
  44. 7 5
      integration-cli/docker_cli_exec_test.go
  45. 25 14
      integration-cli/docker_cli_external_volume_driver_test.go
  46. 3 2
      integration-cli/docker_cli_health_test.go
  47. 3 2
      integration-cli/docker_cli_history_test.go
  48. 3 2
      integration-cli/docker_cli_images_test.go
  49. 3 2
      integration-cli/docker_cli_import_test.go
  50. 3 2
      integration-cli/docker_cli_info_test.go
  51. 2 2
      integration-cli/docker_cli_info_unix_test.go
  52. 3 2
      integration-cli/docker_cli_inspect_test.go
  53. 3 2
      integration-cli/docker_cli_links_test.go
  54. 3 2
      integration-cli/docker_cli_login_test.go
  55. 3 1
      integration-cli/docker_cli_logout_test.go
  56. 56 20
      integration-cli/docker_cli_logs_test.go
  57. 3 2
      integration-cli/docker_cli_netmode_test.go
  58. 3 2
      integration-cli/docker_cli_network_test.go
  59. 23 12
      integration-cli/docker_cli_network_unix_test.go
  60. 4 3
      integration-cli/docker_cli_plugins_logdriver_test.go
  61. 7 6
      integration-cli/docker_cli_plugins_test.go
  62. 26 47
      integration-cli/docker_cli_port_test.go
  63. 3 2
      integration-cli/docker_cli_proxy_test.go
  64. 12 7
      integration-cli/docker_cli_prune_unix_test.go
  65. 3 2
      integration-cli/docker_cli_ps_test.go
  66. 3 2
      integration-cli/docker_cli_pull_test.go
  67. 3 2
      integration-cli/docker_cli_push_test.go
  68. 3 1
      integration-cli/docker_cli_registry_user_agent_test.go
  69. 3 2
      integration-cli/docker_cli_restart_test.go
  70. 3 2
      integration-cli/docker_cli_rmi_test.go
  71. 54 20
      integration-cli/docker_cli_run_test.go
  72. 27 23
      integration-cli/docker_cli_run_unix_test.go
  73. 3 2
      integration-cli/docker_cli_save_load_test.go
  74. 2 1
      integration-cli/docker_cli_save_load_unix_test.go
  75. 3 2
      integration-cli/docker_cli_search_test.go
  76. 33 23
      integration-cli/docker_cli_service_create_test.go
  77. 12 9
      integration-cli/docker_cli_service_health_test.go
  78. 28 18
      integration-cli/docker_cli_service_logs_test.go
  79. 3 1
      integration-cli/docker_cli_service_scale_test.go
  80. 3 2
      integration-cli/docker_cli_sni_test.go
  81. 3 2
      integration-cli/docker_cli_start_test.go
  82. 3 2
      integration-cli/docker_cli_stats_test.go
  83. 179 120
      integration-cli/docker_cli_swarm_test.go
  84. 11 8
      integration-cli/docker_cli_swarm_unix_test.go
  85. 3 2
      integration-cli/docker_cli_top_test.go
  86. 6 5
      integration-cli/docker_cli_update_unix_test.go
  87. 3 1
      integration-cli/docker_cli_userns_test.go
  88. 4 3
      integration-cli/docker_cli_volume_test.go
  89. 14 13
      integration-cli/docker_deprecated_api_v124_test.go
  90. 2 1
      integration-cli/docker_deprecated_api_v124_unix_test.go
  91. 6 5
      integration-cli/docker_hub_pull_suite_test.go
  92. 56 29
      integration-cli/docker_utils_test.go
  93. 3 2
      integration-cli/environment/environment.go
  94. 9 8
      integration-cli/fixtures_linux_daemon_test.go
  95. 2 2
      integration-cli/requirements_test.go
  96. 11 14
      integration-cli/requirements_unix_test.go
  97. 4 0
      integration-cli/requirements_windows_test.go
  98. 9 5
      integration/build/build_cgroupns_linux_test.go
  99. 13 11
      integration/build/build_session_test.go
  100. 4 3
      integration/build/build_squash_test.go

+ 9 - 0
.github/actions/setup-runner/action.yml

@@ -25,3 +25,12 @@ runs:
     - run: |
         docker info
       shell: bash
+    # TODO: Remove this step once the separate action is merged
+    # GitHub doesn't appear to let you add a composite action and use it in the same PR
+    # Ref: https://github.com/moby/moby/actions/runs/5581571995/jobs/10199909170?pr=45652#step:9:1
+    - run: |
+        set -e
+        docker run -d --net=host --name jaeger -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:1.46
+        docker0_ip="$(ip -f inet addr show docker0 | grep -Po 'inet \K[\d.]+')"
+        echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://${docker0_ip}:4318" >> "${GITHUB_ENV}"
+      shell: bash

+ 12 - 0
.github/actions/setup-tracing/action.yml

@@ -0,0 +1,12 @@
+name: 'Setup Tracing'
+description: 'Composite action to set up the tracing for test jobs'
+
+runs:
+  using: composite
+  steps:
+    - run: |
+        set -e
+        docker run -d --net=host --name jaeger -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:1.46
+        docker0_ip="$(ip -f inet addr show docker0 | grep -Po 'inet \K[\d.]+')"
+        echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://${docker0_ip}:4318" >> "${GITHUB_ENV}"
+      shell: bash

+ 6 - 0
.github/workflows/test.yml

@@ -226,6 +226,8 @@ jobs:
           tar -xzf /tmp/reports.tar.gz -C /tmp/reports
           sudo chown -R $(id -u):$(id -g) /tmp/reports
           tree -nh /tmp/reports
+
+          curl -sSLf localhost:16686/api/traces?service=integration-test-client > /tmp/reports/jaeger-trace.json
       -
         name: Test daemon logs
         if: always()
@@ -333,6 +335,8 @@ jobs:
           tar -xzf /tmp/reports.tar.gz -C $reportsPath
           sudo chown -R $(id -u):$(id -g) $reportsPath
           tree -nh $reportsPath
+
+          curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
       -
         name: Send to Codecov
         uses: codecov/codecov-action@v3
@@ -462,6 +466,8 @@ jobs:
           tar -xzf /tmp/reports.tar.gz -C $reportsPath
           sudo chown -R $(id -u):$(id -g) $reportsPath
           tree -nh $reportsPath
+
+          curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
       -
         name: Send to Codecov
         uses: codecov/codecov-action@v3

+ 4 - 1
Makefile

@@ -75,7 +75,10 @@ DOCKER_ENVS := \
 	-e PLATFORM \
 	-e DEFAULT_PRODUCT_LICENSE \
 	-e PRODUCT \
-	-e PACKAGER_NAME
+	-e PACKAGER_NAME \
+	-e OTEL_EXPORTER_OTLP_ENDPOINT \
+	-e OTEL_EXPORTER_OTLP_PROTOCOL \
+	-e OTEL_SERVICE_NAME
 # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
 
 # to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`

+ 5 - 26
api/server/router/grpc/grpc.go

@@ -4,49 +4,28 @@ import (
 	"context"
 	"strings"
 
-	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/router"
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	"github.com/moby/buildkit/util/grpcerrors"
-	"github.com/moby/buildkit/util/tracing/detect"
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-	"go.opentelemetry.io/otel/propagation"
-	"go.opentelemetry.io/otel/trace"
 	"golang.org/x/net/http2"
 	"google.golang.org/grpc"
 )
 
-func init() {
-	// enable in memory recording for grpc traces
-	detect.Recorder = detect.NewTraceRecorder()
-}
-
 type grpcRouter struct {
 	routes     []router.Route
 	grpcServer *grpc.Server
 	h2Server   *http2.Server
 }
 
-var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
-
 // NewRouter initializes a new grpc http router
 func NewRouter(backends ...Backend) router.Router {
-	tp, err := detect.TracerProvider()
-	if err != nil {
-		log.G(context.TODO()).WithError(err).Error("failed to detect trace provider")
-	}
-
-	opts := []grpc.ServerOption{grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor), grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor)}
-	if tp != nil {
-		streamTracer := otelgrpc.StreamServerInterceptor(otelgrpc.WithTracerProvider(tp), otelgrpc.WithPropagators(propagators))
-		unary := grpc_middleware.ChainUnaryServer(unaryInterceptor(tp), grpcerrors.UnaryServerInterceptor)
-		stream := grpc_middleware.ChainStreamServer(streamTracer, grpcerrors.StreamServerInterceptor)
-		opts = []grpc.ServerOption{grpc.UnaryInterceptor(unary), grpc.StreamInterceptor(stream)}
-	}
+	unary := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptor(), grpcerrors.UnaryServerInterceptor))
+	stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor))
 
 	r := &grpcRouter{
 		h2Server:   &http2.Server{},
-		grpcServer: grpc.NewServer(opts...),
+		grpcServer: grpc.NewServer(unary, stream),
 	}
 	for _, b := range backends {
 		b.RegisterGRPC(r.grpcServer)
@@ -66,8 +45,8 @@ func (gr *grpcRouter) initRoutes() {
 	}
 }
 
-func unaryInterceptor(tp trace.TracerProvider) grpc.UnaryServerInterceptor {
-	withTrace := otelgrpc.UnaryServerInterceptor(otelgrpc.WithTracerProvider(tp), otelgrpc.WithPropagators(propagators))
+func unaryInterceptor() grpc.UnaryServerInterceptor {
+	withTrace := otelgrpc.UnaryServerInterceptor()
 
 	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
 		// This method is used by the clients to send their traces to buildkit so they can be included

+ 4 - 2
api/server/server.go

@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/api/server/router/debug"
 	"github.com/docker/docker/dockerversion"
 	"github.com/gorilla/mux"
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
 )
 
 // versionMatcher defines a variable matcher to be parsed by the router
@@ -30,7 +31,7 @@ func (s *Server) UseMiddleware(m middleware.Middleware) {
 }
 
 func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
+	return otelhttp.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		// Define the context that we'll pass around to share info
 		// like the docker-request-id.
 		//
@@ -42,6 +43,7 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
 		// use intermediate variable to prevent "should not use basic type
 		// string as key in context.WithValue" golint errors
 		ctx := context.WithValue(r.Context(), dockerversion.UAStringKey{}, r.Header.Get("User-Agent"))
+
 		r = r.WithContext(ctx)
 		handlerFunc := s.handlerWithGlobalMiddlewares(handler)
 
@@ -57,7 +59,7 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
 			}
 			makeErrorHandler(err)(w, r)
 		}
-	}
+	}), "").ServeHTTP
 }
 
 type pageNotFoundError struct{}

+ 23 - 10
builder/builder-next/controller.go

@@ -9,6 +9,7 @@ import (
 
 	ctd "github.com/containerd/containerd"
 	"github.com/containerd/containerd/content/local"
+	"github.com/containerd/containerd/log"
 	ctdmetadata "github.com/containerd/containerd/metadata"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/docker/docker/api/types"
@@ -43,12 +44,14 @@ import (
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/util/leaseutil"
 	"github.com/moby/buildkit/util/network/netproviders"
+	"github.com/moby/buildkit/util/tracing/detect"
 	"github.com/moby/buildkit/worker"
 	"github.com/moby/buildkit/worker/containerd"
 	"github.com/moby/buildkit/worker/label"
 	"github.com/pkg/errors"
 	"go.etcd.io/bbolt"
 	bolt "go.etcd.io/bbolt"
+	"go.opentelemetry.io/otel/sdk/trace"
 
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
@@ -61,6 +64,14 @@ func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control
 	return newGraphDriverController(ctx, rt, opt)
 }
 
+func getTraceExporter(ctx context.Context) trace.SpanExporter {
+	exp, err := detect.Exporter()
+	if err != nil {
+		log.G(ctx).WithError(err).Error("Failed to detect trace exporter for buildkit controller")
+	}
+	return exp
+}
+
 func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 	if err := os.MkdirAll(opt.Root, 0o711); err != nil {
 		return nil, err
@@ -136,11 +147,12 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
 			"local":    localremotecache.ResolveCacheExporterFunc(opt.SessionManager),
 			"registry": registryremotecache.ResolveCacheExporterFunc(opt.SessionManager, opt.RegistryHosts),
 		},
-		Entitlements:  getEntitlements(opt.BuilderConfig),
-		HistoryDB:     historyDB,
-		HistoryConfig: historyConf,
-		LeaseManager:  wo.LeaseManager,
-		ContentStore:  wo.ContentStore,
+		Entitlements:   getEntitlements(opt.BuilderConfig),
+		HistoryDB:      historyDB,
+		HistoryConfig:  historyConf,
+		LeaseManager:   wo.LeaseManager,
+		ContentStore:   wo.ContentStore,
+		TraceCollector: getTraceExporter(ctx),
 	})
 }
 
@@ -354,11 +366,12 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
 		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
 			"inline": inlineremotecache.ResolveCacheExporterFunc(),
 		},
-		Entitlements:  getEntitlements(opt.BuilderConfig),
-		LeaseManager:  lm,
-		ContentStore:  store,
-		HistoryDB:     historyDB,
-		HistoryConfig: historyConf,
+		Entitlements:   getEntitlements(opt.BuilderConfig),
+		LeaseManager:   lm,
+		ContentStore:   store,
+		HistoryDB:      historyDB,
+		HistoryConfig:  historyConf,
+		TraceCollector: getTraceExporter(ctx),
 	})
 }
 

+ 48 - 16
client/client.go

@@ -56,6 +56,8 @@ import (
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/go-connections/sockets"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+	"go.opentelemetry.io/otel/trace"
 )
 
 // DummyHost is a hostname used for local communication.
@@ -123,6 +125,12 @@ type Client struct {
 
 	// negotiated indicates that API version negotiation took place
 	negotiated bool
+
+	tp trace.TracerProvider
+
+	// When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections).
+	// Store the original transport as the http.Client transport will be wrapped with tracing libs.
+	baseTransport *http.Transport
 }
 
 // ErrRedirect is the error returned by checkRedirect when the request is non-GET.
@@ -188,6 +196,12 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) {
 		}
 	}
 
+	if tr, ok := c.client.Transport.(*http.Transport); ok {
+		// Store the base transport before we wrap it in tracing libs below
+		// This is used, as an example, to close idle connections when the client is closed
+		c.baseTransport = tr
+	}
+
 	if c.scheme == "" {
 		// TODO(stevvooe): This isn't really the right way to write clients in Go.
 		// `NewClient` should probably only take an `*http.Client` and work from there.
@@ -201,9 +215,24 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) {
 		}
 	}
 
+	c.client.Transport = otelhttp.NewTransport(
+		c.client.Transport,
+		otelhttp.WithTracerProvider(c.tp),
+		otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string {
+			return req.Method + " " + req.URL.Path
+		}),
+	)
+
 	return c, nil
 }
 
+func (cli *Client) tlsConfig() *tls.Config {
+	if cli.baseTransport == nil {
+		return nil
+	}
+	return cli.baseTransport.TLSClientConfig
+}
+
 func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
 	transport := &http.Transport{}
 	err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
@@ -216,20 +245,11 @@ func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
 	}, nil
 }
 
-// tlsConfig returns the TLS configuration from the client's transport.
-// It returns nil if the transport is not a [http.Transport], or if no
-// TLSClientConfig is set.
-func (cli *Client) tlsConfig() *tls.Config {
-	if tr, ok := cli.client.Transport.(*http.Transport); ok {
-		return tr.TLSClientConfig
-	}
-	return nil
-}
-
 // Close the transport used by the client
 func (cli *Client) Close() error {
-	if t, ok := cli.client.Transport.(*http.Transport); ok {
-		t.CloseIdleConnections()
+	if cli.baseTransport != nil {
+		cli.baseTransport.CloseIdleConnections()
+		return nil
 	}
 	return nil
 }
@@ -356,6 +376,20 @@ func ParseHostURL(host string) (*url.URL, error) {
 	}, nil
 }
 
+func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) {
+	if cli.baseTransport == nil || cli.baseTransport.DialContext == nil {
+		return nil
+	}
+
+	if cli.baseTransport.TLSClientConfig != nil {
+		// When using a tls config we don't use the configured dialer but instead a fallback dialer...
+		// Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn
+		// I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit.
+		return nil
+	}
+	return cli.baseTransport.DialContext
+}
+
 // Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header,
 // that can be used for proxying the daemon connection. It is used by
 // ["docker dial-stdio"].
@@ -363,10 +397,8 @@ func ParseHostURL(host string) (*url.URL, error) {
 // ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014
 func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
 	return func(ctx context.Context) (net.Conn, error) {
-		if transport, ok := cli.client.Transport.(*http.Transport); ok {
-			if transport.DialContext != nil && transport.TLSClientConfig == nil {
-				return transport.DialContext(ctx, cli.proto, cli.addr)
-			}
+		if dialFn := cli.dialerFromTransport(); dialFn != nil {
+			return dialFn(ctx, cli.proto, cli.addr)
 		}
 		switch cli.proto {
 		case "unix":

+ 13 - 1
client/client_mock_test.go

@@ -17,9 +17,21 @@ func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) {
 	return tf(req)
 }
 
+func transportEnsureBody(f transportFunc) transportFunc {
+	return func(req *http.Request) (*http.Response, error) {
+		resp, err := f(req)
+		if resp != nil && resp.Body == nil {
+			resp.Body = http.NoBody
+		}
+		return resp, err
+	}
+}
+
 func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client {
 	return &http.Client{
-		Transport: transportFunc(doer),
+		// Some tests return a response with a nil body; this is semantically incorrect and causes a panic with wrapping transports (such as otelhttp's).
+		// Wrap the doer to ensure a body is always present, even if it is empty.
+		Transport: transportEnsureBody(transportFunc(doer)),
 	}
 }
 

+ 3 - 3
client/client_test.go

@@ -101,9 +101,9 @@ func TestNewClientWithOpsFromEnv(t *testing.T) {
 
 			if tc.envs["DOCKER_TLS_VERIFY"] != "" {
 				// pedantic checking that this is handled correctly
-				tr := client.client.Transport.(*http.Transport)
-				assert.Assert(t, tr.TLSClientConfig != nil)
-				assert.Check(t, is.Equal(tr.TLSClientConfig.InsecureSkipVerify, false))
+				tlsConfig := client.tlsConfig()
+				assert.Assert(t, tlsConfig != nil)
+				assert.Check(t, is.Equal(tlsConfig.InsecureSkipVerify, false))
 			}
 		})
 	}

+ 30 - 1
client/hijack.go

@@ -13,6 +13,11 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/versions"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/propagation"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+	"go.opentelemetry.io/otel/trace"
 )
 
 // postHijacked sends a POST request and hijacks the connection.
@@ -45,11 +50,32 @@ func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[s
 	return conn, err
 }
 
-func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, string, error) {
+func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) {
 	ctx := req.Context()
 	req.Header.Set("Connection", "Upgrade")
 	req.Header.Set("Upgrade", proto)
 
+	// We aren't using the configured RoundTripper here so manually inject the trace context
+	tp := cli.tp
+	if tp == nil {
+		if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() {
+			tp = span.TracerProvider()
+		} else {
+			tp = otel.GetTracerProvider()
+		}
+	}
+
+	ctx, span := tp.Tracer("").Start(ctx, req.Method+" "+req.URL.Path)
+	span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(req)...)
+	defer func() {
+		if retErr != nil {
+			span.RecordError(retErr)
+			span.SetStatus(codes.Error, retErr.Error())
+		}
+		span.End()
+	}()
+	otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header))
+
 	dialer := cli.Dialer()
 	conn, err := dialer(ctx)
 	if err != nil {
@@ -71,6 +97,9 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, s
 
 	// Server hijacks the connection, error 'connection closed' expected
 	resp, err := clientconn.Do(req)
+	if resp != nil {
+		span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(resp.StatusCode))
+	}
 
 	//nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons
 	if err != httputil.ErrPersistEOF {

+ 10 - 0
client/options.go

@@ -11,6 +11,7 @@ import (
 	"github.com/docker/go-connections/sockets"
 	"github.com/docker/go-connections/tlsconfig"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel/trace"
 )
 
 // Opt is a configuration option to initialize a [Client].
@@ -221,3 +222,12 @@ func WithAPIVersionNegotiation() Opt {
 		return nil
 	}
 }
+
+// WithTraceProvider sets the trace provider for the client.
+// If this is not set then the global trace provider will be used.
+func WithTraceProvider(provider trace.TracerProvider) Opt {
+	return func(c *Client) error {
+		c.tp = provider
+		return nil
+	}
+}

+ 41 - 0
cmd/dockerd/daemon.go

@@ -17,6 +17,7 @@ import (
 	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
 	containerddefaults "github.com/containerd/containerd/defaults"
 	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/tracing"
 	"github.com/docker/docker/api"
 	apiserver "github.com/docker/docker/api/server"
 	buildbackend "github.com/docker/docker/api/server/backend/build"
@@ -56,10 +57,14 @@ import (
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/go-connections/tlsconfig"
 	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/util/bklog"
+	"github.com/moby/buildkit/util/tracing/detect"
 	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/pflag"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/propagation"
 )
 
 // DaemonCli represents the daemon CLI.
@@ -227,6 +232,24 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 	// Notify that the API is active, but before daemon is set up.
 	preNotifyReady()
 
+	const otelServiceNameEnv = "OTEL_SERVICE_NAME"
+	if _, ok := os.LookupEnv(otelServiceNameEnv); !ok {
+		os.Setenv(otelServiceNameEnv, filepath.Base(os.Args[0]))
+	}
+
+	setOTLPProtoDefault()
+	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
+	detect.Recorder = detect.NewTraceRecorder()
+
+	tp, err := detect.TracerProvider()
+	if err != nil {
+		log.G(ctx).WithError(err).Warn("Failed to initialize tracing, skipping")
+	} else {
+		otel.SetTracerProvider(tp)
+		log.G(ctx).Logger.AddHook(tracing.NewLogrusHook())
+		bklog.G(ctx).Logger.AddHook(tracing.NewLogrusHook())
+	}
+
 	pluginStore := plugin.NewStore()
 
 	var apiServer apiserver.Server
@@ -279,6 +302,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 	if err != nil {
 		return err
 	}
+
 	routerOptions.cluster = c
 
 	httpServer.Handler = apiServer.CreateMux(routerOptions.Build()...)
@@ -330,10 +354,27 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		return errors.Wrap(err, "shutting down due to ServeAPI error")
 	}
 
+	detect.Shutdown(context.Background())
+
 	log.G(ctx).Info("Daemon shutdown complete")
 	return nil
 }
 
+// The buildkit "detect" package uses grpc as the default proto, which is in conformance with the old spec.
+// For a little while now http/protobuf is the default spec, so this function sets the protocol to http/protobuf when the env var is unset
+// so that the detect package will use http/protobuf as a default.
+// TODO: This can be removed after buildkit is updated to use http/protobuf as the default.
+func setOTLPProtoDefault() {
+	const (
+		tracesEnv = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
+		protoEnv  = "OTEL_EXPORTER_OTLP_PROTOCOL"
+	)
+
+	if os.Getenv(tracesEnv) == "" && os.Getenv(protoEnv) == "" {
+		os.Setenv(tracesEnv, "http/protobuf")
+	}
+}
+
 type routerOptions struct {
 	sessionManager *session.Manager
 	buildBackend   *buildbackend.Backend

+ 16 - 3
daemon/daemon.go

@@ -71,6 +71,7 @@ import (
 	"github.com/moby/locker"
 	"github.com/pkg/errors"
 	"go.etcd.io/bbolt"
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 	"golang.org/x/sync/semaphore"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
@@ -935,10 +936,17 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		// TODO(stevvooe): We may need to allow configuration of this on the client.
 		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
 		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
+		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
+		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
 	}
 
 	if cfgStore.ContainerdAddr != "" {
-		d.containerdClient, err = containerd.New(cfgStore.ContainerdAddr, containerd.WithDefaultNamespace(cfgStore.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
+		d.containerdClient, err = containerd.New(
+			cfgStore.ContainerdAddr,
+			containerd.WithDefaultNamespace(cfgStore.ContainerdNamespace),
+			containerd.WithDialOpts(gopts),
+			containerd.WithTimeout(60*time.Second),
+		)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to dial %q", cfgStore.ContainerdAddr)
 		}
@@ -948,7 +956,12 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		var pluginCli *containerd.Client
 
 		if cfgStore.ContainerdAddr != "" {
-			pluginCli, err = containerd.New(cfgStore.ContainerdAddr, containerd.WithDefaultNamespace(cfgStore.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
+			pluginCli, err = containerd.New(
+				cfgStore.ContainerdAddr,
+				containerd.WithDefaultNamespace(cfgStore.ContainerdPluginNamespace),
+				containerd.WithDialOpts(gopts),
+				containerd.WithTimeout(60*time.Second),
+			)
 			if err != nil {
 				return nil, errors.Wrapf(err, "failed to dial %q", cfgStore.ContainerdAddr)
 			}
@@ -1005,7 +1018,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		return nil, errors.New("Devices cgroup isn't mounted")
 	}
 
-	d.id, err = loadOrCreateID(filepath.Join(cfgStore.Root, "engine-id"))
+	d.id, err = LoadOrCreateID(cfgStore.Root)
 	if err != nil {
 		return nil, err
 	}

+ 6 - 2
daemon/id.go

@@ -2,21 +2,25 @@ package daemon // import "github.com/docker/docker/daemon"
 
 import (
 	"os"
+	"path/filepath"
 
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
 )
 
-// loadOrCreateID loads the engine's ID from idPath, or generates a new ID
+const idFilename = "engine-id"
+
+// LoadOrCreateID loads the engine's ID from the given root, or generates a new ID
 // if it doesn't exist. It returns the ID, and any error that occurred when
 // saving the file.
 //
 // Note that this function expects the daemon's root directory to already have
 // been created with the right permissions and ownership (usually this would
 // be done by daemon.CreateDaemonRoot().
-func loadOrCreateID(idPath string) (string, error) {
+func LoadOrCreateID(root string) (string, error) {
 	var id string
+	idPath := filepath.Join(root, idFilename)
 	idb, err := os.ReadFile(idPath)
 	if os.IsNotExist(err) {
 		id = uuid.New().String()

+ 2 - 0
hack/make/.integration-test-helpers

@@ -197,6 +197,8 @@ test_env() {
 			TEMP="$TEMP" \
 			TEST_CLIENT_BINARY="$TEST_CLIENT_BINARY" \
 			TEST_INTEGRATION_USE_SNAPSHOTTER="$TEST_INTEGRATION_USE_SNAPSHOTTER" \
+			OTEL_EXPORTER_OTLP_ENDPOINT="$OTEL_EXPORTER_OTLP_ENDPOINT" \
+			OTEL_SERVICE_NAME="$OTEL_SERVICE_NAME" \
 			"$@"
 	)
 }

+ 3 - 2
integration-cli/benchmark_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"runtime"
@@ -16,8 +17,8 @@ type DockerBenchmarkSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerBenchmarkSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerBenchmarkSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerBenchmarkSuite) OnTimeout(c *testing.T) {

+ 223 - 143
integration-cli/check_test.go

@@ -18,11 +18,15 @@ import (
 	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/docker/integration-cli/environment"
 	"github.com/docker/docker/internal/test/suite"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	ienv "github.com/docker/docker/testutil/environment"
 	"github.com/docker/docker/testutil/fakestorage"
 	"github.com/docker/docker/testutil/fixtures/plugin"
 	"github.com/docker/docker/testutil/registry"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
 	"gotest.tools/v3/assert"
 )
 
@@ -38,37 +42,64 @@ const (
 )
 
 var (
-	testEnv *environment.Execution
+	testEnvOnce sync.Once
+	testEnv     *environment.Execution
 
 	// the docker client binary to use
 	dockerBinary = ""
 
-	testEnvOnce sync.Once
+	baseContext context.Context
 )
 
-func init() {
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	os.Exit(testRun(m))
+}
+
+func testRun(m *testing.M) (ret int) {
+	// Global set up
+
 	var err error
 
-	testEnv, err = environment.New()
+	shutdown := testutil.ConfigureTracing()
+	ctx, span := otel.Tracer("").Start(context.Background(), "integration-cli/TestMain")
+	defer func() {
+		if err != nil {
+			span.SetStatus(codes.Error, err.Error())
+			ret = 255
+		} else {
+			if ret != 0 {
+				span.SetAttributes(attribute.Int("exitCode", ret))
+				span.SetStatus(codes.Error, "m.Run() exited with non-zero code")
+			}
+		}
+		span.End()
+		shutdown(ctx)
+	}()
+
+	baseContext = ctx
+
+	testEnv, err = environment.New(ctx)
 	if err != nil {
-		panic(err)
+		return
 	}
-}
 
-func TestMain(m *testing.M) {
-	flag.Parse()
+	if testEnv.IsLocalDaemon() {
+		setupLocalInfo()
+	}
 
-	// Global set up
 	dockerBinary = testEnv.DockerBinary()
-	err := ienv.EnsureFrozenImagesLinux(&testEnv.Execution)
+
+	err = ienv.EnsureFrozenImagesLinux(ctx, &testEnv.Execution)
 	if err != nil {
-		fmt.Println(err)
-		os.Exit(1)
+		return
 	}
 
 	testEnv.Print()
 	printCliVersion()
-	os.Exit(m.Run())
+
+	return m.Run()
 }
 
 func printCliVersion() {
@@ -84,262 +115,311 @@ func printCliVersion() {
 	fmt.Println(cmd.Stdout())
 }
 
-func ensureTestEnvSetup(t *testing.T) {
+func ensureTestEnvSetup(ctx context.Context, t *testing.T) {
 	testEnvOnce.Do(func() {
 		cli.SetTestEnvironment(testEnv)
 		fakestorage.SetTestEnvironment(&testEnv.Execution)
-		ienv.ProtectAll(t, &testEnv.Execution)
+		ienv.ProtectAll(ctx, t, &testEnv.Execution)
 	})
 }
 
 func TestDockerAPISuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerAPISuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerAPISuite{ds: &DockerSuite{}})
 }
 
 func TestDockerBenchmarkSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerBenchmarkSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerBenchmarkSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIAttachSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIAttachSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIAttachSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIBuildSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIBuildSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIBuildSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLICommitSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLICommitSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLICommitSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLICpSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLICpSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLICpSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLICreateSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLICreateSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLICreateSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIEventSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIEventSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIEventSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIExecSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIExecSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIExecSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIHealthSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIHealthSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIHealthSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIHistorySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIHistorySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIHistorySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIImagesSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIImagesSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIImagesSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIImportSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIImportSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIImportSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIInfoSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIInfoSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIInfoSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIInspectSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIInspectSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIInspectSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLILinksSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLILinksSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLILinksSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLILoginSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLILoginSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLILoginSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLILogsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLILogsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLILogsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLINetmodeSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLINetmodeSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLINetmodeSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLINetworkSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLINetworkSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLINetworkSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPluginLogDriverSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPluginLogDriverSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPluginLogDriverSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPluginsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPluginsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPluginsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPortSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPortSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPortSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIProxySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIProxySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIProxySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPruneSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPruneSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPruneSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPullSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPullSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPullSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIPushSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIPushSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIPushSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIRestartSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIRestartSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIRestartSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIRmiSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIRmiSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIRmiSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIRunSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIRunSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIRunSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLISaveLoadSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLISaveLoadSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLISaveLoadSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLISearchSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLISearchSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLISearchSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLISNISuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLISNISuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLISNISuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIStartSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIStartSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIStartSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIStatsSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIStatsSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIStatsSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLITopSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLITopSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLITopSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIUpdateSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIUpdateSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIUpdateSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerCLIVolumeSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerCLIVolumeSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerCLIVolumeSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerRegistrySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerRegistrySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerRegistrySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerSchema1RegistrySuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerSchema1RegistrySuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerSchema1RegistrySuite{ds: &DockerSuite{}})
 }
 
 func TestDockerRegistryAuthHtpasswdSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerRegistryAuthHtpasswdSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerRegistryAuthHtpasswdSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerRegistryAuthTokenSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerRegistryAuthTokenSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerRegistryAuthTokenSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerDaemonSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerDaemonSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerDaemonSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerSwarmSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerSwarmSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerSwarmSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerPluginSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
-	suite.Run(t, &DockerPluginSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerPluginSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerExternalVolumeSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
 	testRequires(t, DaemonIsLinux)
-	suite.Run(t, &DockerExternalVolumeSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerExternalVolumeSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerNetworkSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
 	testRequires(t, DaemonIsLinux)
-	suite.Run(t, &DockerNetworkSuite{ds: &DockerSuite{}})
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
+	suite.Run(ctx, t, &DockerNetworkSuite{ds: &DockerSuite{}})
 }
 
 func TestDockerHubPullSuite(t *testing.T) {
-	ensureTestEnvSetup(t)
+	ctx := testutil.StartSpan(baseContext, t)
+	ensureTestEnvSetup(ctx, t)
 	// FIXME. Temporarily turning this off for Windows as GH16039 was breaking
 	// Windows to Linux CI @icecrime
 	testRequires(t, DaemonIsLinux)
-	suite.Run(t, newDockerHubPullSuite())
+	suite.Run(ctx, t, newDockerHubPullSuite())
 }
 
 type DockerSuite struct{}
@@ -365,8 +445,8 @@ func (s *DockerSuite) OnTimeout(c *testing.T) {
 	}
 }
 
-func (s *DockerSuite) TearDownTest(c *testing.T) {
-	testEnv.Clean(c)
+func (s *DockerSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	testEnv.Clean(ctx, c)
 }
 
 type DockerRegistrySuite struct {
@@ -379,21 +459,21 @@ func (s *DockerRegistrySuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerRegistrySuite) SetUpTest(c *testing.T) {
+func (s *DockerRegistrySuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
 	s.reg = registry.NewV2(c)
 	s.reg.WaitReady(c)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerRegistrySuite) TearDownTest(c *testing.T) {
+func (s *DockerRegistrySuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		s.reg.Close()
 	}
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerSchema1RegistrySuite struct {
@@ -406,21 +486,21 @@ func (s *DockerSchema1RegistrySuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerSchema1RegistrySuite) SetUpTest(c *testing.T) {
+func (s *DockerSchema1RegistrySuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, testEnv.IsLocalDaemon)
 	s.reg = registry.NewV2(c, registry.Schema1)
 	s.reg.WaitReady(c)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerSchema1RegistrySuite) TearDownTest(c *testing.T) {
+func (s *DockerSchema1RegistrySuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		s.reg.Close()
 	}
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerRegistryAuthHtpasswdSuite struct {
@@ -433,14 +513,14 @@ func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *testing.T) {
+func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
 	s.reg = registry.NewV2(c, registry.Htpasswd)
 	s.reg.WaitReady(c)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *testing.T) {
+func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		out, err := s.d.Cmd("logout", privateRegistryURL)
 		assert.NilError(c, err, out)
@@ -449,7 +529,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *testing.T) {
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerRegistryAuthTokenSuite struct {
@@ -462,12 +542,12 @@ func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *testing.T) {
+func (s *DockerRegistryAuthTokenSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *testing.T) {
+func (s *DockerRegistryAuthTokenSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.reg != nil {
 		out, err := s.d.Cmd("logout", privateRegistryURL)
 		assert.NilError(c, err, out)
@@ -476,7 +556,7 @@ func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *testing.T) {
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *testing.T, tokenURL string) {
@@ -496,20 +576,20 @@ func (s *DockerDaemonSuite) OnTimeout(c *testing.T) {
 	s.d.DumpStackAndQuit()
 }
 
-func (s *DockerDaemonSuite) SetUpTest(c *testing.T) {
+func (s *DockerDaemonSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerDaemonSuite) TearDownTest(c *testing.T) {
+func (s *DockerDaemonSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
 	if s.d != nil {
 		s.d.Stop(c)
 	}
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
-func (s *DockerDaemonSuite) TearDownSuite(c *testing.T) {
+func (s *DockerDaemonSuite) TearDownSuite(ctx context.Context, c *testing.T) {
 	filepath.Walk(testdaemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
 		if err != nil {
 			// ignore errors here
@@ -542,11 +622,11 @@ func (s *DockerSwarmSuite) OnTimeout(c *testing.T) {
 	}
 }
 
-func (s *DockerSwarmSuite) SetUpTest(c *testing.T) {
+func (s *DockerSwarmSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
 }
 
-func (s *DockerSwarmSuite) AddDaemon(c *testing.T, joinSwarm, manager bool) *daemon.Daemon {
+func (s *DockerSwarmSuite) AddDaemon(ctx context.Context, c *testing.T, joinSwarm, manager bool) *daemon.Daemon {
 	c.Helper()
 	d := daemon.New(c, dockerBinary, dockerdBinary,
 		testdaemon.WithEnvironment(testEnv.Execution),
@@ -554,12 +634,12 @@ func (s *DockerSwarmSuite) AddDaemon(c *testing.T, joinSwarm, manager bool) *dae
 	)
 	if joinSwarm {
 		if len(s.daemons) > 0 {
-			d.StartAndSwarmJoin(c, s.daemons[0].Daemon, manager)
+			d.StartAndSwarmJoin(ctx, c, s.daemons[0].Daemon, manager)
 		} else {
-			d.StartAndSwarmInit(c)
+			d.StartAndSwarmInit(ctx, c)
 		}
 	} else {
-		d.StartNodeWithBusybox(c)
+		d.StartNodeWithBusybox(ctx, c)
 	}
 
 	s.daemonsLock.Lock()
@@ -570,7 +650,7 @@ func (s *DockerSwarmSuite) AddDaemon(c *testing.T, joinSwarm, manager bool) *dae
 	return d
 }
 
-func (s *DockerSwarmSuite) TearDownTest(c *testing.T) {
+func (s *DockerSwarmSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux)
 	s.daemonsLock.Lock()
 	for _, d := range s.daemons {
@@ -582,7 +662,7 @@ func (s *DockerSwarmSuite) TearDownTest(c *testing.T) {
 	s.daemons = nil
 	s.portIndex = 0
 	s.daemonsLock.Unlock()
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 type DockerPluginSuite struct {
@@ -602,26 +682,26 @@ func (ps *DockerPluginSuite) getPluginRepoWithTag() string {
 	return ps.getPluginRepo() + ":" + "latest"
 }
 
-func (ps *DockerPluginSuite) SetUpSuite(c *testing.T) {
+func (ps *DockerPluginSuite) SetUpSuite(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, RegistryHosting)
 	ps.registry = registry.NewV2(c)
 	ps.registry.WaitReady(c)
 
-	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
 	defer cancel()
 
 	err := plugin.CreateInRegistry(ctx, ps.getPluginRepo(), nil)
 	assert.NilError(c, err, "failed to create plugin")
 }
 
-func (ps *DockerPluginSuite) TearDownSuite(c *testing.T) {
+func (ps *DockerPluginSuite) TearDownSuite(ctx context.Context, c *testing.T) {
 	if ps.registry != nil {
 		ps.registry.Close()
 	}
 }
 
-func (ps *DockerPluginSuite) TearDownTest(c *testing.T) {
-	ps.ds.TearDownTest(c)
+func (ps *DockerPluginSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	ps.ds.TearDownTest(ctx, c)
 }
 
 func (ps *DockerPluginSuite) OnTimeout(c *testing.T) {

+ 10 - 7
integration-cli/daemon/daemon.go

@@ -1,6 +1,7 @@
 package daemon // import "github.com/docker/docker/integration-cli/daemon"
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -79,14 +80,16 @@ func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
 
 // CheckActiveContainerCount returns the number of active containers
 // FIXME(vdemeester) should re-use ActivateContainers in some way
-func (d *Daemon) CheckActiveContainerCount(t *testing.T) (interface{}, string) {
-	t.Helper()
-	out, err := d.Cmd("ps", "-q")
-	assert.NilError(t, err)
-	if len(strings.TrimSpace(out)) == 0 {
-		return 0, ""
+func (d *Daemon) CheckActiveContainerCount(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		t.Helper()
+		out, err := d.Cmd("ps", "-q")
+		assert.NilError(t, err)
+		if len(strings.TrimSpace(out)) == 0 {
+			return 0, ""
+		}
+		return len(strings.Split(strings.TrimSpace(out), "\n")), fmt.Sprintf("output: %q", out)
 	}
-	return len(strings.Split(strings.TrimSpace(out), "\n")), fmt.Sprintf("output: %q", out)
 }
 
 // WaitRun waits for a container to be running for 10s

+ 80 - 68
integration-cli/daemon/daemon_swarm.go

@@ -15,9 +15,9 @@ import (
 
 // CheckServiceTasksInState returns the number of tasks with a matching state,
 // and an optional message substring.
-func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceTasksInState(ctx context.Context, service string, state swarm.TaskState, message string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		tasks := d.GetServiceTasks(c, service)
+		tasks := d.GetServiceTasks(ctx, c, service)
 		var count int
 		for _, task := range tasks {
 			if task.Status.State == state {
@@ -32,9 +32,9 @@ func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState,
 
 // CheckServiceTasksInStateWithError returns the number of tasks with a matching state,
 // and an optional message substring.
-func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.TaskState, errorMessage string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceTasksInStateWithError(ctx context.Context, service string, state swarm.TaskState, errorMessage string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		tasks := d.GetServiceTasks(c, service)
+		tasks := d.GetServiceTasks(ctx, c, service)
 		var count int
 		for _, task := range tasks {
 			if task.Status.State == state {
@@ -48,14 +48,14 @@ func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.T
 }
 
 // CheckServiceRunningTasks returns the number of running tasks for the specified service
-func (d *Daemon) CheckServiceRunningTasks(service string) func(*testing.T) (interface{}, string) {
-	return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "")
+func (d *Daemon) CheckServiceRunningTasks(ctx context.Context, service string) func(*testing.T) (interface{}, string) {
+	return d.CheckServiceTasksInState(ctx, service, swarm.TaskStateRunning, "")
 }
 
 // CheckServiceUpdateState returns the current update state for the specified service
-func (d *Daemon) CheckServiceUpdateState(service string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceUpdateState(ctx context.Context, service string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		service := d.GetService(c, service)
+		service := d.GetService(ctx, c, service)
 		if service.UpdateStatus == nil {
 			return "", ""
 		}
@@ -64,10 +64,10 @@ func (d *Daemon) CheckServiceUpdateState(service string) func(*testing.T) (inter
 }
 
 // CheckPluginRunning returns the runtime state of the plugin
-func (d *Daemon) CheckPluginRunning(plugin string) func(c *testing.T) (interface{}, string) {
+func (d *Daemon) CheckPluginRunning(ctx context.Context, plugin string) func(c *testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
 		apiclient := d.NewClientT(c)
-		resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin)
+		resp, _, err := apiclient.PluginInspectWithRaw(ctx, plugin)
 		if errdefs.IsNotFound(err) {
 			return false, fmt.Sprintf("%v", err)
 		}
@@ -77,10 +77,10 @@ func (d *Daemon) CheckPluginRunning(plugin string) func(c *testing.T) (interface
 }
 
 // CheckPluginImage returns the runtime state of the plugin
-func (d *Daemon) CheckPluginImage(plugin string) func(c *testing.T) (interface{}, string) {
+func (d *Daemon) CheckPluginImage(ctx context.Context, plugin string) func(c *testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
 		apiclient := d.NewClientT(c)
-		resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin)
+		resp, _, err := apiclient.PluginInspectWithRaw(ctx, plugin)
 		if errdefs.IsNotFound(err) {
 			return false, fmt.Sprintf("%v", err)
 		}
@@ -90,94 +90,106 @@ func (d *Daemon) CheckPluginImage(plugin string) func(c *testing.T) (interface{}
 }
 
 // CheckServiceTasks returns the number of tasks for the specified service
-func (d *Daemon) CheckServiceTasks(service string) func(*testing.T) (interface{}, string) {
+func (d *Daemon) CheckServiceTasks(ctx context.Context, service string) func(*testing.T) (interface{}, string) {
 	return func(c *testing.T) (interface{}, string) {
-		tasks := d.GetServiceTasks(c, service)
+		tasks := d.GetServiceTasks(ctx, c, service)
 		return len(tasks), ""
 	}
 }
 
 // CheckRunningTaskNetworks returns the number of times each network is referenced from a task.
-func (d *Daemon) CheckRunningTaskNetworks(c *testing.T) (interface{}, string) {
-	cli := d.NewClientT(c)
-	defer cli.Close()
-
-	tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{
-		Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
-	})
-	assert.NilError(c, err)
-
-	result := make(map[string]int)
-	for _, task := range tasks {
-		for _, network := range task.Spec.Networks {
-			result[network.Target]++
+func (d *Daemon) CheckRunningTaskNetworks(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		cli := d.NewClientT(t)
+		defer cli.Close()
+
+		tasks, err := cli.TaskList(ctx, types.TaskListOptions{
+			Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
+		})
+		assert.NilError(t, err)
+
+		result := make(map[string]int)
+		for _, task := range tasks {
+			for _, network := range task.Spec.Networks {
+				result[network.Target]++
+			}
 		}
+		return result, ""
 	}
-	return result, ""
 }
 
 // CheckRunningTaskImages returns the number of times each image is running as a task.
-func (d *Daemon) CheckRunningTaskImages(c *testing.T) (interface{}, string) {
-	cli := d.NewClientT(c)
-	defer cli.Close()
-
-	tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{
-		Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
-	})
-	assert.NilError(c, err)
-
-	result := make(map[string]int)
-	for _, task := range tasks {
-		if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil {
-			result[task.Spec.ContainerSpec.Image]++
+func (d *Daemon) CheckRunningTaskImages(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		cli := d.NewClientT(t)
+		defer cli.Close()
+
+		tasks, err := cli.TaskList(ctx, types.TaskListOptions{
+			Filters: filters.NewArgs(filters.Arg("desired-state", "running")),
+		})
+		assert.NilError(t, err)
+
+		result := make(map[string]int)
+		for _, task := range tasks {
+			if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil {
+				result[task.Spec.ContainerSpec.Image]++
+			}
 		}
+		return result, ""
 	}
-	return result, ""
 }
 
 // CheckNodeReadyCount returns the number of ready node on the swarm
-func (d *Daemon) CheckNodeReadyCount(c *testing.T) (interface{}, string) {
-	nodes := d.ListNodes(c)
-	var readyCount int
-	for _, node := range nodes {
-		if node.Status.State == swarm.NodeStateReady {
-			readyCount++
+func (d *Daemon) CheckNodeReadyCount(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		nodes := d.ListNodes(ctx, t)
+		var readyCount int
+		for _, node := range nodes {
+			if node.Status.State == swarm.NodeStateReady {
+				readyCount++
+			}
 		}
+		return readyCount, ""
 	}
-	return readyCount, ""
 }
 
 // CheckLocalNodeState returns the current swarm node state
-func (d *Daemon) CheckLocalNodeState(c *testing.T) (interface{}, string) {
-	info := d.SwarmInfo(c)
-	return info.LocalNodeState, ""
+func (d *Daemon) CheckLocalNodeState(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		info := d.SwarmInfo(ctx, t)
+		return info.LocalNodeState, ""
+	}
 }
 
 // CheckControlAvailable returns the current swarm control available
-func (d *Daemon) CheckControlAvailable(c *testing.T) (interface{}, string) {
-	info := d.SwarmInfo(c)
-	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
-	return info.ControlAvailable, ""
+func (d *Daemon) CheckControlAvailable(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		info := d.SwarmInfo(ctx, t)
+		assert.Equal(t, info.LocalNodeState, swarm.LocalNodeStateActive)
+		return info.ControlAvailable, ""
+	}
 }
 
 // CheckLeader returns whether there is a leader on the swarm or not
-func (d *Daemon) CheckLeader(c *testing.T) (interface{}, string) {
-	cli := d.NewClientT(c)
-	defer cli.Close()
+func (d *Daemon) CheckLeader(ctx context.Context) func(t *testing.T) (interface{}, string) {
+	return func(t *testing.T) (interface{}, string) {
+		cli := d.NewClientT(t)
+		defer cli.Close()
 
-	errList := "could not get node list"
+		errList := "could not get node list"
 
-	ls, err := cli.NodeList(context.Background(), types.NodeListOptions{})
-	if err != nil {
-		return err, errList
-	}
+		ls, err := cli.NodeList(ctx, types.NodeListOptions{})
+		if err != nil {
+			return err, errList
+		}
 
-	for _, node := range ls {
-		if node.ManagerStatus != nil && node.ManagerStatus.Leader {
-			return nil, ""
+		for _, node := range ls {
+			if node.ManagerStatus != nil && node.ManagerStatus.Leader {
+				return nil, ""
+			}
 		}
+		return fmt.Errorf("no leader"), "could not find leader"
 	}
-	return fmt.Errorf("no leader"), "could not find leader"
 }
 
 // CmdRetryOutOfSequence tries the specified command against the current daemon

+ 7 - 5
integration-cli/docker_api_attach_test.go

@@ -3,7 +3,6 @@ package main
 import (
 	"bufio"
 	"bytes"
-	"context"
 	"io"
 	"net"
 	"net/http"
@@ -14,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"github.com/docker/go-connections/sockets"
 	"github.com/pkg/errors"
@@ -75,7 +75,8 @@ func (s *DockerAPISuite) TestGetContainersAttachWebsocket(c *testing.T) {
 
 // regression gh14320
 func (s *DockerAPISuite) TestPostContainersAttachContainerNotFound(c *testing.T) {
-	resp, _, err := request.Post("/containers/doesnotexist/attach")
+	ctx := testutil.GetContext(c)
+	resp, _, err := request.Post(ctx, "/containers/doesnotexist/attach")
 	assert.NilError(c, err)
 	// connection will shutdown, err should be "persistent connection closed"
 	assert.Equal(c, resp.StatusCode, http.StatusNotFound)
@@ -86,7 +87,8 @@ func (s *DockerAPISuite) TestPostContainersAttachContainerNotFound(c *testing.T)
 }
 
 func (s *DockerAPISuite) TestGetContainersWsAttachContainerNotFound(c *testing.T) {
-	res, body, err := request.Get("/containers/doesnotexist/attach/ws")
+	ctx := testutil.GetContext(c)
+	res, body, err := request.Get(ctx, "/containers/doesnotexist/attach/ws")
 	assert.Equal(c, res.StatusCode, http.StatusNotFound)
 	assert.NilError(c, err)
 	b, err := request.ReadBody(body)
@@ -190,7 +192,7 @@ func (s *DockerAPISuite) TestPostContainersAttach(c *testing.T) {
 		Logs:   false,
 	}
 
-	resp, err := apiClient.ContainerAttach(context.Background(), cid, attachOpts)
+	resp, err := apiClient.ContainerAttach(testutil.GetContext(c), cid, attachOpts)
 	assert.NilError(c, err)
 	mediaType, b := resp.MediaType()
 	assert.Check(c, b)
@@ -199,7 +201,7 @@ func (s *DockerAPISuite) TestPostContainersAttach(c *testing.T) {
 
 	// Make sure we do see "hello" if Logs is true
 	attachOpts.Logs = true
-	resp, err = apiClient.ContainerAttach(context.Background(), cid, attachOpts)
+	resp, err = apiClient.ContainerAttach(testutil.GetContext(c), cid, attachOpts)
 	assert.NilError(c, err)
 
 	defer resp.Conn.Close()

+ 60 - 35
integration-cli/docker_api_build_test.go

@@ -3,7 +3,6 @@ package main
 import (
 	"archive/tar"
 	"bytes"
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -13,6 +12,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/fakecontext"
 	"github.com/docker/docker/testutil/fakegit"
 	"github.com/docker/docker/testutil/fakestorage"
@@ -23,6 +23,7 @@ import (
 
 func (s *DockerAPISuite) TestBuildAPIDockerFileRemote(c *testing.T) {
 	testRequires(c, NotUserNamespace)
+	ctx := testutil.GetContext(c)
 
 	// -xdev is required because sysfs can cause EPERM
 	testD := `FROM busybox
@@ -31,7 +32,7 @@ RUN find /tmp/`
 	server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"testD": testD}))
 	defer server.Close()
 
-	res, body, err := request.Post("/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON)
+	res, body, err := request.Post(ctx, "/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -46,6 +47,8 @@ RUN find /tmp/`
 }
 
 func (s *DockerAPISuite) TestBuildAPIRemoteTarballContext(c *testing.T) {
+	ctx := testutil.GetContext(c)
+
 	buffer := new(bytes.Buffer)
 	tw := tar.NewWriter(buffer)
 	defer tw.Close()
@@ -66,7 +69,7 @@ func (s *DockerAPISuite) TestBuildAPIRemoteTarballContext(c *testing.T) {
 	}))
 	defer server.Close()
 
-	res, b, err := request.Post("/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar"))
+	res, b, err := request.Post(ctx, "/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 	b.Close()
@@ -113,8 +116,9 @@ RUN echo 'right'
 	}))
 	defer server.Close()
 
+	ctx := testutil.GetContext(c)
 	url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar"
-	res, body, err := request.Post(url, request.ContentType("application/tar"))
+	res, body, err := request.Post(ctx, url, request.ContentType("application/tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -133,7 +137,8 @@ RUN echo from dockerfile`,
 	}, false)
 	defer git.Close()
 
-	res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON)
+	ctx := testutil.GetContext(c)
+	res, body, err := request.Post(ctx, "/build?remote="+git.RepoURL, request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -153,8 +158,9 @@ RUN echo from Dockerfile`,
 	}, false)
 	defer git.Close()
 
+	ctx := testutil.GetContext(c)
 	// Make sure it tries to 'dockerfile' query param value
-	res, body, err := request.Post("/build?dockerfile=baz&remote="+git.RepoURL, request.JSON)
+	res, body, err := request.Post(ctx, "/build?dockerfile=baz&remote="+git.RepoURL, request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -175,8 +181,10 @@ RUN echo from dockerfile`,
 	}, false)
 	defer git.Close()
 
+	ctx := testutil.GetContext(c)
+
 	// Make sure it tries to 'dockerfile' query param value
-	res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON)
+	res, body, err := request.Post(ctx, "/build?remote="+git.RepoURL, request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -218,7 +226,9 @@ func (s *DockerAPISuite) TestBuildAPIUnnormalizedTarPaths(c *testing.T) {
 
 		assert.NilError(c, tw.Close(), "failed to close tar archive")
 
-		res, body, err := request.Post("/build", request.RawContent(io.NopCloser(buffer)), request.ContentType("application/x-tar"))
+		ctx := testutil.GetContext(c)
+
+		res, body, err := request.Post(ctx, "/build", request.RawContent(io.NopCloser(buffer)), request.ContentType("application/x-tar"))
 		assert.NilError(c, err)
 		assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -248,15 +258,17 @@ func (s *DockerAPISuite) TestBuildOnBuildWithCopy(c *testing.T) {
 
 		FROM onbuildbase
 	`
-	ctx := fakecontext.New(c, "",
+	bCtx := fakecontext.New(c, "",
 		fakecontext.WithDockerfile(dockerfile),
 		fakecontext.WithFile("file", "some content"),
 	)
-	defer ctx.Close()
+	defer bCtx.Close()
 
+	ctx := testutil.GetContext(c)
 	res, body, err := request.Post(
+		ctx,
 		"/build",
-		request.RawContent(ctx.AsTarReader(c)),
+		request.RawContent(bCtx.AsTarReader(c)),
 		request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
@@ -268,14 +280,16 @@ func (s *DockerAPISuite) TestBuildOnBuildWithCopy(c *testing.T) {
 
 func (s *DockerAPISuite) TestBuildOnBuildCache(c *testing.T) {
 	build := func(dockerfile string) []byte {
-		ctx := fakecontext.New(c, "",
+		bCtx := fakecontext.New(c, "",
 			fakecontext.WithDockerfile(dockerfile),
 		)
-		defer ctx.Close()
+		defer bCtx.Close()
 
+		ctx := testutil.GetContext(c)
 		res, body, err := request.Post(
+			ctx,
 			"/build",
-			request.RawContent(ctx.AsTarReader(c)),
+			request.RawContent(bCtx.AsTarReader(c)),
 			request.ContentType("application/x-tar"))
 		assert.NilError(c, err)
 		assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode))
@@ -301,11 +315,12 @@ func (s *DockerAPISuite) TestBuildOnBuildCache(c *testing.T) {
 	parentID, childID := imageIDs[0], imageIDs[1]
 
 	client := testEnv.APIClient()
+	ctx := testutil.GetContext(c)
 
 	// check parentID is correct
 	// Parent is graphdriver-only
 	if !testEnv.UsingSnapshotter() {
-		image, _, err := client.ImageInspectWithRaw(context.Background(), childID)
+		image, _, err := client.ImageInspectWithRaw(ctx, childID)
 		assert.NilError(c, err)
 
 		assert.Check(c, is.Equal(parentID, image.Parent))
@@ -317,10 +332,11 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *testing.T) {
 
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	// tag the image to upload it to the private registry
-	err := client.ImageTag(context.TODO(), "busybox", repoName)
+	ctx := testutil.GetContext(c)
+	err := client.ImageTag(ctx, "busybox", repoName)
 	assert.Check(c, err)
 	// push the image to the registry
-	rc, err := client.ImagePush(context.TODO(), repoName, types.ImagePushOptions{RegistryAuth: "{}"})
+	rc, err := client.ImagePush(ctx, repoName, types.ImagePushOptions{RegistryAuth: "{}"})
 	assert.Check(c, err)
 	_, err = io.Copy(io.Discard, rc)
 	assert.Check(c, err)
@@ -332,14 +348,15 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *testing.T) {
 		COPY --from=foo /abc /
 		`, repoName, repoName)
 
-	ctx := fakecontext.New(c, "",
+	bCtx := fakecontext.New(c, "",
 		fakecontext.WithDockerfile(dockerfile),
 	)
-	defer ctx.Close()
+	defer bCtx.Close()
 
 	res, body, err := request.Post(
+		ctx,
 		"/build?pull=1",
-		request.RawContent(ctx.AsTarReader(c)),
+		request.RawContent(bCtx.AsTarReader(c)),
 		request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode))
@@ -376,14 +393,16 @@ func (s *DockerAPISuite) TestBuildAddRemoteNoDecompress(c *testing.T) {
 		RUN [ -f test.tar ]
 		`, server.URL())
 
-	ctx := fakecontext.New(c, "",
+	bCtx := fakecontext.New(c, "",
 		fakecontext.WithDockerfile(dockerfile),
 	)
-	defer ctx.Close()
+	defer bCtx.Close()
 
+	ctx := testutil.GetContext(c)
 	res, body, err := request.Post(
+		ctx,
 		"/build",
-		request.RawContent(ctx.AsTarReader(c)),
+		request.RawContent(bCtx.AsTarReader(c)),
 		request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode))
@@ -405,15 +424,17 @@ func (s *DockerAPISuite) TestBuildChownOnCopy(c *testing.T) {
 		RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'test1:test2' ]
 		RUN [ $(ls -nl / | grep new_dir | awk '{print $3":"$4}') = '1001:1002' ]
 	`
-	ctx := fakecontext.New(c, "",
+	bCtx := fakecontext.New(c, "",
 		fakecontext.WithDockerfile(dockerfile),
 		fakecontext.WithFile("test_file1", "some test content"),
 	)
-	defer ctx.Close()
+	defer bCtx.Close()
 
+	ctx := testutil.GetContext(c)
 	res, body, err := request.Post(
+		ctx,
 		"/build",
-		request.RawContent(ctx.AsTarReader(c)),
+		request.RawContent(bCtx.AsTarReader(c)),
 		request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
@@ -434,9 +455,10 @@ COPY file /file`
 		fakecontext.WithDockerfile(dockerfile),
 		fakecontext.WithFile("file", "bar"))
 
-	build := func(ctx *fakecontext.Fake) string {
-		res, body, err := request.Post("/build",
-			request.RawContent(ctx.AsTarReader(c)),
+	ctx := testutil.GetContext(c)
+	build := func(bCtx *fakecontext.Fake) string {
+		res, body, err := request.Post(ctx, "/build",
+			request.RawContent(bCtx.AsTarReader(c)),
 			request.ContentType("application/x-tar"))
 
 		assert.NilError(c, err)
@@ -474,9 +496,10 @@ ADD file /file`
 		fakecontext.WithDockerfile(dockerfile),
 		fakecontext.WithFile("file", "bar"))
 
-	build := func(ctx *fakecontext.Fake) string {
-		res, body, err := request.Post("/build",
-			request.RawContent(ctx.AsTarReader(c)),
+	ctx := testutil.GetContext(c)
+	build := func(bCtx *fakecontext.Fake) string {
+		res, body, err := request.Post(ctx, "/build",
+			request.RawContent(bCtx.AsTarReader(c)),
 			request.ContentType("application/x-tar"))
 
 		assert.NilError(c, err)
@@ -508,14 +531,16 @@ func (s *DockerAPISuite) TestBuildScratchCopy(c *testing.T) {
 	dockerfile := `FROM scratch
 ADD Dockerfile /
 ENV foo bar`
-	ctx := fakecontext.New(c, "",
+	bCtx := fakecontext.New(c, "",
 		fakecontext.WithDockerfile(dockerfile),
 	)
-	defer ctx.Close()
+	defer bCtx.Close()
 
+	ctx := testutil.GetContext(c)
 	res, body, err := request.Post(
+		ctx,
 		"/build",
-		request.RawContent(ctx.AsTarReader(c)),
+		request.RawContent(bCtx.AsTarReader(c)),
 		request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)

+ 2 - 1
integration-cli/docker_api_build_windows_test.go

@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"testing"
 
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/fakecontext"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
@@ -24,7 +25,7 @@ func (s *DockerAPISuite) TestBuildWithRecycleBin(c *testing.T) {
 	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
 	defer ctx.Close()
 
-	res, body, err := request.Post(
+	res, body, err := request.Post(testutil.GetContext(c),
 		"/build",
 		request.RawContent(ctx.AsTarReader(c)),
 		request.ContentType("application/x-tar"))

+ 89 - 86
integration-cli/docker_api_containers_test.go

@@ -27,6 +27,7 @@ import (
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"github.com/docker/docker/volume"
 	"github.com/docker/go-connections/nat"
@@ -47,7 +48,8 @@ func (s *DockerAPISuite) TestContainerAPIGetAll(c *testing.T) {
 	options := types.ContainerListOptions{
 		All: true,
 	}
-	containers, err := apiClient.ContainerList(context.Background(), options)
+	ctx := testutil.GetContext(c)
+	containers, err := apiClient.ContainerList(ctx, options)
 	assert.NilError(c, err)
 	assert.Equal(c, len(containers), startCount+1)
 	actual := containers[0].Names[0]
@@ -66,7 +68,8 @@ func (s *DockerAPISuite) TestContainerAPIGetJSONNoFieldsOmitted(c *testing.T) {
 	options := types.ContainerListOptions{
 		All: true,
 	}
-	containers, err := apiClient.ContainerList(context.Background(), options)
+	ctx := testutil.GetContext(c)
+	containers, err := apiClient.ContainerList(ctx, options)
 	assert.NilError(c, err)
 	assert.Equal(c, len(containers), startCount+1)
 	actual := fmt.Sprintf("%+v", containers[0])
@@ -105,7 +108,7 @@ func (s *DockerAPISuite) TestContainerAPIGetExport(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	body, err := apiClient.ContainerExport(context.Background(), name)
+	body, err := apiClient.ContainerExport(testutil.GetContext(c), name)
 	assert.NilError(c, err)
 	defer body.Close()
 	found := false
@@ -132,7 +135,7 @@ func (s *DockerAPISuite) TestContainerAPIGetChanges(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	changes, err := apiClient.ContainerDiff(context.Background(), name)
+	changes, err := apiClient.ContainerDiff(testutil.GetContext(c), name)
 	assert.NilError(c, err)
 
 	// Check the changelog for removal of /etc/passwd
@@ -160,7 +163,7 @@ func (s *DockerAPISuite) TestGetContainerStats(c *testing.T) {
 		assert.NilError(c, err)
 		defer apiClient.Close()
 
-		stats, err := apiClient.ContainerStats(context.Background(), name, true)
+		stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, true)
 		assert.NilError(c, err)
 		bc <- b{stats, err}
 	}()
@@ -194,7 +197,7 @@ func (s *DockerAPISuite) TestGetContainerStatsRmRunning(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	stats, err := apiClient.ContainerStats(context.Background(), id, true)
+	stats, err := apiClient.ContainerStats(testutil.GetContext(c), id, true)
 	assert.NilError(c, err)
 	defer stats.Body.Close()
 
@@ -265,7 +268,7 @@ func (s *DockerAPISuite) TestGetContainerStatsStream(c *testing.T) {
 		assert.NilError(c, err)
 		defer apiClient.Close()
 
-		stats, err := apiClient.ContainerStats(context.Background(), name, true)
+		stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, true)
 		assert.NilError(c, err)
 		bc <- b{stats, err}
 	}()
@@ -307,7 +310,7 @@ func (s *DockerAPISuite) TestGetContainerStatsNoStream(c *testing.T) {
 		assert.NilError(c, err)
 		defer apiClient.Close()
 
-		stats, err := apiClient.ContainerStats(context.Background(), name, false)
+		stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, false)
 		assert.NilError(c, err)
 		bc <- b{stats, err}
 	}()
@@ -344,7 +347,7 @@ func (s *DockerAPISuite) TestGetStoppedContainerStats(c *testing.T) {
 		assert.NilError(c, err)
 		defer apiClient.Close()
 
-		resp, err := apiClient.ContainerStats(context.Background(), name, false)
+		resp, err := apiClient.ContainerStats(testutil.GetContext(c), name, false)
 		assert.NilError(c, err)
 		defer resp.Body.Close()
 		chResp <- err
@@ -373,7 +376,7 @@ func (s *DockerAPISuite) TestContainerAPIPause(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerPause(context.Background(), ContainerID)
+	err = apiClient.ContainerPause(testutil.GetContext(c), ContainerID)
 	assert.NilError(c, err)
 
 	pausedContainers := getPaused(c)
@@ -382,7 +385,7 @@ func (s *DockerAPISuite) TestContainerAPIPause(c *testing.T) {
 		c.Fatalf("there should be one paused container and not %d", len(pausedContainers))
 	}
 
-	err = apiClient.ContainerUnpause(context.Background(), ContainerID)
+	err = apiClient.ContainerUnpause(testutil.GetContext(c), ContainerID)
 	assert.NilError(c, err)
 
 	pausedContainers = getPaused(c)
@@ -400,7 +403,7 @@ func (s *DockerAPISuite) TestContainerAPITop(c *testing.T) {
 	defer apiClient.Close()
 
 	// sort by comm[andline] to make sure order stays the same in case of PID rollover
-	top, err := apiClient.ContainerTop(context.Background(), id, []string{"aux", "--sort=comm"})
+	top, err := apiClient.ContainerTop(testutil.GetContext(c), id, []string{"aux", "--sort=comm"})
 	assert.NilError(c, err)
 	assert.Equal(c, len(top.Titles), 11, fmt.Sprintf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles))
 
@@ -422,7 +425,7 @@ func (s *DockerAPISuite) TestContainerAPITopWindows(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	top, err := apiClient.ContainerTop(context.Background(), id, nil)
+	top, err := apiClient.ContainerTop(testutil.GetContext(c), id, nil)
 	assert.NilError(c, err)
 	assert.Equal(c, len(top.Titles), 4, "expected 4 titles, found %d: %v", len(top.Titles), top.Titles)
 
@@ -455,7 +458,7 @@ func (s *DockerAPISuite) TestContainerAPICommit(c *testing.T) {
 		Reference: "testcontainerapicommit:testtag",
 	}
 
-	img, err := apiClient.ContainerCommit(context.Background(), cName, options)
+	img, err := apiClient.ContainerCommit(testutil.GetContext(c), cName, options)
 	assert.NilError(c, err)
 
 	cmd := inspectField(c, img.ID, "Config.Cmd")
@@ -482,7 +485,7 @@ func (s *DockerAPISuite) TestContainerAPICommitWithLabelInConfig(c *testing.T) {
 		Config:    &config,
 	}
 
-	img, err := apiClient.ContainerCommit(context.Background(), cName, options)
+	img, err := apiClient.ContainerCommit(testutil.GetContext(c), cName, options)
 	assert.NilError(c, err)
 
 	label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1")
@@ -522,7 +525,7 @@ func (s *DockerAPISuite) TestContainerAPIBadPort(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
 	assert.ErrorContains(c, err, `invalid port specification: "aa80"`)
 }
 
@@ -536,7 +539,7 @@ func (s *DockerAPISuite) TestContainerAPICreate(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
+	ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 
 	out, _ := dockerCmd(c, "start", "-a", ctr.ID)
@@ -548,7 +551,7 @@ func (s *DockerAPISuite) TestContainerAPICreateEmptyConfig(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &container.Config{}, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &container.Config{}, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
 
 	assert.ErrorContains(c, err, "no command specified")
 }
@@ -571,7 +574,7 @@ func (s *DockerAPISuite) TestContainerAPICreateMultipleNetworksConfig(c *testing
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &networkingConfig, nil, "")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &networkingConfig, nil, "")
 	msg := err.Error()
 	// network name order in error message is not deterministic
 	assert.Assert(c, strings.Contains(msg, "container cannot be connected to network endpoints"))
@@ -606,10 +609,10 @@ func UtilCreateNetworkMode(c *testing.T, networkMode container.NetworkMode) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	ctr, err := apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
+	ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 
-	containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
+	containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
 	assert.NilError(c, err)
 
 	assert.Equal(c, containerJSON.HostConfig.NetworkMode, networkMode, "Mismatched NetworkMode")
@@ -633,10 +636,10 @@ func (s *DockerAPISuite) TestContainerAPICreateWithCpuSharesCpuset(c *testing.T)
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	ctr, err := apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
+	ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 
-	containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
+	containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
 	assert.NilError(c, err)
 
 	out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares")
@@ -654,7 +657,7 @@ func (s *DockerAPISuite) TestContainerAPIVerifyHeader(c *testing.T) {
 	create := func(ct string) (*http.Response, io.ReadCloser, error) {
 		jsonData := bytes.NewBuffer(nil)
 		assert.Assert(c, json.NewEncoder(jsonData).Encode(config) == nil)
-		return request.Post("/containers/create", request.RawContent(io.NopCloser(jsonData)), request.ContentType(ct))
+		return request.Post(testutil.GetContext(c), "/containers/create", request.RawContent(io.NopCloser(jsonData)), request.ContentType(ct))
 	}
 
 	// Try with no content-type
@@ -700,7 +703,7 @@ func (s *DockerAPISuite) TestContainerAPIInvalidPortSyntax(c *testing.T) {
 				  }
 				}`
 
-	res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
+	res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusBadRequest)
@@ -724,7 +727,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *testi
 		}
 	}`
 
-	res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
+	res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusBadRequest)
@@ -748,7 +751,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyRetryMismatch(c *testing.T
 		}
 	}`
 
-	res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
+	res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusBadRequest)
@@ -772,7 +775,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *test
 		}
 	}`
 
-	res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
+	res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusBadRequest)
@@ -796,7 +799,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *testi
 		}
 	}`
 
-	res, _, err := request.Post("/containers/create", request.RawString(config), request.JSON)
+	res, _, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusCreated)
 }
@@ -827,7 +830,7 @@ func (s *DockerAPISuite) TestContainerAPIPostCreateNull(c *testing.T) {
 		"NetworkDisabled":false,
 		"OnBuild":null}`
 
-	res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
+	res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusCreated)
 
@@ -858,7 +861,7 @@ func (s *DockerAPISuite) TestCreateWithTooLowMemoryLimit(c *testing.T) {
 		"Memory":    524287
 	}`
 
-	res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
+	res, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	b, err2 := request.ReadBody(body)
 	assert.Assert(c, err2 == nil)
@@ -881,7 +884,7 @@ func (s *DockerAPISuite) TestContainerAPIRename(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRename(context.Background(), containerID, newName)
+	err = apiClient.ContainerRename(testutil.GetContext(c), containerID, newName)
 	assert.NilError(c, err)
 
 	name := inspectField(c, containerID, "Name")
@@ -896,7 +899,7 @@ func (s *DockerAPISuite) TestContainerAPIKill(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerKill(context.Background(), name, "SIGKILL")
+	err = apiClient.ContainerKill(testutil.GetContext(c), name, "SIGKILL")
 	assert.NilError(c, err)
 
 	state := inspectField(c, name, "State.Running")
@@ -911,7 +914,7 @@ func (s *DockerAPISuite) TestContainerAPIRestart(c *testing.T) {
 	defer apiClient.Close()
 
 	timeout := 1
-	err = apiClient.ContainerRestart(context.Background(), name, container.StopOptions{Timeout: &timeout})
+	err = apiClient.ContainerRestart(testutil.GetContext(c), name, container.StopOptions{Timeout: &timeout})
 	assert.NilError(c, err)
 
 	assert.Assert(c, waitInspect(name, "{{ .State.Restarting  }} {{ .State.Running  }}", "false true", 15*time.Second) == nil)
@@ -927,7 +930,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartNotimeoutParam(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRestart(context.Background(), name, container.StopOptions{})
+	err = apiClient.ContainerRestart(testutil.GetContext(c), name, container.StopOptions{})
 	assert.NilError(c, err)
 
 	assert.Assert(c, waitInspect(name, "{{ .State.Restarting  }} {{ .State.Running  }}", "false true", 15*time.Second) == nil)
@@ -945,15 +948,15 @@ func (s *DockerAPISuite) TestContainerAPIStart(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
 	assert.NilError(c, err)
 
-	err = apiClient.ContainerStart(context.Background(), name, types.ContainerStartOptions{})
+	err = apiClient.ContainerStart(testutil.GetContext(c), name, types.ContainerStartOptions{})
 	assert.NilError(c, err)
 
 	// second call to start should give 304
 	// maybe add ContainerStartWithRaw to test it
-	err = apiClient.ContainerStart(context.Background(), name, types.ContainerStartOptions{})
+	err = apiClient.ContainerStart(testutil.GetContext(c), name, types.ContainerStartOptions{})
 	assert.NilError(c, err)
 
 	// TODO(tibor): figure out why this doesn't work on windows
@@ -968,7 +971,7 @@ func (s *DockerAPISuite) TestContainerAPIStop(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerStop(context.Background(), name, container.StopOptions{
+	err = apiClient.ContainerStop(testutil.GetContext(c), name, container.StopOptions{
 		Timeout: &timeout,
 	})
 	assert.NilError(c, err)
@@ -976,7 +979,7 @@ func (s *DockerAPISuite) TestContainerAPIStop(c *testing.T) {
 
 	// second call to start should give 304
 	// maybe add ContainerStartWithRaw to test it
-	err = apiClient.ContainerStop(context.Background(), name, container.StopOptions{
+	err = apiClient.ContainerStop(testutil.GetContext(c), name, container.StopOptions{
 		Timeout: &timeout,
 	})
 	assert.NilError(c, err)
@@ -995,7 +998,7 @@ func (s *DockerAPISuite) TestContainerAPIWait(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	waitResC, errC := apiClient.ContainerWait(context.Background(), name, "")
+	waitResC, errC := apiClient.ContainerWait(testutil.GetContext(c), name, "")
 
 	select {
 	case err = <-errC:
@@ -1013,7 +1016,7 @@ func (s *DockerAPISuite) TestContainerAPICopyNotExistsAnyMore(c *testing.T) {
 		Resource: "/test.txt",
 	}
 	// no copy in client/
-	res, _, err := request.Post("/containers/"+name+"/copy", request.JSONBody(postData))
+	res, _, err := request.Post(testutil.GetContext(c), "/containers/"+name+"/copy", request.JSONBody(postData))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNotFound)
 }
@@ -1027,7 +1030,7 @@ func (s *DockerAPISuite) TestContainerAPICopyPre124(c *testing.T) {
 		Resource: "/test.txt",
 	}
 
-	res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
+	res, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -1057,7 +1060,7 @@ func (s *DockerAPISuite) TestContainerAPICopyResourcePathEmptyPre124(c *testing.
 		Resource: "",
 	}
 
-	res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
+	res, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
 	assert.NilError(c, err)
 	if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusBadRequest)
@@ -1078,7 +1081,7 @@ func (s *DockerAPISuite) TestContainerAPICopyResourcePathNotFoundPre124(c *testi
 		Resource: "/notexist",
 	}
 
-	res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
+	res, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/"+name+"/copy", request.JSONBody(postData))
 	assert.NilError(c, err)
 	if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusInternalServerError)
@@ -1096,7 +1099,7 @@ func (s *DockerAPISuite) TestContainerAPICopyContainerNotFoundPr124(c *testing.T
 		Resource: "/something",
 	}
 
-	res, _, err := request.Post("/v1.23/containers/notexists/copy", request.JSONBody(postData))
+	res, _, err := request.Post(testutil.GetContext(c), "/v1.23/containers/notexists/copy", request.JSONBody(postData))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNotFound)
 }
@@ -1113,7 +1116,7 @@ func (s *DockerAPISuite) TestContainerAPIDelete(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{})
+	err = apiClient.ContainerRemove(testutil.GetContext(c), id, types.ContainerRemoveOptions{})
 	assert.NilError(c, err)
 }
 
@@ -1122,7 +1125,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteNotExist(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRemove(context.Background(), "doesnotexist", types.ContainerRemoveOptions{})
+	err = apiClient.ContainerRemove(testutil.GetContext(c), "doesnotexist", types.ContainerRemoveOptions{})
 	assert.ErrorContains(c, err, "No such container: doesnotexist")
 }
 
@@ -1139,7 +1142,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteForce(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRemove(context.Background(), id, removeOptions)
+	err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
 	assert.NilError(c, err)
 }
 
@@ -1167,7 +1170,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteRemoveLinks(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRemove(context.Background(), "tlink2/tlink1", removeOptions)
+	err = apiClient.ContainerRemove(testutil.GetContext(c), "tlink2/tlink1", removeOptions)
 	assert.NilError(c, err)
 
 	linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links")
@@ -1201,7 +1204,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteRemoveVolume(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRemove(context.Background(), id, removeOptions)
+	err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
 	assert.NilError(c, err)
 
 	_, err = os.Stat(source)
@@ -1216,7 +1219,7 @@ func (s *DockerAPISuite) TestContainerAPIChunkedEncoding(c *testing.T) {
 		"OpenStdin": true,
 	}
 
-	resp, _, err := request.Post("/containers/create", request.JSONBody(config), request.With(func(req *http.Request) error {
+	resp, _, err := request.Post(testutil.GetContext(c), "/containers/create", request.JSONBody(config), request.With(func(req *http.Request) error {
 		// This is a cheat to make the http request do chunked encoding
 		// Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite
 		// https://golang.org/src/pkg/net/http/request.go?s=11980:12172
@@ -1238,7 +1241,7 @@ func (s *DockerAPISuite) TestContainerAPIPostContainerStop(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerStop(context.Background(), containerID, container.StopOptions{})
+	err = apiClient.ContainerStop(testutil.GetContext(c), containerID, container.StopOptions{})
 	assert.NilError(c, err)
 	assert.Assert(c, waitInspect(containerID, "{{ .State.Running  }}", "false", 60*time.Second) == nil)
 }
@@ -1255,7 +1258,7 @@ func (s *DockerAPISuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
 	assert.NilError(c, err)
 	out, _ := dockerCmd(c, "start", "-a", "echotest")
 	assert.Equal(c, strings.TrimSpace(out), "hello world")
@@ -1265,7 +1268,7 @@ func (s *DockerAPISuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c
 		Entrypoint string
 		Cmd        []string
 	}{"busybox", "echo", []string{"hello", "world"}}
-	_, _, err = request.Post("/containers/create?name=echotest2", request.JSONBody(config2))
+	_, _, err = request.Post(testutil.GetContext(c), "/containers/create?name=echotest2", request.JSONBody(config2))
 	assert.NilError(c, err)
 	out, _ = dockerCmd(c, "start", "-a", "echotest2")
 	assert.Equal(c, strings.TrimSpace(out), "hello world")
@@ -1282,7 +1285,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCmd(c *testing
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "echotest")
 	assert.NilError(c, err)
 	out, _ := dockerCmd(c, "start", "-a", "echotest")
 	assert.Equal(c, strings.TrimSpace(out), "hello world")
@@ -1292,7 +1295,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCmd(c *testing
 		Entrypoint string
 		Cmd        string
 	}{"busybox", "echo", "hello world"}
-	_, _, err = request.Post("/containers/create?name=echotest2", request.JSONBody(config2))
+	_, _, err = request.Post(testutil.GetContext(c), "/containers/create?name=echotest2", request.JSONBody(config2))
 	assert.NilError(c, err)
 	out, _ = dockerCmd(c, "start", "-a", "echotest2")
 	assert.Equal(c, strings.TrimSpace(out), "hello world")
@@ -1309,7 +1312,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *
 		CapAdd  string
 		CapDrop string
 	}{"busybox", "NET_ADMIN", "cap_sys_admin"}
-	res, _, err := request.Post("/containers/create?name=capaddtest0", request.JSONBody(config))
+	res, _, err := request.Post(testutil.GetContext(c), "/containers/create?name=capaddtest0", request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusCreated)
 
@@ -1325,7 +1328,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config2, &hostConfig, &network.NetworkingConfig{}, nil, "capaddtest1")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config2, &hostConfig, &network.NetworkingConfig{}, nil, "capaddtest1")
 	assert.NilError(c, err)
 }
 
@@ -1339,7 +1342,7 @@ func (s *DockerAPISuite) TestContainerAPICreateNoHostConfig118(c *testing.T) {
 	apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("v1.18"))
 	assert.NilError(c, err)
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 }
 
@@ -1368,7 +1371,7 @@ func (s *DockerAPISuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRoot
 	apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("v1.20"))
 	assert.NilError(c, err)
 
-	err = apiClient.CopyToContainer(context.Background(), cID, "/vol2/symlinkToAbsDir", nil, types.CopyToContainerOptions{})
+	err = apiClient.CopyToContainer(testutil.GetContext(c), cID, "/vol2/symlinkToAbsDir", nil, types.CopyToContainerOptions{})
 	assert.ErrorContains(c, err, "container rootfs is marked read-only")
 }
 
@@ -1390,7 +1393,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithWrongCpusetValues(c *testin
 	}
 	name := "wrong-cpuset-cpus"
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig1, &network.NetworkingConfig{}, nil, name)
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig1, &network.NetworkingConfig{}, nil, name)
 	expected := "Invalid value 1-42,, for cpuset cpus"
 	assert.ErrorContains(c, err, expected)
 
@@ -1400,7 +1403,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithWrongCpusetValues(c *testin
 		},
 	}
 	name = "wrong-cpuset-mems"
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig2, &network.NetworkingConfig{}, nil, name)
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig2, &network.NetworkingConfig{}, nil, name)
 	expected = "Invalid value 42-3,1-- for cpuset mems"
 	assert.ErrorContains(c, err, expected)
 }
@@ -1419,7 +1422,7 @@ func (s *DockerAPISuite) TestPostContainersCreateShmSizeNegative(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
 	assert.ErrorContains(c, err, "SHM size can not be less than 0")
 }
 
@@ -1436,10 +1439,10 @@ func (s *DockerAPISuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *tes
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
+	ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 
-	containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
+	containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
 	assert.NilError(c, err)
 
 	assert.Equal(c, containerJSON.HostConfig.ShmSize, dconfig.DefaultShmSize)
@@ -1463,10 +1466,10 @@ func (s *DockerAPISuite) TestPostContainersCreateShmSizeOmitted(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
+	ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 
-	containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
+	containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
 	assert.NilError(c, err)
 
 	assert.Equal(c, containerJSON.HostConfig.ShmSize, int64(67108864))
@@ -1494,10 +1497,10 @@ func (s *DockerAPISuite) TestPostContainersCreateWithShmSize(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	ctr, err := apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
+	ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 
-	containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
+	containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
 	assert.NilError(c, err)
 
 	assert.Equal(c, containerJSON.HostConfig.ShmSize, int64(1073741824))
@@ -1520,10 +1523,10 @@ func (s *DockerAPISuite) TestPostContainersCreateMemorySwappinessHostConfigOmitt
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	ctr, err := apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
+	ctr, err := apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
 	assert.NilError(c, err)
 
-	containerJSON, err := apiClient.ContainerInspect(context.Background(), ctr.ID)
+	containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
 	assert.NilError(c, err)
 
 	if versions.LessThan(testEnv.DaemonAPIVersion(), "1.31") {
@@ -1551,7 +1554,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *
 	defer apiClient.Close()
 
 	name := "oomscoreadj-over"
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)
 
 	expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]"
 	assert.ErrorContains(c, err, expected)
@@ -1561,7 +1564,7 @@ func (s *DockerAPISuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *
 	}
 
 	name = "oomscoreadj-low"
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, name)
 
 	expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]"
 	assert.ErrorContains(c, err, expected)
@@ -1573,7 +1576,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteWithEmptyName(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	err = apiClient.ContainerRemove(context.Background(), "", types.ContainerRemoveOptions{})
+	err = apiClient.ContainerRemove(testutil.GetContext(c), "", types.ContainerRemoveOptions{})
 	assert.Check(c, errdefs.IsNotFound(err))
 }
 
@@ -1593,10 +1596,10 @@ func (s *DockerAPISuite) TestContainerAPIStatsWithNetworkDisabled(c *testing.T)
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &container.HostConfig{}, &network.NetworkingConfig{}, nil, name)
 	assert.NilError(c, err)
 
-	err = apiClient.ContainerStart(context.Background(), name, types.ContainerStartOptions{})
+	err = apiClient.ContainerStart(testutil.GetContext(c), name, types.ContainerStartOptions{})
 	assert.NilError(c, err)
 
 	assert.Assert(c, waitRun(name) == nil)
@@ -1607,7 +1610,7 @@ func (s *DockerAPISuite) TestContainerAPIStatsWithNetworkDisabled(c *testing.T)
 	}
 	bc := make(chan b, 1)
 	go func() {
-		stats, err := apiClient.ContainerStats(context.Background(), name, false)
+		stats, err := apiClient.ContainerStats(testutil.GetContext(c), name, false)
 		bc <- b{stats, err}
 	}()
 
@@ -1931,7 +1934,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsValidation(c *testing.T) {
 	for i, x := range cases {
 		x := x
 		c.Run(fmt.Sprintf("case %d", i), func(c *testing.T) {
-			_, err = apiClient.ContainerCreate(context.Background(), &x.config, &x.hostConfig, &network.NetworkingConfig{}, nil, "")
+			_, err = apiClient.ContainerCreate(testutil.GetContext(c), &x.config, &x.hostConfig, &network.NetworkingConfig{}, nil, "")
 			if len(x.msg) > 0 {
 				assert.ErrorContains(c, err, x.msg, "%v", cases[i].config)
 			} else {
@@ -1964,7 +1967,7 @@ func (s *DockerAPISuite) TestContainerAPICreateMountsBindRead(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "test")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "test")
 	assert.NilError(c, err)
 
 	out, _ := dockerCmd(c, "start", "-a", "test")
@@ -2099,7 +2102,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
 		}...)
 	}
 
-	ctx := context.Background()
+	ctx := testutil.GetContext(c)
 	apiclient := testEnv.APIClient()
 	for i, x := range cases {
 		x := x
@@ -2138,7 +2141,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
 
 			err = apiclient.ContainerStart(ctx, ctr.ID, types.ContainerStartOptions{})
 			assert.NilError(c, err)
-			poll.WaitOn(c, containerExit(apiclient, ctr.ID), poll.WithDelay(time.Second))
+			poll.WaitOn(c, containerExit(ctx, apiclient, ctr.ID), poll.WithDelay(time.Second))
 
 			err = apiclient.ContainerRemove(ctx, ctr.ID, types.ContainerRemoveOptions{
 				RemoveVolumes: true,
@@ -2164,9 +2167,9 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
 	}
 }
 
-func containerExit(apiclient client.APIClient, name string) func(poll.LogT) poll.Result {
+func containerExit(ctx context.Context, apiclient client.APIClient, name string) func(poll.LogT) poll.Result {
 	return func(logT poll.LogT) poll.Result {
-		ctr, err := apiclient.ContainerInspect(context.Background(), name)
+		ctr, err := apiclient.ContainerInspect(ctx, name)
 		if err != nil {
 			return poll.Error(err)
 		}
@@ -2219,7 +2222,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsTmpfs(c *testing.T) {
 			Mounts: []mount.Mount{x.cfg},
 		}
 
-		_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, cName)
+		_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, cName)
 		assert.NilError(c, err)
 		out, _ := dockerCmd(c, "start", "-a", cName)
 		for _, option := range x.expectedOptions {
@@ -2233,7 +2236,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsTmpfs(c *testing.T) {
 // gets killed (with SIGKILL) by the kill API, that the restart policy is cancelled.
 func (s *DockerAPISuite) TestContainerKillCustomStopSignal(c *testing.T) {
 	id := strings.TrimSpace(runSleepingContainer(c, "--stop-signal=SIGTERM", "--restart=always"))
-	res, _, err := request.Post("/containers/" + id + "/kill")
+	res, _, err := request.Post(testutil.GetContext(c), "/containers/"+id+"/kill")
 	assert.NilError(c, err)
 	defer res.Body.Close()
 

+ 2 - 2
integration-cli/docker_api_containers_windows_test.go

@@ -3,7 +3,6 @@
 package main
 
 import (
-	"context"
 	"fmt"
 	"io"
 	"math/rand"
@@ -14,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/testutil"
 	"github.com/pkg/errors"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -48,7 +48,7 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsBindNamedPipe(c *testing.T
 	cmd := fmt.Sprintf("echo %s > %s", text, containerPipeName)
 	name := "test-bind-npipe"
 
-	ctx := context.Background()
+	ctx := testutil.GetContext(c)
 	client := testEnv.APIClient()
 	_, err = client.ContainerCreate(ctx,
 		&container.Config{

+ 4 - 3
integration-cli/docker_api_exec_resize_test.go

@@ -11,6 +11,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"github.com/pkg/errors"
 	"gotest.tools/v3/assert"
@@ -22,7 +23,7 @@ func (s *DockerAPISuite) TestExecResizeAPIHeightWidthNoInt(c *testing.T) {
 	cleanedContainerID := strings.TrimSpace(out)
 
 	endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar"
-	res, _, err := request.Post(endpoint)
+	res, _, err := request.Post(testutil.GetContext(c), endpoint)
 	assert.NilError(c, err)
 	if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusInternalServerError)
@@ -42,7 +43,7 @@ func (s *DockerAPISuite) TestExecResizeImmediatelyAfterExecStart(c *testing.T) {
 			"Cmd":         []string{"/bin/sh"},
 		}
 		uri := fmt.Sprintf("/containers/%s/exec", name)
-		res, body, err := request.Post(uri, request.JSONBody(data))
+		res, body, err := request.Post(testutil.GetContext(c), uri, request.JSONBody(data))
 		if err != nil {
 			return err
 		}
@@ -71,7 +72,7 @@ func (s *DockerAPISuite) TestExecResizeImmediatelyAfterExecStart(c *testing.T) {
 		}
 		defer wc.Close()
 
-		_, rc, err := request.Post(fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), request.ContentType("text/plain"))
+		_, rc, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), request.ContentType("text/plain"))
 		if err != nil {
 			// It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned.
 			if err == io.ErrUnexpectedEOF {

+ 31 - 24
integration-cli/docker_api_exec_test.go

@@ -16,6 +16,7 @@ import (
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -27,7 +28,7 @@ func (s *DockerAPISuite) TestExecAPICreateNoCmd(c *testing.T) {
 	name := "exec_test"
 	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
 
-	res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": nil}))
+	res, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": nil}))
 	assert.NilError(c, err)
 	if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusInternalServerError)
@@ -48,7 +49,7 @@ func (s *DockerAPISuite) TestExecAPICreateNoValidContentType(c *testing.T) {
 		c.Fatalf("Can not encode data to json %s", err)
 	}
 
-	res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.RawContent(io.NopCloser(jsonData)), request.ContentType("test/plain"))
+	res, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/containers/%s/exec", name), request.RawContent(io.NopCloser(jsonData)), request.ContentType("test/plain"))
 	assert.NilError(c, err)
 	if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, res.StatusCode, http.StatusInternalServerError)
@@ -75,7 +76,7 @@ func (s *DockerAPISuite) TestExecAPICreateContainerPaused(c *testing.T) {
 	config := types.ExecConfig{
 		Cmd: []string{"true"},
 	}
-	_, err = apiClient.ContainerExecCreate(context.Background(), name, config)
+	_, err = apiClient.ContainerExecCreate(testutil.GetContext(c), name, config)
 	assert.ErrorContains(c, err, "Container "+name+" is paused, unpause the container before exec", "Expected message when creating exec command with Container %s is paused", name)
 }
 
@@ -87,7 +88,7 @@ func (s *DockerAPISuite) TestExecAPIStart(c *testing.T) {
 	startExec(c, id, http.StatusOK)
 
 	var execJSON struct{ PID int }
-	inspectExec(c, id, &execJSON)
+	inspectExec(testutil.GetContext(c), c, id, &execJSON)
 	assert.Assert(c, execJSON.PID > 1)
 
 	id = createExec(c, "test")
@@ -111,7 +112,7 @@ func (s *DockerAPISuite) TestExecAPIStartEnsureHeaders(c *testing.T) {
 	dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top")
 
 	id := createExec(c, "test")
-	resp, _, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
+	resp, _, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
 	assert.NilError(c, err)
 	assert.Assert(c, resp.Header.Get("Server") != "")
 }
@@ -121,7 +122,7 @@ func (s *DockerAPISuite) TestExecAPIStartBackwardsCompatible(c *testing.T) {
 	runSleepingContainer(c, "-d", "--name", "test")
 	id := createExec(c, "test")
 
-	resp, body, err := request.Post(fmt.Sprintf("/v1.20/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.ContentType("text/plain"))
+	resp, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/v1.20/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.ContentType("text/plain"))
 	assert.NilError(c, err)
 
 	b, err := request.ReadBody(body)
@@ -135,7 +136,7 @@ func (s *DockerAPISuite) TestExecAPIStartMultipleTimesError(c *testing.T) {
 	runSleepingContainer(c, "-d", "--name", "test")
 	execID := createExec(c, "test")
 	startExec(c, execID, http.StatusOK)
-	waitForExec(c, execID)
+	waitForExec(testutil.GetContext(c), c, execID)
 
 	startExec(c, execID, http.StatusConflict)
 }
@@ -145,6 +146,8 @@ func (s *DockerAPISuite) TestExecAPIStartWithDetach(c *testing.T) {
 	name := "foo"
 	runSleepingContainer(c, "-d", "-t", "--name", name)
 
+	ctx := testutil.GetContext(c)
+
 	config := types.ExecConfig{
 		Cmd:          []string{"true"},
 		AttachStderr: true,
@@ -154,17 +157,17 @@ func (s *DockerAPISuite) TestExecAPIStartWithDetach(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	createResp, err := apiClient.ContainerExecCreate(context.Background(), name, config)
+	createResp, err := apiClient.ContainerExecCreate(ctx, name, config)
 	assert.NilError(c, err)
 
-	_, body, err := request.Post(fmt.Sprintf("/exec/%s/start", createResp.ID), request.RawString(`{"Detach": true}`), request.JSON)
+	_, body, err := request.Post(ctx, fmt.Sprintf("/exec/%s/start", createResp.ID), request.RawString(`{"Detach": true}`), request.JSON)
 	assert.NilError(c, err)
 
 	b, err := request.ReadBody(body)
 	comment := fmt.Sprintf("response body: %s", b)
 	assert.NilError(c, err, comment)
 
-	resp, _, err := request.Get("/_ping")
+	resp, _, err := request.Get(ctx, "/_ping")
 	assert.NilError(c, err)
 	if resp.StatusCode != http.StatusOK {
 		c.Fatal("daemon is down, it should alive")
@@ -179,10 +182,11 @@ func (s *DockerAPISuite) TestExecAPIStartValidCommand(c *testing.T) {
 	id := createExecCmd(c, name, "true")
 	startExec(c, id, http.StatusOK)
 
-	waitForExec(c, id)
+	ctx := testutil.GetContext(c)
+	waitForExec(ctx, c, id)
 
 	var inspectJSON struct{ ExecIDs []string }
-	inspectContainer(c, name, &inspectJSON)
+	inspectContainer(ctx, c, name, &inspectJSON)
 
 	assert.Assert(c, inspectJSON.ExecIDs == nil)
 }
@@ -198,10 +202,11 @@ func (s *DockerAPISuite) TestExecAPIStartInvalidCommand(c *testing.T) {
 	} else {
 		startExec(c, id, http.StatusBadRequest)
 	}
-	waitForExec(c, id)
+	ctx := testutil.GetContext(c)
+	waitForExec(ctx, c, id)
 
 	var inspectJSON struct{ ExecIDs []string }
-	inspectContainer(c, name, &inspectJSON)
+	inspectContainer(ctx, c, name, &inspectJSON)
 
 	assert.Assert(c, inspectJSON.ExecIDs == nil)
 }
@@ -229,13 +234,15 @@ func (s *DockerAPISuite) TestExecStateCleanup(c *testing.T) {
 
 	id := createExecCmd(c, name, "ls")
 	startExec(c, id, http.StatusOK)
-	waitForExec(c, id)
+
+	ctx := testutil.GetContext(c)
+	waitForExec(ctx, c, id)
 
 	poll.WaitOn(c, pollCheck(c, checkReadDir, checker.Equals(len(fi))), poll.WithTimeout(5*time.Second))
 
 	id = createExecCmd(c, name, "invalid")
 	startExec(c, id, http.StatusBadRequest)
-	waitForExec(c, id)
+	waitForExec(ctx, c, id)
 
 	poll.WaitOn(c, pollCheck(c, checkReadDir, checker.Equals(len(fi))), poll.WithTimeout(5*time.Second))
 
@@ -250,7 +257,7 @@ func createExec(c *testing.T, name string) string {
 }
 
 func createExecCmd(c *testing.T, name string, cmd string) string {
-	_, reader, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}}))
+	_, reader, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}}))
 	assert.NilError(c, err)
 	b, err := io.ReadAll(reader)
 	assert.NilError(c, err)
@@ -263,7 +270,7 @@ func createExecCmd(c *testing.T, name string, cmd string) string {
 }
 
 func startExec(c *testing.T, id string, code int) {
-	resp, body, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
+	resp, body, err := request.Post(testutil.GetContext(c), fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
 	assert.NilError(c, err)
 
 	b, err := request.ReadBody(body)
@@ -271,8 +278,8 @@ func startExec(c *testing.T, id string, code int) {
 	assert.Equal(c, resp.StatusCode, code, "response body: %s", b)
 }
 
-func inspectExec(c *testing.T, id string, out interface{}) {
-	resp, body, err := request.Get(fmt.Sprintf("/exec/%s/json", id))
+func inspectExec(ctx context.Context, c *testing.T, id string, out interface{}) {
+	resp, body, err := request.Get(ctx, fmt.Sprintf("/exec/%s/json", id))
 	assert.NilError(c, err)
 	defer body.Close()
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
@@ -280,7 +287,7 @@ func inspectExec(c *testing.T, id string, out interface{}) {
 	assert.NilError(c, err)
 }
 
-func waitForExec(c *testing.T, id string) {
+func waitForExec(ctx context.Context, c *testing.T, id string) {
 	timeout := time.After(60 * time.Second)
 	var execJSON struct{ Running bool }
 	for {
@@ -290,15 +297,15 @@ func waitForExec(c *testing.T, id string) {
 		default:
 		}
 
-		inspectExec(c, id, &execJSON)
+		inspectExec(ctx, c, id, &execJSON)
 		if !execJSON.Running {
 			break
 		}
 	}
 }
 
-func inspectContainer(c *testing.T, id string, out interface{}) {
-	resp, body, err := request.Get("/containers/" + id + "/json")
+func inspectContainer(ctx context.Context, c *testing.T, id string, out interface{}) {
+	resp, body, err := request.Get(ctx, "/containers/"+id+"/json")
 	assert.NilError(c, err)
 	defer body.Close()
 	assert.Equal(c, resp.StatusCode, http.StatusOK)

+ 13 - 11
integration-cli/docker_api_images_test.go

@@ -1,7 +1,6 @@
 package main
 
 import (
-	"context"
 	"net/http"
 	"net/http/httptest"
 	"strings"
@@ -11,6 +10,7 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
@@ -20,14 +20,15 @@ func (s *DockerAPISuite) TestAPIImagesSaveAndLoad(c *testing.T) {
 	buildImageSuccessfully(c, "saveandload", build.WithDockerfile("FROM busybox\nENV FOO bar"))
 	id := getIDByName(c, "saveandload")
 
-	res, body, err := request.Get("/images/" + id + "/get")
+	ctx := testutil.GetContext(c)
+	res, body, err := request.Get(ctx, "/images/"+id+"/get")
 	assert.NilError(c, err)
 	defer body.Close()
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
 	dockerCmd(c, "rmi", id)
 
-	res, loadBody, err := request.Post("/images/load", request.RawContent(body), request.ContentType("application/x-tar"))
+	res, loadBody, err := request.Post(ctx, "/images/load", request.RawContent(body), request.ContentType("application/x-tar"))
 	assert.NilError(c, err)
 	defer loadBody.Close()
 	assert.Equal(c, res.StatusCode, http.StatusOK)
@@ -50,13 +51,13 @@ func (s *DockerAPISuite) TestAPIImagesDelete(c *testing.T) {
 
 	dockerCmd(c, "tag", name, "test:tag1")
 
-	_, err = apiClient.ImageRemove(context.Background(), id, types.ImageRemoveOptions{})
+	_, err = apiClient.ImageRemove(testutil.GetContext(c), id, types.ImageRemoveOptions{})
 	assert.ErrorContains(c, err, "unable to delete")
 
-	_, err = apiClient.ImageRemove(context.Background(), "test:noexist", types.ImageRemoveOptions{})
+	_, err = apiClient.ImageRemove(testutil.GetContext(c), "test:noexist", types.ImageRemoveOptions{})
 	assert.ErrorContains(c, err, "No such image")
 
-	_, err = apiClient.ImageRemove(context.Background(), "test:tag1", types.ImageRemoveOptions{})
+	_, err = apiClient.ImageRemove(testutil.GetContext(c), "test:tag1", types.ImageRemoveOptions{})
 	assert.NilError(c, err)
 }
 
@@ -72,7 +73,7 @@ func (s *DockerAPISuite) TestAPIImagesHistory(c *testing.T) {
 	buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar"))
 	id := getIDByName(c, name)
 
-	historydata, err := apiClient.ImageHistory(context.Background(), id)
+	historydata, err := apiClient.ImageHistory(testutil.GetContext(c), id)
 	assert.NilError(c, err)
 
 	assert.Assert(c, len(historydata) != 0)
@@ -102,8 +103,9 @@ func (s *DockerAPISuite) TestAPIImagesImportBadSrc(c *testing.T) {
 		{http.StatusInternalServerError, "%2Fdata%2Ffile.tar"},
 	}
 
+	ctx := testutil.GetContext(c)
 	for _, te := range tt {
-		res, _, err := request.Post(strings.Join([]string{"/images/create?fromSrc=", te.fromSrc}, ""), request.JSON)
+		res, _, err := request.Post(ctx, strings.Join([]string{"/images/create?fromSrc=", te.fromSrc}, ""), request.JSON)
 		assert.NilError(c, err)
 		assert.Equal(c, res.StatusCode, te.statusExp)
 		assert.Equal(c, res.Header.Get("Content-Type"), "application/json")
@@ -114,7 +116,7 @@ func (s *DockerAPISuite) TestAPIImagesImportBadSrc(c *testing.T) {
 func (s *DockerAPISuite) TestAPIImagesSearchJSONContentType(c *testing.T) {
 	testRequires(c, Network)
 
-	res, b, err := request.Get("/images/search?term=test", request.JSON)
+	res, b, err := request.Get(testutil.GetContext(c), "/images/search?term=test", request.JSON)
 	assert.NilError(c, err)
 	b.Close()
 	assert.Equal(c, res.StatusCode, http.StatusOK)
@@ -127,7 +129,7 @@ func (s *DockerAPISuite) TestAPIImagesSizeCompatibility(c *testing.T) {
 	apiclient := testEnv.APIClient()
 	defer apiclient.Close()
 
-	images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{})
+	images, err := apiclient.ImageList(testutil.GetContext(c), types.ImageListOptions{})
 	assert.NilError(c, err)
 	assert.Assert(c, len(images) != 0)
 	for _, image := range images {
@@ -138,7 +140,7 @@ func (s *DockerAPISuite) TestAPIImagesSizeCompatibility(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiclient.Close()
 
-	v124Images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{})
+	v124Images, err := apiclient.ImageList(testutil.GetContext(c), types.ImageListOptions{})
 	assert.NilError(c, err)
 	assert.Assert(c, len(v124Images) != 0)
 	for _, image := range v124Images {

+ 2 - 2
integration-cli/docker_api_inspect_test.go

@@ -1,7 +1,6 @@
 package main
 
 import (
-	"context"
 	"encoding/json"
 	"strings"
 	"testing"
@@ -9,6 +8,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/versions/v1p20"
 	"github.com/docker/docker/client"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 )
@@ -111,7 +111,7 @@ func (s *DockerAPISuite) TestInspectAPIImageResponse(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	imageJSON, _, err := apiClient.ImageInspectWithRaw(context.Background(), "busybox")
+	imageJSON, _, err := apiClient.ImageInspectWithRaw(testutil.GetContext(c), "busybox")
 	assert.NilError(c, err)
 
 	assert.Check(c, len(imageJSON.RepoTags) == 2)

+ 8 - 8
integration-cli/docker_api_logs_test.go

@@ -3,7 +3,6 @@ package main
 import (
 	"bufio"
 	"bytes"
-	"context"
 	"fmt"
 	"io"
 	"net/http"
@@ -15,6 +14,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
@@ -30,7 +30,7 @@ func (s *DockerAPISuite) TestLogsAPIWithStdout(c *testing.T) {
 	}
 
 	chLog := make(chan logOut, 1)
-	res, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id))
+	res, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 
@@ -62,7 +62,7 @@ func (s *DockerAPISuite) TestLogsAPINoStdoutNorStderr(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerLogs(context.Background(), name, types.ContainerLogsOptions{})
+	_, err = apiClient.ContainerLogs(testutil.GetContext(c), name, types.ContainerLogsOptions{})
 	assert.ErrorContains(c, err, "Bad parameters: you must choose at least one stream")
 }
 
@@ -72,7 +72,7 @@ func (s *DockerAPISuite) TestLogsAPIFollowEmptyOutput(c *testing.T) {
 	t0 := time.Now()
 	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10")
 
-	_, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
+	_, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
 	t1 := time.Now()
 	assert.NilError(c, err)
 	body.Close()
@@ -84,7 +84,7 @@ func (s *DockerAPISuite) TestLogsAPIFollowEmptyOutput(c *testing.T) {
 
 func (s *DockerAPISuite) TestLogsAPIContainerNotFound(c *testing.T) {
 	name := "nonExistentContainer"
-	resp, _, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
+	resp, _, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
 	assert.NilError(c, err)
 	assert.Equal(c, resp.StatusCode, http.StatusNotFound)
 }
@@ -106,7 +106,7 @@ func (s *DockerAPISuite) TestLogsAPIUntilFutureFollow(c *testing.T) {
 	}
 
 	cfg := types.ContainerLogsOptions{Until: until.Format(time.RFC3339Nano), Follow: true, ShowStdout: true, Timestamps: true}
-	reader, err := client.ContainerLogs(context.Background(), name, cfg)
+	reader, err := client.ContainerLogs(testutil.GetContext(c), name, cfg)
 	assert.NilError(c, err)
 
 	type logOut struct {
@@ -168,7 +168,7 @@ func (s *DockerAPISuite) TestLogsAPIUntil(c *testing.T) {
 	}
 
 	extractBody := func(c *testing.T, cfg types.ContainerLogsOptions) []string {
-		reader, err := client.ContainerLogs(context.Background(), name, cfg)
+		reader, err := client.ContainerLogs(testutil.GetContext(c), name, cfg)
 		assert.NilError(c, err)
 
 		actualStdout := new(bytes.Buffer)
@@ -205,7 +205,7 @@ func (s *DockerAPISuite) TestLogsAPIUntilDefaultValue(c *testing.T) {
 	}
 
 	extractBody := func(c *testing.T, cfg types.ContainerLogsOptions) []string {
-		reader, err := client.ContainerLogs(context.Background(), name, cfg)
+		reader, err := client.ContainerLogs(testutil.GetContext(c), name, cfg)
 		assert.NilError(c, err)
 
 		actualStdout := new(bytes.Buffer)

+ 8 - 7
integration-cli/docker_api_network_test.go

@@ -13,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
@@ -268,7 +269,7 @@ func createDeletePredefinedNetwork(c *testing.T, name string) {
 }
 
 func isNetworkAvailable(c *testing.T, name string) bool {
-	resp, body, err := request.Get("/networks")
+	resp, body, err := request.Get(testutil.GetContext(c), "/networks")
 	assert.NilError(c, err)
 	defer resp.Body.Close()
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
@@ -291,7 +292,7 @@ func getNetworkIDByName(c *testing.T, name string) string {
 	v := url.Values{}
 	v.Set("filters", filterJSON)
 
-	resp, body, err := request.Get("/networks?" + v.Encode())
+	resp, body, err := request.Get(testutil.GetContext(c), "/networks?"+v.Encode())
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.NilError(c, err)
 
@@ -311,7 +312,7 @@ func getNetworkIDByName(c *testing.T, name string) string {
 }
 
 func getNetworkResource(c *testing.T, id string) *types.NetworkResource {
-	_, obj, err := request.Get("/networks/" + id)
+	_, obj, err := request.Get(testutil.GetContext(c), "/networks/"+id)
 	assert.NilError(c, err)
 
 	nr := types.NetworkResource{}
@@ -322,7 +323,7 @@ func getNetworkResource(c *testing.T, id string) *types.NetworkResource {
 }
 
 func createNetwork(c *testing.T, config types.NetworkCreateRequest, expectedStatusCode int) string {
-	resp, body, err := request.Post("/networks/create", request.JSONBody(config))
+	resp, body, err := request.Post(testutil.GetContext(c), "/networks/create", request.JSONBody(config))
 	assert.NilError(c, err)
 	defer resp.Body.Close()
 
@@ -347,7 +348,7 @@ func connectNetwork(c *testing.T, nid, cid string) {
 		Container: cid,
 	}
 
-	resp, _, err := request.Post("/networks/"+nid+"/connect", request.JSONBody(config))
+	resp, _, err := request.Post(testutil.GetContext(c), "/networks/"+nid+"/connect", request.JSONBody(config))
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.NilError(c, err)
 }
@@ -357,13 +358,13 @@ func disconnectNetwork(c *testing.T, nid, cid string) {
 		Container: cid,
 	}
 
-	resp, _, err := request.Post("/networks/"+nid+"/disconnect", request.JSONBody(config))
+	resp, _, err := request.Post(testutil.GetContext(c), "/networks/"+nid+"/disconnect", request.JSONBody(config))
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.NilError(c, err)
 }
 
 func deleteNetwork(c *testing.T, id string, shouldSucceed bool) {
-	resp, _, err := request.Delete("/networks/" + id)
+	resp, _, err := request.Delete(testutil.GetContext(c), "/networks/"+id)
 	assert.NilError(c, err)
 	defer resp.Body.Close()
 	if !shouldSucceed {

+ 9 - 9
integration-cli/docker_api_stats_test.go

@@ -1,7 +1,6 @@
 package main
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"net/http"
@@ -17,6 +16,7 @@ import (
 	"github.com/docker/docker/api/types/system"
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/client"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/skip"
@@ -30,7 +30,7 @@ func (s *DockerAPISuite) TestAPIStatsNoStreamGetCpu(c *testing.T) {
 
 	id := strings.TrimSpace(out)
 	assert.NilError(c, waitRun(id))
-	resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id))
+	resp, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/stats?stream=false", id))
 	assert.NilError(c, err)
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 	assert.Equal(c, resp.Header.Get("Content-Type"), "application/json")
@@ -70,7 +70,7 @@ func (s *DockerAPISuite) TestAPIStatsStoppedContainerInGoroutines(c *testing.T)
 	id := strings.TrimSpace(out)
 
 	getGoRoutines := func() int {
-		_, body, err := request.Get("/info")
+		_, body, err := request.Get(testutil.GetContext(c), "/info")
 		assert.NilError(c, err)
 		info := system.Info{}
 		err = json.NewDecoder(body).Decode(&info)
@@ -81,7 +81,7 @@ func (s *DockerAPISuite) TestAPIStatsStoppedContainerInGoroutines(c *testing.T)
 
 	// When the HTTP connection is closed, the number of goroutines should not increase.
 	routines := getGoRoutines()
-	_, body, err := request.Get("/containers/" + id + "/stats")
+	_, body, err := request.Get(testutil.GetContext(c), "/containers/"+id+"/stats")
 	assert.NilError(c, err)
 	body.Close()
 
@@ -194,7 +194,7 @@ func (s *DockerAPISuite) TestAPIStatsNetworkStatsVersioning(c *testing.T) {
 func getNetworkStats(c *testing.T, id string) map[string]types.NetworkStats {
 	var st *types.StatsJSON
 
-	_, body, err := request.Get("/containers/" + id + "/stats?stream=false")
+	_, body, err := request.Get(testutil.GetContext(c), "/containers/"+id+"/stats?stream=false")
 	assert.NilError(c, err)
 
 	err = json.NewDecoder(body).Decode(&st)
@@ -211,7 +211,7 @@ func getNetworkStats(c *testing.T, id string) map[string]types.NetworkStats {
 func getVersionedStats(c *testing.T, id string, apiVersion string) map[string]interface{} {
 	stats := make(map[string]interface{})
 
-	_, body, err := request.Get("/" + apiVersion + "/containers/" + id + "/stats?stream=false")
+	_, body, err := request.Get(testutil.GetContext(c), "/"+apiVersion+"/containers/"+id+"/stats?stream=false")
 	assert.NilError(c, err)
 	defer body.Close()
 
@@ -269,9 +269,9 @@ func (s *DockerAPISuite) TestAPIStatsContainerNotFound(c *testing.T) {
 
 	expected := "No such container: nonexistent"
 
-	_, err = apiClient.ContainerStats(context.Background(), "nonexistent", true)
+	_, err = apiClient.ContainerStats(testutil.GetContext(c), "nonexistent", true)
 	assert.ErrorContains(c, err, expected)
-	_, err = apiClient.ContainerStats(context.Background(), "nonexistent", false)
+	_, err = apiClient.ContainerStats(testutil.GetContext(c), "nonexistent", false)
 	assert.ErrorContains(c, err, expected)
 }
 
@@ -288,7 +288,7 @@ func (s *DockerAPISuite) TestAPIStatsNoStreamConnectedContainers(c *testing.T) {
 
 	ch := make(chan error, 1)
 	go func() {
-		resp, body, err := request.Get("/containers/" + id2 + "/stats?stream=false")
+		resp, body, err := request.Get(testutil.GetContext(c), "/containers/"+id2+"/stats?stream=false")
 		defer body.Close()
 		if err != nil {
 			ch <- err

+ 45 - 37
integration-cli/docker_api_swarm_node_test.go

@@ -10,16 +10,18 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/poll"
 )
 
 func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	d3 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	d3 := s.AddDaemon(ctx, c, true, false)
 
-	nodes := d1.ListNodes(c)
+	nodes := d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 3, fmt.Sprintf("nodes: %#v", nodes))
 
 loop0:
@@ -34,34 +36,39 @@ loop0:
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
 
-	nodes := d.ListNodes(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
-	d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
+	nodes := d.ListNodes(ctx, c)
+
+	d.UpdateNode(ctx, c, nodes[0].ID, func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
-	n := d.GetNode(c, nodes[0].ID)
+	n := d.GetNode(ctx, c, nodes[0].ID)
 	assert.Equal(c, n.Spec.Availability, swarm.NodeAvailabilityPause)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *testing.T) {
 	testRequires(c, Network)
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	_ = s.AddDaemon(c, true, false)
 
-	nodes := d1.ListNodes(c)
+	ctx := testutil.GetContext(c)
+
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	_ = s.AddDaemon(ctx, c, true, false)
+
+	nodes := d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 3, fmt.Sprintf("nodes: %#v", nodes))
 
 	// Getting the info so we can take the NodeID
-	d2Info := d2.SwarmInfo(c)
+	d2Info := d2.SwarmInfo(ctx, c)
 
 	// forceful removal of d2 should work
-	d1.RemoveNode(c, d2Info.NodeID, true)
+	d1.RemoveNode(ctx, c, d2Info.NodeID, true)
 
-	nodes = d1.ListNodes(c)
+	nodes = d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 2, fmt.Sprintf("nodes: %#v", nodes))
 
 	// Restart the node that was removed
@@ -71,57 +78,58 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *testing.T) {
 	time.Sleep(1 * time.Second)
 
 	// Make sure the node didn't rejoin
-	nodes = d1.ListNodes(c)
+	nodes = d1.ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), 2, fmt.Sprintf("nodes: %#v", nodes))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
 
 	time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
 
 	// start a service, expect balanced distribution
 	instances := 2
-	id := d1.CreateService(c, simpleTestService, setInstances(instances))
+	id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// drain d2, all containers should move to d1
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityDrain
 	})
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// set d2 back to active
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityActive
 	})
 
 	instances = 1
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
 
 	instances = 2
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
 
 	// drained node first so we don't get any old containers
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
 
-	d2ContainerCount := len(d2.ActiveContainers(c))
+	d2ContainerCount := len(d2.ActiveContainers(testutil.GetContext(c), c))
 
 	// set d2 to paused, scale service up, only d1 gets new tasks
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
 	instances = 4
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances-d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances-d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
 }

+ 142 - 129
integration-cli/docker_api_swarm_service_test.go

@@ -3,7 +3,6 @@
 package main
 
 import (
-	"context"
 	"fmt"
 	"strconv"
 	"strings"
@@ -16,6 +15,7 @@ import (
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	"golang.org/x/sys/unix"
 	"gotest.tools/v3/assert"
@@ -33,20 +33,21 @@ func setPortConfig(portConfig []swarm.PortConfig) testdaemon.ServiceConstructor
 }
 
 func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	// Create a service with a port mapping of 8080:8081.
 	portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}}
-	serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	serviceID := d.CreateService(ctx, c, simpleTestService, setInstances(1), setPortConfig(portConfig))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Update the service: changed the port mapping from 8080:8081 to 8082:8083.
 	updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}}
-	remoteService := d.GetService(c, serviceID)
-	d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig))
+	remoteService := d.GetService(ctx, c, serviceID)
+	d.UpdateService(ctx, c, remoteService, setPortConfig(updatedPortConfig))
 
 	// Inspect the service and verify port mapping.
-	updatedService := d.GetService(c, serviceID)
+	updatedService := d.GetService(ctx, c, serviceID)
 	assert.Assert(c, updatedService.Spec.EndpointSpec != nil)
 	assert.Equal(c, len(updatedService.Spec.EndpointSpec.Ports), 1)
 	assert.Equal(c, updatedService.Spec.EndpointSpec.Ports[0].TargetPort, uint32(8083))
@@ -54,19 +55,21 @@ func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
-	services := d.ListServices(c)
+	services := d.ListServices(ctx, c)
 	assert.Assert(c, services != nil)
 	assert.Assert(c, len(services) == 0, "services: %#v", services)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	instances := 2
-	id := d.CreateService(c, simpleTestService, setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	id := d.CreateService(ctx, c, simpleTestService, setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	client := d.NewClientT(c)
 	defer client.Close()
@@ -74,79 +77,82 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *testing.T) {
 	options := types.ServiceInspectOptions{InsertDefaults: true}
 
 	// insertDefaults inserts UpdateConfig when service is fetched by ID
-	resp, _, err := client.ServiceInspectWithRaw(context.Background(), id, options)
+	resp, _, err := client.ServiceInspectWithRaw(ctx, id, options)
 	out := fmt.Sprintf("%+v", resp)
 	assert.NilError(c, err)
 	assert.Assert(c, strings.Contains(out, "UpdateConfig"))
 
 	// insertDefaults inserts UpdateConfig when service is fetched by ID
-	resp, _, err = client.ServiceInspectWithRaw(context.Background(), "top", options)
+	resp, _, err = client.ServiceInspectWithRaw(ctx, "top", options)
 	out = fmt.Sprintf("%+v", resp)
 	assert.NilError(c, err)
 	assert.Assert(c, strings.Contains(out, "UpdateConfig"))
 
-	service := d.GetService(c, id)
+	service := d.GetService(ctx, c, id)
 	instances = 5
-	d.UpdateService(c, service, setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	d.UpdateService(ctx, c, service, setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d.RemoveService(c, service.ID)
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	d.RemoveService(ctx, c, service.ID)
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	d3 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	d3 := s.AddDaemon(ctx, c, true, false)
 
 	time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
 
 	instances := 9
-	id := d1.CreateService(c, simpleTestService, setInstances(instances))
+	id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount(ctx), checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// reconciliation on d2 node down
 	d2.Stop(c)
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// test downscaling
 	instances = 5
-	d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	d1.UpdateService(ctx, c, d1.GetService(ctx, c, id), setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
-	d3 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	d3 := s.AddDaemon(ctx, c, true, false)
 
-	d1.CreateService(c, simpleTestService, setGlobalMode)
+	d1.CreateService(ctx, c, simpleTestService, setGlobalMode)
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d4 := s.AddDaemon(c, true, false)
-	d5 := s.AddDaemon(c, true, false)
+	d4 := s.AddDaemon(ctx, c, true, false)
+	d5 := s.AddDaemon(ctx, c, true, false)
 
-	poll.WaitOn(c, pollCheck(c, d4.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d5.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d4.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d5.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
 
 	// service image at start
 	image1 := "busybox:latest"
@@ -163,23 +169,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
 	instances := 5
 	parallelism := 2
 	rollbackParallelism := 3
-	id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, serviceForUpdate, setInstances(instances))
 
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// issue service update
-	service := daemons[0].GetService(c, id)
-	daemons[0].UpdateService(c, service, setImage(image2))
+	service := daemons[0].GetService(ctx, c, id)
+	daemons[0].UpdateService(ctx, c, service, setImage(image2))
 
 	// first batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 3nd batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Roll back to the previous version. This uses the CLI because
 	// rollback used to be a client-side operation.
@@ -187,14 +193,15 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
 	assert.NilError(c, err, out)
 
 	// first batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	// service image at start
 	image1 := "busybox:latest"
@@ -213,12 +220,12 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
 	instances := 5
 	parallelism := 2
 	rollbackParallelism := 3
-	id := d.CreateService(c, serviceForUpdate, setInstances(instances), setUpdateOrder(swarm.UpdateOrderStartFirst), setRollbackOrder(swarm.UpdateOrderStartFirst))
+	id := d.CreateService(ctx, c, serviceForUpdate, setInstances(instances), setUpdateOrder(swarm.UpdateOrderStartFirst), setRollbackOrder(swarm.UpdateOrderStartFirst))
 
 	checkStartingTasks := func(expected int) []swarm.Task {
 		var startingTasks []swarm.Task
 		poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-			tasks := d.GetServiceTasks(c, id)
+			tasks := d.GetServiceTasks(ctx, c, id)
 			startingTasks = nil
 			for _, t := range tasks {
 				if t.Status.State == swarm.TaskStateStarting {
@@ -239,47 +246,47 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
 	}
 
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// issue service update
-	service := d.GetService(c, id)
-	d.UpdateService(c, service, setImage(image2))
+	service := d.GetService(ctx, c, id)
+	d.UpdateService(ctx, c, service, setImage(image2))
 
 	// first batch
 
 	// The old tasks should be running, and the new ones should be starting.
 	startingTasks := checkStartingTasks(parallelism)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// make it healthy
 	makeTasksHealthy(startingTasks)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
 
 	// The old tasks should be running, and the new ones should be starting.
 	startingTasks = checkStartingTasks(parallelism)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// make it healthy
 	makeTasksHealthy(startingTasks)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 3nd batch
 
 	// The old tasks should be running, and the new ones should be starting.
 	startingTasks = checkStartingTasks(1)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// make it healthy
 	makeTasksHealthy(startingTasks)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Roll back to the previous version. This uses the CLI because
 	// rollback is a client-side operation.
@@ -287,20 +294,21 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
 	assert.NilError(c, err, out)
 
 	// first batch
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// 2nd batch
-	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
 
 	// service image at start
 	image1 := "busybox:latest"
@@ -309,18 +317,18 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
 
 	// create service
 	instances := 5
-	id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, serviceForUpdate, setInstances(instances))
 
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// issue service update
-	service := daemons[0].GetService(c, id)
-	daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1))
+	service := daemons[0].GetService(ctx, c, id)
+	daemons[0].UpdateService(ctx, c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1))
 
 	// should update 2 tasks and then pause
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceUpdateState(id), checker.Equals(swarm.UpdateStatePaused)), poll.WithTimeout(defaultReconciliationTimeout))
-	v, _ := daemons[0].CheckServiceRunningTasks(id)(c)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceUpdateState(ctx, id), checker.Equals(swarm.UpdateStatePaused)), poll.WithTimeout(defaultReconciliationTimeout))
+	v, _ := daemons[0].CheckServiceRunningTasks(ctx, id)(c)
 	assert.Assert(c, v == instances-2)
 
 	// Roll back to the previous version. This uses the CLI because
@@ -328,80 +336,82 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
 	out, err := daemons[0].Cmd("service", "update", "--detach", "--rollback", id)
 	assert.NilError(c, err, out)
 
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
 
 	// create service
 	constraints := []string{"node.role==worker"}
 	instances := 3
-	id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 	// validate tasks are running on worker nodes
-	tasks := daemons[0].GetServiceTasks(c, id)
+	tasks := daemons[0].GetServiceTasks(ctx, c, id)
 	for _, task := range tasks {
-		node := daemons[0].GetNode(c, task.NodeID)
+		node := daemons[0].GetNode(ctx, c, task.NodeID)
 		assert.Equal(c, node.Spec.Role, swarm.NodeRoleWorker)
 	}
 	// remove service
-	daemons[0].RemoveService(c, id)
+	daemons[0].RemoveService(ctx, c, id)
 
 	// create service
 	constraints = []string{"node.role!=worker"}
-	id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	tasks = daemons[0].GetServiceTasks(c, id)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	// validate tasks are running on manager nodes
 	for _, task := range tasks {
-		node := daemons[0].GetNode(c, task.NodeID)
+		node := daemons[0].GetNode(ctx, c, task.NodeID)
 		assert.Equal(c, node.Spec.Role, swarm.NodeRoleManager)
 	}
 	// remove service
-	daemons[0].RemoveService(c, id)
+	daemons[0].RemoveService(ctx, c, id)
 
 	// create service
 	constraints = []string{"node.role==nosuchrole"}
-	id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks created
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 	// let scheduler try
 	time.Sleep(250 * time.Millisecond)
 	// validate tasks are not assigned to any node
-	tasks = daemons[0].GetServiceTasks(c, id)
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	for _, task := range tasks {
 		assert.Equal(c, task.NodeID, "")
 	}
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
-	nodes := daemons[0].ListNodes(c)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	nodes := daemons[0].ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), nodeCount)
 
 	// add labels to nodes
-	daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
+	daemons[0].UpdateNode(ctx, c, nodes[0].ID, func(n *swarm.Node) {
 		n.Spec.Annotations.Labels = map[string]string{
 			"security": "high",
 		}
 	})
 	for i := 1; i < nodeCount; i++ {
-		daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) {
+		daemons[0].UpdateNode(ctx, c, nodes[i].ID, func(n *swarm.Node) {
 			n.Spec.Annotations.Labels = map[string]string{
 				"security": "low",
 			}
@@ -411,92 +421,94 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
 	// create service
 	instances := 3
 	constraints := []string{"node.labels.security==high"}
-	id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	tasks := daemons[0].GetServiceTasks(c, id)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	tasks := daemons[0].GetServiceTasks(ctx, c, id)
 	// validate all tasks are running on nodes[0]
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID == nodes[0].ID)
 	}
 	// remove service
-	daemons[0].RemoveService(c, id)
+	daemons[0].RemoveService(ctx, c, id)
 
 	// create service
 	constraints = []string{"node.labels.security!=high"}
-	id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	tasks = daemons[0].GetServiceTasks(c, id)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	// validate all tasks are NOT running on nodes[0]
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID != nodes[0].ID)
 	}
 	// remove service
-	daemons[0].RemoveService(c, id)
+	daemons[0].RemoveService(ctx, c, id)
 
 	constraints = []string{"node.labels.security==medium"}
-	id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks created
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 	// let scheduler try
 	time.Sleep(250 * time.Millisecond)
-	tasks = daemons[0].GetServiceTasks(c, id)
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	// validate tasks are not assigned
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID == "")
 	}
 	// remove service
-	daemons[0].RemoveService(c, id)
+	daemons[0].RemoveService(ctx, c, id)
 
 	// multiple constraints
 	constraints = []string{
 		"node.labels.security==high",
 		fmt.Sprintf("node.id==%s", nodes[1].ID),
 	}
-	id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	id = daemons[0].CreateService(ctx, c, simpleTestService, setConstraints(constraints), setInstances(instances))
 	// wait for tasks created
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 	// let scheduler try
 	time.Sleep(250 * time.Millisecond)
-	tasks = daemons[0].GetServiceTasks(c, id)
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	// validate tasks are not assigned
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID == "")
 	}
 	// make nodes[1] fulfills the constraints
-	daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) {
+	daemons[0].UpdateNode(ctx, c, nodes[1].ID, func(n *swarm.Node) {
 		n.Spec.Annotations.Labels = map[string]string{
 			"security": "high",
 		}
 	})
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	tasks = daemons[0].GetServiceTasks(c, id)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	tasks = daemons[0].GetServiceTasks(ctx, c, id)
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID == nodes[1].ID)
 	}
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
+	ctx := testutil.GetContext(c)
+
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
 	for i := 0; i < nodeCount; i++ {
-		daemons[i] = s.AddDaemon(c, true, i == 0)
+		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
-	nodes := daemons[0].ListNodes(c)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount(ctx), checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
+	nodes := daemons[0].ListNodes(ctx, c)
 	assert.Equal(c, len(nodes), nodeCount)
 
 	// add labels to nodes
-	daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
+	daemons[0].UpdateNode(ctx, c, nodes[0].ID, func(n *swarm.Node) {
 		n.Spec.Annotations.Labels = map[string]string{
 			"rack": "a",
 		}
 	})
 	for i := 1; i < nodeCount; i++ {
-		daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) {
+		daemons[0].UpdateNode(ctx, c, nodes[i].ID, func(n *swarm.Node) {
 			n.Spec.Annotations.Labels = map[string]string{
 				"rack": "b",
 			}
@@ -506,10 +518,10 @@ func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
 	// create service
 	instances := 4
 	prefs := []swarm.PlacementPreference{{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.rack"}}}
-	id := daemons[0].CreateService(c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances))
+	id := daemons[0].CreateService(ctx, c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances))
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	tasks := daemons[0].GetServiceTasks(c, id)
+	poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(ctx, id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	tasks := daemons[0].GetServiceTasks(ctx, c, id)
 	// validate all tasks are running on nodes[0]
 	tasksOnNode := make(map[string]int)
 	for _, task := range tasks {
@@ -523,22 +535,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
 func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon)
 	testRequires(c, DaemonIsLinux)
+	ctx := testutil.GetContext(c)
 
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, true)
-	d3 := s.AddDaemon(c, true, false)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, true)
+	d3 := s.AddDaemon(ctx, c, true, false)
 
 	time.Sleep(1 * time.Second) // make sure all daemons are ready to accept
 
 	instances := 9
-	d1.CreateService(c, simpleTestService, setInstances(instances))
+	d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	getContainers := func() map[string]*daemon.Daemon {
 		m := make(map[string]*daemon.Daemon)
 		for _, d := range []*daemon.Daemon{d1, d2, d3} {
-			for _, id := range d.ActiveContainers(c) {
+			for _, id := range d.ActiveContainers(testutil.GetContext(c), c) {
 				m[id] = d
 			}
 		}
@@ -555,7 +568,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *testing.T) {
 	_, err := containers[toRemove].Cmd("stop", toRemove)
 	assert.NilError(c, err)
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	containers2 := getContainers()
 	assert.Assert(c, len(containers2) == instances)
@@ -581,7 +594,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *testing.T) {
 
 	time.Sleep(time.Second) // give some time to handle the signal
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	containers2 = getContainers()
 	assert.Assert(c, len(containers2) == instances)

+ 187 - 161
integration-cli/docker_api_swarm_test.go

@@ -24,6 +24,7 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	"github.com/docker/docker/testutil/request"
 	"github.com/moby/swarmkit/v2/ca"
@@ -35,32 +36,33 @@ import (
 var defaultReconciliationTimeout = 30 * time.Second
 
 func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	// todo: should find a better way to verify that components are running than /info
-	d1 := s.AddDaemon(c, true, true)
-	info := d1.SwarmInfo(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	info := d1.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, true)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 	assert.Equal(c, info.Cluster.RootRotationInProgress, false)
 
-	d2 := s.AddDaemon(c, true, false)
-	info = d2.SwarmInfo(c)
+	d2 := s.AddDaemon(ctx, c, true, false)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, false)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 
 	// Leaving cluster
-	assert.NilError(c, d2.SwarmLeave(c, false))
+	assert.NilError(c, d2.SwarmLeave(ctx, c, false))
 
-	info = d2.SwarmInfo(c)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, false)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 
-	d2.SwarmJoin(c, swarm.JoinRequest{
+	d2.SwarmJoin(ctx, c, swarm.JoinRequest{
 		ListenAddr:  d1.SwarmListenAddr(),
 		JoinToken:   d1.JoinTokens(c).Worker,
 		RemoteAddrs: []string{d1.SwarmListenAddr()},
 	})
 
-	info = d2.SwarmInfo(c)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, false)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 
@@ -71,96 +73,98 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) {
 	d1.StartNode(c)
 	d2.StartNode(c)
 
-	info = d1.SwarmInfo(c)
+	info = d1.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, true)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 
-	info = d2.SwarmInfo(c)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, false)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *testing.T) {
-	d1 := s.AddDaemon(c, false, false)
-	d1.SwarmInit(c, swarm.InitRequest{})
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, false, false)
+	d1.SwarmInit(ctx, c, swarm.InitRequest{})
 
 	// todo: error message differs depending if some components of token are valid
 
-	d2 := s.AddDaemon(c, false, false)
+	d2 := s.AddDaemon(ctx, c, false, false)
 	c2 := d2.NewClientT(c)
-	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
+	err := c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
 		ListenAddr:  d2.SwarmListenAddr(),
 		RemoteAddrs: []string{d1.SwarmListenAddr()},
 	})
 	assert.ErrorContains(c, err, "join token is necessary")
-	info := d2.SwarmInfo(c)
+	info := d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 
-	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
+	err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
 		ListenAddr:  d2.SwarmListenAddr(),
 		JoinToken:   "foobaz",
 		RemoteAddrs: []string{d1.SwarmListenAddr()},
 	})
 	assert.ErrorContains(c, err, "invalid join token")
-	info = d2.SwarmInfo(c)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 
 	workerToken := d1.JoinTokens(c).Worker
 
-	d2.SwarmJoin(c, swarm.JoinRequest{
+	d2.SwarmJoin(ctx, c, swarm.JoinRequest{
 		ListenAddr:  d2.SwarmListenAddr(),
 		JoinToken:   workerToken,
 		RemoteAddrs: []string{d1.SwarmListenAddr()},
 	})
-	info = d2.SwarmInfo(c)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
-	assert.NilError(c, d2.SwarmLeave(c, false))
-	info = d2.SwarmInfo(c)
+	assert.NilError(c, d2.SwarmLeave(ctx, c, false))
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 
 	// change tokens
 	d1.RotateTokens(c)
 
-	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
+	err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
 		ListenAddr:  d2.SwarmListenAddr(),
 		JoinToken:   workerToken,
 		RemoteAddrs: []string{d1.SwarmListenAddr()},
 	})
 	assert.ErrorContains(c, err, "join token is necessary")
-	info = d2.SwarmInfo(c)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 
 	workerToken = d1.JoinTokens(c).Worker
 
-	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
-	info = d2.SwarmInfo(c)
+	d2.SwarmJoin(ctx, c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
-	assert.NilError(c, d2.SwarmLeave(c, false))
-	info = d2.SwarmInfo(c)
+	assert.NilError(c, d2.SwarmLeave(ctx, c, false))
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 
 	// change spec, don't change tokens
 	d1.UpdateSwarm(c, func(s *swarm.Spec) {})
 
-	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
+	err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
 		ListenAddr:  d2.SwarmListenAddr(),
 		RemoteAddrs: []string{d1.SwarmListenAddr()},
 	})
 	assert.ErrorContains(c, err, "join token is necessary")
-	info = d2.SwarmInfo(c)
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 
-	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
-	info = d2.SwarmInfo(c)
+	d2.SwarmJoin(ctx, c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
-	assert.NilError(c, d2.SwarmLeave(c, false))
-	info = d2.SwarmInfo(c)
+	assert.NilError(c, d2.SwarmLeave(ctx, c, false))
+	info = d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 }
 
 func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) {
-	d1 := s.AddDaemon(c, false, false)
-	d1.SwarmInit(c, swarm.InitRequest{})
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, false, false)
+	d1.SwarmInit(ctx, c, swarm.InitRequest{})
 	d1.UpdateSwarm(c, func(s *swarm.Spec) {
 		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
 			{
@@ -174,20 +178,21 @@ func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) {
 			},
 		}
 	})
-	info := d1.SwarmInfo(c)
+	info := d1.SwarmInfo(ctx, c)
 	assert.Equal(c, len(info.Cluster.Spec.CAConfig.ExternalCAs), 2)
 	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, "")
 	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, "cacert")
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, false, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, false, false)
 	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
 	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
 	replacementToken := strings.Join(splitToken, "-")
 	c2 := d2.NewClientT(c)
-	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
+	err := c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
 		ListenAddr:  d2.SwarmListenAddr(),
 		JoinToken:   replacementToken,
 		RemoteAddrs: []string{d1.SwarmListenAddr()},
@@ -196,25 +201,26 @@ func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
-	d1 := s.AddDaemon(c, false, false)
-	d1.SwarmInit(c, swarm.InitRequest{})
-	d2 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, false, false)
+	d1.SwarmInit(ctx, c, swarm.InitRequest{})
+	d2 := s.AddDaemon(ctx, c, true, false)
 
-	info := d2.SwarmInfo(c)
+	info := d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, false)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleManager
 	})
 
-	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable(ctx), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleWorker
 	})
 
-	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.False()), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable(ctx), checker.False()), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Wait for the role to change to worker in the cert. This is partially
 	// done because it's something worth testing in its own right, and
@@ -235,10 +241,10 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
 	}, checker.Equals("swarm-worker")), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// Demoting last node should fail
-	node := d1.GetNode(c, d1.NodeID())
+	node := d1.GetNode(ctx, c, d1.NodeID())
 	node.Spec.Role = swarm.NodeRoleWorker
 	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
-	res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
+	res, body, err := request.Post(testutil.GetContext(c), url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
 	assert.NilError(c, err)
 	b, err := request.ReadBody(body)
 	assert.NilError(c, err)
@@ -253,44 +259,46 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
 	if !strings.Contains(string(b), "last manager of the swarm") {
 		assert.Assert(c, strings.Contains(string(b), "this would result in a loss of quorum"))
 	}
-	info = d1.SwarmInfo(c)
+	info = d1.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 	assert.Equal(c, info.ControlAvailable, true)
 
 	// Promote already demoted node
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleManager
 	})
 
-	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable(ctx), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	// add three managers, one of these is leader
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, true)
-	d3 := s.AddDaemon(c, true, true)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, true)
+	d3 := s.AddDaemon(ctx, c, true, true)
 
 	// start a service by hitting each of the 3 managers
-	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
+	d1.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "test1"
 	})
-	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
+	d2.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "test2"
 	})
-	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
+	d3.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "test3"
 	})
 
 	// 3 services should be started now, because the requests were proxied to leader
 	// query each node and make sure it returns 3 services
 	for _, d := range []*daemon.Daemon{d1, d2, d3} {
-		services := d.ListServices(c)
+		services := d.ListServices(ctx, c)
 		assert.Equal(c, len(services), 3)
 	}
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	if runtime.GOARCH == "s390x" {
 		c.Skip("Disabled on s390x")
 	}
@@ -299,14 +307,14 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
 	}
 
 	// Create 3 nodes
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, true)
-	d3 := s.AddDaemon(c, true, true)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, true)
+	d3 := s.AddDaemon(ctx, c, true, true)
 
 	// assert that the first node we made is the leader, and the other two are followers
-	assert.Equal(c, d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, true)
-	assert.Equal(c, d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, false)
-	assert.Equal(c, d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, false)
+	assert.Equal(c, d1.GetNode(ctx, c, d1.NodeID()).ManagerStatus.Leader, true)
+	assert.Equal(c, d1.GetNode(ctx, c, d2.NodeID()).ManagerStatus.Leader, false)
+	assert.Equal(c, d1.GetNode(ctx, c, d3.NodeID()).ManagerStatus.Leader, false)
 
 	d1.Stop(c)
 
@@ -321,7 +329,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
 			leader = nil
 			followers = nil
 			for _, d := range nodes {
-				n := d.GetNode(c, d.NodeID(), func(err error) bool {
+				n := d.GetNode(ctx, c, d.NodeID(), func(err error) bool {
 					if strings.Contains(err.Error(), context.DeadlineExceeded.Error()) || strings.Contains(err.Error(), "swarm does not have a leader") {
 						lastErr = err
 						return true
@@ -372,6 +380,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	if runtime.GOARCH == "s390x" {
 		c.Skip("Disabled on s390x")
 	}
@@ -379,18 +388,18 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
 		c.Skip("Disabled on  ppc64le")
 	}
 
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, true)
-	d3 := s.AddDaemon(c, true, true)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, true)
+	d3 := s.AddDaemon(ctx, c, true, true)
 
-	d1.CreateService(c, simpleTestService)
+	d1.CreateService(ctx, c, simpleTestService)
 
 	d2.Stop(c)
 
 	// make sure there is a leader
-	poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckLeader(ctx), checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
+	d1.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "top1"
 	})
 
@@ -404,36 +413,37 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
 
 	// d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
+		_, err := cli.ServiceCreate(testutil.GetContext(c), service.Spec, types.ServiceCreateOptions{})
 		return err.Error(), ""
 	}, checker.Contains("Make sure more than half of the managers are online.")), poll.WithTimeout(defaultReconciliationTimeout*2))
 
 	d2.StartNode(c)
 
 	// make sure there is a leader
-	poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckLeader(ctx), checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
+	d1.CreateService(ctx, c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "top3"
 	})
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	instances := 2
-	d.CreateService(c, simpleTestService, setInstances(instances))
+	d.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
 	id, err := d.Cmd("run", "-d", "busybox", "top")
 	assert.NilError(c, err, id)
 	id = strings.TrimSpace(id)
 
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	assert.ErrorContains(c, d.SwarmLeave(c, false), "")
-	assert.NilError(c, d.SwarmLeave(c, true))
+	assert.ErrorContains(c, d.SwarmLeave(ctx, c, false), "")
+	assert.NilError(c, d.SwarmLeave(ctx, c, true))
 
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	id2, err := d.Cmd("ps", "-q")
 	assert.NilError(c, err, id2)
@@ -443,26 +453,28 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) {
 // #23629
 func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) {
 	testRequires(c, Network)
-	s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, false, false)
+
+	ctx := testutil.GetContext(c)
+	s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, false, false)
 
 	id, err := d2.Cmd("run", "-d", "busybox", "top")
 	assert.NilError(c, err, id)
 	id = strings.TrimSpace(id)
 
 	c2 := d2.NewClientT(c)
-	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
+	err = c2.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
 		ListenAddr:  d2.SwarmListenAddr(),
 		RemoteAddrs: []string{"123.123.123.123:1234"},
 	})
 	assert.ErrorContains(c, err, "Timeout was reached")
 
-	info := d2.SwarmInfo(c)
+	info := d2.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStatePending)
 
-	assert.NilError(c, d2.SwarmLeave(c, true))
+	assert.NilError(c, d2.SwarmLeave(ctx, c, true))
 
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	id2, err := d2.Cmd("ps", "-q")
 	assert.NilError(c, err, id2)
@@ -472,61 +484,65 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) {
 // #23705
 func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *testing.T) {
 	testRequires(c, Network)
-	d := s.AddDaemon(c, false, false)
+
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, false, false)
 	client := d.NewClientT(c)
-	err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
+	err := client.SwarmJoin(testutil.GetContext(c), swarm.JoinRequest{
 		ListenAddr:  d.SwarmListenAddr(),
 		RemoteAddrs: []string{"123.123.123.123:1234"},
 	})
 	assert.ErrorContains(c, err, "Timeout was reached")
 
-	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState(ctx), checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	d.RestartNode(c)
 
-	info := d.SwarmInfo(c)
+	info := d.SwarmInfo(ctx, c)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
 
 	instances := 2
-	id := d1.CreateService(c, simpleTestService, setInstances(instances))
+	id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
-	d1.GetService(c, id)
+	d1.GetService(ctx, c, id)
 	d1.RestartNode(c)
-	d1.GetService(c, id)
+	d1.GetService(ctx, c, id)
 
-	d2 := s.AddDaemon(c, true, true)
-	d2.GetService(c, id)
+	d2 := s.AddDaemon(ctx, c, true, true)
+	d2.GetService(ctx, c, id)
 	d2.RestartNode(c)
-	d2.GetService(c, id)
+	d2.GetService(ctx, c, id)
 
-	d3 := s.AddDaemon(c, true, true)
-	d3.GetService(c, id)
+	d3 := s.AddDaemon(ctx, c, true, true)
+	d3.GetService(ctx, c, id)
 	d3.RestartNode(c)
-	d3.GetService(c, id)
+	d3.GetService(ctx, c, id)
 
 	err := d3.Kill()
 	assert.NilError(c, err)
 	time.Sleep(1 * time.Second) // time to handle signal
 	d3.StartNode(c)
-	d3.GetService(c, id)
+	d3.GetService(ctx, c, id)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	instances := 2
-	id := d.CreateService(c, simpleTestService, setInstances(instances))
+	id := d.CreateService(ctx, c, simpleTestService, setInstances(instances))
 
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	containers := d.ActiveContainers(c)
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	containers := d.ActiveContainers(ctx, c)
 	instances = 4
-	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	containers2 := d.ActiveContainers(c)
+	d.UpdateService(ctx, c, d.GetService(ctx, c, id), setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	containers2 := d.ActiveContainers(ctx, c)
 
 loop0:
 	for _, c1 := range containers {
@@ -540,11 +556,12 @@ loop0:
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) {
-	d := s.AddDaemon(c, false, false)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, false, false)
 	req := swarm.InitRequest{
 		ListenAddr: "",
 	}
-	res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
+	res, _, err := request.Post(testutil.GetContext(c), "/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusBadRequest)
 
@@ -552,44 +569,45 @@ func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) {
 		ListenAddr:  "0.0.0.0:2377",
 		RemoteAddrs: []string{""},
 	}
-	res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
+	res, _, err = request.Post(testutil.GetContext(c), "/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusBadRequest)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *testing.T) {
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, true)
 
 	instances := 2
-	id := d1.CreateService(c, simpleTestService, setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	id := d1.CreateService(ctx, c, simpleTestService, setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// drain d2, all containers should move to d1
-	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
+	d1.UpdateNode(ctx, c, d2.NodeID(), func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityDrain
 	})
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
-	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	d2.Stop(c)
 
-	d1.SwarmInit(c, swarm.InitRequest{
+	d1.SwarmInit(ctx, c, swarm.InitRequest{
 		ForceNewCluster: true,
 		Spec:            swarm.Spec{},
 	})
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	d3 := s.AddDaemon(c, true, true)
-	info := d3.SwarmInfo(c)
+	d3 := s.AddDaemon(ctx, c, true, true)
+	info := d3.SwarmInfo(ctx, c)
 	assert.Equal(c, info.ControlAvailable, true)
 	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 
 	instances = 4
-	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))
+	d3.UpdateService(ctx, c, d3.GetService(ctx, c, id), setInstances(instances))
 
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d3.CheckActiveContainerCount(ctx)), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 func simpleTestService(s *swarm.Service) {
@@ -731,13 +749,14 @@ func setGlobalMode(s *swarm.Service) {
 func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerCount int) {
 	var totalMCount, totalWCount int
 
+	ctx := testutil.GetContext(c)
 	for _, d := range cl {
 		var info swarm.Info
 
 		// check info in a poll.WaitOn(), because if the cluster doesn't have a leader, `info` will return an error
 		checkInfo := func(c *testing.T) (interface{}, string) {
 			client := d.NewClientT(c)
-			daemonInfo, err := client.Info(context.Background())
+			daemonInfo, err := client.Info(ctx)
 			info = daemonInfo.Swarm
 			return err, "cluster not ready in time"
 		}
@@ -751,12 +770,12 @@ func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerC
 		totalMCount++
 		var mCount, wCount int
 
-		for _, n := range d.ListNodes(c) {
+		for _, n := range d.ListNodes(ctx, c) {
 			waitReady := func(c *testing.T) (interface{}, string) {
 				if n.Status.State == swarm.NodeStateReady {
 					return true, ""
 				}
-				nn := d.GetNode(c, n.ID)
+				nn := d.GetNode(ctx, c, n.ID)
 				n = *nn
 				return n.Status.State == swarm.NodeStateReady, fmt.Sprintf("state of node %s, reported by %s", n.ID, d.NodeID())
 			}
@@ -766,7 +785,7 @@ func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerC
 				if n.Spec.Availability == swarm.NodeAvailabilityActive {
 					return true, ""
 				}
-				nn := d.GetNode(c, n.ID)
+				nn := d.GetNode(ctx, c, n.ID)
 				n = *nn
 				return n.Spec.Availability == swarm.NodeAvailabilityActive, fmt.Sprintf("availability of node %s, reported by %s", n.ID, d.NodeID())
 			}
@@ -792,20 +811,21 @@ func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerC
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	mCount, wCount := 5, 1
 
 	var nodes []*daemon.Daemon
 	for i := 0; i < mCount; i++ {
-		manager := s.AddDaemon(c, true, true)
-		info := manager.SwarmInfo(c)
+		manager := s.AddDaemon(ctx, c, true, true)
+		info := manager.SwarmInfo(ctx, c)
 		assert.Equal(c, info.ControlAvailable, true)
 		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 		nodes = append(nodes, manager)
 	}
 
 	for i := 0; i < wCount; i++ {
-		worker := s.AddDaemon(c, true, false)
-		info := worker.SwarmInfo(c)
+		worker := s.AddDaemon(ctx, c, true, false)
+		info := worker.SwarmInfo(ctx, c)
 		assert.Equal(c, info.ControlAvailable, false)
 		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
 		nodes = append(nodes, worker)
@@ -857,38 +877,41 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	instances := 2
-	id := d.CreateService(c, simpleTestService, setInstances(instances))
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	id := d.CreateService(ctx, c, simpleTestService, setInstances(instances))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	service := d.GetService(c, id)
+	service := d.GetService(ctx, c, id)
 	instances = 5
 
 	setInstances(instances)(service)
 	cli := d.NewClientT(c)
 	defer cli.Close()
-	_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	_, err := cli.ServiceUpdate(ctx, service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
 	assert.NilError(c, err)
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 }
 
 // Unlocking an unlocked swarm results in an error
 func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 	err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"})
 	assert.ErrorContains(c, err, "swarm is not locked")
 }
 
 // #29885
 func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
 	assert.NilError(c, err)
 	defer ln.Close()
-	d := s.AddDaemon(c, false, false)
+	d := s.AddDaemon(ctx, c, false, false)
 	client := d.NewClientT(c)
-	_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
+	_, err = client.SwarmInit(testutil.GetContext(c), swarm.InitRequest{
 		ListenAddr: d.SwarmListenAddr(),
 	})
 	assert.ErrorContains(c, err, "address already in use")
@@ -898,7 +921,8 @@ func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) {
 // caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
 // This test makes sure the fixes correctly output scopes instead.
 func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 	cli := d.NewClientT(c)
 	defer cli.Close()
 
@@ -909,19 +933,19 @@ func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
 
 	networkCreate.Driver = "bridge"
 
-	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
+	n1, err := cli.NetworkCreate(testutil.GetContext(c), name, networkCreate)
 	assert.NilError(c, err)
 
 	networkCreate.Driver = "overlay"
 
-	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
+	n2, err := cli.NetworkCreate(testutil.GetContext(c), name, networkCreate)
 	assert.NilError(c, err)
 
-	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
+	r1, err := cli.NetworkInspect(testutil.GetContext(c), n1.ID, types.NetworkInspectOptions{})
 	assert.NilError(c, err)
 	assert.Equal(c, r1.Scope, "local")
 
-	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
+	r2, err := cli.NetworkInspect(testutil.GetContext(c), n2.ID, types.NetworkInspectOptions{})
 	assert.NilError(c, err)
 	assert.Equal(c, r2.Scope, "swarm")
 }
@@ -930,13 +954,14 @@ func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
 func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) {
 	// Issue #36386 can be a independent one, which is worth further investigation.
 	c.Skip("Root cause of Issue #36386 is needed")
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
 	assert.NilError(c, err, out)
 
 	instances := 1
-	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
+	d.CreateService(ctx, c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
 		if s.Spec.TaskTemplate.ContainerSpec == nil {
 			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
 		}
@@ -946,19 +971,20 @@ func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) {
 		}
 	})
 
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
 
-	containers := d.ActiveContainers(c)
+	containers := d.ActiveContainers(testutil.GetContext(c), c)
 
 	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
 	assert.NilError(c, err, out)
 }
 
 func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
-	m := s.AddDaemon(c, true, true)
-	w := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	m := s.AddDaemon(ctx, c, true, true)
+	w := s.AddDaemon(ctx, c, true, false)
 
-	info := m.SwarmInfo(c)
+	info := m.SwarmInfo(ctx, c)
 
 	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot
 
@@ -984,7 +1010,7 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
 		// poll to make sure update succeeds
 		var clusterTLSInfo swarm.TLSInfo
 		for j := 0; j < 18; j++ {
-			info := m.SwarmInfo(c)
+			info := m.SwarmInfo(ctx, c)
 
 			// the desired CA cert and key is always redacted
 			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCAKey, "")
@@ -1006,8 +1032,8 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
 		// could take another second or two for the nodes to trust the new roots after they've all gotten
 		// new TLS certificates
 		for j := 0; j < 18; j++ {
-			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
-			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo
+			mInfo := m.GetNode(ctx, c, m.NodeID()).Description.TLSInfo
+			wInfo := m.GetNode(ctx, c, w.NodeID()).Description.TLSInfo
 
 			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
 				break
@@ -1017,17 +1043,17 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
 			time.Sleep(250 * time.Millisecond)
 		}
 
-		assert.DeepEqual(c, m.GetNode(c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
-		assert.DeepEqual(c, m.GetNode(c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
+		assert.DeepEqual(c, m.GetNode(ctx, c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
+		assert.DeepEqual(c, m.GetNode(ctx, c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
 		currentTrustRoot = clusterTLSInfo.TrustRoot
 	}
 }
 
 func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "test-scoped-network"
-	ctx := context.Background()
 	apiclient := d.NewClientT(c)
 
 	resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})

+ 11 - 9
integration-cli/docker_api_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"net/http"
@@ -11,6 +12,7 @@ import (
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
@@ -19,8 +21,8 @@ type DockerAPISuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerAPISuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerAPISuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerAPISuite) OnTimeout(c *testing.T) {
@@ -28,13 +30,13 @@ func (s *DockerAPISuite) OnTimeout(c *testing.T) {
 }
 
 func (s *DockerAPISuite) TestAPIOptionsRoute(c *testing.T) {
-	resp, _, err := request.Do("/", request.Method(http.MethodOptions))
+	resp, _, err := request.Do(testutil.GetContext(c), "/", request.Method(http.MethodOptions))
 	assert.NilError(c, err)
 	assert.Equal(c, resp.StatusCode, http.StatusOK)
 }
 
 func (s *DockerAPISuite) TestAPIGetEnabledCORS(c *testing.T) {
-	res, body, err := request.Get("/version")
+	res, body, err := request.Get(testutil.GetContext(c), "/version")
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusOK)
 	body.Close()
@@ -59,7 +61,7 @@ func (s *DockerAPISuite) TestAPIClientVersionOldNotSupported(c *testing.T) {
 	v[1] = strconv.Itoa(vMinInt)
 	version := strings.Join(v, ".")
 
-	resp, body, err := request.Get("/v" + version + "/version")
+	resp, body, err := request.Get(testutil.GetContext(c), "/v"+version+"/version")
 	assert.NilError(c, err)
 	defer body.Close()
 	assert.Equal(c, resp.StatusCode, http.StatusBadRequest)
@@ -70,7 +72,7 @@ func (s *DockerAPISuite) TestAPIClientVersionOldNotSupported(c *testing.T) {
 }
 
 func (s *DockerAPISuite) TestAPIErrorJSON(c *testing.T) {
-	httpResp, body, err := request.Post("/containers/create", request.JSONBody(struct{}{}))
+	httpResp, body, err := request.Post(testutil.GetContext(c), "/containers/create", request.JSONBody(struct{}{}))
 	assert.NilError(c, err)
 	if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, httpResp.StatusCode, http.StatusInternalServerError)
@@ -87,7 +89,7 @@ func (s *DockerAPISuite) TestAPIErrorPlainText(c *testing.T) {
 	// Windows requires API 1.25 or later. This test is validating a behaviour which was present
 	// in v1.23, but changed in 1.24, hence not applicable on Windows. See apiVersionSupportsJSONErrors
 	testRequires(c, DaemonIsLinux)
-	httpResp, body, err := request.Post("/v1.23/containers/create", request.JSONBody(struct{}{}))
+	httpResp, body, err := request.Post(testutil.GetContext(c), "/v1.23/containers/create", request.JSONBody(struct{}{}))
 	assert.NilError(c, err)
 	if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
 		assert.Equal(c, httpResp.StatusCode, http.StatusInternalServerError)
@@ -102,7 +104,7 @@ func (s *DockerAPISuite) TestAPIErrorPlainText(c *testing.T) {
 
 func (s *DockerAPISuite) TestAPIErrorNotFoundJSON(c *testing.T) {
 	// 404 is a different code path to normal errors, so test separately
-	httpResp, body, err := request.Get("/notfound", request.JSON)
+	httpResp, body, err := request.Get(testutil.GetContext(c), "/notfound", request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, httpResp.StatusCode, http.StatusNotFound)
 	assert.Assert(c, strings.Contains(httpResp.Header.Get("Content-Type"), "application/json"))
@@ -112,7 +114,7 @@ func (s *DockerAPISuite) TestAPIErrorNotFoundJSON(c *testing.T) {
 }
 
 func (s *DockerAPISuite) TestAPIErrorNotFoundPlainText(c *testing.T) {
-	httpResp, body, err := request.Get("/v1.23/notfound", request.JSON)
+	httpResp, body, err := request.Get(testutil.GetContext(c), "/v1.23/notfound", request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, httpResp.StatusCode, http.StatusNotFound)
 	assert.Assert(c, strings.Contains(httpResp.Header.Get("Content-Type"), "text/plain"))

+ 3 - 2
integration-cli/docker_cli_attach_test.go

@@ -2,6 +2,7 @@ package main
 
 import (
 	"bufio"
+	"context"
 	"fmt"
 	"io"
 	"os/exec"
@@ -22,8 +23,8 @@ type DockerCLIAttachSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIAttachSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIAttachSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIAttachSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_build_test.go

@@ -3,6 +3,7 @@ package main
 import (
 	"archive/tar"
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -34,8 +35,8 @@ type DockerCLIBuildSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIBuildSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIBuildSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIBuildSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_commit_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"strings"
 	"testing"
 
@@ -14,8 +15,8 @@ type DockerCLICommitSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLICommitSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLICommitSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLICommitSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_cp_test.go

@@ -2,6 +2,7 @@ package main
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"os"
@@ -30,8 +31,8 @@ type DockerCLICpSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLICpSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLICpSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLICpSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_create_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -21,8 +22,8 @@ type DockerCLICreateSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLICreateSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLICreateSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLICreateSuite) OnTimeout(c *testing.T) {

+ 72 - 71
integration-cli/docker_cli_daemon_test.go

@@ -32,6 +32,7 @@ import (
 	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/docker/libnetwork/iptables"
 	"github.com/docker/docker/opts"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	"github.com/moby/sys/mount"
 	"golang.org/x/sys/unix"
@@ -54,7 +55,7 @@ func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	cli.Docker(
 		cli.Args("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"),
@@ -88,7 +89,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *testi
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil {
 		c.Fatal(err, out)
@@ -111,7 +112,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *testing.T) {
 
 // #11008
 func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top")
 	assert.NilError(c, err, "run top1: %v", out)
@@ -169,7 +170,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false")
 	assert.NilError(c, err, "run top1: %v", out)
@@ -221,7 +222,7 @@ func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *testing
 }
 
 func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
 		c.Fatalf("Could not run top: %s, %v", out, err)
@@ -239,7 +240,7 @@ func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil {
 		c.Fatalf("Could not run top: %s, %v", out, err)
@@ -288,7 +289,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *testing.T) {
 	setupV6(c)
 	defer teardownV6(c)
 
-	s.d.StartWithBusybox(c, "--ipv6")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6")
 
 	iface, err := net.InterfaceByName("docker0")
 	if err != nil {
@@ -348,7 +349,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *testing.T) {
 	// ipv6 enabled
 	deleteInterface(c, "docker0")
 
-	s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100")
 
 	out, err := s.d.Cmd("run", "-d", "--name=ipv6test", "busybox:latest", "top")
 	assert.NilError(c, err, "Could not run container: %s, %v", out, err)
@@ -375,7 +376,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *testing.T) {
 	// ipv6 enabled
 	deleteInterface(c, "docker0")
 
-	s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:1::/64")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6", "--fixed-cidr-v6=2001:db8:1::/64")
 
 	out, err := s.d.Cmd("run", "-d", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox", "top")
 	assert.NilError(c, err, out)
@@ -391,7 +392,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6HostMode(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon)
 	deleteInterface(c, "docker0")
 
-	s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64")
 	out, err := s.d.Cmd("run", "-d", "--name=hostcnt", "--network=host", "busybox:latest", "top")
 	assert.NilError(c, err, "Could not run container: %s, %v", out, err)
 
@@ -467,7 +468,7 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *testing.T) {
 		cmdArgs = append(cmdArgs, "--tls=false", "--host", "tcp://"+net.JoinHostPort(l.daemon, l.port))
 	}
 
-	s.d.StartWithBusybox(c, cmdArgs...)
+	s.d.StartWithBusybox(testutil.GetContext(c), c, cmdArgs...)
 
 	for _, l := range listeningPorts {
 		output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", l.client, l.port), "busybox", "true")
@@ -514,7 +515,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *testing.T) {
 	createInterface(c, "bridge", bridgeName, bridgeIP)
 	defer deleteInterface(c, bridgeName)
 
-	d.StartWithBusybox(c, "--bridge", bridgeName)
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName)
 
 	ipTablesSearchString := bridgeIPNet.String()
 	icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{
@@ -532,7 +533,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *testing.T) {
 func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *testing.T) {
 	// start with bridge none
 	d := s.d
-	d.StartWithBusybox(c, "--bridge", "none")
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", "none")
 	defer d.Restart(c)
 
 	// verify docker0 iface is not there
@@ -577,7 +578,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *testing.T) {
 	bridgeIP := "192.169.1.1/24"
 	ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
 
-	d.StartWithBusybox(c, "--bip", bridgeIP)
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIP)
 	defer d.Restart(c)
 
 	ifconfigSearchString := ip.String()
@@ -633,7 +634,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *testing.T) {
 	defer deleteInterface(c, bridgeName)
 
 	args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"}
-	d.StartWithBusybox(c, args...)
+	d.StartWithBusybox(testutil.GetContext(c), c, args...)
 	defer d.Restart(c)
 
 	for i := 0; i < 4; i++ {
@@ -658,7 +659,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *testing.T) {
 	createInterface(c, "bridge", bridgeName, bridgeIP)
 	defer deleteInterface(c, bridgeName)
 
-	d.StartWithBusybox(c, "--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24")
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24")
 	defer s.d.Restart(c)
 
 	out, err := d.Cmd("run", "-d", "--name", "bb", "busybox", "top")
@@ -687,7 +688,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *testi
 	createInterface(c, "bridge", bridgeName, bridgeIP)
 	defer deleteInterface(c, bridgeName)
 
-	d.StartWithBusybox(c, "--bridge", bridgeName, "--fixed-cidr", bridgeIP)
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--fixed-cidr", bridgeIP)
 	defer s.d.Restart(c)
 
 	out, err := d.Cmd("run", "-d", "busybox", "top")
@@ -705,7 +706,7 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *testing.T) {
 	bridgeIP := "192.169.1.1"
 	bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)
 
-	d.StartWithBusybox(c, "--bip", bridgeIPNet)
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIPNet)
 	defer d.Restart(c)
 
 	expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP)
@@ -725,7 +726,7 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *testing.T) {
 	bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)
 	gatewayIP := "192.169.1.254"
 
-	d.StartWithBusybox(c, "--bip", bridgeIPNet, "--default-gateway", gatewayIP)
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeIPNet, "--default-gateway", gatewayIP)
 	defer d.Restart(c)
 
 	expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP)
@@ -740,7 +741,7 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainer
 	deleteInterface(c, defaultNetworkBridge)
 
 	// Program a custom default gateway outside of the container subnet, daemon should accept it and start
-	s.d.StartWithBusybox(c, "--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254")
 
 	deleteInterface(c, defaultNetworkBridge)
 	s.d.Restart(c)
@@ -756,7 +757,7 @@ func (s *DockerDaemonSuite) TestDaemonIP(c *testing.T) {
 	ipStr := "192.170.1.1/24"
 	ip, _, _ := net.ParseCIDR(ipStr)
 	args := []string{"--ip", ip.String()}
-	d.StartWithBusybox(c, args...)
+	d.StartWithBusybox(testutil.GetContext(c), c, args...)
 	defer d.Restart(c)
 
 	out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
@@ -791,7 +792,7 @@ func (s *DockerDaemonSuite) TestDaemonICCPing(c *testing.T) {
 	createInterface(c, "bridge", bridgeName, bridgeIP)
 	defer deleteInterface(c, bridgeName)
 
-	d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false")
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--icc=false")
 	defer d.Restart(c)
 
 	result := icmd.RunCommand("iptables", "-nvL", "FORWARD")
@@ -829,7 +830,7 @@ func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *testing.T) {
 	createInterface(c, "bridge", bridgeName, bridgeIP)
 	defer deleteInterface(c, bridgeName)
 
-	d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false")
+	d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--icc=false")
 	defer d.Restart(c)
 
 	result := icmd.RunCommand("iptables", "-nvL", "FORWARD")
@@ -855,7 +856,7 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *tes
 	createInterface(c, "bridge", bridgeName, bridgeIP)
 	defer deleteInterface(c, bridgeName)
 
-	s.d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--bridge", bridgeName, "--icc=false")
 	defer s.d.Restart(c)
 
 	out, err := s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top")
@@ -883,7 +884,7 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *tes
 }
 
 func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *testing.T) {
-	s.d.StartWithBusybox(c, "--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024")
 
 	out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -u)")
 	if err != nil {
@@ -929,7 +930,7 @@ func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *testing.T) {
 
 // #11315
 func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil {
 		c.Fatal(err, out)
@@ -947,7 +948,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline")
 	assert.NilError(c, err, out)
@@ -985,7 +986,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline")
 	if err != nil {
@@ -1002,7 +1003,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *testing.T)
 }
 
 func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *testing.T) {
-	s.d.StartWithBusybox(c, "--log-driver=none")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=none")
 
 	out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline")
 	if err != nil {
@@ -1019,7 +1020,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *testing.T) {
-	s.d.StartWithBusybox(c, "--log-driver=none")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=none")
 
 	out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline")
 	if err != nil {
@@ -1059,7 +1060,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *testing.T) {
-	s.d.StartWithBusybox(c, "--log-driver=none")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=none")
 
 	out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline")
 	assert.NilError(c, err, out)
@@ -1071,7 +1072,7 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonLoggingDriverShouldBeIgnoredForBuild(c *testing.T) {
-	s.d.StartWithBusybox(c, "--log-driver=splunk")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=splunk")
 
 	result := cli.BuildCmd(c, "busyboxs", cli.Daemon(s.d),
 		build.WithDockerfile(`
@@ -1107,7 +1108,7 @@ func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat")
 	if err != nil {
@@ -1173,7 +1174,7 @@ func (s *DockerDaemonSuite) TestHTTPSRun(c *testing.T) {
 		testDaemonHTTPSAddr = "tcp://localhost:4271"
 	)
 
-	s.d.StartWithBusybox(c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
 		"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr)
 
 	args := []string{
@@ -1283,7 +1284,7 @@ func pingContainers(c *testing.T, d *daemon.Daemon, expectFailure bool) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	socket := filepath.Join(s.d.Folder, "docker.sock")
 
@@ -1296,7 +1297,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *testing.T) {
 // A subsequent daemon restart should clean up said mounts.
 func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *testing.T) {
 	d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
-	d.StartWithBusybox(c)
+	d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := d.Cmd("run", "-d", "busybox", "top")
 	assert.NilError(c, err, "Output: %s", out)
@@ -1334,7 +1335,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *test
 // os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts.
 func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *testing.T) {
 	d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
-	d.StartWithBusybox(c)
+	d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := d.Cmd("run", "-d", "busybox", "top")
 	assert.NilError(c, err, "Output: %s", out)
@@ -1352,7 +1353,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *testing.T)
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *testing.T) {
-	s.d.StartWithBusybox(t)
+	s.d.StartWithBusybox(testutil.GetContext(t), t)
 	if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil {
 		t.Fatal(out, err)
 	}
@@ -1365,7 +1366,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *testing.T)
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 	out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top")
 	if err != nil {
 		c.Fatal(out, err)
@@ -1426,7 +1427,7 @@ func teardownV6(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top")
 	assert.NilError(c, err, out)
@@ -1449,7 +1450,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlway
 }
 
 func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *testing.T) {
-	s.d.StartWithBusybox(c, "--log-opt=max-size=1k")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-opt=max-size=1k")
 	name := "logtest"
 	out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top")
 	assert.NilError(c, err, "Output: %s, err: %v", out, err)
@@ -1465,7 +1466,7 @@ func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 	if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil {
 		c.Fatal(err, out)
 	}
@@ -1500,7 +1501,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox")
 	assert.NilError(c, err, out)
@@ -1594,7 +1595,7 @@ func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *testing.T
 	bridgeIP := "192.169.1.1"
 	bridgeRange := bridgeIP + "/30"
 
-	s.d.StartWithBusybox(c, "--bip", bridgeRange)
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--bip", bridgeRange)
 	defer s.d.Restart(c)
 
 	var cont int
@@ -1642,7 +1643,7 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *testing.T) {
 
 // Test daemon restart with container links + auto restart
 func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	var parent1Args []string
 	var parent2Args []string
@@ -1705,7 +1706,7 @@ func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *testing.T) {
 	cgroupParent := "test"
 	name := "cgroup-test"
 
-	s.d.StartWithBusybox(c, "--cgroup-parent", cgroupParent)
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--cgroup-parent", cgroupParent)
 	defer s.d.Restart(c)
 
 	out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup")
@@ -1728,7 +1729,7 @@ func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *testing.T) {
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *testing.T) {
 	testRequires(c, DaemonIsLinux) // Windows does not support links
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
 	assert.NilError(c, err, out)
@@ -1751,7 +1752,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *testing.T) {
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *testing.T) {
 	testRequires(c, DaemonIsLinux) // Windows does not support links
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("create", "--name=test", "busybox")
 	assert.NilError(c, err, out)
@@ -1799,7 +1800,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *testing.T) {
 // TestDaemonRestartWithKilledRunningContainer requires live restore of running containers
 func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *testing.T) {
 	testRequires(t, DaemonIsLinux)
-	s.d.StartWithBusybox(t)
+	s.d.StartWithBusybox(testutil.GetContext(t), t)
 
 	cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top")
 	defer s.d.Stop(t)
@@ -1848,7 +1849,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *testi
 // them now, should remove the mounts.
 func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *testing.T) {
 	testRequires(c, DaemonIsLinux)
-	s.d.StartWithBusybox(c, "--live-restore")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore")
 
 	out, err := s.d.Cmd("run", "-d", "busybox", "top")
 	assert.NilError(c, err, "Output: %s", out)
@@ -1895,7 +1896,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *testing.T) {
 // TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers.
 func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *testing.T) {
 	testRequires(t, DaemonIsLinux)
-	s.d.StartWithBusybox(t, "--live-restore")
+	s.d.StartWithBusybox(testutil.GetContext(t), t, "--live-restore")
 
 	cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top")
 	defer s.d.Stop(t)
@@ -1952,7 +1953,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *tes
 // this ensures that the old, pre gh#16032 functionality continues on
 func (s *DockerDaemonSuite) TestRunLinksChanged(c *testing.T) {
 	testRequires(c, DaemonIsLinux) // Windows does not support links
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
 	assert.NilError(c, err, out)
@@ -2045,7 +2046,7 @@ func (s *DockerDaemonSuite) TestDaemonDebugLog(c *testing.T) {
 
 // Test for #21956
 func (s *DockerDaemonSuite) TestDaemonLogOptions(c *testing.T) {
-	s.d.StartWithBusybox(c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514")
 
 	out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top")
 	assert.NilError(c, err, out)
@@ -2165,7 +2166,7 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *test
 }
 
 func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *testing.T) {
-	s.d.StartWithBusybox(c, "-b=none", "--iptables=false")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "-b=none", "--iptables=false")
 
 	result := cli.BuildCmd(c, "busyboxs", cli.Daemon(s.d),
 		build.WithDockerfile(`
@@ -2182,7 +2183,7 @@ func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *testing.T)
 func (s *DockerDaemonSuite) TestDaemonDNSFlagsInHostMode(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon, DaemonIsLinux)
 
-	s.d.StartWithBusybox(c, "--dns", "1.2.3.4", "--dns-search", "example.com", "--dns-opt", "timeout:3")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--dns", "1.2.3.4", "--dns-search", "example.com", "--dns-opt", "timeout:3")
 
 	expectedOutput := "nameserver 1.2.3.4"
 	out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf")
@@ -2216,7 +2217,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *testing.T) {
 }
 `
 	os.WriteFile(configName, []byte(config), 0o644)
-	s.d.StartWithBusybox(c, "--config-file", configName)
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--config-file", configName)
 
 	// Run with default runtime
 	out, err := s.d.Cmd("run", "--rm", "busybox", "ls")
@@ -2307,7 +2308,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
-	s.d.StartWithBusybox(c, "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
 
 	// Run with default runtime
 	out, err := s.d.Cmd("run", "--rm", "busybox", "ls")
@@ -2327,7 +2328,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
 	assert.Assert(c, is.Contains(out, "/usr/local/bin/vm-manager: no such file or directory"))
 	// Start a daemon without any extra runtimes
 	s.d.Stop(c)
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	// Run with default runtime
 	out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
@@ -2350,7 +2351,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
 	assert.Assert(c, is.Contains(string(content), `runtime name 'runc' is reserved`))
 	// Check that we can select a default runtime
 	s.d.Stop(c)
-	s.d.StartWithBusybox(c, "--default-runtime=vm", "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--default-runtime=vm", "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
 
 	out, err = s.d.Cmd("run", "--rm", "busybox", "ls")
 	assert.ErrorContains(c, err, "", out)
@@ -2361,7 +2362,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	// top1 will exist after daemon restarts
 	out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top")
@@ -2384,7 +2385,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *testing.
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(testutil.GetContext(c), c)
 
 	containerName := "error-values"
 	// Make a container with both a non 0 exit code and an error message
@@ -2436,7 +2437,7 @@ func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *testing.T) {
 	assert.NilError(c, cmd.Run())
 
 	// custom one
-	s.d.StartWithBusybox(c, "--userland-proxy-path", newProxyPath)
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--userland-proxy-path", newProxyPath)
 	out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true")
 	assert.NilError(c, err, out)
 
@@ -2456,7 +2457,7 @@ func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *testing.T) {
 // Test case for #22471
 func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon)
-	s.d.StartWithBusybox(c, "--shutdown-timeout=3")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--shutdown-timeout=3")
 
 	_, err := s.d.Cmd("run", "-d", "busybox", "top")
 	assert.NilError(c, err)
@@ -2511,7 +2512,7 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *testing.T
 // Test case for 29342
 func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *testing.T) {
 	testRequires(c, DaemonIsLinux)
-	s.d.StartWithBusybox(c, "--live-restore")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore")
 
 	out, err := s.d.Cmd("run", "--init", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && touch /adduser_end && exec top")
 	assert.NilError(c, err, "Output: %s", out)
@@ -2539,7 +2540,7 @@ func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *testing.T) {
 
 func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *testing.T) {
 	testRequires(c, DaemonIsLinux, overlayFSSupported, testEnv.IsLocalDaemon)
-	s.d.StartWithBusybox(c, "--live-restore", "--storage-driver", "overlay2")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore", "--storage-driver", "overlay2")
 	out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top")
 	assert.NilError(c, err, "Output: %s", out)
 
@@ -2572,7 +2573,7 @@ func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *testing.T) {
 // #29598
 func (s *DockerDaemonSuite) TestRestartPolicyWithLiveRestore(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	s.d.StartWithBusybox(c, "--live-restore")
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--live-restore")
 
 	out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top")
 	assert.NilError(c, err, "Output: %s", out)
@@ -2633,7 +2634,7 @@ func (s *DockerDaemonSuite) TestShmSize(c *testing.T) {
 	size := 67108864 * 2
 	pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024))
 
-	s.d.StartWithBusybox(c, "--default-shm-size", fmt.Sprintf("%v", size))
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--default-shm-size", fmt.Sprintf("%v", size))
 
 	name := "shm1"
 	out, err := s.d.Cmd("run", "--name", name, "busybox", "mount")
@@ -2657,7 +2658,7 @@ func (s *DockerDaemonSuite) TestShmSizeReload(c *testing.T) {
 	assert.Assert(c, os.WriteFile(configFile, configData, 0o666) == nil, "could not write temp file for config reload")
 	pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024))
 
-	s.d.StartWithBusybox(c, "--config-file", configFile)
+	s.d.StartWithBusybox(testutil.GetContext(c), c, "--config-file", configFile)
 
 	name := "shm1"
 	out, err := s.d.Cmd("run", "--name", name, "busybox", "mount")
@@ -2749,7 +2750,7 @@ func (s *DockerDaemonSuite) TestFailedPluginRemove(c *testing.T) {
 	d.Start(c)
 	cli := d.NewClientT(c)
 
-	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
+	ctx, cancel := context.WithTimeout(testutil.GetContext(c), 300*time.Second)
 	defer cancel()
 
 	name := "test-plugin-rm-fail"
@@ -2762,7 +2763,7 @@ func (s *DockerDaemonSuite) TestFailedPluginRemove(c *testing.T) {
 	defer out.Close()
 	io.Copy(io.Discard, out)
 
-	ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
+	ctx, cancel = context.WithTimeout(testutil.GetContext(c), 30*time.Second)
 	defer cancel()
 	p, _, err := cli.PluginInspectWithRaw(ctx, name)
 	assert.NilError(c, err)
@@ -2772,7 +2773,7 @@ func (s *DockerDaemonSuite) TestFailedPluginRemove(c *testing.T) {
 	assert.NilError(c, os.Remove(configPath))
 
 	d.Restart(c)
-	ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
+	ctx, cancel = context.WithTimeout(testutil.GetContext(c), 30*time.Second)
 	defer cancel()
 	_, err = cli.Ping(ctx)
 	assert.NilError(c, err)

+ 4 - 3
integration-cli/docker_cli_events_test.go

@@ -19,6 +19,7 @@ import (
 	eventstestutils "github.com/docker/docker/daemon/events/testutils"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 	"gotest.tools/v3/icmd"
@@ -28,8 +29,8 @@ type DockerCLIEventSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIEventSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIEventSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIEventSuite) OnTimeout(c *testing.T) {
@@ -462,7 +463,7 @@ func (s *DockerCLIEventSuite) TestEventsResize(c *testing.T) {
 		Height: 80,
 		Width:  24,
 	}
-	err = apiClient.ContainerResize(context.Background(), cID, options)
+	err = apiClient.ContainerResize(testutil.GetContext(c), cID, options)
 	assert.NilError(c, err)
 
 	dockerCmd(c, "stop", cID)

+ 7 - 5
integration-cli/docker_cli_exec_test.go

@@ -17,6 +17,7 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 	"gotest.tools/v3/icmd"
@@ -26,8 +27,8 @@ type DockerCLIExecSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIExecSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIExecSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIExecSuite) OnTimeout(c *testing.T) {
@@ -90,8 +91,9 @@ func (s *DockerCLIExecSuite) TestExecAfterContainerRestart(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	// TODO Windows CI: DockerDaemonSuite doesn't run on Windows, and requires a little work to get this ported.
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top")
 	assert.NilError(c, err, "Could not run top: %s", out)
@@ -363,7 +365,7 @@ func (s *DockerCLIExecSuite) TestExecInspectID(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	_, err = apiClient.ContainerExecInspect(context.Background(), execID)
+	_, err = apiClient.ContainerExecInspect(testutil.GetContext(c), execID)
 	assert.NilError(c, err)
 
 	// Now delete the container and then an 'inspect' on the exec should
@@ -371,7 +373,7 @@ func (s *DockerCLIExecSuite) TestExecInspectID(c *testing.T) {
 	out, ec := dockerCmd(c, "rm", "-f", id)
 	assert.Equal(c, ec, 0, "error removing container: %s", out)
 
-	_, err = apiClient.ContainerExecInspect(context.Background(), execID)
+	_, err = apiClient.ContainerExecInspect(testutil.GetContext(c), execID)
 	assert.ErrorContains(c, err, "No such exec instance")
 }
 

+ 25 - 14
integration-cli/docker_cli_external_volume_driver_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -18,6 +19,7 @@ import (
 	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	"github.com/docker/docker/volume"
 	"gotest.tools/v3/assert"
@@ -43,20 +45,20 @@ type DockerExternalVolumeSuite struct {
 	*volumePlugin
 }
 
-func (s *DockerExternalVolumeSuite) SetUpTest(c *testing.T) {
+func (s *DockerExternalVolumeSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 	s.ec = &eventCounter{}
 }
 
-func (s *DockerExternalVolumeSuite) TearDownTest(c *testing.T) {
+func (s *DockerExternalVolumeSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.d != nil {
 		s.d.Stop(c)
-		s.ds.TearDownTest(c)
+		s.ds.TearDownTest(ctx, c)
 	}
 }
 
-func (s *DockerExternalVolumeSuite) SetUpSuite(c *testing.T) {
+func (s *DockerExternalVolumeSuite) SetUpSuite(ctx context.Context, c *testing.T) {
 	s.volumePlugin = newVolumePlugin(c, volumePluginName)
 }
 
@@ -267,7 +269,7 @@ func newVolumePlugin(c *testing.T, name string) *volumePlugin {
 	return s
 }
 
-func (s *DockerExternalVolumeSuite) TearDownSuite(c *testing.T) {
+func (s *DockerExternalVolumeSuite) TearDownSuite(ctx context.Context, c *testing.T) {
 	s.volumePlugin.Close()
 
 	err := os.RemoveAll("/etc/docker/plugins")
@@ -286,7 +288,8 @@ func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *testing
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
 	assert.NilError(c, err, out)
@@ -307,7 +310,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *testing.T)
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
 	assert.NilError(c, err, out)
@@ -320,7 +324,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *testing.T
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest")
 	assert.NilError(c, err, out)
@@ -339,7 +344,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *testi
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest")
 	assert.NilError(c, err, out)
@@ -396,7 +402,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 	driverName := "test-external-volume-driver-retry"
 
 	errchan := make(chan error, 1)
@@ -522,7 +529,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *testing
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
 	assert.NilError(c, err, out)
@@ -545,11 +553,12 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *test
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	driverName := stringid.GenerateRandomID()
 	p := newVolumePlugin(c, driverName)
 	defer p.Close()
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test")
 	assert.NilError(c, err, out)
@@ -593,7 +602,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *t
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 	s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount")
 
 	out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true")
@@ -603,7 +613,8 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnCp(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 	s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name=test")
 
 	out, _ := s.d.Cmd("run", "-d", "--name=test", "-v", "test:/foo", "busybox", "/bin/sh", "-c", "touch /test && top")

+ 3 - 2
integration-cli/docker_cli_health_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"strconv"
 	"strings"
@@ -16,8 +17,8 @@ type DockerCLIHealthSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIHealthSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIHealthSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIHealthSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_history_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"regexp"
 	"strconv"
@@ -16,8 +17,8 @@ type DockerCLIHistorySuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIHistorySuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIHistorySuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIHistorySuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_images_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -21,8 +22,8 @@ type DockerCLIImagesSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIImagesSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIImagesSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIImagesSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_import_test.go

@@ -3,6 +3,7 @@ package main
 import (
 	"bufio"
 	"compress/gzip"
+	"context"
 	"os"
 	"os/exec"
 	"regexp"
@@ -18,8 +19,8 @@ type DockerCLIImportSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIImportSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIImportSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIImportSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_info_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"strings"
@@ -13,8 +14,8 @@ type DockerCLIInfoSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIInfoSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIInfoSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIInfoSuite) OnTimeout(c *testing.T) {

+ 2 - 2
integration-cli/docker_cli_info_unix_test.go

@@ -3,11 +3,11 @@
 package main
 
 import (
-	"context"
 	"testing"
 
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/daemon/config"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 )
@@ -21,7 +21,7 @@ func (s *DockerCLIInfoSuite) TestInfoSecurityOptions(c *testing.T) {
 	apiClient, err := client.NewClientWithOpts(client.FromEnv)
 	assert.NilError(c, err)
 	defer apiClient.Close()
-	info, err := apiClient.Info(context.Background())
+	info, err := apiClient.Info(testutil.GetContext(c))
 	assert.NilError(c, err)
 
 	if Apparmor() {

+ 3 - 2
integration-cli/docker_cli_inspect_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -19,8 +20,8 @@ type DockerCLIInspectSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIInspectSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIInspectSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIInspectSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_links_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"regexp"
@@ -17,8 +18,8 @@ type DockerCLILinksSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLILinksSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLILinksSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLILinksSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_login_test.go

@@ -2,6 +2,7 @@ package main
 
 import (
 	"bytes"
+	"context"
 	"os/exec"
 	"strings"
 	"testing"
@@ -13,8 +14,8 @@ type DockerCLILoginSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLILoginSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLILoginSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLILoginSuite) OnTimeout(c *testing.T) {

+ 3 - 1
integration-cli/docker_cli_logout_test.go

@@ -9,11 +9,13 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 )
 
 func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	workingDir, err := os.Getwd()
 	assert.NilError(c, err)

+ 56 - 20
integration-cli/docker_cli_logs_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"os/exec"
@@ -11,6 +12,9 @@ import (
 
 	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
+	testdaemon "github.com/docker/docker/testutil/daemon"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/icmd"
 )
@@ -19,8 +23,8 @@ type DockerCLILogsSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLILogsSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLILogsSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLILogsSuite) OnTimeout(c *testing.T) {
@@ -282,24 +286,39 @@ func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
 }
 
 func (s *DockerCLILogsSuite) TestLogsFollowGoroutinesWithStdout(c *testing.T) {
-	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done")
-	id := strings.TrimSpace(out)
-	assert.NilError(c, waitRun(id))
+	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
+	c.Parallel()
+
+	ctx := testutil.GetContext(c)
+	d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
+	defer func() {
+		d.Stop(c)
+		d.Cleanup(c)
+	}()
+	d.StartWithBusybox(ctx, c, "--iptables=false")
 
-	nroutines, err := getGoroutineNumber()
+	out, err := d.Cmd("run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done")
 	assert.NilError(c, err)
-	cmd := exec.Command(dockerBinary, "logs", "-f", id)
+
+	id := strings.TrimSpace(out)
+	assert.NilError(c, d.WaitRun(id))
+
+	client := d.NewClientT(c)
+	nroutines := waitForStableGourtineCount(ctx, c, client)
+
+	cmd := d.Command("logs", "-f", id)
 	r, w := io.Pipe()
 	defer r.Close()
 	defer w.Close()
 
 	cmd.Stdout = w
-	assert.NilError(c, cmd.Start())
-	defer cmd.Process.Kill()
+	res := icmd.StartCmd(cmd)
+	assert.NilError(c, res.Error)
+	defer res.Cmd.Process.Kill()
 
 	finished := make(chan error)
 	go func() {
-		finished <- cmd.Wait()
+		finished <- res.Cmd.Wait()
 	}()
 
 	// Make sure pipe is written to
@@ -314,35 +333,52 @@ func (s *DockerCLILogsSuite) TestLogsFollowGoroutinesWithStdout(c *testing.T) {
 	// Check read from pipe succeeded
 	assert.NilError(c, <-chErr)
 
-	assert.NilError(c, cmd.Process.Kill())
+	assert.NilError(c, res.Cmd.Process.Kill())
 	<-finished
 
 	// NGoroutines is not updated right away, so we need to wait before failing
-	assert.NilError(c, waitForGoroutines(nroutines))
+	waitForGoroutines(ctx, c, client, nroutines)
 }
 
 func (s *DockerCLILogsSuite) TestLogsFollowGoroutinesNoOutput(c *testing.T) {
-	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done")
+	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
+	c.Parallel()
+
+	d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
+	defer func() {
+		d.Stop(c)
+		d.Cleanup(c)
+	}()
+
+	ctx := testutil.GetContext(c)
+
+	d.StartWithBusybox(ctx, c, "--iptables=false")
+
+	out, err := d.Cmd("run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done")
+	assert.NilError(c, err)
 	id := strings.TrimSpace(out)
-	assert.NilError(c, waitRun(id))
+	assert.NilError(c, d.WaitRun(id))
 
-	nroutines, err := getGoroutineNumber()
+	client := d.NewClientT(c)
+	nroutines := waitForStableGourtineCount(ctx, c, client)
 	assert.NilError(c, err)
-	cmd := exec.Command(dockerBinary, "logs", "-f", id)
-	assert.NilError(c, cmd.Start())
+
+	cmd := d.Command("logs", "-f", id)
+	res := icmd.StartCmd(cmd)
+	assert.NilError(c, res.Error)
 
 	finished := make(chan error)
 	go func() {
-		finished <- cmd.Wait()
+		finished <- res.Cmd.Wait()
 	}()
 
 	time.Sleep(200 * time.Millisecond)
-	assert.NilError(c, cmd.Process.Kill())
+	assert.NilError(c, res.Cmd.Process.Kill())
 
 	<-finished
 
 	// NGoroutines is not updated right away, so we need to wait before failing
-	assert.NilError(c, waitForGoroutines(nroutines))
+	waitForGoroutines(ctx, c, client, nroutines)
 }
 
 func (s *DockerCLILogsSuite) TestLogsCLIContainerNotFound(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_netmode_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"strings"
 	"testing"
 
@@ -18,8 +19,8 @@ type DockerCLINetmodeSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLINetmodeSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLINetmodeSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLINetmodeSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_network_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"net/http/httptest"
 	"testing"
 
@@ -11,8 +12,8 @@ type DockerCLINetworkSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLINetworkSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLINetworkSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLINetworkSuite) OnTimeout(c *testing.T) {

+ 23 - 12
integration-cli/docker_cli_network_unix_test.go

@@ -3,6 +3,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"net"
@@ -25,6 +26,7 @@ import (
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/testutil"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	"github.com/vishvananda/netlink"
 	"golang.org/x/sys/unix"
@@ -39,18 +41,18 @@ const (
 
 var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest
 
-func (s *DockerNetworkSuite) SetUpTest(c *testing.T) {
+func (s *DockerNetworkSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 }
 
-func (s *DockerNetworkSuite) TearDownTest(c *testing.T) {
+func (s *DockerNetworkSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	if s.d != nil {
 		s.d.Stop(c)
-		s.ds.TearDownTest(c)
+		s.ds.TearDownTest(ctx, c)
 	}
 }
 
-func (s *DockerNetworkSuite) SetUpSuite(c *testing.T) {
+func (s *DockerNetworkSuite) SetUpSuite(ctx context.Context, c *testing.T) {
 	mux := http.NewServeMux()
 	s.server = httptest.NewServer(mux)
 	assert.Assert(c, s.server != nil, "Failed to start an HTTP Server")
@@ -210,7 +212,7 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
 	assert.NilError(c, err)
 }
 
-func (s *DockerNetworkSuite) TearDownSuite(c *testing.T) {
+func (s *DockerNetworkSuite) TearDownSuite(ctx context.Context, c *testing.T) {
 	if s.server == nil {
 		return
 	}
@@ -306,7 +308,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *testing.T) {
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *testing.T) {
-	testRequires(c, OnlyDefaultNetworks)
+	testRequires(c, func() bool { return OnlyDefaultNetworks(testutil.GetContext(c)) })
+
 	testNet := "testnet1"
 	testLabel := "foo"
 	testValue := "bar"
@@ -786,6 +789,8 @@ func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *testing.T) {
+	ctx := testutil.GetContext(c)
+
 	// On default bridge network built-in service discovery should not happen
 	hostsFile := "/etc/hosts"
 	bridgeName := "external-bridge"
@@ -793,7 +798,7 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *
 	createInterface(c, "bridge", bridgeName, bridgeIP)
 	defer deleteInterface(c, bridgeName)
 
-	s.d.StartWithBusybox(c, "--bridge", bridgeName)
+	s.d.StartWithBusybox(ctx, c, "--bridge", bridgeName)
 	defer s.d.Restart(c)
 
 	// run two containers and store first container's etc/hosts content
@@ -944,6 +949,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *testing.T) {
 
 func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *testing.T) {
 	testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
+
+	ctx := testutil.GetContext(c)
 	dnd := "dnd"
 	did := "did"
 
@@ -951,7 +958,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *testing
 	server := httptest.NewServer(mux)
 	setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did)
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 	_, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1")
 	assert.NilError(c, err)
 
@@ -1051,10 +1058,11 @@ func verifyContainerIsConnectedToNetworks(c *testing.T, d *daemon.Daemon, cName
 
 func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon)
+	ctx := testutil.GetContext(c)
 	cName := "bb"
 	nwList := []string{"nw1", "nw2", "nw3"}
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	connectContainerToNetworks(c, s.d, cName, nwList)
 	verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList)
@@ -1070,10 +1078,11 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRest
 
 func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon)
+	ctx := testutil.GetContext(c)
 	cName := "cc"
 	nwList := []string{"nw1", "nw2", "nw3"}
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	connectContainerToNetworks(c, s.d, cName, nwList)
 	verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList)
@@ -1097,7 +1106,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *testing.T) {
 
 func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *testing.T) {
 	testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	// Run a few containers on host network
 	for i := 0; i < 10; i++ {
@@ -1620,7 +1630,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *t
 }
 
 func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *testing.T) {
-	s.d.StartWithBusybox(t, "--live-restore")
+	ctx := testutil.GetContext(t)
+	s.d.StartWithBusybox(ctx, t, "--live-restore")
 	defer s.d.Stop(t)
 	oldCon := "old"
 

+ 4 - 3
integration-cli/docker_cli_plugins_logdriver_test.go

@@ -6,6 +6,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/client"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 )
 
@@ -13,8 +14,8 @@ type DockerCLIPluginLogDriverSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIPluginLogDriverSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIPluginLogDriverSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIPluginLogDriverSuite) OnTimeout(c *testing.T) {
@@ -51,7 +52,7 @@ func (s *DockerCLIPluginLogDriverSuite) TestPluginLogDriverInfoList(c *testing.T
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	info, err := apiClient.Info(context.Background())
+	info, err := apiClient.Info(testutil.GetContext(c))
 	assert.NilError(c, err)
 
 	drivers := strings.Join(info.Plugins.Log, " ")

+ 7 - 6
integration-cli/docker_cli_plugins_test.go

@@ -15,6 +15,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/fixtures/plugin"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -34,8 +35,8 @@ type DockerCLIPluginsSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIPluginsSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIPluginsSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIPluginsSuite) OnTimeout(c *testing.T) {
@@ -162,7 +163,7 @@ func (ps *DockerPluginSuite) TestPluginSet(c *testing.T) {
 	client := testEnv.APIClient()
 
 	name := "test"
-	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
 	defer cancel()
 
 	initialValue := "0"
@@ -207,7 +208,7 @@ func (ps *DockerPluginSuite) TestPluginSet(c *testing.T) {
 
 func (ps *DockerPluginSuite) TestPluginInstallArgs(c *testing.T) {
 	pName := path.Join(ps.registryHost(), "plugin", "testplugininstallwithargs")
-	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
 	defer cancel()
 
 	plugin.CreateInRegistry(ctx, pName, nil, func(cfg *plugin.Config) {
@@ -345,7 +346,7 @@ func (ps *DockerPluginSuite) TestPluginIDPrefix(c *testing.T) {
 	name := "test"
 	client := testEnv.APIClient()
 
-	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
 	initialValue := "0"
 	err := plugin.Create(ctx, client, name, func(cfg *plugin.Config) {
 		cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}}
@@ -406,7 +407,7 @@ func (ps *DockerPluginSuite) TestPluginListDefaultFormat(c *testing.T) {
 	name := "test:latest"
 	client := testEnv.APIClient()
 
-	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	ctx, cancel := context.WithTimeout(testutil.GetContext(c), 60*time.Second)
 	defer cancel()
 	err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) {
 		cfg.Description = "test plugin"

+ 26 - 47
integration-cli/docker_cli_port_test.go

@@ -9,15 +9,17 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
+	is "gotest.tools/v3/assert/cmp"
 )
 
 type DockerCLIPortSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIPortSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIPortSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIPortSuite) OnTimeout(c *testing.T) {
@@ -26,21 +28,19 @@ func (s *DockerCLIPortSuite) OnTimeout(c *testing.T) {
 
 func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 	testRequires(c, DaemonIsLinux)
+	ctx := testutil.GetContext(c)
+
 	// one port
 	out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top")
 	firstID := strings.TrimSpace(out)
 
 	out, _ = dockerCmd(c, "port", firstID, "80")
 
-	err := assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
-	// Port list is not correct
-	assert.NilError(c, err)
+	assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
 
 	out, _ = dockerCmd(c, "port", firstID)
 
-	err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876", "80/tcp -> [::]:9876"})
-	// Port list is not correct
-	assert.NilError(c, err)
+	assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876", "80/tcp -> [::]:9876"})
 
 	dockerCmd(c, "rm", "-f", firstID)
 
@@ -54,13 +54,11 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 
 	out, _ = dockerCmd(c, "port", ID, "80")
 
-	err = assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
-	// Port list is not correct
-	assert.NilError(c, err)
+	assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
 
 	out, _ = dockerCmd(c, "port", ID)
 
-	err = assertPortList(c, out, []string{
+	assertPortList(c, out, []string{
 		"80/tcp -> 0.0.0.0:9876",
 		"80/tcp -> [::]:9876",
 		"81/tcp -> 0.0.0.0:9877",
@@ -68,8 +66,6 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 		"82/tcp -> 0.0.0.0:9878",
 		"82/tcp -> [::]:9878",
 	})
-	// Port list is not correct
-	assert.NilError(c, err)
 
 	dockerCmd(c, "rm", "-f", ID)
 
@@ -84,13 +80,11 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 
 	out, _ = dockerCmd(c, "port", ID, "80")
 
-	err = assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876", "0.0.0.0:9999", "[::]:9999"})
-	// Port list is not correct
-	assert.NilError(c, err)
+	assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876", "0.0.0.0:9999", "[::]:9999"})
 
 	out, _ = dockerCmd(c, "port", ID)
 
-	err = assertPortList(c, out, []string{
+	assertPortList(c, out, []string{
 		"80/tcp -> 0.0.0.0:9876",
 		"80/tcp -> 0.0.0.0:9999",
 		"80/tcp -> [::]:9876",
@@ -100,8 +94,6 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 		"82/tcp -> 0.0.0.0:9878",
 		"82/tcp -> [::]:9878",
 	})
-	// Port list is not correct
-	assert.NilError(c, err)
 	dockerCmd(c, "rm", "-f", ID)
 
 	testRange := func() {
@@ -113,16 +105,14 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 
 			out, _ = dockerCmd(c, "port", IDs[i])
 
-			err = assertPortList(c, out, []string{
+			assertPortList(c, out, []string{
 				fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i),
 				fmt.Sprintf("80/tcp -> [::]:%d", 9090+i),
 			})
-			// Port list is not correct
-			assert.NilError(c, err)
 		}
 
 		// test port range exhaustion
-		out, _, err = dockerCmdWithError("run", "-d", "-p", "9090-9092:80", "busybox", "top")
+		out, _, err := dockerCmdWithError("run", "-d", "-p", "9090-9092:80", "busybox", "top")
 		// Exhausted port range did not return an error
 		assert.Assert(c, err != nil, "out: %s", out)
 
@@ -136,7 +126,7 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 
 	// test invalid port ranges
 	for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} {
-		out, _, err = dockerCmdWithError("run", "-d", "-p", invalidRange, "busybox", "top")
+		out, _, err := dockerCmdWithError("run", "-d", "-p", invalidRange, "busybox", "top")
 		// Port range should have returned an error
 		assert.Assert(c, err != nil, "out: %s", out)
 	}
@@ -147,7 +137,7 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 
 	out, _ = dockerCmd(c, "port", ID)
 
-	err = assertPortList(c, out, []string{
+	assertPortList(c, out, []string{
 		"80/tcp -> 0.0.0.0:9800",
 		"80/tcp -> [::]:9800",
 		"81/tcp -> 0.0.0.0:9801",
@@ -157,8 +147,6 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 		"83/tcp -> 0.0.0.0:9803",
 		"83/tcp -> [::]:9803",
 	})
-	// Port list is not correct
-	assert.NilError(c, err)
 	dockerCmd(c, "rm", "-f", ID)
 
 	// test mixing protocols in same port range
@@ -168,18 +156,15 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 	out, _ = dockerCmd(c, "port", ID)
 
 	// Running this test multiple times causes the TCP port to increment.
-	err = assertPortRange(ID, []int{8000, 8080}, []int{8000, 8080})
-	// Port list is not correct
-	assert.NilError(c, err)
+	assertPortRange(ctx, ID, []int{8000, 8080}, []int{8000, 8080})
 	dockerCmd(c, "rm", "-f", ID)
 }
 
-func assertPortList(c *testing.T, out string, expected []string) error {
+func assertPortList(c *testing.T, out string, expected []string) {
 	c.Helper()
 	lines := strings.Split(strings.Trim(out, "\n "), "\n")
-	if len(lines) != len(expected) {
-		return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected))
-	}
+	assert.Assert(c, is.Len(lines, len(expected)), "expected: %s", strings.Join(expected, ", "))
+
 	sort.Strings(lines)
 	sort.Strings(expected)
 
@@ -196,17 +181,13 @@ func assertPortList(c *testing.T, out string, expected []string) error {
 		if lines[i] == expected[i] {
 			continue
 		}
-		if lines[i] != oldFormat(expected[i]) {
-			return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|")
-		}
+		assert.Equal(c, lines[i], oldFormat(expected[i]))
 	}
-
-	return nil
 }
 
-func assertPortRange(id string, expectedTCP, expectedUDP []int) error {
+func assertPortRange(ctx context.Context, id string, expectedTCP, expectedUDP []int) error {
 	client := testEnv.APIClient()
-	inspect, err := client.ContainerInspect(context.TODO(), id)
+	inspect, err := client.ContainerInspect(ctx, id)
 	if err != nil {
 		return err
 	}
@@ -331,17 +312,15 @@ func (s *DockerCLIPortSuite) TestPortHostBinding(c *testing.T) {
 
 	out, _ = dockerCmd(c, "port", firstID, "80")
 
-	err := assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
-	// Port list is not correct
-	assert.NilError(c, err)
+	assertPortList(c, out, []string{"0.0.0.0:9876", "[::]:9876"})
 
 	dockerCmd(c, "run", "--net=host", "busybox", "nc", "localhost", "9876")
 
 	dockerCmd(c, "rm", "-f", firstID)
 
-	out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876")
+	out, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876")
 	// Port is still bound after the Container is removed
-	assert.Assert(c, err != nil, "out: %s", out)
+	assert.Assert(c, err != nil, out)
 }
 
 func (s *DockerCLIPortSuite) TestPortExposeHostBinding(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_proxy_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"net"
 	"strings"
 	"testing"
@@ -13,8 +14,8 @@ type DockerCLIProxySuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIProxySuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIProxySuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIProxySuite) OnTimeout(c *testing.T) {

+ 12 - 7
integration-cli/docker_cli_prune_unix_test.go

@@ -3,6 +3,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -14,13 +15,14 @@ import (
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/icmd"
 	"gotest.tools/v3/poll"
 )
 
-func (s *DockerCLIPruneSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIPruneSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIPruneSuite) OnTimeout(c *testing.T) {
@@ -49,7 +51,8 @@ func pruneNetworkAndVerify(c *testing.T, d *daemon.Daemon, kept, pruned []string
 }
 
 func (s *DockerSwarmSuite) TestPruneNetwork(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 	_, err := d.Cmd("network", "create", "n1") // used by container (testprune)
 	assert.NilError(c, err)
 	_, err = d.Cmd("network", "create", "n2")
@@ -72,7 +75,7 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *testing.T) {
 		"busybox", "top")
 	assert.NilError(c, err)
 	assert.Assert(c, strings.TrimSpace(out) != "")
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(replicas+1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(replicas+1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// prune and verify
 	pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"})
@@ -82,13 +85,14 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *testing.T) {
 	assert.NilError(c, err)
 	_, err = d.Cmd("service", "rm", serviceName)
 	assert.NilError(c, err)
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"})
 }
 
 func (s *DockerDaemonSuite) TestPruneImageDangling(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	result := cli.BuildCmd(c, "test", cli.Daemon(s.d),
 		build.WithDockerfile(`FROM busybox
@@ -258,7 +262,8 @@ func (s *DockerCLIPruneSuite) TestPruneNetworkLabel(c *testing.T) {
 }
 
 func (s *DockerDaemonSuite) TestPruneImageLabel(c *testing.T) {
-	s.d.StartWithBusybox(c)
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	result := cli.BuildCmd(c, "test1", cli.Daemon(s.d),
 		build.WithDockerfile(`FROM busybox

+ 3 - 2
integration-cli/docker_cli_ps_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"sort"
 	"strconv"
@@ -22,8 +23,8 @@ type DockerCLIPsSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIPsSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIPsSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIPsSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_pull_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"regexp"
 	"strings"
@@ -17,8 +18,8 @@ type DockerCLIPullSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIPullSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIPullSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIPullSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_push_test.go

@@ -2,6 +2,7 @@ package main
 
 import (
 	"archive/tar"
+	"context"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -21,8 +22,8 @@ type DockerCLIPushSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIPushSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIPushSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIPushSuite) OnTimeout(c *testing.T) {

+ 3 - 1
integration-cli/docker_cli_registry_user_agent_test.go

@@ -7,6 +7,7 @@ import (
 	"regexp"
 	"testing"
 
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/registry"
 	"gotest.tools/v3/assert"
 )
@@ -71,6 +72,7 @@ func registerUserAgentHandler(reg *registry.Mock, result *string) {
 // a registry, the registry should see a User-Agent string of the form
 // [docker engine UA] UpstreamClientSTREAM-CLIENT([client UA])
 func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	var ua string
 
 	reg, err := registry.NewMock(c)
@@ -80,7 +82,7 @@ func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *testing.T) {
 	registerUserAgentHandler(reg, &ua)
 	repoName := fmt.Sprintf("%s/busybox", reg.URL())
 
-	s.d.StartWithBusybox(c, "--insecure-registry", reg.URL())
+	s.d.StartWithBusybox(ctx, c, "--insecure-registry", reg.URL())
 
 	tmp, err := os.MkdirTemp("", "integration-cli-")
 	assert.NilError(c, err)

+ 3 - 2
integration-cli/docker_cli_restart_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"strconv"
 	"strings"
@@ -18,8 +19,8 @@ type DockerCLIRestartSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIRestartSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIRestartSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIRestartSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_rmi_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -17,8 +18,8 @@ type DockerCLIRmiSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIRmiSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIRmiSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIRmiSuite) OnTimeout(c *testing.T) {

+ 54 - 20
integration-cli/docker_cli_run_test.go

@@ -25,10 +25,12 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/docker/libnetwork/resolvconf"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/testutil"
+	testdaemon "github.com/docker/docker/testutil/daemon"
 	"github.com/docker/docker/testutil/fakecontext"
 	"github.com/docker/go-connections/nat"
 	"github.com/moby/sys/mountinfo"
@@ -42,8 +44,8 @@ type DockerCLIRunSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIRunSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIRunSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIRunSuite) OnTimeout(c *testing.T) {
@@ -3795,7 +3797,7 @@ func (s *DockerCLIRunSuite) TestRunNamedVolumesFromNotRemoved(c *testing.T) {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	container, err := apiClient.ContainerInspect(context.Background(), strings.TrimSpace(cid))
+	container, err := apiClient.ContainerInspect(testutil.GetContext(c), strings.TrimSpace(cid))
 	assert.NilError(c, err)
 	var vname string
 	for _, v := range container.Mounts {
@@ -3816,19 +3818,40 @@ func (s *DockerCLIRunSuite) TestRunNamedVolumesFromNotRemoved(c *testing.T) {
 }
 
 func (s *DockerCLIRunSuite) TestRunAttachFailedNoLeak(c *testing.T) {
-	nroutines, err := getGoroutineNumber()
+	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
+	ctx := testutil.GetContext(c)
+	d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
+	defer func() {
+		if c.Failed() {
+			d.Daemon.DumpStackAndQuit()
+		} else {
+			d.Stop(c)
+		}
+		d.Cleanup(c)
+	}()
+	d.StartWithBusybox(ctx, c)
+
+	// Run a dummy container to ensure all goroutines are up and running before we get a count
+	_, err := d.Cmd("run", "--rm", "busybox", "true")
 	assert.NilError(c, err)
 
-	runSleepingContainer(c, "--name=test", "-p", "8000:8000")
+	client := d.NewClientT(c)
+
+	nroutines := waitForStableGourtineCount(ctx, c, client)
+
+	out, err := d.Cmd(append([]string{"run", "-d", "--name=test", "-p", "8000:8000", "busybox"}, sleepCommandForDaemonPlatform()...)...)
+	assert.NilError(c, err, out)
 
 	// Wait until container is fully up and running
-	assert.Assert(c, waitRun("test") == nil)
+	assert.NilError(c, d.WaitRun("test"))
+
+	out, err = d.Cmd("run", "--name=fail", "-p", "8000:8000", "busybox", "true")
 
-	out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true")
 	// We will need the following `inspect` to diagnose the issue if test fails (#21247)
-	out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test")
-	out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail")
+	out1, err1 := d.Cmd("inspect", "--format", "{{json .State}}", "test")
+	out2, err2 := d.Cmd("inspect", "--format", "{{json .State}}", "fail")
 	assert.Assert(c, err != nil, "Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)
+
 	// check for windows error as well
 	// TODO Windows Post TP5. Fix the error message string
 	outLowerCase := strings.ToLower(out)
@@ -3837,10 +3860,12 @@ func (s *DockerCLIRunSuite) TestRunAttachFailedNoLeak(c *testing.T) {
 		strings.Contains(outLowerCase, "the specified port already exists") ||
 		strings.Contains(outLowerCase, "hns failed with error : failed to create endpoint") ||
 		strings.Contains(outLowerCase, "hns failed with error : the object already exists"), fmt.Sprintf("Output: %s", out))
-	dockerCmd(c, "rm", "-f", "test")
+
+	out, err = d.Cmd("rm", "-f", "test")
+	assert.NilError(c, err, out)
 
 	// NGoroutines is not updated right away, so we need to wait before failing
-	assert.Assert(c, waitForGoroutines(nroutines) == nil)
+	waitForGoroutines(ctx, c, client, nroutines)
 }
 
 // Test for one character directory name case (#20122)
@@ -3993,35 +4018,44 @@ exec "$@"`,
 }
 
 func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *testing.T) {
-	s.d.StartWithBusybox(c, "--debug", "--default-ulimit=nofile=65535")
+	ctx := testutil.GetContext(c)
+	d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvVars("OTEL_SDK_DISABLED=1"))
+	defer func() {
+		d.Stop(c)
+		d.Cleanup(c)
+	}()
+	d.StartWithBusybox(ctx, c, "--debug", "--default-ulimit=nofile=65535")
 
 	name := "test-A"
-	_, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top")
+	_, err := d.Cmd("run", "--name", name, "-d", "busybox", "top")
 	assert.NilError(c, err)
-	assert.NilError(c, s.d.WaitRun(name))
+	assert.NilError(c, d.WaitRun(name))
 
-	out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
+	out, err := d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
 	assert.NilError(c, err)
 	assert.Assert(c, strings.Contains(out, "[nofile=65535:65535]"))
 	name = "test-B"
-	_, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top")
+	_, err = d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top")
 	assert.NilError(c, err)
-	assert.NilError(c, s.d.WaitRun(name))
+	assert.NilError(c, d.WaitRun(name))
 
-	out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
+	out, err = d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
 	assert.NilError(c, err)
 	assert.Assert(c, strings.Contains(out, "[nofile=42:42]"))
 }
 
 func (s *DockerCLIRunSuite) TestRunStoppedLoggingDriverNoLeak(c *testing.T) {
-	nroutines, err := getGoroutineNumber()
+	client := testEnv.APIClient()
+	ctx := testutil.GetContext(c)
+	nroutines, err := getGoroutineNumber(ctx, client)
 	assert.NilError(c, err)
 
 	out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true")
 	assert.ErrorContains(c, err, "")
 	assert.Assert(c, strings.Contains(out, "failed to initialize logging driver"), "error should be about logging driver, got output %s", out)
+
 	// NGoroutines is not updated right away, so we need to wait before failing
-	assert.Assert(c, waitForGoroutines(nroutines) == nil)
+	waitForGoroutines(ctx, c, client, nroutines)
 }
 
 // Handles error conditions for --credentialspec. Validating E2E success cases

+ 27 - 23
integration-cli/docker_cli_run_unix_test.go

@@ -4,7 +4,6 @@ package main
 
 import (
 	"bufio"
-	"context"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -24,6 +23,7 @@ import (
 	"github.com/docker/docker/integration-cli/cli/build"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/testutil"
 	"github.com/moby/sys/mount"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/icmd"
@@ -993,7 +993,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyUnshareUserns(c *testing.T)
 // with a the default seccomp profile exits with operation not permitted.
 func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyCloneUserns(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon, seccompEnabled)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	icmd.RunCommand(dockerBinary, "run", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
 		ExitCode: 1,
@@ -1005,7 +1005,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompProfileDenyCloneUserns(c *testing.T) {
 // 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns.
 func (s *DockerCLIRunSuite) TestRunSeccompUnconfinedCloneUserns(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace, unprivilegedUsernsClone)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// make sure running w privileged is ok
 	icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined",
@@ -1018,7 +1018,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompUnconfinedCloneUserns(c *testing.T) {
 // allows creating a userns.
 func (s *DockerCLIRunSuite) TestRunSeccompAllowPrivCloneUserns(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// make sure running w privileged is ok
 	icmd.RunCommand(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
@@ -1030,7 +1030,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompAllowPrivCloneUserns(c *testing.T) {
 // with the default seccomp profile.
 func (s *DockerCLIRunSuite) TestRunSeccompProfileAllow32Bit(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, IsAmd64)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test").Assert(c, icmd.Success)
 }
@@ -1045,7 +1045,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompAllowSetrlimit(c *testing.T) {
 
 func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileAcct(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, NotUserNamespace)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test")
 	if err == nil || !strings.Contains(out, "Operation not permitted") {
@@ -1075,7 +1075,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileAcct(c *testing.T) {
 
 func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileNS(c *testing.T) {
 	testRequires(c, testEnv.IsLocalDaemon, seccompEnabled, NotUserNamespace)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0")
 	if err == nil || !strings.Contains(out, "Operation not permitted") {
@@ -1112,7 +1112,7 @@ func (s *DockerCLIRunSuite) TestRunSeccompDefaultProfileNS(c *testing.T) {
 // effective uid transitions on executing setuid binaries.
 func (s *DockerCLIRunSuite) TestRunNoNewPrivSetuid(c *testing.T) {
 	testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
-	ensureNNPTest(c)
+	ensureNNPTest(testutil.GetContext(c), c)
 
 	// test that running a setuid binary results in no effective uid transition
 	icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges=true", "--user", "1000",
@@ -1125,7 +1125,7 @@ func (s *DockerCLIRunSuite) TestRunNoNewPrivSetuid(c *testing.T) {
 // effective uid transitions on executing setuid binaries.
 func (s *DockerCLIRunSuite) TestLegacyRunNoNewPrivSetuid(c *testing.T) {
 	testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
-	ensureNNPTest(c)
+	ensureNNPTest(testutil.GetContext(c), c)
 
 	// test that running a setuid binary results in no effective uid transition
 	icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000",
@@ -1136,7 +1136,7 @@ func (s *DockerCLIRunSuite) TestLegacyRunNoNewPrivSetuid(c *testing.T) {
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChown(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_CHOWN
 	dockerCmd(c, "run", "busybox", "chown", "100", "/tmp")
@@ -1154,7 +1154,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChown(c *testing.T) {
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_DAC_OVERRIDE
 	dockerCmd(c, "run", "busybox", "sh", "-c", "echo test > /etc/passwd")
@@ -1167,7 +1167,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *testin
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesFowner(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_FOWNER
 	dockerCmd(c, "run", "busybox", "chmod", "777", "/etc/passwd")
@@ -1183,7 +1183,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesFowner(c *testing.T)
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesSetuid(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_SETUID
 	dockerCmd(c, "run", "syscall-test", "setuid-test")
@@ -1201,7 +1201,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesSetuid(c *testing.T)
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesSetgid(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_SETGID
 	dockerCmd(c, "run", "syscall-test", "setgid-test")
@@ -1229,7 +1229,7 @@ func sysctlExists(s string) bool {
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_NET_BIND_SERVICE
 	dockerCmd(c, "run", "syscall-test", "socket-test")
@@ -1258,7 +1258,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *tes
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_NET_RAW
 	dockerCmd(c, "run", "syscall-test", "raw-test")
@@ -1276,7 +1276,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *testing.T)
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChroot(c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_SYS_CHROOT
 	dockerCmd(c, "run", "busybox", "chroot", "/", "/bin/true")
@@ -1294,7 +1294,7 @@ func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesChroot(c *testing.T)
 
 func (s *DockerCLIRunSuite) TestUserNoEffectiveCapabilitiesMknod(c *testing.T) {
 	testRequires(c, DaemonIsLinux, NotUserNamespace, testEnv.IsLocalDaemon)
-	ensureSyscallTest(c)
+	ensureSyscallTest(testutil.GetContext(c), c)
 
 	// test that a root user has default capability CAP_MKNOD
 	dockerCmd(c, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2")
@@ -1428,8 +1428,9 @@ func (s *DockerCLIRunSuite) TestRunUserDeviceAllowed(c *testing.T) {
 
 func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *testing.T) {
 	testRequires(c, seccompEnabled)
+	ctx := testutil.GetContext(c)
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	jsonData := `{
 	"defaultAction": "SCMP_ACT_ALLOW",
@@ -1453,8 +1454,9 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *testing.T) {
 
 func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *testing.T) {
 	testRequires(c, seccompEnabled)
+	ctx := testutil.GetContext(c)
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	jsonData := `{
 	"defaultAction": "SCMP_ACT_ALLOW",
@@ -1479,8 +1481,9 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *testing.T) {
 
 func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *testing.T) {
 	testRequires(c, seccompEnabled)
+	ctx := testutil.GetContext(c)
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	jsonData := `{
 	"archMap": [
@@ -1516,8 +1519,9 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *testing.T) {
 
 func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *testing.T) {
 	testRequires(c, seccompEnabled)
+	ctx := testutil.GetContext(c)
 
-	s.d.StartWithBusybox(c)
+	s.d.StartWithBusybox(ctx, c)
 
 	// 1) verify I can run containers with the Docker default shipped profile which allows chmod
 	_, err := s.d.Cmd("run", "busybox", "chmod", "777", ".")
@@ -1560,7 +1564,7 @@ func (s *DockerCLIRunSuite) TestRunWithNanoCPUs(c *testing.T) {
 
 	clt, err := client.NewClientWithOpts(client.FromEnv)
 	assert.NilError(c, err)
-	inspect, err := clt.ContainerInspect(context.Background(), "test")
+	inspect, err := clt.ContainerInspect(testutil.GetContext(c), "test")
 	assert.NilError(c, err)
 	assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(500000000))
 

+ 3 - 2
integration-cli/docker_cli_save_load_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"os/exec"
@@ -17,8 +18,8 @@ type DockerCLISaveLoadSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLISaveLoadSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLISaveLoadSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLISaveLoadSuite) OnTimeout(c *testing.T) {

+ 2 - 1
integration-cli/docker_cli_save_load_unix_test.go

@@ -13,6 +13,7 @@ import (
 
 	"github.com/creack/pty"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/icmd"
 )
@@ -90,7 +91,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveAndLoadWithProgressBar(c *testing.T) {
 func (s *DockerCLISaveLoadSuite) TestLoadNoStdinFail(c *testing.T) {
 	pty, tty, err := pty.Open()
 	assert.NilError(c, err)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(testutil.GetContext(c), 5*time.Second)
 	defer cancel()
 	cmd := exec.CommandContext(ctx, dockerBinary, "load")
 	cmd.Stdin = tty

+ 3 - 2
integration-cli/docker_cli_search_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -12,8 +13,8 @@ type DockerCLISearchSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLISearchSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLISearchSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLISearchSuite) OnTimeout(c *testing.T) {

+ 33 - 23
integration-cli/docker_cli_service_create_test.go

@@ -13,26 +13,28 @@ import (
 	"github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/poll"
 )
 
 func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 	out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top")
 	assert.NilError(c, err, out)
 	id := strings.TrimSpace(out)
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, id)
+		tasks = d.GetServiceTasks(ctx, c, id)
 		return len(tasks) > 0, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	task := tasks[0]
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
 		if task.NodeID == "" || task.Status.ContainerStatus == nil {
-			task = d.GetTask(c, task.ID)
+			task = d.GetTask(ctx, c, task.ID)
 		}
 		return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
@@ -66,7 +68,8 @@ func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	serviceName := "test-service-secret"
 	testName := "test_secret"
@@ -100,7 +103,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	testPaths := map[string]string{
 		"app":                  "/etc/secret",
@@ -139,14 +143,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *testi
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, serviceName)
+		tasks = d.GetServiceTasks(ctx, c, serviceName)
 		return len(tasks) > 0, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	task := tasks[0]
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
 		if task.NodeID == "" || task.Status.ContainerStatus == nil {
-			task = d.GetTask(c, task.ID)
+			task = d.GetTask(ctx, c, task.ID)
 		}
 		return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
@@ -166,7 +170,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *testi
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	id := d.CreateSecret(c, swarm.SecretSpec{
 		Annotations: swarm.Annotations{
@@ -189,14 +194,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *testing
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, serviceName)
+		tasks = d.GetServiceTasks(ctx, c, serviceName)
 		return len(tasks) > 0, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	task := tasks[0]
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
 		if task.NodeID == "" || task.Status.ContainerStatus == nil {
-			task = d.GetTask(c, task.ID)
+			task = d.GetTask(ctx, c, task.ID)
 		}
 		return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
@@ -214,7 +219,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *testing
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	serviceName := "test-service-config"
 	testName := "test_config"
@@ -248,7 +254,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	testPaths := map[string]string{
 		"app":             "/etc/config",
@@ -286,14 +293,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *testi
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, serviceName)
+		tasks = d.GetServiceTasks(ctx, c, serviceName)
 		return len(tasks) > 0, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	task := tasks[0]
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
 		if task.NodeID == "" || task.Status.ContainerStatus == nil {
-			task = d.GetTask(c, task.ID)
+			task = d.GetTask(ctx, c, task.ID)
 		}
 		return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
@@ -313,7 +320,8 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *testi
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	id := d.CreateConfig(c, swarm.ConfigSpec{
 		Annotations: swarm.Annotations{
@@ -336,14 +344,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *testing
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, serviceName)
+		tasks = d.GetServiceTasks(ctx, c, serviceName)
 		return len(tasks) > 0, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	task := tasks[0]
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
 		if task.NodeID == "" || task.Status.ContainerStatus == nil {
-			task = d.GetTask(c, task.ID)
+			task = d.GetTask(ctx, c, task.ID)
 		}
 		return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
@@ -361,21 +369,22 @@ func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *testing
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 	out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; exec tail -f /dev/null")
 	assert.NilError(c, err, out)
 	id := strings.TrimSpace(out)
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, id)
+		tasks = d.GetServiceTasks(ctx, c, id)
 		return len(tasks) > 0, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	task := tasks[0]
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
 		if task.NodeID == "" || task.Status.ContainerStatus == nil {
-			task = d.GetTask(c, task.ID)
+			task = d.GetTask(ctx, c, task.ID)
 		}
 		return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
@@ -414,7 +423,8 @@ func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 	out, err := d.Cmd("network", "create", "--scope=swarm", "test_swarm_br")
 	assert.NilError(c, err, out)
 
@@ -424,14 +434,14 @@ func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *testing.T) {
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, id)
+		tasks = d.GetServiceTasks(ctx, c, id)
 		return len(tasks) > 0, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	task := tasks[0]
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
 		if task.NodeID == "" || task.Status.ContainerStatus == nil {
-			task = d.GetTask(c, task.ID)
+			task = d.GetTask(ctx, c, task.ID)
 		}
 		return task.NodeID != "" && task.Status.ContainerStatus != nil, ""
 	}, checker.Equals(true)), poll.WithTimeout(defaultReconciliationTimeout))

+ 12 - 9
integration-cli/docker_cli_service_health_test.go

@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/icmd"
 	"gotest.tools/v3/poll"
@@ -22,7 +23,8 @@ import (
 func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
 	testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
 
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	// build image with health-check
 	imageName := "testhealth"
@@ -41,7 +43,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, id)
+		tasks = d.GetServiceTasks(ctx, c, id)
 		return tasks, ""
 	}, checker.HasLen(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -49,7 +51,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
 
 	// wait for task to start
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		task = d.GetTask(c, task.ID)
+		task = d.GetTask(ctx, c, task.ID)
 		return task.Status.State, ""
 	}, checker.Equals(swarm.TaskStateRunning)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -71,7 +73,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
 
 	// Task should be terminated
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		task = d.GetTask(c, task.ID)
+		task = d.GetTask(ctx, c, task.ID)
 		return task.Status.State, ""
 	}, checker.Equals(swarm.TaskStateFailed)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -85,7 +87,8 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *testing.T) {
 func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
 	testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
 
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	// service started from this image won't pass health check
 	imageName := "testhealth"
@@ -103,7 +106,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
 
 	var tasks []swarm.Task
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		tasks = d.GetServiceTasks(c, id)
+		tasks = d.GetServiceTasks(ctx, c, id)
 		return tasks, ""
 	}, checker.HasLen(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -111,7 +114,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
 
 	// wait for task to start
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		task = d.GetTask(c, task.ID)
+		task = d.GetTask(ctx, c, task.ID)
 		return task.Status.State, ""
 	}, checker.Equals(swarm.TaskStateStarting)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -125,7 +128,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
 	}, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// task should be blocked at starting status
-	task = d.GetTask(c, task.ID)
+	task = d.GetTask(ctx, c, task.ID)
 	assert.Equal(c, task.Status.State, swarm.TaskStateStarting)
 
 	// make it healthy
@@ -133,7 +136,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *testing.T) {
 
 	// Task should be at running status
 	poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
-		task = d.GetTask(c, task.ID)
+		task = d.GetTask(ctx, c, task.ID)
 		return task.Status.State, ""
 	}, checker.Equals(swarm.TaskStateRunning)), poll.WithTimeout(defaultReconciliationTimeout))
 }

+ 28 - 18
integration-cli/docker_cli_service_logs_test.go

@@ -13,6 +13,7 @@ import (
 
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/icmd"
 	"gotest.tools/v3/poll"
@@ -24,7 +25,8 @@ type logMessage struct {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogs(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	// we have multiple services here for detecting the goroutine issue #28915
 	services := map[string]string{
@@ -41,7 +43,7 @@ func (s *DockerSwarmSuite) TestServiceLogs(c *testing.T) {
 
 	// make sure task has been deployed.
 	poll.WaitOn(c, pollCheck(c,
-		d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{"busybox:latest": len(services)})), poll.WithTimeout(defaultReconciliationTimeout))
+		d.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{"busybox:latest": len(services)})), poll.WithTimeout(defaultReconciliationTimeout))
 
 	for name, message := range services {
 		out, err := d.Cmd("service", "logs", name)
@@ -69,7 +71,8 @@ func countLogLines(d *daemon.Daemon, name string) func(*testing.T) (interface{},
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServiceLogsCompleteness"
 
@@ -79,7 +82,7 @@ func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *testing.T) {
 	assert.Assert(c, strings.TrimSpace(out) != "")
 
 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 	// and make sure we have all the log lines
 	poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(6)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -96,7 +99,8 @@ func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsTail(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServiceLogsTail"
 
@@ -106,7 +110,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTail(c *testing.T) {
 	assert.Assert(c, strings.TrimSpace(out) != "")
 
 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 	poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(6)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	out, err = d.Cmd("service", "logs", "--tail=2", name)
@@ -120,15 +124,16 @@ func (s *DockerSwarmSuite) TestServiceLogsTail(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsSince(c *testing.T) {
+	ctx := testutil.GetContext(c)
 	// See DockerSuite.TestLogsSince, which is where this comes from
-	d := s.AddDaemon(c, true, true)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServiceLogsSince"
 
 	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do usleep 100000; echo log$i; done; exec tail -f /dev/null")
 	assert.NilError(c, err)
 	assert.Assert(c, strings.TrimSpace(out) != "")
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 	// wait a sec for the logs to come in
 	poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(3)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -155,7 +160,8 @@ func (s *DockerSwarmSuite) TestServiceLogsSince(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsFollow(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServiceLogsFollow"
 
@@ -164,7 +170,7 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *testing.T) {
 	assert.Assert(c, strings.TrimSpace(out) != "")
 
 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	args := []string{"service", "logs", "-f", name}
 	cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...)
@@ -207,7 +213,8 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServicelogsTaskLogs"
 	replicas := 2
@@ -233,7 +240,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *testing.T) {
 	result.Assert(c, icmd.Expected{Out: id})
 
 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(replicas)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(replicas)), poll.WithTimeout(defaultReconciliationTimeout))
 	poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(6*replicas)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// get the task ids
@@ -260,7 +267,8 @@ func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsTTY(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServiceLogsTTY"
 
@@ -286,7 +294,7 @@ func (s *DockerSwarmSuite) TestServiceLogsTTY(c *testing.T) {
 	result.Assert(c, icmd.Expected{Out: id})
 
 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 	// and make sure we have all the log lines
 	poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -298,7 +306,8 @@ func (s *DockerSwarmSuite) TestServiceLogsTTY(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServiceLogsNoHangDeletedContainer"
 
@@ -320,7 +329,7 @@ func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *testing.T) {
 	assert.Assert(c, id != "")
 
 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 	// and make sure we have all the log lines
 	poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
 
@@ -345,7 +354,8 @@ func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *testing.T) {
 }
 
 func (s *DockerSwarmSuite) TestServiceLogsDetails(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	name := "TestServiceLogsDetails"
 
@@ -371,7 +381,7 @@ func (s *DockerSwarmSuite) TestServiceLogsDetails(c *testing.T) {
 	assert.Assert(c, id != "")
 
 	// make sure task has been deployed
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 	// and make sure we have all the log lines
 	poll.WaitOn(c, pollCheck(c, countLogLines(d, name), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 

+ 3 - 1
integration-cli/docker_cli_service_scale_test.go

@@ -7,11 +7,13 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 )
 
 func (s *DockerSwarmSuite) TestServiceScale(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	service1Name := "TestService1"
 	service1Args := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", service1Name, "busybox"}, sleepCommandForDaemonPlatform()...)

+ 3 - 2
integration-cli/docker_cli_sni_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"log"
@@ -18,8 +19,8 @@ type DockerCLISNISuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLISNISuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLISNISuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLISNISuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_start_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -15,8 +16,8 @@ type DockerCLIStartSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIStartSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIStartSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIStartSuite) OnTimeout(c *testing.T) {

+ 3 - 2
integration-cli/docker_cli_stats_test.go

@@ -2,6 +2,7 @@ package main
 
 import (
 	"bufio"
+	"context"
 	"os/exec"
 	"regexp"
 	"strings"
@@ -17,8 +18,8 @@ type DockerCLIStatsSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIStatsSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIStatsSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIStatsSuite) OnTimeout(c *testing.T) {

File diff suppressed because it is too large
+ 179 - 120
integration-cli/docker_cli_swarm_test.go


+ 11 - 8
integration-cli/docker_cli_swarm_unix_test.go

@@ -10,18 +10,20 @@ import (
 
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/poll"
 )
 
 func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
-	d := s.AddDaemon(c, true, true)
+	ctx := testutil.GetContext(c)
+	d := s.AddDaemon(ctx, c, true, true)
 
 	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top")
 	assert.NilError(c, err, out)
 
 	// Make sure task stays pending before plugin is available
-	poll.WaitOn(c, pollCheck(c, d.CheckServiceTasksInStateWithError("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckServiceTasksInStateWithError(ctx, "top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	plugin := newVolumePlugin(c, "customvolumedriver")
 	defer plugin.Close()
@@ -35,7 +37,7 @@ func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
 	// this long delay.
 
 	// make sure task has been deployed.
-	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	out, err = d.Cmd("ps", "-q")
 	assert.NilError(c, err)
@@ -58,8 +60,9 @@ func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
 // Test network plugin filter in swarm
 func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
 	testRequires(c, IsAmd64)
-	d1 := s.AddDaemon(c, true, true)
-	d2 := s.AddDaemon(c, true, false)
+	ctx := testutil.GetContext(c)
+	d1 := s.AddDaemon(ctx, c, true, true)
+	d2 := s.AddDaemon(ctx, c, true, false)
 
 	// install plugin on d1 and d2
 	pluginName := "aragunathan/global-net-plugin:latest"
@@ -81,7 +84,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
 	assert.NilError(c, err)
 
 	// wait for tasks ready
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// remove service
 	_, err = d1.Cmd("service", "rm", serviceName)
@@ -89,7 +92,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
 
 	// wait to ensure all containers have exited before removing the plugin. Else there's a
 	// possibility of container exits erroring out due to plugins being unavailable.
-	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
 
 	// disable plugin on worker
 	_, err = d2.Cmd("plugin", "disable", "-f", pluginName)
@@ -102,5 +105,5 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
 	_, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top")
 	assert.NilError(c, err)
 
-	poll.WaitOn(c, pollCheck(c, d1.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image: 1})), poll.WithTimeout(defaultReconciliationTimeout))
+	poll.WaitOn(c, pollCheck(c, d1.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{image: 1})), poll.WithTimeout(defaultReconciliationTimeout))
 }

+ 3 - 2
integration-cli/docker_cli_top_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"strings"
 	"testing"
 
@@ -12,8 +13,8 @@ type DockerCLITopSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLITopSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLITopSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLITopSuite) OnTimeout(c *testing.T) {

+ 6 - 5
integration-cli/docker_cli_update_unix_test.go

@@ -14,12 +14,13 @@ import (
 	"github.com/creack/pty"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
 
-func (s *DockerCLIUpdateSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIUpdateSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIUpdateSuite) OnTimeout(c *testing.T) {
@@ -180,7 +181,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateStats(c *testing.T) {
 	assert.NilError(c, waitRun(name))
 
 	getMemLimit := func(id string) uint64 {
-		resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id))
+		resp, body, err := request.Get(testutil.GetContext(c), fmt.Sprintf("/containers/%s/stats?stream=false", id))
 		assert.NilError(c, err)
 		assert.Equal(c, resp.Header.Get("Content-Type"), "application/json")
 
@@ -255,7 +256,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateWithNanoCPUs(c *testing.T) {
 
 	clt, err := client.NewClientWithOpts(client.FromEnv)
 	assert.NilError(c, err)
-	inspect, err := clt.ContainerInspect(context.Background(), "top")
+	inspect, err := clt.ContainerInspect(testutil.GetContext(c), "top")
 	assert.NilError(c, err)
 	assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(500000000))
 
@@ -269,7 +270,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateWithNanoCPUs(c *testing.T) {
 	assert.Assert(c, strings.Contains(out, "Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set"))
 
 	dockerCmd(c, "update", "--cpus", "0.8", "top")
-	inspect, err = clt.ContainerInspect(context.Background(), "top")
+	inspect, err = clt.ContainerInspect(testutil.GetContext(c), "top")
 	assert.NilError(c, err)
 	assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(800000000))
 

+ 3 - 1
integration-cli/docker_cli_userns_test.go

@@ -14,6 +14,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 )
 
@@ -23,7 +24,8 @@ import (
 func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *testing.T) {
 	testRequires(c, UserNamespaceInKernel)
 
-	s.d.StartWithBusybox(c, "--userns-remap", "default")
+	ctx := testutil.GetContext(c)
+	s.d.StartWithBusybox(ctx, c, "--userns-remap", "default")
 
 	tmpDir, err := os.MkdirTemp("", "userns")
 	assert.NilError(c, err)

+ 4 - 3
integration-cli/docker_cli_volume_test.go

@@ -14,6 +14,7 @@ import (
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/icmd"
 )
@@ -22,8 +23,8 @@ type DockerCLIVolumeSuite struct {
 	ds *DockerSuite
 }
 
-func (s *DockerCLIVolumeSuite) TearDownTest(c *testing.T) {
-	s.ds.TearDownTest(c)
+func (s *DockerCLIVolumeSuite) TearDownTest(ctx context.Context, c *testing.T) {
+	s.ds.TearDownTest(ctx, c)
 }
 
 func (s *DockerCLIVolumeSuite) OnTimeout(c *testing.T) {
@@ -589,7 +590,7 @@ func (s *DockerCLIVolumeSuite) TestDuplicateMountpointsForVolumesFromAndMounts(c
 			},
 		},
 	}
-	_, err = apiClient.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, nil, "app")
+	_, err = apiClient.ContainerCreate(testutil.GetContext(c), &config, &hostConfig, &network.NetworkingConfig{}, nil, "app")
 
 	assert.NilError(c, err)
 

+ 14 - 13
integration-cli/docker_deprecated_api_v124_test.go

@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -24,7 +25,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartHostConfig(c *testing.T)
 	config := map[string]interface{}{
 		"Binds": []string{"/aa:/bb"},
 	}
-	res, body, err := request.Post("/containers/"+name+"/start", request.JSONBody(config))
+	res, body, err := request.Post(testutil.GetContext(c), "/containers/"+name+"/start", request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusBadRequest)
 	if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
@@ -50,7 +51,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartVolumeBinds(c *testing.T
 		"Volumes": map[string]struct{}{path: {}},
 	}
 
-	res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
+	res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusCreated)
 
@@ -58,7 +59,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartVolumeBinds(c *testing.T
 	config = map[string]interface{}{
 		"Binds": []string{bindPath + ":" + path},
 	}
-	res, _, err = request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
+	res, _, err = request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNoContent)
 
@@ -77,7 +78,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *testin
 		"Volumes": map[string]struct{}{"/tmp": {}},
 	}
 
-	res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
+	res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusCreated)
 
@@ -87,7 +88,7 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *testin
 	config = map[string]interface{}{
 		"Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"},
 	}
-	res, body, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
+	res, body, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
 	assert.NilError(c, err)
 
 	buf, err := request.ReadBody(body)
@@ -115,14 +116,14 @@ func (s *DockerAPISuite) TestDeprecatedContainerAPIStartVolumesFrom(c *testing.T
 		"Volumes": map[string]struct{}{volPath: {}},
 	}
 
-	res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
+	res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusCreated)
 
 	config = map[string]interface{}{
 		"VolumesFrom": []string{volName},
 	}
-	res, _, err = request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
+	res, _, err = request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNoContent)
 
@@ -145,7 +146,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainerBindNormalVolume(c *testing.
 	dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox")
 
 	bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}}
-	res, _, err := request.Post(formatV123StartAPIURL("/containers/two/start"), request.JSONBody(bindSpec))
+	res, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/two/start"), request.JSONBody(bindSpec))
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNoContent)
 
@@ -166,7 +167,7 @@ func (s *DockerAPISuite) TestDeprecatedStartWithTooLowMemoryLimit(c *testing.T)
                 "Memory":    524287
         }`
 
-	res, body, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
+	res, body, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	b, err := request.ReadBody(body)
 	assert.NilError(c, err)
@@ -189,7 +190,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainersStartWithoutLinksInHostConf
 	hc := inspectFieldJSON(c, name, "HostConfig")
 	config := `{"HostConfig":` + hc + `}`
 
-	res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
+	res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNoContent)
 	b.Close()
@@ -207,7 +208,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(
 	hc := inspectFieldJSON(c, name, "HostConfig")
 	config := `{"HostConfig":` + hc + `}`
 
-	res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
+	res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNoContent)
 	b.Close()
@@ -227,7 +228,7 @@ func (s *DockerAPISuite) TestDeprecatedPostContainersStartWithLinksInHostConfigI
 	hc := inspectFieldJSON(c, name, "HostConfig")
 	config := `{"HostConfig":` + hc + `}`
 
-	res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
+	res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNoContent)
 	b.Close()
@@ -241,7 +242,7 @@ func (s *DockerAPISuite) TestDeprecatedStartWithNilDNS(c *testing.T) {
 
 	config := `{"HostConfig": {"Dns": null}}`
 
-	res, b, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
+	res, b, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON)
 	assert.NilError(c, err)
 	assert.Equal(c, res.StatusCode, http.StatusNoContent)
 	b.Close()

+ 2 - 1
integration-cli/docker_deprecated_api_v124_unix_test.go

@@ -6,6 +6,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/request"
 	"gotest.tools/v3/assert"
 )
@@ -22,7 +23,7 @@ func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c
 			"NetworkMode": netName,
 		},
 	}
-	_, _, err := request.Post(formatV123StartAPIURL("/containers/"+conName+"/start"), request.JSONBody(config))
+	_, _, err := request.Post(testutil.GetContext(c), formatV123StartAPIURL("/containers/"+conName+"/start"), request.JSONBody(config))
 	assert.NilError(c, err)
 	assert.NilError(c, waitRun(conName))
 	networks := inspectField(c, conName, "NetworkSettings.Networks")

+ 6 - 5
integration-cli/docker_hub_pull_suite_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"os/exec"
 	"strings"
 	"testing"
@@ -30,31 +31,31 @@ func newDockerHubPullSuite() *DockerHubPullSuite {
 }
 
 // SetUpSuite starts the suite daemon.
-func (s *DockerHubPullSuite) SetUpSuite(c *testing.T) {
+func (s *DockerHubPullSuite) SetUpSuite(ctx context.Context, c *testing.T) {
 	testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
 	s.d.Start(c)
 }
 
 // TearDownSuite stops the suite daemon.
-func (s *DockerHubPullSuite) TearDownSuite(c *testing.T) {
+func (s *DockerHubPullSuite) TearDownSuite(ctx context.Context, c *testing.T) {
 	if s.d != nil {
 		s.d.Stop(c)
 	}
 }
 
 // SetUpTest declares that all tests of this suite require network.
-func (s *DockerHubPullSuite) SetUpTest(c *testing.T) {
+func (s *DockerHubPullSuite) SetUpTest(ctx context.Context, c *testing.T) {
 	testRequires(c, Network)
 }
 
 // TearDownTest removes all images from the suite daemon.
-func (s *DockerHubPullSuite) TearDownTest(c *testing.T) {
+func (s *DockerHubPullSuite) TearDownTest(ctx context.Context, c *testing.T) {
 	out := s.Cmd(c, "images", "-aq")
 	images := strings.Split(out, "\n")
 	images = append([]string{"rmi", "-f"}, images...)
 	s.d.Cmd(images...)
-	s.ds.TearDownTest(c)
+	s.ds.TearDownTest(ctx, c)
 }
 
 // Cmd executes a command against the suite daemon and returns the combined

+ 56 - 29
integration-cli/docker_utils_test.go

@@ -18,6 +18,7 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/testutil"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/assert/cmp"
 	"gotest.tools/v3/icmd"
@@ -249,7 +250,7 @@ func daemonTime(c *testing.T) time.Time {
 	assert.NilError(c, err)
 	defer apiClient.Close()
 
-	info, err := apiClient.Info(context.Background())
+	info, err := apiClient.Info(testutil.GetContext(c))
 	assert.NilError(c, err)
 
 	dt, err := time.Parse(time.RFC3339Nano, info.SystemTime)
@@ -327,7 +328,7 @@ func getInspectBody(c *testing.T, version, id string) []byte {
 	apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion(version))
 	assert.NilError(c, err)
 	defer apiClient.Close()
-	_, body, err := apiClient.ContainerInspectWithRaw(context.Background(), id, false)
+	_, body, err := apiClient.ContainerInspectWithRaw(testutil.GetContext(c), id, false)
 	assert.NilError(c, err)
 	return body
 }
@@ -356,45 +357,71 @@ func minimalBaseImage() string {
 	return testEnv.PlatformDefaults.BaseImage
 }
 
-func getGoroutineNumber() (int, error) {
-	apiClient, err := client.NewClientWithOpts(client.FromEnv)
+func getGoroutineNumber(ctx context.Context, apiClient client.APIClient) (int, error) {
+	info, err := apiClient.Info(ctx)
 	if err != nil {
 		return 0, err
 	}
-	defer apiClient.Close()
+	return info.NGoroutines, nil
+}
 
-	info, err := apiClient.Info(context.Background())
-	if err != nil {
-		return 0, err
+func waitForStableGourtineCount(ctx context.Context, t poll.TestingT, apiClient client.APIClient) int {
+	var out int
+	poll.WaitOn(t, stableGoroutineCount(ctx, apiClient, &out), poll.WithTimeout(30*time.Second))
+	return out
+}
+
+func stableGoroutineCount(ctx context.Context, apiClient client.APIClient, count *int) poll.Check {
+	var (
+		numStable int
+		nRoutines int
+	)
+
+	return func(t poll.LogT) poll.Result {
+		n, err := getGoroutineNumber(ctx, apiClient)
+		if err != nil {
+			return poll.Error(err)
+		}
+
+		last := nRoutines
+
+		if nRoutines == n {
+			numStable++
+		} else {
+			numStable = 0
+			nRoutines = n
+		}
+
+		if numStable > 3 {
+			*count = n
+			return poll.Success()
+		}
+		return poll.Continue("goroutine count is not stable: last %d, current %d, stable iters: %d", last, n, numStable)
 	}
-	return info.NGoroutines, nil
 }
 
-func waitForGoroutines(expected int) error {
-	t := time.After(30 * time.Second)
-	for {
-		select {
-		case <-t:
-			n, err := getGoroutineNumber()
-			if err != nil {
-				return err
-			}
-			if n > expected {
-				return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n)
-			}
-		default:
-			n, err := getGoroutineNumber()
-			if err != nil {
-				return err
-			}
-			if n <= expected {
-				return nil
+func checkGoroutineCount(ctx context.Context, apiClient client.APIClient, expected int) poll.Check {
+	first := true
+	return func(t poll.LogT) poll.Result {
+		n, err := getGoroutineNumber(ctx, apiClient)
+		if err != nil {
+			return poll.Error(err)
+		}
+		if n > expected {
+			if first {
+				t.Log("Waiting for goroutines to stabilize")
+				first = false
 			}
-			time.Sleep(200 * time.Millisecond)
+			return poll.Continue("expected %d goroutines, got %d", expected, n)
 		}
+		return poll.Success()
 	}
 }
 
+func waitForGoroutines(ctx context.Context, t poll.TestingT, apiClient client.APIClient, expected int) {
+	poll.WaitOn(t, checkGoroutineCount(ctx, apiClient, expected), poll.WithDelay(500*time.Millisecond), poll.WithTimeout(30*time.Second))
+}
+
 // getErrorMessage returns the error message from an error API response
 func getErrorMessage(c *testing.T, body []byte) string {
 	c.Helper()

+ 3 - 2
integration-cli/environment/environment.go

@@ -1,6 +1,7 @@
 package environment // import "github.com/docker/docker/integration-cli/environment"
 
 import (
+	"context"
 	"os"
 	"os/exec"
 
@@ -29,8 +30,8 @@ func (e *Execution) DockerBinary() string {
 }
 
 // New returns details about the testing environment
-func New() (*Execution, error) {
-	env, err := environment.New()
+func New(ctx context.Context) (*Execution, error) {
+	env, err := environment.New(ctx)
 	if err != nil {
 		return nil, err
 	}

+ 9 - 8
integration-cli/fixtures_linux_daemon_test.go

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"os/exec"
@@ -13,7 +14,7 @@ import (
 	"gotest.tools/v3/assert"
 )
 
-func ensureSyscallTest(c *testing.T) {
+func ensureSyscallTest(ctx context.Context, c *testing.T) {
 	defer testEnv.ProtectImage(c, "syscall-test:latest")
 
 	// If the image already exists, there's nothing left to do.
@@ -24,7 +25,7 @@ func ensureSyscallTest(c *testing.T) {
 	// if no match, must build in docker, which is significantly slower
 	// (slower mostly because of the vfs graphdriver)
 	if testEnv.DaemonInfo.OSType != runtime.GOOS {
-		ensureSyscallTestBuild(c)
+		ensureSyscallTestBuild(ctx, c)
 		return
 	}
 
@@ -63,8 +64,8 @@ func ensureSyscallTest(c *testing.T) {
 	dockerCmd(c, buildArgs...)
 }
 
-func ensureSyscallTestBuild(c *testing.T) {
-	err := load.FrozenImagesLinux(testEnv.APIClient(), "debian:bullseye-slim")
+func ensureSyscallTestBuild(ctx context.Context, c *testing.T) {
+	err := load.FrozenImagesLinux(ctx, testEnv.APIClient(), "debian:bullseye-slim")
 	assert.NilError(c, err)
 
 	var buildArgs []string
@@ -76,7 +77,7 @@ func ensureSyscallTestBuild(c *testing.T) {
 	dockerCmd(c, buildArgs...)
 }
 
-func ensureNNPTest(c *testing.T) {
+func ensureNNPTest(ctx context.Context, c *testing.T) {
 	defer testEnv.ProtectImage(c, "nnp-test:latest")
 
 	// If the image already exists, there's nothing left to do.
@@ -87,7 +88,7 @@ func ensureNNPTest(c *testing.T) {
 	// if no match, must build in docker, which is significantly slower
 	// (slower mostly because of the vfs graphdriver)
 	if testEnv.DaemonInfo.OSType != runtime.GOOS {
-		ensureNNPTestBuild(c)
+		ensureNNPTestBuild(ctx, c)
 		return
 	}
 
@@ -118,8 +119,8 @@ func ensureNNPTest(c *testing.T) {
 	dockerCmd(c, buildArgs...)
 }
 
-func ensureNNPTestBuild(c *testing.T) {
-	err := load.FrozenImagesLinux(testEnv.APIClient(), "debian:bullseye-slim")
+func ensureNNPTestBuild(ctx context.Context, c *testing.T) {
+	err := load.FrozenImagesLinux(ctx, testEnv.APIClient(), "debian:bullseye-slim")
 	assert.NilError(c, err)
 
 	var buildArgs []string

+ 2 - 2
integration-cli/requirements_test.go

@@ -33,12 +33,12 @@ func MinimumAPIVersion(version string) func() bool {
 	}
 }
 
-func OnlyDefaultNetworks() bool {
+func OnlyDefaultNetworks(ctx context.Context) bool {
 	apiClient, err := client.NewClientWithOpts(client.FromEnv)
 	if err != nil {
 		return false
 	}
-	networks, err := apiClient.NetworkList(context.TODO(), types.NetworkListOptions{})
+	networks, err := apiClient.NetworkList(ctx, types.NetworkListOptions{})
 	if err != nil || len(networks) > 0 {
 		return false
 	}

+ 11 - 14
integration-cli/requirements_unix_test.go

@@ -11,8 +11,11 @@ import (
 	"github.com/docker/docker/pkg/sysinfo"
 )
 
-// SysInfo stores information about which features a kernel supports.
-var SysInfo *sysinfo.SysInfo
+var sysInfo *sysinfo.SysInfo
+
+func setupLocalInfo() {
+	sysInfo = sysinfo.New()
+}
 
 func cpuCfsPeriod() bool {
 	return testEnv.DaemonInfo.CPUCfsPeriod
@@ -31,7 +34,7 @@ func oomControl() bool {
 }
 
 func pidsLimit() bool {
-	return SysInfo.PidsLimit
+	return sysInfo.PidsLimit
 }
 
 func memoryLimitSupport() bool {
@@ -39,7 +42,7 @@ func memoryLimitSupport() bool {
 }
 
 func memoryReservationSupport() bool {
-	return SysInfo.MemoryReservation
+	return sysInfo.MemoryReservation
 }
 
 func swapMemorySupport() bool {
@@ -47,11 +50,11 @@ func swapMemorySupport() bool {
 }
 
 func memorySwappinessSupport() bool {
-	return testEnv.IsLocalDaemon() && SysInfo.MemorySwappiness
+	return testEnv.IsLocalDaemon() && sysInfo.MemorySwappiness
 }
 
 func blkioWeight() bool {
-	return testEnv.IsLocalDaemon() && SysInfo.BlkioWeight
+	return testEnv.IsLocalDaemon() && sysInfo.BlkioWeight
 }
 
 func cgroupCpuset() bool {
@@ -59,11 +62,11 @@ func cgroupCpuset() bool {
 }
 
 func seccompEnabled() bool {
-	return SysInfo.Seccomp
+	return sysInfo.Seccomp
 }
 
 func bridgeNfIptables() bool {
-	return !SysInfo.BridgeNFCallIPTablesDisabled
+	return !sysInfo.BridgeNFCallIPTablesDisabled
 }
 
 func unprivilegedUsernsClone() bool {
@@ -79,9 +82,3 @@ func overlayFSSupported() bool {
 	}
 	return bytes.Contains(out, []byte("overlay\n"))
 }
-
-func init() {
-	if testEnv.IsLocalDaemon() {
-		SysInfo = sysinfo.New()
-	}
-}

+ 4 - 0
integration-cli/requirements_windows_test.go

@@ -0,0 +1,4 @@
+package main
+
+func setupLocalInfo() {
+}

+ 9 - 5
integration/build/build_cgroupns_linux_test.go

@@ -10,6 +10,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/integration/internal/requirement"
 	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/daemon"
 	"github.com/docker/docker/testutil/fakecontext"
 	"gotest.tools/v3/assert"
@@ -38,16 +39,15 @@ func getCgroupFromBuildOutput(buildOutput io.Reader) (string, error) {
 
 // Runs a docker build against a daemon with the given cgroup namespace default value.
 // Returns the container cgroup and daemon cgroup.
-func testBuildWithCgroupNs(t *testing.T, daemonNsMode string) (string, string) {
+func testBuildWithCgroupNs(ctx context.Context, t *testing.T, daemonNsMode string) (string, string) {
 	d := daemon.New(t, daemon.WithDefaultCgroupNamespaceMode(daemonNsMode))
-	d.StartWithBusybox(t)
+	d.StartWithBusybox(ctx, t)
 	defer d.Stop(t)
 
 	dockerfile := `
 		FROM busybox
 		RUN readlink /proc/self/ns/cgroup
 	`
-	ctx := context.Background()
 	source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile))
 	defer source.Close()
 
@@ -74,9 +74,11 @@ func TestCgroupNamespacesBuild(t *testing.T) {
 	skip.If(t, testEnv.IsRemoteDaemon())
 	skip.If(t, !requirement.CgroupNamespacesEnabled())
 
+	ctx := testutil.StartSpan(baseContext, t)
+
 	// When the daemon defaults to private cgroup namespaces, containers launched
 	// should be in their own private cgroup namespace by default
-	containerCgroup, daemonCgroup := testBuildWithCgroupNs(t, "private")
+	containerCgroup, daemonCgroup := testBuildWithCgroupNs(ctx, t, "private")
 	assert.Assert(t, daemonCgroup != containerCgroup)
 }
 
@@ -85,8 +87,10 @@ func TestCgroupNamespacesBuildDaemonHostMode(t *testing.T) {
 	skip.If(t, testEnv.IsRemoteDaemon())
 	skip.If(t, !requirement.CgroupNamespacesEnabled())
 
+	ctx := testutil.StartSpan(baseContext, t)
+
 	// When the daemon defaults to host cgroup namespaces, containers
 	// launched should not be inside their own cgroup namespaces
-	containerCgroup, daemonCgroup := testBuildWithCgroupNs(t, "host")
+	containerCgroup, daemonCgroup := testBuildWithCgroupNs(ctx, t, "host")
 	assert.Assert(t, daemonCgroup == containerCgroup)
 }

+ 13 - 11
integration/build/build_session_test.go

@@ -11,6 +11,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/versions"
 	dclient "github.com/docker/docker/client"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/fakecontext"
 	"github.com/docker/docker/testutil/request"
 	"github.com/moby/buildkit/session"
@@ -26,6 +27,8 @@ func TestBuildWithSession(t *testing.T) {
 	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
 	skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.39"), "experimental in older versions")
 
+	ctx := testutil.StartSpan(baseContext, t)
+
 	client := testEnv.APIClient()
 
 	dockerfile := `
@@ -39,7 +42,7 @@ func TestBuildWithSession(t *testing.T) {
 	)
 	defer fctx.Close()
 
-	out := testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile)
+	out := testBuildWithSession(ctx, t, client, client.DaemonHost(), fctx.Dir, dockerfile)
 	assert.Check(t, is.Contains(out, "some content"))
 
 	fctx.Add("second", "contentcontent")
@@ -49,25 +52,25 @@ func TestBuildWithSession(t *testing.T) {
 	RUN cat /second
 	`
 
-	out = testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile)
+	out = testBuildWithSession(ctx, t, client, client.DaemonHost(), fctx.Dir, dockerfile)
 	assert.Check(t, is.Equal(strings.Count(out, "Using cache"), 2))
 	assert.Check(t, is.Contains(out, "contentcontent"))
 
-	du, err := client.DiskUsage(context.TODO(), types.DiskUsageOptions{})
+	du, err := client.DiskUsage(ctx, types.DiskUsageOptions{})
 	assert.Check(t, err)
 	assert.Check(t, du.BuilderSize > 10)
 
-	out = testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile)
+	out = testBuildWithSession(ctx, t, client, client.DaemonHost(), fctx.Dir, dockerfile)
 	assert.Check(t, is.Equal(strings.Count(out, "Using cache"), 4))
 
-	du2, err := client.DiskUsage(context.TODO(), types.DiskUsageOptions{})
+	du2, err := client.DiskUsage(ctx, types.DiskUsageOptions{})
 	assert.Check(t, err)
 	assert.Check(t, is.Equal(du.BuilderSize, du2.BuilderSize))
 
 	// rebuild with regular tar, confirm cache still applies
 	fctx.Add("Dockerfile", dockerfile)
 	// FIXME(vdemeester) use sock here
-	res, body, err := request.Do(
+	res, body, err := request.Do(ctx,
 		"/build",
 		request.Host(client.DaemonHost()),
 		request.Method(http.MethodPost),
@@ -81,17 +84,16 @@ func TestBuildWithSession(t *testing.T) {
 	assert.Check(t, is.Contains(string(outBytes), "Successfully built"))
 	assert.Check(t, is.Equal(strings.Count(string(outBytes), "Using cache"), 4))
 
-	_, err = client.BuildCachePrune(context.TODO(), types.BuildCachePruneOptions{All: true})
+	_, err = client.BuildCachePrune(ctx, types.BuildCachePruneOptions{All: true})
 	assert.Check(t, err)
 
-	du, err = client.DiskUsage(context.TODO(), types.DiskUsageOptions{})
+	du, err = client.DiskUsage(ctx, types.DiskUsageOptions{})
 	assert.Check(t, err)
 	assert.Check(t, is.Equal(du.BuilderSize, int64(0)))
 }
 
 //nolint:unused // false positive: linter detects this as "unused"
-func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost string, dir, dockerfile string) (outStr string) {
-	ctx := context.Background()
+func testBuildWithSession(ctx context.Context, t *testing.T, client dclient.APIClient, daemonHost string, dir, dockerfile string) (outStr string) {
 	sess, err := session.NewSession(ctx, "foo1", "foo")
 	assert.Check(t, err)
 
@@ -110,7 +112,7 @@ func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost str
 
 	g.Go(func() error {
 		// FIXME use sock here
-		res, body, err := request.Do(
+		res, body, err := request.Do(ctx,
 			"/build?remote=client-session&session="+sess.ID(),
 			request.Host(daemonHost),
 			request.Method(http.MethodPost),

+ 4 - 3
integration/build/build_squash_test.go

@@ -2,7 +2,6 @@ package build
 
 import (
 	"bytes"
-	"context"
 	"io"
 	"strings"
 	"testing"
@@ -11,6 +10,7 @@ import (
 	dclient "github.com/docker/docker/client"
 	"github.com/docker/docker/integration/internal/container"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/testutil"
 	"github.com/docker/docker/testutil/daemon"
 	"github.com/docker/docker/testutil/fakecontext"
 	"gotest.tools/v3/assert"
@@ -21,12 +21,14 @@ import (
 func TestBuildSquashParent(t *testing.T) {
 	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
 
+	ctx := testutil.StartSpan(baseContext, t)
+
 	var client dclient.APIClient
 	if !testEnv.DaemonInfo.ExperimentalBuild {
 		skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
 
 		d := daemon.New(t, daemon.WithExperimental())
-		d.StartWithBusybox(t)
+		d.StartWithBusybox(ctx, t)
 		defer d.Stop(t)
 		client = d.NewClientT(t)
 	} else {
@@ -43,7 +45,6 @@ func TestBuildSquashParent(t *testing.T) {
 		`
 
 	// build and get the ID that we can use later for history comparison
-	ctx := context.Background()
 	source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile))
 	defer source.Close()
 

Some files were not shown because too many files changed in this diff