
Merge pull request #45799 from cpuguy83/containerd_logrus

Switch all logging to use containerd log pkg
Bjorn Neergaard committed 2 years ago
commit 8805e38398
100 changed files with 532 additions and 464 deletions
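The change applies one mechanical pattern across the files listed below: package-level logrus calls become log.G(ctx) calls using github.com/containerd/containerd/log, and call sites with no context in scope use context.TODO(). A minimal sketch of that pattern follows (the package name and the doWork helper are hypothetical, not taken from the diff):

	// Minimal sketch of the migration pattern applied throughout this change.
	// The doWork helper is a placeholder for whatever operation is being logged.
	package logmigration

	import (
		"context"

		"github.com/containerd/containerd/log"
	)

	func doWork(ctx context.Context) error { return nil } // placeholder operation

	func handle(ctx context.Context) {
		// Before this change, call sites used the package-level logger:
		//
		//	logrus.WithError(err).Error("operation failed")
		//
		// After, the logger is resolved from the context: log.G returns the
		// *logrus.Entry stored in ctx, or the default logger if none is set.
		if err := doWork(ctx); err != nil {
			log.G(ctx).WithError(err).Error("operation failed")
		}
	}

Because log.G returns a *logrus.Entry, the existing logrus idioms (WithField, WithFields, WithError, Debugf, and so on) keep working unchanged at each converted call site.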
1. api/server/httpstatus/status.go (+4 -2)
2. api/server/middleware/cors.go (+2 -2)
3. api/server/middleware/debug.go (+4 -4)
4. api/server/router/build/build_routes.go (+2 -2)
5. api/server/router/container/container_routes.go (+5 -5)
6. api/server/router/container/exec.go (+3 -3)
7. api/server/router/grpc/grpc.go (+2 -2)
8. api/server/router/swarm/cluster_routes.go (+17 -16)
9. api/server/router/system/system_routes.go (+3 -3)
10. api/server/router/volume/volume_routes.go (+3 -3)
11. api/server/server.go (+4 -4)
12. builder/builder-next/adapters/containerimage/pull.go (+4 -4)
13. builder/builder-next/adapters/snapshot/leasemanager.go (+2 -2)
14. builder/builder-next/executor_unix.go (+5 -5)
15. builder/builder-next/exporter/mobyexporter/writer.go (+2 -2)
16. builder/builder-next/worker/worker.go (+4 -4)
17. builder/dockerfile/builder.go (+3 -3)
18. builder/dockerfile/containerbackend.go (+3 -3)
19. builder/dockerfile/imagecontext.go (+2 -2)
20. builder/dockerfile/imageprobe.go (+3 -3)
21. builder/dockerfile/internals.go (+2 -2)
22. builder/remotecontext/detect.go (+3 -2)
23. builder/remotecontext/git.go (+4 -3)
24. cmd/dockerd/daemon.go (+33 -27)
25. cmd/dockerd/daemon_unix.go (+2 -2)
26. cmd/dockerd/daemon_windows.go (+4 -4)
27. cmd/dockerd/docker_windows.go (+2 -3)
28. cmd/dockerd/grpclog.go (+4 -1)
29. cmd/dockerd/metrics.go (+4 -3)
30. cmd/dockerd/service_windows.go (+7 -5)
31. cmd/dockerd/trap/trap.go (+4 -3)
32. container/container.go (+4 -4)
33. container/container_unix.go (+13 -8)
34. container/exec.go (+3 -2)
35. container/health.go (+5 -4)
36. container/monitor.go (+4 -3)
37. container/stream/attach.go (+8 -8)
38. container/stream/streams.go (+5 -3)
39. container/view.go (+4 -3)
40. daemon/attach.go (+5 -5)
41. daemon/cdi.go (+4 -3)
42. daemon/cluster/cluster.go (+5 -5)
43. daemon/cluster/controllers/plugin/controller.go (+2 -1)
44. daemon/cluster/convert/container.go (+4 -3)
45. daemon/cluster/executor/container/adapter.go (+1 -1)
46. daemon/cluster/executor/container/container.go (+3 -2)
47. daemon/cluster/executor/container/executor.go (+3 -3)
48. daemon/cluster/networks.go (+5 -5)
49. daemon/cluster/noderunner.go (+5 -5)
50. daemon/cluster/services.go (+9 -9)
51. daemon/cluster/swarm.go (+4 -4)
52. daemon/config/config.go (+3 -1)
53. daemon/configs.go (+4 -2)
54. daemon/container.go (+4 -3)
55. daemon/container_operations.go (+11 -10)
56. daemon/container_operations_unix.go (+12 -10)
57. daemon/container_operations_windows.go (+10 -8)
58. daemon/containerd/image.go (+4 -4)
59. daemon/containerd/image_builder.go (+6 -6)
60. daemon/containerd/image_changes.go (+2 -2)
61. daemon/containerd/image_children.go (+3 -2)
62. daemon/containerd/image_commit.go (+5 -5)
63. daemon/containerd/image_delete.go (+4 -4)
64. daemon/containerd/image_exporter.go (+8 -7)
65. daemon/containerd/image_import.go (+2 -1)
66. daemon/containerd/image_list.go (+4 -3)
67. daemon/containerd/image_prune.go (+5 -4)
68. daemon/containerd/image_pull.go (+2 -1)
69. daemon/containerd/image_push.go (+4 -4)
70. daemon/containerd/image_tag.go (+2 -1)
71. daemon/containerd/mount.go (+3 -3)
72. daemon/containerd/progress.go (+3 -3)
73. daemon/containerd/resolver.go (+2 -1)
74. daemon/containerd/service.go (+2 -1)
75. daemon/containerfs_linux.go (+2 -2)
76. daemon/create.go (+3 -3)
77. daemon/create_unix.go (+3 -3)
78. daemon/daemon.go (+39 -37)
79. daemon/daemon_linux.go (+7 -6)
80. daemon/daemon_unix.go (+18 -18)
81. daemon/daemon_windows.go (+6 -6)
82. daemon/debugtrap_unix.go (+4 -3)
83. daemon/debugtrap_windows.go (+7 -6)
84. daemon/delete.go (+4 -4)
85. daemon/events.go (+6 -6)
86. daemon/exec.go (+6 -6)
87. daemon/graphdriver/btrfs/btrfs.go (+4 -3)
88. daemon/graphdriver/driver.go (+10 -8)
89. daemon/graphdriver/fsdiff.go (+4 -3)
90. daemon/graphdriver/fuse-overlayfs/fuseoverlayfs.go (+4 -4)
91. daemon/graphdriver/overlay2/overlay.go (+2 -2)
92. daemon/graphdriver/overlayutils/overlayutils.go (+4 -3)
93. daemon/graphdriver/overlayutils/userxattr.go (+6 -5)
94. daemon/graphdriver/vfs/quota_linux.go (+4 -2)
95. daemon/graphdriver/windows/windows.go (+16 -15)
96. daemon/graphdriver/zfs/zfs.go (+9 -8)
97. daemon/graphdriver/zfs/zfs_freebsd.go (+2 -2)
98. daemon/graphdriver/zfs/zfs_linux.go (+4 -2)
99. daemon/health.go (+9 -9)
100. daemon/images/cache.go (+2 -2)

+ 4 - 2
api/server/httpstatus/status.go

@@ -1,10 +1,12 @@
 package httpstatus // import "github.com/docker/docker/api/server/httpstatus"
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 
 	cerrdefs "github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/docker/errdefs"
 	"github.com/sirupsen/logrus"
@@ -19,7 +21,7 @@ type causer interface {
 // FromError retrieves status code from error message.
 func FromError(err error) int {
 	if err == nil {
-		logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
+		log.G(context.TODO()).WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
 		return http.StatusInternalServerError
 	}
 
@@ -65,7 +67,7 @@ func FromError(err error) int {
 			return FromError(e.Cause())
 		}
 
-		logrus.WithFields(logrus.Fields{
+		log.G(context.TODO()).WithFields(logrus.Fields{
 			"module":     "api",
 			"error_type": fmt.Sprintf("%T", err),
 		}).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)

+ 2 - 2
api/server/middleware/cors.go

@@ -4,8 +4,8 @@ import (
 	"context"
 	"net/http"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types/registry"
-	"github.com/sirupsen/logrus"
 )
 
 // CORSMiddleware injects CORS headers to each request
@@ -29,7 +29,7 @@ func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.Res
 			corsHeaders = "*"
 		}
 
-		logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
+		log.G(ctx).Debugf("CORS header is enabled and set to: %s", corsHeaders)
 		w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
 		w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, "+registry.AuthHeader)
 		w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")

+ 4 - 4
api/server/middleware/debug.go

@@ -8,15 +8,15 @@ import (
 	"net/http"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/sirupsen/logrus"
 )
 
 // DebugRequestMiddleware dumps the request to logger
 func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-		logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
+		log.G(ctx).Debugf("Calling %s %s", r.Method, r.RequestURI)
 
 		if r.Method != http.MethodPost {
 			return handler(ctx, w, r, vars)
@@ -44,9 +44,9 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
 			maskSecretKeys(postForm)
 			formStr, errMarshal := json.Marshal(postForm)
 			if errMarshal == nil {
-				logrus.Debugf("form data: %s", string(formStr))
+				log.G(ctx).Debugf("form data: %s", string(formStr))
 			} else {
-				logrus.Debugf("form data: %q", postForm)
+				log.G(ctx).Debugf("form data: %q", postForm)
 			}
 		}
 

+ 2 - 2
api/server/router/build/build_routes.go

@@ -14,6 +14,7 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
@@ -26,7 +27,6 @@ import (
 	"github.com/docker/docker/pkg/streamformatter"
 	units "github.com/docker/go-units"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 type invalidParam struct {
@@ -248,7 +248,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
 		}
 		_, err = output.Write(streamformatter.FormatError(err))
 		if err != nil {
-			logrus.Warnf("could not write error response: %v", err)
+			log.G(ctx).Warnf("could not write error response: %v", err)
 		}
 		return nil
 	}

+ 5 - 5
api/server/router/container/container_routes.go

@@ -9,6 +9,7 @@ import (
 	"runtime"
 	"strconv"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/api/server/httpstatus"
 	"github.com/docker/docker/api/server/httputils"
@@ -23,7 +24,6 @@ import (
 	"github.com/docker/docker/pkg/ioutils"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/net/websocket"
 )
 
@@ -706,11 +706,11 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
 	}
 
 	if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil {
-		logrus.WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path)
+		log.G(ctx).WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path)
 		// Remember to close stream if error happens
 		conn, _, errHijack := hijacker.Hijack()
 		if errHijack != nil {
-			logrus.WithError(err).Errorf("Handler for %s %s: unable to close stream; error when hijacking connection", r.Method, r.URL.Path)
+			log.G(ctx).WithError(err).Errorf("Handler for %s %s: unable to close stream; error when hijacking connection", r.Method, r.URL.Path)
 		} else {
 			statusCode := httpstatus.FromError(err)
 			statusText := http.StatusText(statusCode)
@@ -780,9 +780,9 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
 	select {
 	case <-started:
 		if err != nil {
-			logrus.Errorf("Error attaching websocket: %s", err)
+			log.G(ctx).Errorf("Error attaching websocket: %s", err)
 		} else {
-			logrus.Debug("websocket connection was closed by client")
+			log.G(ctx).Debug("websocket connection was closed by client")
 		}
 		return nil
 	default:

+ 3 - 3
api/server/router/container/exec.go

@@ -7,13 +7,13 @@ import (
 	"net/http"
 	"strconv"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/stdcopy"
-	"github.com/sirupsen/logrus"
 )
 
 func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -56,7 +56,7 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
 	// Register an instance of Exec in container.
 	id, err := s.backend.ContainerExecCreate(vars["name"], execConfig)
 	if err != nil {
-		logrus.Errorf("Error setting up exec command in container %s: %v", vars["name"], err)
+		log.G(ctx).Errorf("Error setting up exec command in container %s: %v", vars["name"], err)
 		return err
 	}
 
@@ -154,7 +154,7 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
 			return err
 		}
 		stdout.Write([]byte(err.Error() + "\r\n"))
-		logrus.Errorf("Error running exec %s in container: %v", execName, err)
+		log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err)
 	}
 	return nil
 }

+ 2 - 2
api/server/router/grpc/grpc.go

@@ -4,11 +4,11 @@ import (
 	"context"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/router"
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	"github.com/moby/buildkit/util/grpcerrors"
 	"github.com/moby/buildkit/util/tracing/detect"
-	"github.com/sirupsen/logrus"
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 	"go.opentelemetry.io/otel/propagation"
 	"go.opentelemetry.io/otel/trace"
@@ -33,7 +33,7 @@ var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceCon
 func NewRouter(backends ...Backend) router.Router {
 	tp, err := detect.TracerProvider()
 	if err != nil {
-		logrus.WithError(err).Error("failed to detect trace provider")
+		log.G(context.TODO()).WithError(err).Error("failed to detect trace provider")
 	}
 
 	opts := []grpc.ServerOption{grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor), grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor)}

+ 17 - 16
api/server/router/swarm/cluster_routes.go

@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"strconv"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/httputils"
 	basictypes "github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
@@ -36,7 +37,7 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
 	}
 	nodeID, err := sr.backend.Init(req)
 	if err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error initializing swarm")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error initializing swarm")
 		return err
 	}
 	return httputils.WriteJSON(w, http.StatusOK, nodeID)
@@ -62,7 +63,7 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter,
 func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	swarm, err := sr.backend.Inspect()
 	if err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error getting swarm")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting swarm")
 		return err
 	}
 
@@ -114,7 +115,7 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
 	}
 
 	if err := sr.backend.Update(version, swarm, flags); err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error configuring swarm")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error configuring swarm")
 		return err
 	}
 	return nil
@@ -127,7 +128,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
 	}
 
 	if err := sr.backend.UnlockSwarm(req); err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error unlocking swarm")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error unlocking swarm")
 		return err
 	}
 	return nil
@@ -136,7 +137,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
 func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	unlockKey, err := sr.backend.GetUnlockKey()
 	if err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key")
 		return err
 	}
 
@@ -168,7 +169,7 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r
 
 	services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter, Status: status})
 	if err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error getting services")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting services")
 		return err
 	}
 
@@ -194,7 +195,7 @@ func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r
 
 	service, err := sr.backend.GetService(vars["id"], insertDefaults)
 	if err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{
 			"error":      err,
 			"service-id": vars["id"],
 		}).Debug("Error getting service")
@@ -221,7 +222,7 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
 	}
 	resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
 	if err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithFields(logrus.Fields{
 			"error":        err,
 			"service-name": service.Name,
 		}).Debug("Error creating service")
@@ -260,7 +261,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
 
 	resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
 	if err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{
 			"error":      err,
 			"service-id": vars["id"],
 		}).Debug("Error updating service")
@@ -271,7 +272,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
 
 func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := sr.backend.RemoveService(vars["id"]); err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{
 			"error":      err,
 			"service-id": vars["id"],
 		}).Debug("Error removing service")
@@ -315,7 +316,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
 
 	nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter})
 	if err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error getting nodes")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting nodes")
 		return err
 	}
 
@@ -325,7 +326,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
 func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	node, err := sr.backend.GetNode(vars["id"])
 	if err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{
 			"error":   err,
 			"node-id": vars["id"],
 		}).Debug("Error getting node")
@@ -349,7 +350,7 @@ func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r
 	}
 
 	if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{
 			"error":   err,
 			"node-id": vars["id"],
 		}).Debug("Error updating node")
@@ -366,7 +367,7 @@ func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r
 	force := httputils.BoolValue(r, "force")
 
 	if err := sr.backend.RemoveNode(vars["id"], force); err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{
 			"error":   err,
 			"node-id": vars["id"],
 		}).Debug("Error removing node")
@@ -386,7 +387,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
 
 	tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter})
 	if err != nil {
-		logrus.WithContext(ctx).WithError(err).Debug("Error getting tasks")
+		log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting tasks")
 		return err
 	}
 
@@ -396,7 +397,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
 func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	task, err := sr.backend.GetTask(vars["id"])
 	if err != nil {
-		logrus.WithContext(ctx).WithFields(logrus.Fields{
+		log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{
 			"error":   err,
 			"task-id": vars["id"],
 		}).Debug("Error getting task")

+ 3 - 3
api/server/router/system/system_routes.go

@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/server/router/build"
 	"github.com/docker/docker/api/types"
@@ -18,7 +19,6 @@ import (
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -279,7 +279,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
 		case ev := <-l:
 			jev, ok := ev.(events.Message)
 			if !ok {
-				logrus.Warnf("unexpected event message: %q", ev)
+				log.G(ctx).Warnf("unexpected event message: %q", ev)
 				continue
 			}
 			if err := enc.Encode(jev); err != nil {
@@ -288,7 +288,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
 		case <-timeout:
 			return nil
 		case <-ctx.Done():
-			logrus.Debug("Client context cancelled, stop sending events")
+			log.G(ctx).Debug("Client context cancelled, stop sending events")
 			return nil
 		}
 	}

+ 3 - 3
api/server/router/volume/volume_routes.go

@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"strconv"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/versions"
@@ -13,7 +14,6 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/volume/service/opts"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -116,10 +116,10 @@ func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWri
 	// Instead, we will allow creating a volume with a duplicate name, which
 	// should not break anything.
 	if req.ClusterVolumeSpec != nil && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) {
-		logrus.Debug("using cluster volume")
+		log.G(ctx).Debug("using cluster volume")
 		vol, err = v.cluster.CreateVolume(req)
 	} else {
-		logrus.Debug("using regular volume")
+		log.G(ctx).Debug("using regular volume")
 		vol, err = v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels))
 	}
 

+ 4 - 4
api/server/server.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"net/http"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/server/httpstatus"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/server/middleware"
@@ -11,7 +12,6 @@ import (
 	"github.com/docker/docker/api/server/router/debug"
 	"github.com/docker/docker/dockerversion"
 	"github.com/gorilla/mux"
-	"github.com/sirupsen/logrus"
 )
 
 // versionMatcher defines a variable matcher to be parsed by the router
@@ -53,7 +53,7 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
 		if err := handlerFunc(ctx, w, r, vars); err != nil {
 			statusCode := httpstatus.FromError(err)
 			if statusCode >= 500 {
-				logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
+				log.G(ctx).Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
 			}
 			makeErrorHandler(err)(w, r)
 		}
@@ -72,12 +72,12 @@ func (pageNotFoundError) NotFound() {}
 func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
 	m := mux.NewRouter()
 
-	logrus.Debug("Registering routers")
+	log.G(context.TODO()).Debug("Registering routers")
 	for _, apiRouter := range routers {
 		for _, r := range apiRouter.Routes() {
 			f := s.makeHTTPHandler(r.Handler())
 
-			logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
+			log.G(context.TODO()).Debugf("Registering %s, %s", r.Method(), r.Path())
 			m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
 			m.Path(r.Path()).Methods(r.Method()).Handler(f)
 		}

+ 4 - 4
builder/builder-next/adapters/containerimage/pull.go

@@ -14,6 +14,7 @@ import (
 	"github.com/containerd/containerd/gc"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	ctdreference "github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
@@ -42,7 +43,6 @@ import (
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/time/rate"
 )
 
@@ -147,7 +147,7 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re
 		img, err := is.resolveLocal(ref)
 		if err == nil {
 			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
-				logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
+				log.G(ctx).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
 					path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
 					path.Join(img.OS, img.Architecture, img.Variant),
 				)
@@ -245,7 +245,7 @@ func (p *puller) resolveLocal() {
 			img, err := p.is.resolveLocal(ref)
 			if err == nil {
 				if !platformMatches(img, &p.platform) {
-					logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
+					log.G(context.TODO()).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
 						path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
 						path.Join(img.OS, img.Architecture, img.Variant),
 					)
@@ -828,7 +828,7 @@ func cacheKeyFromConfig(dt []byte) digest.Digest {
 	var img ocispec.Image
 	err := json.Unmarshal(dt, &img)
 	if err != nil {
-		logrus.WithError(err).Errorf("failed to unmarshal image config for cache key %v", err)
+		log.G(context.TODO()).WithError(err).Errorf("failed to unmarshal image config for cache key %v", err)
 		return digest.FromBytes(dt)
 	}
 	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {

+ 2 - 2
builder/builder-next/adapters/snapshot/leasemanager.go

@@ -5,7 +5,7 @@ import (
 	"sync"
 
 	"github.com/containerd/containerd/leases"
-	"github.com/sirupsen/logrus"
+	"github.com/containerd/containerd/log"
 	bolt "go.etcd.io/bbolt"
 )
 
@@ -126,7 +126,7 @@ func (l *sLM) delRef(lID, sID string) {
 		if len(leases) == 0 {
 			delete(l.bySnapshot, sID)
 			if err := l.s.remove(context.TODO(), sID); err != nil {
-				logrus.Warnf("failed to remove snapshot %v", sID)
+				log.G(context.TODO()).Warnf("failed to remove snapshot %v", sID)
 			}
 		}
 	}

+ 5 - 5
builder/builder-next/executor_unix.go

@@ -9,6 +9,7 @@ import (
 	"strconv"
 	"sync"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/libnetwork"
 	"github.com/docker/docker/pkg/idtools"
@@ -20,7 +21,6 @@ import (
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/network"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/sirupsen/logrus"
 )
 
 const networkName = "bridge"
@@ -39,7 +39,7 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi
 		for _, fi := range fis {
 			fp := filepath.Join(netRoot, fi.Name())
 			if err := os.RemoveAll(fp); err != nil {
-				logrus.WithError(err).Errorf("failed to delete old network state: %v", fp)
+				log.G(context.TODO()).WithError(err).Errorf("failed to delete old network state: %v", fp)
 			}
 		}
 	}
@@ -124,7 +124,7 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n libnetwork.Network) {
 func (iface *lnInterface) Set(s *specs.Spec) error {
 	<-iface.ready
 	if iface.err != nil {
-		logrus.WithError(iface.err).Error("failed to set networking spec")
+		log.G(context.TODO()).WithError(iface.err).Error("failed to set networking spec")
 		return iface.err
 	}
 	shortNetCtlrID := stringid.TruncateID(iface.provider.Controller.ID())
@@ -143,10 +143,10 @@ func (iface *lnInterface) Close() error {
 	if iface.sbx != nil {
 		go func() {
 			if err := iface.sbx.Delete(); err != nil {
-				logrus.WithError(err).Errorf("failed to delete builder network sandbox")
+				log.G(context.TODO()).WithError(err).Errorf("failed to delete builder network sandbox")
 			}
 			if err := os.RemoveAll(filepath.Join(iface.provider.Root, iface.sbx.ContainerID())); err != nil {
-				logrus.WithError(err).Errorf("failed to delete builder sandbox directory")
+				log.G(context.TODO()).WithError(err).Errorf("failed to delete builder sandbox directory")
 			}
 		}()
 	}

+ 2 - 2
builder/builder-next/exporter/mobyexporter/writer.go

@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/util/progress"
@@ -12,7 +13,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 func emptyImageConfig() ([]byte, error) {
@@ -97,7 +97,7 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
 	if historyLayers > len(diffs) {
 		// this case shouldn't happen but if it does force set history layers empty
 		// from the bottom
-		logrus.Warn("invalid image config with unaccounted layers")
+		log.G(context.TODO()).Warn("invalid image config with unaccounted layers")
 		historyCopy := make([]ocispec.History, 0, len(history))
 		var l int
 		for _, h := range history {

+ 4 - 4
builder/builder-next/worker/worker.go

@@ -10,6 +10,7 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
@@ -46,7 +47,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/semaphore"
 )
 
@@ -110,7 +110,7 @@ func NewWorker(opt Opt) (*Worker, error) {
 	if err == nil {
 		sm.Register(gs)
 	} else {
-		logrus.Warnf("Could not register builder git source: %s", err)
+		log.G(context.TODO()).Warnf("Could not register builder git source: %s", err)
 	}
 
 	hs, err := http.NewSource(http.Opt{
@@ -120,7 +120,7 @@ func NewWorker(opt Opt) (*Worker, error) {
 	if err == nil {
 		sm.Register(hs)
 	} else {
-		logrus.Warnf("Could not register builder http source: %s", err)
+		log.G(context.TODO()).Warnf("Could not register builder http source: %s", err)
 	}
 
 	ss, err := local.NewSource(local.Opt{
@@ -129,7 +129,7 @@ func NewWorker(opt Opt) (*Worker, error) {
 	if err == nil {
 		sm.Register(ss)
 	} else {
-		logrus.Warnf("Could not register builder local source: %s", err)
+		log.G(context.TODO()).Warnf("Could not register builder local source: %s", err)
 	}
 
 	return &Worker{

+ 3 - 3
builder/dockerfile/builder.go

@@ -8,6 +8,7 @@ import (
 	"sort"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
@@ -23,7 +24,6 @@ import (
 	"github.com/moby/buildkit/frontend/dockerfile/shell"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/syncmap"
 )
 
@@ -76,7 +76,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
 	defer func() {
 		if source != nil {
 			if err := source.Close(); err != nil {
-				logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
+				log.G(ctx).Debugf("[BUILDER] failed to remove temporary context: %v", err)
 			}
 		}
 	}()
@@ -283,7 +283,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseR
 		for _, cmd := range stage.Commands {
 			select {
 			case <-ctx.Done():
-				logrus.Debug("Builder: build cancelled!")
+				log.G(ctx).Debug("Builder: build cancelled!")
 				fmt.Fprint(b.Stdout, "Build cancelled\n")
 				buildsFailed.WithValues(metricsBuildCanceled).Inc()
 				return nil, errors.New("Build cancelled")

+ 3 - 3
builder/dockerfile/containerbackend.go

@@ -5,13 +5,13 @@ import (
 	"fmt"
 	"io"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/builder"
 	containerpkg "github.com/docker/docker/container"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 type containerManager struct {
@@ -60,7 +60,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i
 	go func() {
 		select {
 		case <-ctx.Done():
-			logrus.Debugln("Build cancelled, killing and removing container:", cID)
+			log.G(ctx).Debugln("Build cancelled, killing and removing container:", cID)
 			c.backend.ContainerKill(cID, "")
 			c.removeContainer(cID, stdout)
 			cancelErrCh <- errCancelled
@@ -102,7 +102,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i
 
 func logCancellationError(cancelErrCh chan error, msg string) {
 	if cancelErr := <-cancelErrCh; cancelErr != nil {
-		logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg)
+		log.G(context.TODO()).Debugf("Build cancelled (%v): %s", cancelErr, msg)
 	}
 }
 

+ 2 - 2
builder/dockerfile/imagecontext.go

@@ -4,13 +4,13 @@ import (
 	"context"
 	"runtime"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	dockerimage "github.com/docker/docker/image"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 type getAndMountFunc func(context.Context, string, bool, *ocispec.Platform) (builder.Image, builder.ROLayer, error)
@@ -64,7 +64,7 @@ func (m *imageSources) Get(ctx context.Context, idOrRef string, localOnly bool,
 func (m *imageSources) Unmount() (retErr error) {
 	for _, im := range m.mounts {
 		if err := im.unmount(); err != nil {
-			logrus.Error(err)
+			log.G(context.TODO()).Error(err)
 			retErr = err
 		}
 	}

+ 3 - 3
builder/dockerfile/imageprobe.go

@@ -3,9 +3,9 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile"
 import (
 	"context"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/builder"
-	"github.com/sirupsen/logrus"
 )
 
 // ImageProber exposes an Image cache to the Builder. It supports resetting a
@@ -60,11 +60,11 @@ func (c *imageProber) Probe(parentID string, runConfig *container.Config) (strin
 		return "", err
 	}
 	if len(cacheID) == 0 {
-		logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd)
+		log.G(context.TODO()).Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd)
 		c.cacheBusted = true
 		return "", nil
 	}
-	logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd)
+	log.G(context.TODO()).Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd)
 	return cacheID, nil
 }
 

+ 2 - 2
builder/dockerfile/internals.go

@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
@@ -21,7 +22,6 @@ import (
 	"github.com/docker/go-connections/nat"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 func (b *Builder) getArchiver() *archive.Archiver {
@@ -348,7 +348,7 @@ func (b *Builder) probeAndCreate(ctx context.Context, dispatchState *dispatchSta
 }
 
 func (b *Builder) create(ctx context.Context, runConfig *container.Config) (string, error) {
-	logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)
+	log.G(ctx).Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)
 
 	hostConfig := hostConfigFromOptions(b.options)
 	container, err := b.containerManager.Create(ctx, runConfig, hostConfig)

+ 3 - 2
builder/remotecontext/detect.go

@@ -2,12 +2,14 @@ package remotecontext // import "github.com/docker/docker/builder/remotecontext"
 
 import (
 	"bufio"
+	"context"
 	"fmt"
 	"io"
 	"os"
 	"runtime"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/continuity/driver"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
@@ -18,7 +20,6 @@ import (
 	"github.com/moby/buildkit/frontend/dockerfile/parser"
 	"github.com/moby/patternmatcher"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // ClientSessionRemote is identifier for client-session context transport
@@ -133,7 +134,7 @@ func removeDockerfile(c modifiableContext, filesToRemove ...string) error {
 	for _, fileToRemove := range filesToRemove {
 		if rm, _ := patternmatcher.MatchesOrParentMatches(fileToRemove, excludes); rm {
 			if err := c.Remove(fileToRemove); err != nil {
-				logrus.Errorf("failed to remove %s: %v", fileToRemove, err)
+				log.G(context.TODO()).Errorf("failed to remove %s: %v", fileToRemove, err)
 			}
 		}
 	}

+ 4 - 3
builder/remotecontext/git.go

@@ -1,12 +1,13 @@
 package remotecontext // import "github.com/docker/docker/builder/remotecontext"
 
 import (
+	"context"
 	"os"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/remotecontext/git"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/sirupsen/logrus"
 )
 
 // MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
@@ -24,11 +25,11 @@ func MakeGitContext(gitURL string) (builder.Source, error) {
 	defer func() {
 		err := c.Close()
 		if err != nil {
-			logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context")
+			log.G(context.TODO()).WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context")
 		}
 		err = os.RemoveAll(root)
 		if err != nil {
-			logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root")
+			log.G(context.TODO()).WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root")
 		}
 	}()
 	return FromArchive(c)

+ 33 - 27
cmd/dockerd/daemon.go

@@ -16,6 +16,7 @@ import (
 
 	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
 	containerddefaults "github.com/containerd/containerd/defaults"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api"
 	apiserver "github.com/docker/docker/api/server"
 	buildbackend "github.com/docker/docker/api/server/backend/build"
@@ -83,6 +84,8 @@ func NewDaemonCli() *DaemonCli {
 }
 
 func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
+	ctx := context.TODO()
+
 	if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
 		return err
 	}
@@ -101,7 +104,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 	configureProxyEnv(cli.Config)
 	configureDaemonLogs(cli.Config)
 
-	logrus.Info("Starting up")
+	log.G(ctx).Info("Starting up")
 
 	cli.configFile = &opts.configFile
 	cli.flags = opts.flags
@@ -111,14 +114,14 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 	}
 
 	if cli.Config.Experimental {
-		logrus.Warn("Running experimental build")
+		log.G(ctx).Warn("Running experimental build")
 	}
 
 	if cli.Config.IsRootless() {
-		logrus.Warn("Running in rootless mode. This mode has feature limitations.")
+		log.G(ctx).Warn("Running in rootless mode. This mode has feature limitations.")
 	}
 	if rootless.RunningWithRootlessKit() {
-		logrus.Info("Running with RootlessKit integration")
+		log.G(ctx).Info("Running with RootlessKit integration")
 		if !cli.Config.IsRootless() {
 			return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
 		}
@@ -155,7 +158,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
 		defer func() {
 			if err := os.Remove(cli.Pidfile); err != nil {
-				logrus.Error(err)
+				log.G(ctx).Error(err)
 			}
 		}()
 	}
@@ -164,7 +167,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
 		if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
 			// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
-			logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
+			log.G(ctx).WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
 		}
 	}
 
@@ -199,7 +202,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		<-cli.apiShutdown
 		err := httpServer.Shutdown(apiShutdownCtx)
 		if err != nil {
-			logrus.WithError(err).Error("Error shutting down http server")
+			log.G(ctx).WithError(err).Error("Error shutting down http server")
 		}
 		close(apiShutdownDone)
 	}()
@@ -217,7 +220,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 			// e.g. because the daemon failed to start.
 			// Stop the HTTP server with no grace period.
 			if closeErr := httpServer.Close(); closeErr != nil {
-				logrus.WithError(closeErr).Error("Error closing http server")
+				log.G(ctx).WithError(closeErr).Error("Error closing http server")
 			}
 		}
 	}()
@@ -262,7 +265,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 
 	c, err := createAndStartCluster(cli, d)
 	if err != nil {
-		logrus.Fatalf("Error starting cluster component: %v", err)
+		log.G(ctx).Fatalf("Error starting cluster component: %v", err)
 	}
 
 	// Restart all autostart containers which has a swarm endpoint
@@ -270,7 +273,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 	// initialized the cluster.
 	d.RestartSwarmContainers()
 
-	logrus.Info("Daemon has completed initialization")
+	log.G(ctx).Info("Daemon has completed initialization")
 
 	routerCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
@@ -300,9 +303,9 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		apiWG.Add(1)
 		go func(ls net.Listener) {
 			defer apiWG.Done()
-			logrus.Infof("API listen on %s", ls.Addr())
+			log.G(ctx).Infof("API listen on %s", ls.Addr())
 			if err := httpServer.Serve(ls); err != http.ErrServerClosed {
-				logrus.WithFields(logrus.Fields{
+				log.G(ctx).WithFields(logrus.Fields{
 					logrus.ErrorKey: err,
 					"listener":      ls.Addr(),
 				}).Error("ServeAPI error")
@@ -330,7 +333,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		return errors.Wrap(err, "shutting down due to ServeAPI error")
 	}
 
-	logrus.Info("Daemon shutdown complete")
+	log.G(ctx).Info("Daemon shutdown complete")
 	return nil
 }
 
@@ -396,14 +399,15 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
 }
 
 func (cli *DaemonCli) reloadConfig() {
+	ctx := context.TODO()
 	reload := func(c *config.Config) {
 		if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
-			logrus.Fatalf("Error validating authorization plugin: %v", err)
+			log.G(ctx).Fatalf("Error validating authorization plugin: %v", err)
 			return
 		}
 
 		if err := cli.d.Reload(c); err != nil {
-			logrus.Errorf("Error reconfiguring the daemon: %v", err)
+			log.G(ctx).Errorf("Error reconfiguring the daemon: %v", err)
 			return
 		}
 
@@ -424,7 +428,7 @@ func (cli *DaemonCli) reloadConfig() {
 	}
 
 	if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
-		logrus.Error(err)
+		log.G(ctx).Error(err)
 	}
 }
 
@@ -457,9 +461,9 @@ func shutdownDaemon(ctx context.Context, d *daemon.Daemon) {
 
 	<-ctx.Done()
 	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
-		logrus.Error("Force shutdown daemon")
+		log.G(ctx).Error("Force shutdown daemon")
 	} else {
-		logrus.Debug("Clean shutdown succeeded")
+		log.G(ctx).Debug("Clean shutdown succeeded")
 	}
 }
 
@@ -724,6 +728,8 @@ func checkTLSAuthOK(c *config.Config) bool {
 }
 
 func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, []string, error) {
+	ctx := context.TODO()
+
 	if len(cfg.Hosts) == 0 {
 		return nil, nil, errors.New("no hosts configured")
 	}
@@ -742,8 +748,8 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [
 		// It's a bad idea to bind to TCP without tlsverify.
 		authEnabled := tlsConfig != nil && tlsConfig.ClientAuth == tls.RequireAndVerifyClientCert
 		if proto == "tcp" && !authEnabled {
-			logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
-			logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
+			log.G(ctx).WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
+			log.G(ctx).WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
 			time.Sleep(time.Second)
 
 			// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
@@ -761,17 +767,17 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [
 					if ip == nil {
 						ipA, err := net.ResolveIPAddr("ip", ipAddr)
 						if err != nil {
-							logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
+							log.G(ctx).WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
 						}
 						if ipA != nil {
 							ip = ipA.IP
 						}
 					}
 					if ip == nil || !ip.IsLoopback() {
-						logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
-						logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
-						logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
-						logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
+						log.G(ctx).WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
+						log.G(ctx).WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
+						log.G(ctx).WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
+						log.G(ctx).WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
 
 						time.Sleep(15 * time.Second)
 					}
@@ -788,7 +794,7 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [
 		if err != nil {
 			return nil, nil, err
 		}
-		logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
+		log.G(ctx).Debugf("Listener created for HTTP on %s (%s)", proto, addr)
 		hosts = append(hosts, addr)
 		lss = append(lss, ls...)
 	}
@@ -885,7 +891,7 @@ func configureProxyEnv(conf *config.Config) {
 
 func overrideProxyEnv(name, val string) {
 	if oldVal := os.Getenv(name); oldVal != "" && oldVal != val {
-		logrus.WithFields(logrus.Fields{
+		log.G(context.TODO()).WithFields(logrus.Fields{
 			"name":      name,
 			"old-value": config.MaskCredentials(oldVal),
 			"new-value": config.MaskCredentials(val),

+ 2 - 2
cmd/dockerd/daemon_unix.go

@@ -11,13 +11,13 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/libcontainerd/supervisor"
 	"github.com/docker/docker/libnetwork/portallocator"
 	"github.com/docker/docker/pkg/homedir"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -132,7 +132,7 @@ func (cli *DaemonCli) initContainerd(ctx context.Context) (func(time.Duration) e
 		return nil, nil
 	}
 
-	logrus.Info("containerd not running, starting managed containerd")
+	log.G(ctx).Info("containerd not running, starting managed containerd")
 	opts, err := cli.getContainerdDaemonOpts()
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to generate containerd options")

+ 4 - 4
cmd/dockerd/daemon_windows.go

@@ -8,7 +8,7 @@ import (
 
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/pkg/system"
-	"github.com/sirupsen/logrus"
+	"github.com/containerd/containerd/log"
 	"golang.org/x/sys/windows"
 )
 
@@ -28,7 +28,7 @@ func preNotifyReady() {
 	if service != nil {
 		err := service.started()
 		if err != nil {
-			logrus.Fatal(err)
+			log.G(context.TODO()).Fatal(err)
 		}
 	}
 }
@@ -45,7 +45,7 @@ func notifyStopping() {
 func notifyShutdown(err error) {
 	if service != nil {
 		if err != nil {
-			logrus.Fatal(err)
+			log.G(context.TODO()).Fatal(err)
 		}
 		service.stopped(err)
 	}
@@ -60,7 +60,7 @@ func (cli *DaemonCli) setupConfigReloadTrap() {
 		event := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid())
 		ev, _ := windows.UTF16PtrFromString(event)
 		if h, _ := windows.CreateEvent(&sa, 0, 0, ev); h != 0 {
-			logrus.Debugf("Config reload - waiting signal at %s", event)
+			log.G(context.TODO()).Debugf("Config reload - waiting signal at %s", event)
 			for {
 				windows.WaitForSingleObject(h, windows.INFINITE)
 				cli.reloadConfig()

+ 2 - 3
cmd/dockerd/docker_windows.go

@@ -1,11 +1,10 @@
 package main
 
 import (
-	"io"
-	"path/filepath"
-
 	"github.com/Microsoft/go-winio/pkg/etwlogrus"
 	"github.com/sirupsen/logrus"
+	"io"
+	"path/filepath"
 )
 
 func runDaemon(opts *daemonOptions) error {

+ 4 - 1
cmd/dockerd/grpclog.go

@@ -1,6 +1,9 @@
 package main
 
 import (
+	"context"
+
+	"github.com/containerd/containerd/log"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc/grpclog"
 )
@@ -12,6 +15,6 @@ import (
 // warn => debug
 // error => warn
 func configureGRPCLog() {
-	l := logrus.WithField("library", "grpc")
+	l := log.G(context.TODO()).WithField("library", "grpc")
 	grpclog.SetLoggerV2(grpclog.NewLoggerV2(l.WriterLevel(logrus.TraceLevel), l.WriterLevel(logrus.DebugLevel), l.WriterLevel(logrus.WarnLevel)))
 }

+ 4 - 3
cmd/dockerd/metrics.go

@@ -1,13 +1,14 @@
 package main
 
 import (
+	"context"
 	"net"
 	"net/http"
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	metrics "github.com/docker/go-metrics"
-	"github.com/sirupsen/logrus"
 )
 
 func startMetricsServer(addr string) error {
@@ -24,13 +25,13 @@ func startMetricsServer(addr string) error {
 	mux := http.NewServeMux()
 	mux.Handle("/metrics", metrics.Handler())
 	go func() {
-		logrus.Infof("metrics API listening on %s", l.Addr())
+		log.G(context.TODO()).Infof("metrics API listening on %s", l.Addr())
 		srv := &http.Server{
 			Handler:           mux,
 			ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout.
 		}
 		if err := srv.Serve(l); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
-			logrus.WithError(err).Error("error serving metrics API")
+			log.G(context.TODO()).WithError(err).Error("error serving metrics API")
 		}
 	}()
 	return nil

+ 7 - 5
cmd/dockerd/service_windows.go

@@ -2,16 +2,18 @@ package main
 
 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"time"
 
 	"github.com/sirupsen/logrus"
+
+	"github.com/containerd/containerd/log"
 	"github.com/spf13/pflag"
 	"golang.org/x/sys/windows"
 	"golang.org/x/sys/windows/svc"
@@ -295,7 +297,7 @@ func (h *handler) started() error {
 }
 
 func (h *handler) stopped(err error) {
-	logrus.Debugf("Stopping service: %v", err)
+	log.G(context.TODO()).Debugf("Stopping service: %v", err)
 	h.tosvc <- err != nil
 	<-h.fromsvc
 }
@@ -308,12 +310,12 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S
 	// Wait for initialization to complete.
 	failed := <-h.tosvc
 	if failed {
-		logrus.Debug("Aborting service start due to failure during initialization")
+		log.G(context.TODO()).Debug("Aborting service start due to failure during initialization")
 		return true, 1
 	}
 
 	s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)}
-	logrus.Debug("Service running")
+	log.G(context.TODO()).Debug("Service running")
 Loop:
 	for {
 		select {
@@ -380,7 +382,7 @@ func initPanicFile(path string) error {
 	os.Stderr = os.NewFile(panicFile.Fd(), "/dev/stderr")
 
 	// Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether
-	log.SetOutput(os.Stderr)
+	logrus.SetOutput(os.Stderr)
 
 	return nil
 }

+ 4 - 3
cmd/dockerd/trap/trap.go

@@ -1,11 +1,12 @@
 package trap // import "github.com/docker/docker/cmd/dockerd/trap"
 
 import (
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
 
-	"github.com/sirupsen/logrus"
+	"github.com/containerd/containerd/log"
 )
 
 const (
@@ -29,7 +30,7 @@ func Trap(cleanup func()) {
 	go func() {
 		var interruptCount int
 		for sig := range c {
-			logrus.Infof("Processing signal '%v'", sig)
+			log.G(context.TODO()).Infof("Processing signal '%v'", sig)
 			if interruptCount < forceQuitCount {
 				interruptCount++
 				// Initiate the cleanup only once
@@ -39,7 +40,7 @@ func Trap(cleanup func()) {
 				continue
 			}
 
-			logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+			log.G(context.TODO()).Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
 			os.Exit(128 + int(sig.(syscall.Signal)))
 		}
 	}()

+ 4 - 4
container/container.go

@@ -15,6 +15,7 @@ import (
 	"time"
 
 	"github.com/containerd/containerd/cio"
+	"github.com/containerd/containerd/log"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	swarmtypes "github.com/docker/docker/api/types/swarm"
@@ -41,7 +42,6 @@ import (
 	"github.com/moby/sys/symlink"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -317,7 +317,7 @@ func (container *Container) GetResourcePath(path string) (string, error) {
 	// from the error being propagated all the way back to the client. This makes
 	// debugging significantly easier and clearly indicates the error comes from the daemon.
 	if e != nil {
-		logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS, path, e)
+		log.G(context.TODO()).Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS, path, e)
 	}
 	return r, e
 }
@@ -432,7 +432,7 @@ func (container *Container) StartLogger() (logger.Logger, error) {
 			}
 
 			if !container.LocalLogCacheMeta.HaveNotifyEnabled {
-				logrus.WithField("container", container.ID).WithField("driver", container.HostConfig.LogConfig.Type).Info("Configured log driver does not support reads, enabling local file cache for container logs")
+				log.G(context.TODO()).WithField("container", container.ID).WithField("driver", container.HostConfig.LogConfig.Type).Info("Configured log driver does not support reads, enabling local file cache for container logs")
 				container.LocalLogCacheMeta.HaveNotifyEnabled = true
 			}
 			info.LogPath = logPath
@@ -673,7 +673,7 @@ func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
 	if container.StreamConfig.Stdin() == nil && !container.Config.Tty {
 		if iop.Stdin != nil {
 			if err := iop.Stdin.Close(); err != nil {
-				logrus.Warnf("error closing stdin: %+v", err)
+				log.G(context.TODO()).Warnf("error closing stdin: %+v", err)
 			}
 		}
 	}

+ 13 - 8
container/container_unix.go

@@ -3,10 +3,12 @@
 package container // import "github.com/docker/docker/container"
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"syscall"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/continuity/fs"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
@@ -18,7 +20,6 @@ import (
 	"github.com/moby/sys/mount"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -64,12 +65,14 @@ func (container *Container) BuildHostnameFile() error {
 
 // NetworkMounts returns the list of network mounts.
 func (container *Container) NetworkMounts() []Mount {
+	ctx := context.TODO()
+
 	var mounts []Mount
 	shared := container.HostConfig.NetworkMode.IsContainer()
 	parser := volumemounts.NewParser()
 	if container.ResolvConfPath != "" {
 		if _, err := os.Stat(container.ResolvConfPath); err != nil {
-			logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
+			log.G(ctx).Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
 		} else {
 			writable := !container.HostConfig.ReadonlyRootfs
 			if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
@@ -87,7 +90,7 @@ func (container *Container) NetworkMounts() []Mount {
 	}
 	if container.HostnamePath != "" {
 		if _, err := os.Stat(container.HostnamePath); err != nil {
-			logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
+			log.G(ctx).Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
 		} else {
 			writable := !container.HostConfig.ReadonlyRootfs
 			if m, exists := container.MountPoints["/etc/hostname"]; exists {
@@ -105,7 +108,7 @@ func (container *Container) NetworkMounts() []Mount {
 	}
 	if container.HostsPath != "" {
 		if _, err := os.Stat(container.HostsPath); err != nil {
-			logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
+			log.G(ctx).Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
 		} else {
 			writable := !container.HostConfig.ReadonlyRootfs
 			if m, exists := container.MountPoints["/etc/hosts"]; exists {
@@ -146,7 +149,7 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st
 
 	defer func() {
 		if err := v.Unmount(id); err != nil {
-			logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err)
+			log.G(context.TODO()).Warnf("error while unmounting volume %s: %v", v.Name(), err)
 		}
 	}()
 	if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {
@@ -363,13 +366,15 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
 // unmounts each volume normally.
 // This is used from daemon/archive for `docker cp`
 func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
+	ctx := context.TODO()
+
 	networkMounts := container.NetworkMounts()
 	mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
 
 	for _, mntPoint := range container.MountPoints {
 		dest, err := container.GetResourcePath(mntPoint.Destination)
 		if err != nil {
-			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
+			log.G(ctx).Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
 			continue
 		}
 		mountPaths = append(mountPaths, dest)
@@ -378,7 +383,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st
 	for _, m := range networkMounts {
 		dest, err := container.GetResourcePath(m.Destination)
 		if err != nil {
-			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
+			log.G(ctx).Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
 			continue
 		}
 		mountPaths = append(mountPaths, dest)
@@ -386,7 +391,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st
 
 	for _, mountPath := range mountPaths {
 		if err := mount.Unmount(mountPath); err != nil {
-			logrus.WithError(err).WithField("container", container.ID).
+			log.G(ctx).WithError(err).WithField("container", container.ID).
 				Warn("Unable to unmount")
 		}
 	}
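
In functions with several logging call sites, such as NetworkMounts and DetachAndUnmount above, the conversion hoists a single ctx := context.TODO() at the top of the function rather than repeating context.TODO() at every call. A hedged sketch of that shape follows; checkPaths is a hypothetical helper, not taken from the diff.

package main

import (
	"context"
	"os"

	"github.com/containerd/containerd/log"
)

// checkPaths mirrors the hoisted-context shape: one placeholder context feeds
// every log call in the function, and only a single line needs to change once
// a real context parameter is threaded through.
func checkPaths(paths []string) {
	ctx := context.TODO()

	for _, p := range paths {
		if _, err := os.Stat(p); err != nil {
			log.G(ctx).Warnf("%q set, but can't stat this filename (err = %v); skipping", p, err)
		}
	}
}

func main() {
	checkPaths([]string{"/etc/resolv.conf", "/etc/hostname"})
}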

+ 3 - 2
container/exec.go

@@ -1,14 +1,15 @@
 package container // import "github.com/docker/docker/container"
 
 import (
+	"context"
 	"runtime"
 	"sync"
 
 	"github.com/containerd/containerd/cio"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/container/stream"
 	"github.com/docker/docker/libcontainerd/types"
 	"github.com/docker/docker/pkg/stringid"
-	"github.com/sirupsen/logrus"
 )
 
 // ExecConfig holds the configurations for execs. The Daemon keeps
@@ -55,7 +56,7 @@ func (c *ExecConfig) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
 	if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
 		if iop.Stdin != nil {
 			if err := iop.Stdin.Close(); err != nil {
-				logrus.Errorf("error closing exec stdin: %+v", err)
+				log.G(context.TODO()).Errorf("error closing exec stdin: %+v", err)
 			}
 		}
 	}

+ 5 - 4
container/health.go

@@ -1,10 +1,11 @@
 package container // import "github.com/docker/docker/container"
 
 import (
+	"context"
 	"sync"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types"
-	"github.com/sirupsen/logrus"
 )
 
 // Health holds the current container health-check state
@@ -59,7 +60,7 @@ func (s *Health) OpenMonitorChannel() chan struct{} {
 	defer s.mu.Unlock()
 
 	if s.stop == nil {
-		logrus.Debug("OpenMonitorChannel")
+		log.G(context.TODO()).Debug("OpenMonitorChannel")
 		s.stop = make(chan struct{})
 		return s.stop
 	}
@@ -72,11 +73,11 @@ func (s *Health) CloseMonitorChannel() {
 	defer s.mu.Unlock()
 
 	if s.stop != nil {
-		logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
+		log.G(context.TODO()).Debug("CloseMonitorChannel: waiting for probe to stop")
 		close(s.stop)
 		s.stop = nil
 		// unhealthy when the monitor has stopped for compatibility reasons
 		s.Health.Status = types.Unhealthy
-		logrus.Debug("CloseMonitorChannel done")
+		log.G(context.TODO()).Debug("CloseMonitorChannel done")
 	}
 }

+ 4 - 3
container/monitor.go

@@ -1,9 +1,10 @@
 package container // import "github.com/docker/docker/container"
 
 import (
+	"context"
 	"time"
 
-	"github.com/sirupsen/logrus"
+	"github.com/containerd/containerd/log"
 )
 
 const (
@@ -18,7 +19,7 @@ func (container *Container) Reset(lock bool) {
 	}
 
 	if err := container.CloseStreams(); err != nil {
-		logrus.Errorf("%s: %s", container.ID, err)
+		log.G(context.TODO()).Errorf("%s: %s", container.ID, err)
 	}
 
 	// Re-create a brand new stdin pipe once the container exited
@@ -38,7 +39,7 @@ func (container *Container) Reset(lock bool) {
 			defer timer.Stop()
 			select {
 			case <-timer.C:
-				logrus.Warn("Logger didn't exit in time: logs may be truncated")
+				log.G(context.TODO()).Warn("Logger didn't exit in time: logs may be truncated")
 			case <-exit:
 			}
 		}

+ 8 - 8
container/stream/attach.go

@@ -4,10 +4,10 @@ import (
 	"context"
 	"io"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/moby/term"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -63,8 +63,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 	// Connect stdin of container to the attach stdin stream.
 	if cfg.Stdin != nil {
 		group.Go(func() error {
-			logrus.Debug("attach: stdin: begin")
-			defer logrus.Debug("attach: stdin: end")
+			log.G(ctx).Debug("attach: stdin: begin")
+			defer log.G(ctx).Debug("attach: stdin: end")
 
 			defer func() {
 				if cfg.CloseStdin && !cfg.TTY {
@@ -90,7 +90,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 				err = nil
 			}
 			if err != nil {
-				logrus.WithError(err).Debug("error on attach stdin")
+				log.G(ctx).WithError(err).Debug("error on attach stdin")
 				return errors.Wrap(err, "error on attach stdin")
 			}
 			return nil
@@ -98,8 +98,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 	}
 
 	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error {
-		logrus.Debugf("attach: %s: begin", name)
-		defer logrus.Debugf("attach: %s: end", name)
+		log.G(ctx).Debugf("attach: %s: begin", name)
+		defer log.G(ctx).Debugf("attach: %s: end", name)
 		defer func() {
 			// Make sure stdin gets closed
 			if cfg.Stdin != nil {
@@ -113,7 +113,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 			err = nil
 		}
 		if err != nil {
-			logrus.WithError(err).Debugf("attach: %s", name)
+			log.G(ctx).WithError(err).Debugf("attach: %s", name)
 			return errors.Wrapf(err, "error attaching %s stream", name)
 		}
 		return nil
@@ -132,7 +132,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 
 	errs := make(chan error, 1)
 	go func() {
-		defer logrus.Debug("attach done")
+		defer log.G(ctx).Debug("attach done")
 		groupErr := make(chan error, 1)
 		go func() {
 			groupErr <- group.Wait()
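
CopyStreams already receives a context, so these hunks pass it straight to log.G(ctx), which means any logger fields the caller attaches to the context travel with every attach log line. A minimal sketch of that flow, assuming log.WithLogger(ctx, entry) stores a logrus entry on the context for log.G to return later; copyStream and the field values are hypothetical.

package main

import (
	"context"

	"github.com/containerd/containerd/log"
)

// copyStream is a stand-in for the attach copy loop; it only logs, but every
// line inherits whatever fields the caller attached to ctx.
func copyStream(ctx context.Context, name string) {
	log.G(ctx).Infof("attach: %s: begin", name)
	defer log.G(ctx).Infof("attach: %s: end", name)
}

func main() {
	// Attach a container-scoped logger to the context once...
	ctx := log.WithLogger(context.Background(), log.G(context.Background()).WithField("container", "abc123"))

	// ...and every call receiving ctx logs with container=abc123.
	copyStream(ctx, "stdout")
	copyStream(ctx, "stderr")
}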

+ 5 - 3
container/stream/streams.go

@@ -8,10 +8,10 @@ import (
 	"sync"
 
 	"github.com/containerd/containerd/cio"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/pkg/broadcaster"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/pools"
-	"github.com/sirupsen/logrus"
 )
 
 // Config holds information about I/O streams managed together.
@@ -116,12 +116,14 @@ func (c *Config) CloseStreams() error {
 
 // CopyToPipe connects streamconfig with a libcontainerd.IOPipe
 func (c *Config) CopyToPipe(iop *cio.DirectIO) {
+	ctx := context.TODO()
+
 	c.dio = iop
 	copyFunc := func(w io.Writer, r io.ReadCloser) {
 		c.wg.Add(1)
 		go func() {
 			if _, err := pools.Copy(w, r); err != nil {
-				logrus.Errorf("stream copy error: %v", err)
+				log.G(ctx).Errorf("stream copy error: %v", err)
 			}
 			r.Close()
 			c.wg.Done()
@@ -140,7 +142,7 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) {
 			go func() {
 				pools.Copy(iop.Stdin, stdin)
 				if err := iop.Stdin.Close(); err != nil {
-					logrus.Warnf("failed to close stdin: %v", err)
+					log.G(ctx).Warnf("failed to close stdin: %v", err)
 				}
 			}()
 		}

+ 4 - 3
container/view.go

@@ -2,17 +2,18 @@ package container // import "github.com/docker/docker/container"
 
 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/go-connections/nat"
 	memdb "github.com/hashicorp/go-memdb"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -387,7 +388,7 @@ func (v *View) transform(container *Container) *Snapshot {
 		for port, bindings := range container.NetworkSettings.Ports {
 			p, err := nat.ParsePort(port.Port())
 			if err != nil {
-				logrus.WithError(err).Warn("invalid port map")
+				log.G(context.TODO()).WithError(err).Warn("invalid port map")
 				continue
 			}
 			if len(bindings) == 0 {
@@ -400,7 +401,7 @@ func (v *View) transform(container *Container) *Snapshot {
 			for _, binding := range bindings {
 				h, err := nat.ParsePort(binding.HostPort)
 				if err != nil {
-					logrus.WithError(err).Warn("invalid host port map")
+					log.G(context.TODO()).WithError(err).Warn("invalid host port map")
 					continue
 				}
 				snapshot.Ports = append(snapshot.Ports, types.Port{

+ 5 - 5
daemon/attach.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/container/stream"
@@ -13,7 +14,6 @@ import (
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/moby/term"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig.
@@ -115,7 +115,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
 		if logCreated {
 			defer func() {
 				if err = logDriver.Close(); err != nil {
-					logrus.Errorf("Error closing logger: %v", err)
+					log.G(context.TODO()).Errorf("Error closing logger: %v", err)
 				}
 			}()
 		}
@@ -140,7 +140,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
 					cfg.Stderr.Write(msg.Line)
 				}
 			case err := <-logs.Err:
-				logrus.Errorf("Error streaming logs: %v", err)
+				log.G(context.TODO()).Errorf("Error streaming logs: %v", err)
 				break LogLoop
 			}
 		}
@@ -156,7 +156,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
 		r, w := io.Pipe()
 		go func(stdin io.ReadCloser) {
 			defer w.Close()
-			defer logrus.Debug("Closing buffered stdin pipe")
+			defer log.G(context.TODO()).Debug("Closing buffered stdin pipe")
 			io.Copy(w, stdin)
 		}(cfg.Stdin)
 		cfg.Stdin = r
@@ -181,7 +181,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
 		if errors.Is(err, context.Canceled) || errors.As(err, &ierr) {
 			daemon.LogContainerEvent(c, "detach")
 		} else {
-			logrus.Errorf("attach failed with error: %v", err)
+			log.G(ctx).Errorf("attach failed with error: %v", err)
 		}
 	}
 

+ 4 - 3
daemon/cdi.go

@@ -1,14 +1,15 @@
 package daemon
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/errdefs"
 	"github.com/hashicorp/go-multierror"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 type cdiHandler struct {
@@ -20,7 +21,7 @@ type cdiHandler struct {
 func RegisterCDIDriver(opts ...cdi.Option) {
 	cache, err := cdi.NewCache(opts...)
 	if err != nil {
-		logrus.WithError(err).Error("CDI registry initialization failed")
+		log.G(context.TODO()).WithError(err).Error("CDI registry initialization failed")
 		// We create a spec updater that always returns an error.
 		// This error will be returned only when a CDI device is requested.
 		// This ensures that daemon startup is not blocked by a CDI registry initialization failure.
@@ -66,7 +67,7 @@ func (c *cdiHandler) injectCDIDevices(s *specs.Spec, dev *deviceInstance) error
 			// We log the errors that may have been generated while refreshing the CDI registry.
 			// These may be due to malformed specifications or device name conflicts that could be
 			// the cause of an injection failure.
-			logrus.WithError(rerrs).Warning("Refreshing the CDI registry generated errors")
+			log.G(context.TODO()).WithError(rerrs).Warning("Refreshing the CDI registry generated errors")
 		}
 
 		return fmt.Errorf("CDI device injection failed: %w", err)

+ 5 - 5
daemon/cluster/cluster.go

@@ -49,6 +49,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types/network"
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/controllers/plugin"
@@ -58,7 +59,6 @@ import (
 	swarmapi "github.com/moby/swarmkit/v2/api"
 	swarmnode "github.com/moby/swarmkit/v2/node"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 )
 
@@ -193,10 +193,10 @@ func (c *Cluster) Start() error {
 
 	select {
 	case <-timer.C:
-		logrus.Error("swarm component could not be started before timeout was reached")
+		log.G(context.TODO()).Error("swarm component could not be started before timeout was reached")
 	case err := <-nr.Ready():
 		if err != nil {
-			logrus.WithError(err).Error("swarm component could not be started")
+			log.G(context.TODO()).WithError(err).Error("swarm component could not be started")
 			return nil
 		}
 	}
@@ -386,13 +386,13 @@ func (c *Cluster) Cleanup() {
 		if err == nil {
 			singlenode := active && isLastManager(reachable, unreachable)
 			if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) {
-				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
+				log.G(context.TODO()).Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
 			}
 		}
 	}
 
 	if err := node.Stop(); err != nil {
-		logrus.Errorf("failed to shut down cluster node: %v", err)
+		log.G(context.TODO()).Errorf("failed to shut down cluster node: %v", err)
 		stack.Dump()
 	}
 

+ 2 - 1
daemon/cluster/controllers/plugin/controller.go

@@ -5,6 +5,7 @@ import (
 	"io"
 	"net/http"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/registry"
@@ -61,7 +62,7 @@ func NewController(backend Backend, t *api.Task) (*Controller, error) {
 		backend:   backend,
 		spec:      spec,
 		serviceID: t.ServiceID,
-		logger: logrus.WithFields(logrus.Fields{
+		logger: log.G(context.TODO()).WithFields(logrus.Fields{
 			"controller": "plugin",
 			"task":       t.ID,
 			"plugin":     spec.Name,

+ 4 - 3
daemon/cluster/convert/container.go

@@ -1,9 +1,11 @@
 package convert // import "github.com/docker/docker/daemon/cluster/convert"
 
 import (
+	"context"
 	"fmt"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	types "github.com/docker/docker/api/types/swarm"
@@ -11,7 +13,6 @@ import (
 	gogotypes "github.com/gogo/protobuf/types"
 	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec {
@@ -168,7 +169,7 @@ func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretRef
 		target := s.GetFile()
 		if target == nil {
 			// not a file target
-			logrus.Warnf("secret target not a file: secret=%s", s.SecretID)
+			log.G(context.TODO()).Warnf("secret target not a file: secret=%s", s.SecretID)
 			continue
 		}
 		refs = append(refs, &types.SecretReference{
@@ -240,7 +241,7 @@ func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigRef
 			}
 		} else {
 			// not a file target
-			logrus.Warnf("config target not known: config=%s", s.ConfigID)
+			log.G(context.TODO()).Warnf("config target not known: config=%s", s.ConfigID)
 			continue
 		}
 		refs = append(refs, r)

+ 1 - 1
daemon/cluster/executor/container/adapter.go

@@ -95,7 +95,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error {
 	authConfig := &registry.AuthConfig{}
 	if encodedAuthConfig != "" {
 		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
-			logrus.Warnf("invalid authconfig: %v", err)
+			log.G(ctx).Warnf("invalid authconfig: %v", err)
 		}
 	}
 

+ 3 - 2
daemon/cluster/executor/container/container.go

@@ -1,12 +1,14 @@
 package container // import "github.com/docker/docker/daemon/cluster/executor/container"
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
 	"strconv"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	enginecontainer "github.com/docker/docker/api/types/container"
@@ -26,7 +28,6 @@ import (
 	"github.com/moby/swarmkit/v2/api"
 	"github.com/moby/swarmkit/v2/api/genericresource"
 	"github.com/moby/swarmkit/v2/template"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -594,7 +595,7 @@ func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
 		return nil
 	}
 
-	logrus.Debugf("Creating service config in agent for t = %+v", c.task)
+	log.G(context.TODO()).Debugf("Creating service config in agent for t = %+v", c.task)
 	svcCfg := &clustertypes.ServiceConfig{
 		Name:             c.task.ServiceAnnotations.Name,
 		Aliases:          make(map[string][]string),

+ 3 - 3
daemon/cluster/executor/container/executor.go

@@ -160,7 +160,7 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error {
 		if na == nil || na.Network == nil || len(na.Addresses) == 0 {
 			// this should not happen, but we got a panic here and don't have a
 			// good idea about what the underlying data structure looks like.
-			logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
+			log.G(ctx).WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
 				Warnf("skipping nil or malformed node network attachment entry")
 			continue
 		}
@@ -192,7 +192,7 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error {
 			// same thing as above, check sanity of the attachments so we don't
 			// get a panic.
 			if na == nil || na.Network == nil || len(na.Addresses) == 0 {
-				logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
+				log.G(ctx).WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
 					Warnf("skipping nil or malformed node network attachment entry")
 				continue
 			}
@@ -301,7 +301,7 @@ func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
 	var ctlr exec.Controller
 	switch r := t.Spec.GetRuntime().(type) {
 	case *api.TaskSpec_Generic:
-		logrus.WithFields(logrus.Fields{
+		log.G(context.TODO()).WithFields(logrus.Fields{
 			"kind":     r.Generic.Kind,
 			"type_url": r.Generic.Payload.TypeUrl,
 		}).Debug("custom runtime requested")

+ 5 - 5
daemon/cluster/networks.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/containerd/containerd/log"
 	apitypes "github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/network"
@@ -14,7 +15,6 @@ import (
 	"github.com/docker/docker/runconfig"
 	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // GetNetworks returns all current cluster managed networks.
@@ -127,7 +127,7 @@ func (c *Cluster) UpdateAttachment(target, containerID string, config *network.N
 		return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target)
 	}
 	if attacher.inProgress {
-		logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID)
+		log.G(context.TODO()).Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID)
 		c.mu.Unlock()
 		return nil
 	}
@@ -219,13 +219,13 @@ func (c *Cluster) AttachNetwork(target string, containerID string, addresses []s
 	close(attachCompleteCh)
 	c.mu.Unlock()
 
-	logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID)
+	log.G(ctx).Debugf("Successfully attached to network %s with task id %s", target, taskID)
 
 	release := func() {
 		ctx, cancel := c.getRequestContext()
 		defer cancel()
 		if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil {
-			logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v",
+			log.G(ctx).Errorf("Failed remove network attachment %s to network %s on allocation failure: %v",
 				taskID, target, err)
 		}
 	}
@@ -242,7 +242,7 @@ func (c *Cluster) AttachNetwork(target string, containerID string, addresses []s
 	c.attachers[aKey].config = config
 	c.mu.Unlock()
 
-	logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID)
+	log.G(ctx).Debugf("Successfully allocated resources on network %s for task id %s", target, taskID)
 
 	return config, nil
 }

+ 5 - 5
daemon/cluster/noderunner.go

@@ -8,6 +8,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/executor/container"
 	lncluster "github.com/docker/docker/libnetwork/cluster"
@@ -15,7 +16,6 @@ import (
 	swarmallocator "github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
 	swarmnode "github.com/moby/swarmkit/v2/node"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -231,7 +231,7 @@ func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientCo
 		IncludeOldObject: true,
 	})
 	if err != nil {
-		logrus.WithError(err).Error("failed to watch cluster store")
+		log.G(ctx).WithError(err).Error("failed to watch cluster store")
 		return
 	}
 	for {
@@ -240,7 +240,7 @@ func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientCo
 			// store watch is broken
 			errStatus, ok := status.FromError(err)
 			if !ok || errStatus.Code() != codes.Canceled {
-				logrus.WithError(err).Error("failed to receive changes from store watch API")
+				log.G(ctx).WithError(err).Error("failed to receive changes from store watch API")
 			}
 			return
 		}
@@ -271,7 +271,7 @@ func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node,
 func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
 	err := detectLockedError(node.Err(context.Background()))
 	if err != nil {
-		logrus.Errorf("cluster exited with error: %v", err)
+		log.G(context.TODO()).Errorf("cluster exited with error: %v", err)
 	}
 	n.mu.Lock()
 	n.swarmNode = nil
@@ -352,7 +352,7 @@ func (n *nodeRunner) enableReconnectWatcher() {
 	if n.reconnectDelay > maxReconnectDelay {
 		n.reconnectDelay = maxReconnectDelay
 	}
-	logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
+	log.G(context.TODO()).Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
 	delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
 	n.cancelReconnect = cancel
 

+ 9 - 9
daemon/cluster/services.go

@@ -11,6 +11,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
@@ -23,7 +24,6 @@ import (
 	gogotypes "github.com/gogo/protobuf/types"
 	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 )
 
@@ -234,7 +234,7 @@ func (c *Cluster) CreateService(s swarm.ServiceSpec, encodedAuth string, queryRe
 				authReader := strings.NewReader(encodedAuth)
 				dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
 				if err := dec.Decode(authConfig); err != nil {
-					logrus.Warnf("invalid authconfig: %v", err)
+					log.G(ctx).Warnf("invalid authconfig: %v", err)
 				}
 			}
 
@@ -245,14 +245,14 @@ func (c *Cluster) CreateService(s swarm.ServiceSpec, encodedAuth string, queryRe
 			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
 				digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
 				if err != nil {
-					logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
+					log.G(ctx).Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
 					// warning in the client response should be concise
 					resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
 				} else if ctnr.Image != digestImage {
-					logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
+					log.G(ctx).Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
 					ctnr.Image = digestImage
 				} else {
-					logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
+					log.G(ctx).Debugf("creating service using supplied digest reference %s", ctnr.Image)
 				}
 
 				// Replace the context with a fresh one.
@@ -349,7 +349,7 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec swa
 			authConfig := &registry.AuthConfig{}
 			if encodedAuth != "" {
 				if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
-					logrus.Warnf("invalid authconfig: %v", err)
+					log.G(ctx).Warnf("invalid authconfig: %v", err)
 				}
 			}
 
@@ -360,14 +360,14 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec swa
 			if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
 				digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
 				if err != nil {
-					logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
+					log.G(ctx).Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
 					// warning in the client response should be concise
 					resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
 				} else if newCtnr.Image != digestImage {
-					logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
+					log.G(ctx).Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
 					newCtnr.Image = digestImage
 				} else {
-					logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
+					log.G(ctx).Debugf("updating service using supplied digest reference %s", newCtnr.Image)
 				}
 
 				// Replace the context with a fresh one.

+ 4 - 4
daemon/cluster/swarm.go

@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	apitypes "github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	types "github.com/docker/docker/api/types/swarm"
@@ -18,7 +19,6 @@ import (
 	"github.com/moby/swarmkit/v2/manager/encryption"
 	swarmnode "github.com/moby/swarmkit/v2/node"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 )
 
@@ -87,7 +87,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
 		if !found {
 			ip, err := c.resolveSystemAddr()
 			if err != nil {
-				logrus.Warnf("Could not find a local address: %v", err)
+				log.G(context.TODO()).Warnf("Could not find a local address: %v", err)
 				return "", errMustSpecifyListenAddr
 			}
 			localAddr = ip.String()
@@ -398,7 +398,7 @@ func (c *Cluster) Leave(ctx context.Context, force bool) error {
 	}
 	// release readers in here
 	if err := nr.Stop(); err != nil {
-		logrus.Errorf("failed to shut down cluster node: %v", err)
+		log.G(ctx).Errorf("failed to shut down cluster node: %v", err)
 		stack.Dump()
 		return err
 	}
@@ -414,7 +414,7 @@ func (c *Cluster) Leave(ctx context.Context, force bool) error {
 		}
 		for _, id := range nodeContainers {
 			if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
-				logrus.Errorf("error removing %v: %v", id, err)
+				log.G(ctx).Errorf("error removing %v: %v", id, err)
 			}
 		}
 	}

+ 3 - 1
daemon/config/config.go

@@ -2,6 +2,7 @@ package config // import "github.com/docker/docker/daemon/config"
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"net"
@@ -14,6 +15,7 @@ import (
 	"golang.org/x/text/transform"
 
 	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/registry"
 	"github.com/imdario/mergo"
@@ -324,7 +326,7 @@ func GetConflictFreeLabels(labels []string) ([]string, error) {
 
 // Reload reads the configuration in the host and reloads the daemon and server.
 func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
-	logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
+	log.G(context.TODO()).Infof("Got signal to reload configuration, reloading from: %s", configFile)
 	newConfig, err := getConflictFreeConfiguration(configFile, flags)
 	if err != nil {
 		if flags.Changed("config-file") || !os.IsNotExist(err) {

+ 4 - 2
daemon/configs.go

@@ -1,14 +1,16 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
+
+	"github.com/containerd/containerd/log"
 	swarmtypes "github.com/docker/docker/api/types/swarm"
-	"github.com/sirupsen/logrus"
 )
 
 // SetContainerConfigReferences sets the container config references needed
 func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error {
 	if !configsSupported() && len(refs) > 0 {
-		logrus.Warn("configs are not supported on this platform")
+		log.G(context.TODO()).Warn("configs are not supported on this platform")
 		return nil
 	}
 

+ 4 - 3
daemon/container.go

@@ -1,12 +1,14 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
 	"runtime"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/container"
@@ -23,7 +25,6 @@ import (
 	"github.com/moby/sys/signal"
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // GetContainer looks for a container using the provided information, which could be
@@ -59,7 +60,7 @@ func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, e
 		// or consistent w.r.t. the live daemon.containers Store so
 		// while reaching this code path may be indicative of a bug,
 		// it is not _necessarily_ the case.
-		logrus.WithField("prefixOrName", prefixOrName).
+		log.G(context.TODO()).WithField("prefixOrName", prefixOrName).
 			WithField("id", containerID).
 			Debugf("daemon.GetContainer: container is known to daemon.containersReplica but not daemon.containers")
 		return nil, containerNotFound(prefixOrName)
@@ -247,7 +248,7 @@ func (daemon *Daemon) verifyContainerSettings(daemonCfg *configStore, hostConfig
 	// Now do platform-specific verification
 	warnings, err = verifyPlatformContainerSettings(daemon, daemonCfg, hostConfig, update)
 	for _, w := range warnings {
-		logrus.Warn(w)
+		log.G(context.TODO()).Warn(w)
 	}
 	return warnings, err
 }

+ 11 - 10
daemon/container_operations.go

@@ -1,6 +1,7 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
@@ -9,6 +10,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	containertypes "github.com/docker/docker/api/types/container"
 	networktypes "github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/container"
@@ -24,7 +26,6 @@ import (
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/go-connections/nat"
-	"github.com/sirupsen/logrus"
 )
 
 func (daemon *Daemon) getDNSSearchSettings(cfg *config.Config, container *container.Container) []string {
@@ -224,7 +225,7 @@ func (daemon *Daemon) buildSandboxOptions(cfg *config.Config, container *contain
 		}
 
 		_, alias = path.Split(alias)
-		logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress)
+		log.G(context.TODO()).Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress)
 		sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(
 			parent.ID,
 			alias,
@@ -415,7 +416,7 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN
 		if err != nil {
 			if daemon.clusterProvider != nil {
 				if err := daemon.clusterProvider.DetachNetwork(id, container.ID); err != nil {
-					logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err)
+					log.G(context.TODO()).Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err)
 				}
 			}
 
@@ -532,7 +533,7 @@ func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.C
 
 	// Cleanup any stale sandbox left over due to ungraceful daemon shutdown
 	if err := controller.SandboxDestroy(container.ID); err != nil {
-		logrus.WithError(err).Errorf("failed to cleanup up stale network sandbox for container %s", container.ID)
+		log.G(context.TODO()).WithError(err).Errorf("failed to cleanup up stale network sandbox for container %s", container.ID)
 	}
 
 	if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() {
@@ -780,7 +781,7 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
 	defer func() {
 		if err != nil {
 			if e := ep.Delete(false); e != nil {
-				logrus.Warnf("Could not rollback container connection to network %s", idOrName)
+				log.G(context.TODO()).Warnf("Could not rollback container connection to network %s", idOrName)
 			}
 		}
 	}()
@@ -935,9 +936,9 @@ func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n li
 func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) {
 	if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed {
 		if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil {
-			logrus.Warnf("error detaching from network %s: %v", network.Name(), err)
+			log.G(context.TODO()).Warnf("error detaching from network %s: %v", network.Name(), err)
 			if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil {
-				logrus.Warnf("error detaching from network %s: %v", network.ID(), err)
+				log.G(context.TODO()).Warnf("error detaching from network %s: %v", network.ID(), err)
 			}
 		}
 	}
@@ -1033,12 +1034,12 @@ func (daemon *Daemon) releaseNetwork(container *container.Container) {
 
 	sb, err := daemon.netController.SandboxByID(sid)
 	if err != nil {
-		logrus.Warnf("error locating sandbox id %s: %v", sid, err)
+		log.G(context.TODO()).Warnf("error locating sandbox id %s: %v", sid, err)
 		return
 	}
 
 	if err := sb.Delete(); err != nil {
-		logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
+		log.G(context.TODO()).Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
 	}
 
 	for _, nw := range networks {
@@ -1149,7 +1150,7 @@ func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) er
 	sb := daemon.getNetworkSandbox(ctr)
 	if sb == nil {
 		// If the network sandbox is not found, then there is nothing to deactivate
-		logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName)
+		log.G(context.TODO()).Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName)
 		return nil
 	}
 	return sb.DisableService()

+ 12 - 10
daemon/container_operations_unix.go

@@ -3,12 +3,14 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
 	"strconv"
 	"syscall"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/links"
@@ -184,7 +186,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 	for _, s := range c.SecretReferences {
 		// TODO (ehazlett): use type switch when more are supported
 		if s.File == nil {
-			logrus.Error("secret target type is not a file target")
+			log.G(context.TODO()).Error("secret target type is not a file target")
 			continue
 		}
 
@@ -198,7 +200,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 			return errors.Wrap(err, "error creating secret mount path")
 		}
 
-		logrus.WithFields(logrus.Fields{
+		log.G(context.TODO()).WithFields(logrus.Fields{
 			"name": s.File.Name,
 			"path": fPath,
 		}).Debug("injecting secret")
@@ -234,7 +236,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 			// a valid type of config so we should not error when we encounter
 			// one.
 			if configRef.Runtime == nil {
-				logrus.Error("config target type is not a file or runtime target")
+				log.G(context.TODO()).Error("config target type is not a file or runtime target")
 			}
 			// However, in any case, this isn't a file config, so we have no
 			// further work to do
@@ -249,7 +251,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 			return errors.Wrap(err, "error creating config mount path")
 		}
 
-		logrus.WithFields(logrus.Fields{
+		log.G(context.TODO()).WithFields(logrus.Fields{
 			"name": configRef.File.Name,
 			"path": fPath,
 		}).Debug("injecting config")
@@ -309,7 +311,7 @@ func (daemon *Daemon) remountSecretDir(c *container.Container) error {
 		return errors.Wrap(err, "error getting container secrets path")
 	}
 	if err := label.Relabel(dir, c.MountLabel, false); err != nil {
-		logrus.WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label")
+		log.G(context.TODO()).WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label")
 	}
 	rootIDs := daemon.idMapping.RootPair()
 	tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID)
@@ -325,13 +327,13 @@ func (daemon *Daemon) remountSecretDir(c *container.Container) error {
 func (daemon *Daemon) cleanupSecretDir(c *container.Container) {
 	dir, err := c.SecretMountPath()
 	if err != nil {
-		logrus.WithError(err).WithField("container", c.ID).Warn("error getting secrets mount path for container")
+		log.G(context.TODO()).WithError(err).WithField("container", c.ID).Warn("error getting secrets mount path for container")
 	}
 	if err := mount.RecursiveUnmount(dir); err != nil {
-		logrus.WithField("dir", dir).WithError(err).Warn("Error while attempting to unmount dir, this may prevent removal of container.")
+		log.G(context.TODO()).WithField("dir", dir).WithError(err).Warn("Error while attempting to unmount dir, this may prevent removal of container.")
 	}
 	if err := os.RemoveAll(dir); err != nil {
-		logrus.WithField("dir", dir).WithError(err).Error("Error removing dir.")
+		log.G(context.TODO()).WithField("dir", dir).WithError(err).Error("Error removing dir.")
 	}
 }
 
@@ -347,7 +349,7 @@ func killProcessDirectly(container *container.Container) error {
 			return errdefs.System(err)
 		}
 		err = errNoSuchProcess{pid, syscall.SIGKILL}
-		logrus.WithError(err).WithField("container", container.ID).Debug("no such process")
+		log.G(context.TODO()).WithError(err).WithField("container", container.ID).Debug("no such process")
 		return err
 	}
 
@@ -356,7 +358,7 @@ func killProcessDirectly(container *container.Container) error {
 		// Since we can not kill a zombie pid, add zombie check here
 		isZombie, err := process.Zombie(pid)
 		if err != nil {
-			logrus.WithError(err).WithField("container", container.ID).Warn("Container state is invalid")
+			log.G(context.TODO()).WithError(err).WithField("container", container.ID).Warn("Container state is invalid")
 			return err
 		}
 		if isZombie {

+ 10 - 8
daemon/container_operations_windows.go

@@ -1,9 +1,11 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
 	"fmt"
 	"os"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/libnetwork"
@@ -22,7 +24,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) {
 	}
 
 	localPath := c.ConfigsDirPath()
-	logrus.Debugf("configs: setting up config dir: %s", localPath)
+	log.G(context.TODO()).Debugf("configs: setting up config dir: %s", localPath)
 
 	// create local config root
 	if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil {
@@ -32,7 +34,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) {
 	defer func() {
 		if setupErr != nil {
 			if err := os.RemoveAll(localPath); err != nil {
-				logrus.Errorf("error cleaning up config dir: %s", err)
+				log.G(context.TODO()).Errorf("error cleaning up config dir: %s", err)
 			}
 		}
 	}()
@@ -48,7 +50,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) {
 			// a valid type of config so we should not error when we encounter
 			// one.
 			if configRef.Runtime == nil {
-				logrus.Error("config target type is not a file or runtime target")
+				log.G(context.TODO()).Error("config target type is not a file or runtime target")
 			}
 			// However, in any case, this isn't a file config, so we have no
 			// further work to do
@@ -59,7 +61,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) {
 		if err != nil {
 			return errors.Wrap(err, "error getting config file path for container")
 		}
-		log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath})
+		log := log.G(context.TODO()).WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath})
 
 		log.Debug("injecting config")
 		config, err := c.DependencyStore.Configs().Get(configRef.ConfigID)
@@ -97,7 +99,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 	if err != nil {
 		return err
 	}
-	logrus.Debugf("secrets: setting up secret dir: %s", localMountPath)
+	log.G(context.TODO()).Debugf("secrets: setting up secret dir: %s", localMountPath)
 
 	// create local secret root
 	if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil {
@@ -107,7 +109,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 	defer func() {
 		if setupErr != nil {
 			if err := os.RemoveAll(localMountPath); err != nil {
-				logrus.Errorf("error cleaning up secret mount: %s", err)
+				log.G(context.TODO()).Errorf("error cleaning up secret mount: %s", err)
 			}
 		}
 	}()
@@ -119,7 +121,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 	for _, s := range c.SecretReferences {
 		// TODO (ehazlett): use type switch when more are supported
 		if s.File == nil {
-			logrus.Error("secret target type is not a file target")
+			log.G(context.TODO()).Error("secret target type is not a file target")
 			continue
 		}
 
@@ -129,7 +131,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 		if err != nil {
 			return err
 		}
-		logrus.WithFields(logrus.Fields{
+		log.G(context.TODO()).WithFields(logrus.Fields{
 			"name": s.File.Name,
 			"path": fPath,
 		}).Debug("injecting secret")
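
One detail in the setupConfigDir hunk above: the local variable is literally named log, so after the assignment it shadows the imported package for the rest of that scope, and the subsequent log.Debug call goes through the entry rather than the package. The sketch below reproduces that shadowing in isolation; the field value is hypothetical and this is not part of the diff.

package main

import (
	"context"

	"github.com/containerd/containerd/log"
)

func main() {
	// On the right-hand side of := the identifier "log" still names the
	// imported package; afterwards the new local variable shadows it for the
	// rest of this scope, so the next call is a method on the entry.
	log := log.G(context.TODO()).WithField("name", "example-config") // hypothetical field value
	log.Debug("injecting config")
}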

+ 4 - 4
daemon/containerd/image.go

@@ -12,6 +12,7 @@ import (
 	"github.com/containerd/containerd/content"
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	containerdimages "github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	cplatforms "github.com/containerd/containerd/platforms"
 	"github.com/docker/distribution/reference"
 	containertypes "github.com/docker/docker/api/types/container"
@@ -25,7 +26,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/semaphore"
 )
 
@@ -113,7 +113,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
 					// This is unexpected - dangling image should be deleted
 					// as soon as another image with the same target is created.
 					// Log a warning, but don't error out the whole operation.
-					logrus.WithField("refs", tagged).Warn("multiple images have the same target, but one of them is still dangling")
+					log.G(ctx).WithField("refs", tagged).Warn("multiple images have the same target, but one of them is still dangling")
 				}
 				continue
 			}
@@ -122,7 +122,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
 			if err != nil {
 				// This is inconsistent with `docker image ls` which will
 				// still include the malformed name in RepoTags.
-				logrus.WithField("name", name).WithError(err).Error("failed to parse image name as reference")
+				log.G(ctx).WithField("name", name).WithError(err).Error("failed to parse image name as reference")
 				continue
 			}
 			refs = append(refs, name)
@@ -132,7 +132,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
 				// This could only happen if digest is invalid, but considering that
 				// we get it from the Descriptor it's highly unlikely.
 				// Log error just in case.
-				logrus.WithError(err).Error("failed to create digested reference")
+				log.G(ctx).WithError(err).Error("failed to create digested reference")
 				continue
 			}
 			refs = append(refs, digested)

+ 6 - 6
daemon/containerd/image_builder.go

@@ -22,6 +22,7 @@ import (
 
 	// "github.com/docker/docker/api/types/container"
 	containerdimages "github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/errdefs"
@@ -34,7 +35,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/sirupsen/logrus"
 )
 
 // GetImageAndReleasableLayer returns an image and releaseable layer for a
@@ -154,7 +154,7 @@ This is most likely caused by a bug in the build system that created the fetched
 Please notify the image author to correct the configuration.`,
 					platforms.Format(p), platforms.Format(imgPlat), name,
 				)
-				logrus.WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to an image whose configuration does not match the platform in the manifest.")
+				log.G(ctx).WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to an image whose configuration does not match the platform in the manifest.")
 			}
 		} else {
 			return nil, err
@@ -257,11 +257,11 @@ func (rl *rolayer) Release() error {
 		return nil
 	}
 	if err := mount.UnmountAll(rl.root, 0); err != nil {
-		logrus.WithError(err).WithField("root", rl.root).Error("failed to unmount ROLayer")
+		log.G(context.TODO()).WithError(err).WithField("root", rl.root).Error("failed to unmount ROLayer")
 		return err
 	}
 	if err := os.Remove(rl.root); err != nil {
-		logrus.WithError(err).WithField("dir", rl.root).Error("failed to remove mount temp dir")
+		log.G(context.TODO()).WithError(err).WithField("dir", rl.root).Error("failed to remove mount temp dir")
 		return err
 	}
 	rl.root = ""
@@ -370,11 +370,11 @@ func (rw *rwlayer) Release() error {
 		return nil
 	}
 	if err := mount.UnmountAll(rw.root, 0); err != nil {
-		logrus.WithError(err).WithField("root", rw.root).Error("failed to unmount ROLayer")
+		log.G(context.TODO()).WithError(err).WithField("root", rw.root).Error("failed to unmount ROLayer")
 		return err
 	}
 	if err := os.Remove(rw.root); err != nil {
-		logrus.WithError(err).WithField("dir", rw.root).Error("failed to remove mount temp dir")
+		log.G(context.TODO()).WithError(err).WithField("dir", rw.root).Error("failed to remove mount temp dir")
 		return err
 	}
 	rw.root = ""

+ 2 - 2
daemon/containerd/image_changes.go

@@ -5,13 +5,13 @@ import (
 	"encoding/json"
 
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/google/uuid"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/sirupsen/logrus"
 )
 
 func (i *ImageService) Changes(ctx context.Context, container *container.Container) ([]archive.Change, error) {
@@ -54,7 +54,7 @@ func (i *ImageService) Changes(ctx context.Context, container *container.Contain
 	}
 	defer func() {
 		if err := snapshotter.Remove(ctx, rnd.String()); err != nil {
-			logrus.WithError(err).WithField("key", rnd.String()).Warn("remove temporary snapshot")
+			log.G(ctx).WithError(err).WithField("key", rnd.String()).Warn("remove temporary snapshot")
 		}
 	}()
 

+ 3 - 2
daemon/containerd/image_children.go

@@ -6,6 +6,7 @@ import (
 	"github.com/containerd/containerd/content"
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	containerdimages "github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/image"
@@ -35,7 +36,7 @@ func (i *ImageService) Children(ctx context.Context, id image.ID) ([]image.ID, e
 		rootfs, err := platformRootfs(ctx, cs, target, platform)
 		if err != nil {
 			if !cerrdefs.IsNotFound(err) {
-				logrus.WithFields(logrus.Fields{
+				log.G(ctx).WithFields(logrus.Fields{
 					logrus.ErrorKey: err,
 					"image":         target.Digest,
 					"platform":      platform,
@@ -59,7 +60,7 @@ func (i *ImageService) Children(ctx context.Context, id image.ID) ([]image.ID, e
 			rootfs, err := platformRootfs(ctx, cs, img.Target, platform)
 			if err != nil {
 				if !cerrdefs.IsNotFound(err) {
-					logrus.WithFields(logrus.Fields{
+					log.G(ctx).WithFields(logrus.Fields{
 						logrus.ErrorKey: err,
 						"image":         img.Target.Digest,
 						"platform":      platform,

+ 5 - 5
daemon/containerd/image_commit.go

@@ -16,6 +16,7 @@ import (
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/docker/docker/api/types/backend"
@@ -24,7 +25,6 @@ import (
 	"github.com/opencontainers/image-spec/identity"
 	"github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/sirupsen/logrus"
 )
 
 /*
@@ -122,14 +122,14 @@ func generateCommitImageConfig(baseConfig ocispec.Image, diffID digest.Digest, o
 	arch := baseConfig.Architecture
 	if arch == "" {
 		arch = runtime.GOARCH
-		logrus.Warnf("assuming arch=%q", arch)
+		log.G(context.TODO()).Warnf("assuming arch=%q", arch)
 	}
 	os := baseConfig.OS
 	if os == "" {
 		os = runtime.GOOS
-		logrus.Warnf("assuming os=%q", os)
+		log.G(context.TODO()).Warnf("assuming os=%q", os)
 	}
-	logrus.Debugf("generateCommitImageConfig(): arch=%q, os=%q", arch, os)
+	log.G(context.TODO()).Debugf("generateCommitImageConfig(): arch=%q, os=%q", arch, os)
 	return ocispec.Image{
 		Platform: ocispec.Platform{
 			Architecture: arch,
@@ -262,7 +262,7 @@ func applyDiffLayer(ctx context.Context, name string, baseImg ocispec.Image, sn
 			// NOTE: the snapshotter should be hold by lease. Even
 			// if the cleanup fails, the containerd gc can delete it.
 			if err := sn.Remove(ctx, key); err != nil {
-				logrus.Warnf("failed to cleanup aborted apply %s: %s", key, err)
+				log.G(ctx).Warnf("failed to cleanup aborted apply %s: %s", key, err)
 			}
 		}
 	}()

+ 4 - 4
daemon/containerd/image_delete.go

@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/container"
@@ -14,7 +15,6 @@ import (
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/sirupsen/logrus"
 )
 
 // ImageDelete deletes the image referenced by the given imageRef from this
@@ -135,7 +135,7 @@ func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, p
 	}
 	defer func() {
 		if err := i.unleaseSnapshotsFromDeletedConfigs(context.Background(), possiblyDeletedConfigs); err != nil {
-			logrus.WithError(err).Warn("failed to unlease snapshots")
+			log.G(ctx).WithError(err).Warn("failed to unlease snapshots")
 		}
 	}()
 
@@ -145,7 +145,7 @@ func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, p
 	if prune {
 		parents, err = i.parents(ctx, image.ID(imgID))
 		if err != nil {
-			logrus.WithError(err).Warn("failed to get image parents")
+			log.G(ctx).WithError(err).Warn("failed to get image parents")
 		}
 		sortParentsByAffinity(parents)
 	}
@@ -168,7 +168,7 @@ func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, p
 		}
 		err = i.imageDeleteHelper(ctx, parent.img, &records, false)
 		if err != nil {
-			logrus.WithError(err).Warn("failed to remove image parent")
+			log.G(ctx).WithError(err).Warn("failed to remove image parent")
 			break
 		}
 		parentID := parent.img.Target.Digest.String()

+ 8 - 7
daemon/containerd/image_exporter.go

@@ -9,6 +9,7 @@ import (
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	containerdimages "github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images/archive"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
 	cplatforms "github.com/containerd/containerd/platforms"
 	"github.com/docker/distribution/reference"
@@ -84,14 +85,14 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea
 			ref = reference.TagNameOnly(ref)
 			opts = append(opts, archive.WithManifest(target, ref.String()))
 
-			logrus.WithFields(logrus.Fields{
+			log.G(ctx).WithFields(logrus.Fields{
 				"target": target,
 				"name":   ref.String(),
 			}).Debug("export image")
 		} else {
 			opts = append(opts, archive.WithManifest(target))
 
-			logrus.WithFields(logrus.Fields{
+			log.G(ctx).WithFields(logrus.Fields{
 				"target": target,
 			}).Debug("export image without name")
 		}
@@ -122,7 +123,7 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt
 
 	imgs, err := i.client.Import(ctx, inTar, opts...)
 	if err != nil {
-		logrus.WithError(err).Debug("failed to import image to containerd")
+		log.G(ctx).WithError(err).Debug("failed to import image to containerd")
 		return errdefs.System(err)
 	}
 
@@ -140,7 +141,7 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt
 		}
 
 		err = i.walkImageManifests(ctx, img, func(platformImg *ImageManifest) error {
-			logger := logrus.WithFields(logrus.Fields{
+			logger := log.G(ctx).WithFields(logrus.Fields{
 				"image":    name,
 				"manifest": platformImg.Target().Digest,
 			})
@@ -213,16 +214,16 @@ func (i *ImageService) getBestDescriptorForExport(ctx context.Context, indexDesc
 			available, _, _, missing, err := containerdimages.Check(ctx, store, mfst, nil)
 			if err != nil {
 				hasMissingManifests = true
-				logrus.WithField("manifest", mfst.Digest).Warn("failed to check manifest's blob availability, won't export")
+				log.G(ctx).WithField("manifest", mfst.Digest).Warn("failed to check manifest's blob availability, won't export")
 				continue
 			}
 
 			if available && len(missing) == 0 {
 				presentManifests = append(presentManifests, mfst)
-				logrus.WithField("manifest", mfst.Digest).Debug("manifest content present, will export")
+				log.G(ctx).WithField("manifest", mfst.Digest).Debug("manifest content present, will export")
 			} else {
 				hasMissingManifests = true
-				logrus.WithFields(logrus.Fields{
+				log.G(ctx).WithFields(logrus.Fields{
 					"manifest": mfst.Digest,
 					"missing":  missing,
 				}).Debug("manifest is missing, won't export")

+ 2 - 1
daemon/containerd/image_import.go

@@ -12,6 +12,7 @@ import (
 	"github.com/containerd/containerd/content"
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types/container"
@@ -40,7 +41,7 @@ func (i *ImageService) ImportImage(ctx context.Context, ref reference.Named, pla
 	if ref != nil {
 		refString = ref.String()
 	}
-	logger := logrus.WithField("ref", refString)
+	logger := log.G(ctx).WithField("ref", refString)
 
 	ctx, release, err := i.client.WithLease(ctx)
 	if err != nil {

+ 4 - 3
daemon/containerd/image_list.go

@@ -10,6 +10,7 @@ import (
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
@@ -91,7 +92,7 @@ func (i *ImageService) Images(ctx context.Context, opts types.ImageListOptions)
 
 			available, err := img.CheckContentAvailable(ctx)
 			if err != nil {
-				logrus.WithFields(logrus.Fields{
+				log.G(ctx).WithFields(logrus.Fields{
 					logrus.ErrorKey: err,
 					"manifest":      img.Target(),
 					"image":         img.Name(),
@@ -182,7 +183,7 @@ func (i *ImageService) singlePlatformImage(ctx context.Context, contentStore con
 	rawImg := image.Metadata()
 	target := rawImg.Target.Digest
 
-	logger := logrus.WithFields(logrus.Fields{
+	logger := log.G(ctx).WithFields(logrus.Fields{
 		"name":   rawImg.Name,
 		"digest": target,
 	})
@@ -434,7 +435,7 @@ func setupLabelFilter(store content.Store, fltrs filters.Args) (func(image image
 			return true
 		}
 		if err != nil {
-			logrus.WithFields(logrus.Fields{
+			log.G(ctx).WithFields(logrus.Fields{
 				logrus.ErrorKey: err,
 				"image":         image.Name,
 				"checks":        checks,

+ 5 - 4
daemon/containerd/image_prune.go

@@ -5,6 +5,7 @@ import (
 
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	containerdimages "github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
@@ -79,7 +80,7 @@ func (i *ImageService) pruneUnused(ctx context.Context, filterFunc imageFilterFu
 	// Apply filters
 	for name, img := range imagesToPrune {
 		filteredOut := !filterFunc(img)
-		logrus.WithFields(logrus.Fields{
+		log.G(ctx).WithFields(logrus.Fields{
 			"image":       name,
 			"filteredOut": filteredOut,
 		}).Debug("filtering image")
@@ -99,7 +100,7 @@ func (i *ImageService) pruneUnused(ctx context.Context, filterFunc imageFilterFu
 		// Warning: This doesn't handle truncated ids:
 		//          `docker run 124c7d2` will have Image="124c7d270790"
 		ref, err := reference.ParseNormalizedNamed(ctr.Config.Image)
-		logrus.WithFields(logrus.Fields{
+		log.G(ctx).WithFields(logrus.Fields{
 			"ctr":          ctr.ID,
 			"image":        ref,
 			"nameParseErr": err,
@@ -121,7 +122,7 @@ func (i *ImageService) pruneUnused(ctx context.Context, filterFunc imageFilterFu
 	}()
 
 	for _, img := range imagesToPrune {
-		logrus.WithField("image", img).Debug("pruning image")
+		log.G(ctx).WithField("image", img).Debug("pruning image")
 
 		blobs := []ocispec.Descriptor{}
 
@@ -207,7 +208,7 @@ func (i *ImageService) unleaseSnapshotsFromDeletedConfigs(ctx context.Context, p
 		info, err := store.Info(ctx, cfgDigest)
 		if err != nil {
 			if cerrdefs.IsNotFound(err) {
-				logrus.WithField("config", cfgDigest).Debug("config already gone")
+				log.G(ctx).WithField("config", cfgDigest).Debug("config already gone")
 			} else {
 				errs = multierror.Append(errs, err)
 				if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {

+ 2 - 1
daemon/containerd/image_pull.go

@@ -7,6 +7,7 @@ import (
 	"github.com/containerd/containerd"
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/snapshotters"
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/distribution/reference"
@@ -75,7 +76,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string,
 		return err
 	}
 
-	logger := logrus.WithFields(logrus.Fields{
+	logger := log.G(ctx).WithFields(logrus.Fields{
 		"digest": img.Target().Digest,
 		"remote": ref.String(),
 	})

+ 4 - 4
daemon/containerd/image_push.go

@@ -11,6 +11,7 @@ import (
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	containerdimages "github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
@@ -21,7 +22,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/semaphore"
 )
 
@@ -48,7 +48,7 @@ func (i *ImageService) PushImage(ctx context.Context, targetRef reference.Named,
 	defer func() {
 		err := release(leasedCtx)
 		if err != nil && !cerrdefs.IsNotFound(err) {
-			logrus.WithField("image", targetRef).WithError(err).Error("failed to delete lease created for push")
+			log.G(ctx).WithField("image", targetRef).WithError(err).Error("failed to delete lease created for push")
 		}
 	}()
 
@@ -136,7 +136,7 @@ func (i *ImageService) PushImage(ctx context.Context, targetRef reference.Named,
 		if err := containerdimages.Dispatch(ctx, appendSource, nil, target); err != nil {
 			// Shouldn't happen, but even if it would fail, then make it only a warning
 			// because it doesn't affect the pushed data.
-			logrus.WithError(err).Warn("failed to append distribution source labels to pushed content")
+			log.G(ctx).WithError(err).Warn("failed to append distribution source labels to pushed content")
 		}
 	}
 
@@ -157,7 +157,7 @@ func findMissingMountable(ctx context.Context, store content.Store, queue *jobs,
 		if !errdefs.IsNotFound(err) {
 			return nil, err
 		}
-		logrus.WithField("target", target).Debug("distribution source label not found")
+		log.G(ctx).WithField("target", target).Debug("distribution source label not found")
 		return mountableBlobs, nil
 	}
 

+ 2 - 1
daemon/containerd/image_tag.go

@@ -5,6 +5,7 @@ import (
 
 	cerrdefs "github.com/containerd/containerd/errdefs"
 	containerdimages "github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/image"
@@ -54,7 +55,7 @@ func (i *ImageService) TagImage(ctx context.Context, imageID image.ID, newTag re
 		}
 	}
 
-	logger := logrus.WithFields(logrus.Fields{
+	logger := log.G(ctx).WithFields(logrus.Fields{
 		"imageID": imageID.String(),
 		"tag":     newTag.String(),
 	})
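
image_import.go, image_pull.go, and image_tag.go above all derive a field-scoped entry once (logger := log.G(ctx).WithFields(...)) and reuse it for every message in the operation, rather than repeating the fields at each call site. A sketch of the same idiom, assuming only the containerd log package; the helper and its fields are illustrative, not part of this PR:

package main

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/sirupsen/logrus"
)

// pullStatus derives one entry from the context and reuses it, so every
// message carries the same "remote" and "digest" fields.
func pullStatus(ctx context.Context, ref, digest string) {
	logger := log.G(ctx).WithFields(logrus.Fields{
		"remote": ref,
		"digest": digest,
	})

	logger.Info("starting pull")
	// ... resolve, fetch, unpack ...
	logger.Info("pull complete")
}

func main() {
	pullStatus(context.Background(), "docker.io/library/alpine:3.18", "sha256:c0ffee")
}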

+ 3 - 3
daemon/containerd/mount.go

@@ -4,8 +4,8 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/container"
-	"github.com/sirupsen/logrus"
 )
 
 // Mount mounts the container filesystem in a temporary location, use defer imageService.Unmount
@@ -22,7 +22,7 @@ func (i *ImageService) Mount(ctx context.Context, container *container.Container
 		return fmt.Errorf("failed to mount %s: %w", root, err)
 	}
 
-	logrus.WithField("container", container.ID).Debugf("container mounted via snapshotter: %v", root)
+	log.G(ctx).WithField("container", container.ID).Debugf("container mounted via snapshotter: %v", root)
 
 	container.BaseFS = root
 	return nil
@@ -33,7 +33,7 @@ func (i *ImageService) Unmount(ctx context.Context, container *container.Contain
 	root := container.BaseFS
 
 	if err := i.refCountMounter.Unmount(root); err != nil {
-		logrus.WithField("container", container.ID).WithError(err).Error("error unmounting container")
+		log.G(ctx).WithField("container", container.ID).WithError(err).Error("error unmounting container")
 		return fmt.Errorf("failed to unmount %s: %w", root, err)
 	}
 

+ 3 - 3
daemon/containerd/progress.go

@@ -8,13 +8,13 @@ import (
 
 	"github.com/containerd/containerd/content"
 	cerrdefs "github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/sirupsen/logrus"
 )
 
 type progressUpdater interface {
@@ -48,7 +48,7 @@ func (j *jobs) showProgress(ctx context.Context, out progress.Output, updater pr
 			case <-ticker.C:
 				if err := updater.UpdateProgress(ctx, j, out, start); err != nil {
 					if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
-						logrus.WithError(err).Error("Updating progress failed")
+						log.G(ctx).WithError(err).Error("Updating progress failed")
 					}
 				}
 			case <-ctx.Done():
@@ -114,7 +114,7 @@ func (p pullProgress) UpdateProgress(ctx context.Context, ongoing *jobs, out pro
 		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 			return err
 		}
-		logrus.WithError(err).Error("status check failed")
+		log.G(ctx).WithError(err).Error("status check failed")
 		return nil
 	}
 	pulling := make(map[string]content.Status, len(actives))

+ 2 - 1
daemon/containerd/resolver.go

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"net/http"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/version"
@@ -63,7 +64,7 @@ func authorizationCredsFromAuthConfig(authConfig registrytypes.AuthConfig) docke
 
 	return docker.WithAuthCreds(func(host string) (string, string, error) {
 		if cfgHost != host {
-			logrus.WithFields(logrus.Fields{
+			log.G(context.TODO()).WithFields(logrus.Fields{
 				"host":    host,
 				"cfgHost": cfgHost,
 			}).Warn("Host doesn't match")

+ 2 - 1
daemon/containerd/service.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/snapshots"
@@ -177,7 +178,7 @@ func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID st
 		mfst, err := i.GetImageManifest(ctx, ctr.Config.Image, imagetypes.GetImageOpts{})
 		if err != nil {
 			// Log error, don't error out whole operation.
-			logrus.WithFields(logrus.Fields{
+			log.G(ctx).WithFields(logrus.Fields{
 				logrus.ErrorKey: err,
 				"container":     containerID,
 			}).Warn("empty ImageManifest, can't calculate base image size")

+ 2 - 2
daemon/containerfs_linux.go

@@ -9,10 +9,10 @@ import (
 	"runtime"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/hashicorp/go-multierror"
 	"github.com/moby/sys/mount"
 	"github.com/moby/sys/symlink"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 
 	"github.com/docker/docker/api/types"
@@ -136,7 +136,7 @@ func (daemon *Daemon) openContainerFS(container *container.Container) (_ *contai
 						if m.ReadOnlyForceRecursive {
 							return err
 						} else {
-							logrus.WithError(err).Debugf("Failed to make %q recursively read-only", dest)
+							log.G(context.TODO()).WithError(err).Debugf("Failed to make %q recursively read-only", dest)
 						}
 					}
 				}

+ 3 - 3
daemon/create.go

@@ -8,6 +8,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
@@ -23,7 +24,6 @@ import (
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	archvariant "github.com/tonistiigi/go-archvariant"
 )
 
@@ -135,7 +135,7 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
 		if daemon.UsesSnapshotter() {
 			imgManifest, err = daemon.imageService.GetImageManifest(ctx, opts.params.Config.Image, imagetypes.GetImageOpts{Platform: opts.params.Platform})
 			if err != nil {
-				logrus.WithError(err).Error("failed to find image manifest")
+				log.G(ctx).WithError(err).Error("failed to find image manifest")
 				return nil, err
 			}
 		}
@@ -171,7 +171,7 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
 				RemoveVolume: true,
 			})
 			if err != nil {
-				logrus.WithError(err).Error("failed to cleanup container on create error")
+				log.G(ctx).WithError(err).Error("failed to cleanup container on create error")
 			}
 		}
 	}()

+ 3 - 3
daemon/create_unix.go

@@ -8,13 +8,13 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/containerd/containerd/log"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/oci"
 	volumeopts "github.com/docker/docker/volume/service/opts"
 	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/sirupsen/logrus"
 )
 
 // createContainerOSSpecificSettings performs host-OS specific container create functionality
@@ -45,7 +45,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con
 		// Skip volumes for which we already have something mounted on that
 		// destination because of a --volume-from.
 		if container.HasMountFor(destination) {
-			logrus.WithField("container", container.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume")
+			log.G(context.TODO()).WithField("container", container.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume")
 			// Not an error, this could easily have come from the image config.
 			continue
 		}
@@ -85,7 +85,7 @@ func (daemon *Daemon) populateVolumes(c *container.Container) error {
 			continue
 		}
 
-		logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name)
+		log.G(context.TODO()).Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name)
 		if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil {
 			return err
 		}

+ 39 - 37
daemon/daemon.go

@@ -19,8 +19,11 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/sirupsen/logrus"
+
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/defaults"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/dialer"
 	"github.com/containerd/containerd/pkg/userns"
 	"github.com/containerd/containerd/remotes/docker"
@@ -69,7 +72,6 @@ import (
 	resolverconfig "github.com/moby/buildkit/util/resolver/config"
 	"github.com/moby/locker"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"go.etcd.io/bbolt"
 	"golang.org/x/sync/semaphore"
 	"google.golang.org/grpc"
@@ -258,7 +260,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 	var mapLock sync.Mutex
 	containers := make(map[string]*container.Container)
 
-	logrus.Info("Loading containers: start.")
+	log.G(context.TODO()).Info("Loading containers: start.")
 
 	dir, err := os.ReadDir(daemon.repository)
 	if err != nil {
@@ -283,7 +285,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 			_ = sem.Acquire(context.Background(), 1)
 			defer sem.Release(1)
 
-			log := logrus.WithField("container", id)
+			log := log.G(context.TODO()).WithField("container", id)
 
 			c, err := daemon.load(id)
 			if err != nil {
@@ -326,7 +328,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 			_ = sem.Acquire(context.Background(), 1)
 			defer sem.Release(1)
 
-			log := logrus.WithField("container", c.ID)
+			log := log.G(context.TODO()).WithField("container", c.ID)
 
 			if err := daemon.registerName(c); err != nil {
 				log.WithError(err).Errorf("failed to register container name: %s", c.Name)
@@ -353,7 +355,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 			_ = sem.Acquire(context.Background(), 1)
 			defer sem.Release(1)
 
-			log := logrus.WithField("container", c.ID)
+			log := log.G(context.TODO()).WithField("container", c.ID)
 
 			if err := daemon.checkpointAndSave(c); err != nil {
 				log.WithError(err).Error("error saving backported mountspec to disk")
@@ -543,7 +545,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 			_ = sem.Acquire(context.Background(), 1)
 
 			if err := daemon.registerLinks(c, c.HostConfig); err != nil {
-				logrus.WithField("container", c.ID).WithError(err).Error("failed to register link for container")
+				log.G(context.TODO()).WithField("container", c.ID).WithError(err).Error("failed to register link for container")
 			}
 
 			sem.Release(1)
@@ -557,7 +559,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 		go func(c *container.Container, chNotify chan struct{}) {
 			_ = sem.Acquire(context.Background(), 1)
 
-			log := logrus.WithField("container", c.ID)
+			log := log.G(context.TODO()).WithField("container", c.ID)
 
 			log.Debug("starting container")
 
@@ -596,7 +598,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 			_ = sem.Acquire(context.Background(), 1)
 
 			if err := daemon.containerRm(&cfg.Config, cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
-				logrus.WithField("container", cid).WithError(err).Error("failed to remove container")
+				log.G(context.TODO()).WithField("container", cid).WithError(err).Error("failed to remove container")
 			}
 
 			sem.Release(1)
@@ -627,7 +629,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 			_ = sem.Acquire(context.Background(), 1)
 
 			if err := daemon.prepareMountPoints(c); err != nil {
-				logrus.WithField("container", c.ID).WithError(err).Error("failed to prepare mountpoints for container")
+				log.G(context.TODO()).WithField("container", c.ID).WithError(err).Error("failed to prepare mountpoints for container")
 			}
 
 			sem.Release(1)
@@ -636,7 +638,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 	}
 	group.Wait()
 
-	logrus.Info("Loading containers: done.")
+	log.G(context.TODO()).Info("Loading containers: done.")
 
 	return nil
 }
@@ -673,7 +675,7 @@ func (daemon *Daemon) restartSwarmContainers(ctx context.Context, cfg *configSto
 					}
 
 					if err := daemon.containerStart(ctx, cfg, c, "", "", true); err != nil {
-						logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container")
+						log.G(ctx).WithField("container", c.ID).WithError(err).Error("failed to start swarm container")
 					}
 
 					sem.Release(1)
@@ -699,7 +701,7 @@ func (daemon *Daemon) registerLink(parent, child *container.Container, alias str
 	fullName := path.Join(parent.Name, alias)
 	if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
 		if errors.Is(err, container.ErrNameReserved) {
-			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
+			log.G(context.TODO()).Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
 			return nil
 		}
 		return err
@@ -737,10 +739,10 @@ func (daemon *Daemon) DaemonLeavesCluster() {
 		select {
 		case <-done:
 		case <-timeout.C:
-			logrus.Warn("timeout while waiting for ingress network removal")
+			log.G(context.TODO()).Warn("timeout while waiting for ingress network removal")
 		}
 	} else {
-		logrus.Warnf("failed to initiate ingress network removal: %v", err)
+		log.G(context.TODO()).Warnf("failed to initiate ingress network removal: %v", err)
 	}
 
 	daemon.attachmentStore.ClearAttachments()
@@ -775,7 +777,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 	// Ensure that we have a correct root key limit for launching containers.
 	if err := modifyRootKeyLimit(); err != nil {
-		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
+		log.G(ctx).Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
 	}
 
 	// Ensure we have compatible and valid configuration options
@@ -795,7 +797,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	}
 	rootIDs := idMapping.RootPair()
 	if err := setMayDetachMounts(); err != nil {
-		logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
+		log.G(ctx).WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
 	}
 
 	// set up the tmpDir to use a canonical path
@@ -848,7 +850,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		if err != nil {
 			// Use a fresh context here. Passed context could be cancelled.
 			if err := d.Shutdown(context.Background()); err != nil {
-				logrus.Error(err)
+				log.G(ctx).Error(err)
 			}
 		}
 	}()
@@ -874,12 +876,12 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	}
 
 	if err := configureMaxThreads(&configStore.Config); err != nil {
-		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
+		log.G(ctx).Warnf("Failed to configure golang's threads limit: %v", err)
 	}
 
 	// ensureDefaultAppArmorProfile does nothing if apparmor is disabled
 	if err := ensureDefaultAppArmorProfile(); err != nil {
-		logrus.Errorf(err.Error())
+		log.G(ctx).Errorf(err.Error())
 	}
 
 	daemonRepo := filepath.Join(configStore.Root, "containers")
@@ -990,7 +992,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to set log opts")
 	}
-	logrus.Debugf("Using default logging driver %s", d.defaultLogConfig.Type)
+	log.G(ctx).Debugf("Using default logging driver %s", d.defaultLogConfig.Type)
 
 	d.volumes, err = volumesservice.NewVolumeService(configStore.Root, d.PluginStore, rootIDs, d)
 	if err != nil {
@@ -1035,16 +1037,16 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	if isWindows {
 		driverName = "windowsfilter"
 	} else if driverName != "" {
-		logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
+		log.G(ctx).Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
 	} else {
 		driverName = configStore.GraphDriver
 	}
 
 	if d.UsesSnapshotter() {
 		if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
-			logrus.Warn("Enabling containerd snapshotter through the $TEST_INTEGRATION_USE_SNAPSHOTTER environment variable. This should only be used for testing.")
+			log.G(ctx).Warn("Enabling containerd snapshotter through the $TEST_INTEGRATION_USE_SNAPSHOTTER environment variable. This should only be used for testing.")
 		}
-		logrus.Info("Starting daemon with containerd snapshotter integration enabled")
+		log.G(ctx).Info("Starting daemon with containerd snapshotter integration enabled")
 
 		// FIXME(thaJeztah): implement automatic snapshotter-selection similar to graph-driver selection; see https://github.com/moby/moby/issues/44076
 		if driverName == "" {
@@ -1152,9 +1154,9 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		// if migration is called from daemon/images. layerStore might move as well.
 		d.imageService = images.NewImageService(imgSvcConfig)
 
-		logrus.Debugf("Max Concurrent Downloads: %d", imgSvcConfig.MaxConcurrentDownloads)
-		logrus.Debugf("Max Concurrent Uploads: %d", imgSvcConfig.MaxConcurrentUploads)
-		logrus.Debugf("Max Download Attempts: %d", imgSvcConfig.MaxDownloadAttempts)
+		log.G(ctx).Debugf("Max Concurrent Downloads: %d", imgSvcConfig.MaxConcurrentDownloads)
+		log.G(ctx).Debugf("Max Concurrent Uploads: %d", imgSvcConfig.MaxConcurrentUploads)
+		log.G(ctx).Debugf("Max Download Attempts: %d", imgSvcConfig.MaxDownloadAttempts)
 	}
 
 	go d.execCommandGC()
@@ -1170,7 +1172,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 	info := d.SystemInfo()
 	for _, w := range info.Warnings {
-		logrus.Warn(w)
+		log.G(ctx).Warn(w)
 	}
 
 	engineInfo.WithValues(
@@ -1187,7 +1189,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	engineCpus.Set(float64(info.NCPU))
 	engineMemory.Set(float64(info.MemTotal))
 
-	logrus.WithFields(logrus.Fields{
+	log.G(ctx).WithFields(logrus.Fields{
 		"version":     dockerversion.Version,
 		"commit":      dockerversion.GitCommit,
 		"graphdriver": d.ImageService().StorageDriver(),
@@ -1267,13 +1269,13 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 	}
 
 	if daemon.containers != nil {
-		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout)
-		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg))
+		log.G(ctx).Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout)
+		log.G(ctx).Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg))
 		daemon.containers.ApplyAll(func(c *container.Container) {
 			if !c.IsRunning() {
 				return
 			}
-			log := logrus.WithField("container", c.ID)
+			log := log.G(ctx).WithField("container", c.ID)
 			log.Debug("shutting down container")
 			if err := daemon.shutdownContainer(c); err != nil {
 				log.WithError(err).Error("failed to shut down container")
@@ -1288,19 +1290,19 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 
 	if daemon.volumes != nil {
 		if err := daemon.volumes.Shutdown(); err != nil {
-			logrus.Errorf("Error shutting down volume store: %v", err)
+			log.G(ctx).Errorf("Error shutting down volume store: %v", err)
 		}
 	}
 
 	if daemon.imageService != nil {
 		if err := daemon.imageService.Cleanup(); err != nil {
-			logrus.Error(err)
+			log.G(ctx).Error(err)
 		}
 	}
 
 	// If we are part of a cluster, clean up cluster's stuff
 	if daemon.clusterProvider != nil {
-		logrus.Debugf("start clean shutdown of cluster resources...")
+		log.G(ctx).Debugf("start clean shutdown of cluster resources...")
 		daemon.DaemonLeavesCluster()
 	}
 
@@ -1370,13 +1372,13 @@ func prepareTempDir(rootDir string) (string, error) {
 		if err := os.Rename(tmpDir, newName); err == nil {
 			go func() {
 				if err := os.RemoveAll(newName); err != nil {
-					logrus.Warnf("failed to delete old tmp directory: %s", newName)
+					log.G(context.TODO()).Warnf("failed to delete old tmp directory: %s", newName)
 				}
 			}()
 		} else if !os.IsNotExist(err) {
-			logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
+			log.G(context.TODO()).Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
 			if err := os.RemoveAll(tmpDir); err != nil {
-				logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
+				log.G(context.TODO()).Warnf("failed to delete old tmp directory: %s", tmpDir)
 			}
 		}
 	}
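
The restore and shutdown loops in daemon.go above rebind the name log to a field-scoped entry (log := log.G(...).WithField("container", ...)). Go brings the new variable into scope only at the end of the declaration, so log.G on the right-hand side still refers to the package; afterwards the local variable shadows the import and calls such as log.WithError(err).Error(...) go through the entry. A compilable sketch of that shadowing, with placeholder work:

package main

import (
	"context"
	"errors"

	"github.com/containerd/containerd/log"
)

func restoreOne(ctx context.Context, id string) {
	// The local name shadows the imported package for the rest of this function.
	log := log.G(ctx).WithField("container", id)

	if err := errors.New("placeholder failure"); err != nil {
		log.WithError(err).Error("failed to load container")
		return
	}
	log.Debug("container restored")
}

func main() {
	restoreOne(context.Background(), "abc123")
}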

+ 7 - 6
daemon/daemon_linux.go

@@ -2,6 +2,7 @@ package daemon // import "github.com/docker/docker/daemon"
 
 import (
 	"bufio"
+	"context"
 	"fmt"
 	"io"
 	"net"
@@ -10,13 +11,13 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/libnetwork/ns"
 	"github.com/docker/docker/libnetwork/resolvconf"
 	"github.com/moby/sys/mount"
 	"github.com/moby/sys/mountinfo"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"github.com/vishvananda/netlink"
 	"golang.org/x/sys/unix"
 )
@@ -30,7 +31,7 @@ func getPluginExecRoot(_ *config.Config) string {
 }
 
 func (daemon *Daemon) cleanupMountsByID(id string) error {
-	logrus.Debugf("Cleaning up old mountid %s: start.", id)
+	log.G(context.TODO()).Debugf("Cleaning up old mountid %s: start.", id)
 	f, err := os.Open("/proc/self/mountinfo")
 	if err != nil {
 		return err
@@ -54,7 +55,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 				for _, p := range regexps {
 					if p.MatchString(mnt) {
 						if err := unmount(mnt); err != nil {
-							logrus.Error(err)
+							log.G(context.TODO()).Error(err)
 							errs = append(errs, err.Error())
 						}
 					}
@@ -71,7 +72,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errs, "\n"))
 	}
 
-	logrus.Debugf("Cleaning up old mountid %v: done.", id)
+	log.G(context.TODO()).Debugf("Cleaning up old mountid %v: done.", id)
 	return nil
 }
 
@@ -105,7 +106,7 @@ func (daemon *Daemon) cleanupMounts(cfg *config.Config) error {
 		return nil
 	}
 
-	logrus.WithField("mountpoint", daemon.root).Debug("unmounting daemon root")
+	log.G(context.TODO()).WithField("mountpoint", daemon.root).Debug("unmounting daemon root")
 	if err := mount.Unmount(daemon.root); err != nil {
 		return err
 	}
@@ -214,7 +215,7 @@ func kernelSupportsRecursivelyReadOnly() error {
 				}
 			}
 			if umErr != nil {
-				logrus.WithError(umErr).Warnf("Failed to unmount %q", tmpMnt)
+				log.G(context.TODO()).WithError(umErr).Warnf("Failed to unmount %q", tmpMnt)
 			}
 		}()
 		attr := &unix.MountAttr{

+ 18 - 18
daemon/daemon_unix.go

@@ -18,6 +18,7 @@ import (
 	"time"
 
 	"github.com/containerd/cgroups/v3"
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/userns"
 	"github.com/docker/docker/api/types/blkiodev"
 	pblkiodev "github.com/docker/docker/api/types/blkiodev"
@@ -45,7 +46,6 @@ import (
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"github.com/vishvananda/netlink"
 	"golang.org/x/sys/unix"
 )
@@ -222,7 +222,7 @@ func parseSecurityOpt(securityOptions *container.SecurityOptions, config *contai
 			k, v, ok = strings.Cut(opt, "=")
 		} else if strings.Contains(opt, ":") {
 			k, v, ok = strings.Cut(opt, ":")
-			logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
+			log.G(context.TODO()).Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
 		}
 		if !ok {
 			return fmt.Errorf("invalid --security-opt 1: %q", opt)
@@ -284,7 +284,7 @@ func adjustParallelLimit(n int, limit int) int {
 	// ulimits to the largest possible value for dockerd).
 	var rlim unix.Rlimit
 	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil {
-		logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err)
+		log.G(context.TODO()).Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err)
 		return limit
 	}
 	softRlimit := int(rlim.Cur)
@@ -299,7 +299,7 @@ func adjustParallelLimit(n int, limit int) int {
 		return limit
 	}
 
-	logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit)
+	log.G(context.TODO()).Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit)
 	return softRlimit / overhead
 }
 
@@ -309,10 +309,10 @@ func (daemon *Daemon) adaptContainerSettings(daemonCfg *config.Config, hostConfi
 	if adjustCPUShares && hostConfig.CPUShares > 0 {
 		// Handle unsupported CPUShares
 		if hostConfig.CPUShares < linuxMinCPUShares {
-			logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
+			log.G(context.TODO()).Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
 			hostConfig.CPUShares = linuxMinCPUShares
 		} else if hostConfig.CPUShares > linuxMaxCPUShares {
-			logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
+			log.G(context.TODO()).Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
 			hostConfig.CPUShares = linuxMaxCPUShares
 		}
 	}
@@ -781,7 +781,7 @@ func configureMaxThreads(config *config.Config) error {
 	}
 	maxThreads := (mtint / 100) * 90
 	debug.SetMaxThreads(maxThreads)
-	logrus.Debugf("Golang's threads limit set to %d", maxThreads)
+	log.G(context.TODO()).Debugf("Golang's threads limit set to %d", maxThreads)
 	return nil
 }
 
@@ -809,7 +809,7 @@ func overlaySupportsSelinux() (bool, error) {
 func configureKernelSecuritySupport(config *config.Config, driverName string) error {
 	if config.EnableSelinuxSupport {
 		if !selinux.GetEnabled() {
-			logrus.Warn("Docker could not enable SELinux on the host system")
+			log.G(context.TODO()).Warn("Docker could not enable SELinux on the host system")
 			return nil
 		}
 
@@ -822,7 +822,7 @@ func configureKernelSecuritySupport(config *config.Config, driverName string) er
 			}
 
 			if !supported {
-				logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
+				log.G(context.TODO()).Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
 			}
 		}
 	} else {
@@ -846,7 +846,7 @@ func (daemon *Daemon) initNetworkController(cfg *config.Config, activeSandboxes
 	}
 
 	if len(activeSandboxes) > 0 {
-		logrus.Info("there are running containers, updated network configuration will not take affect")
+		log.G(context.TODO()).Info("there are running containers, updated network configuration will not take affect")
 	} else if err := configureNetworking(daemon.netController, cfg); err != nil {
 		return err
 	}
@@ -983,7 +983,7 @@ func initBridgeDriver(controller *libnetwork.Controller, config *config.Config)
 		ipamV4Conf.PreferredPool = ipNet.String()
 		ipamV4Conf.Gateway = ip.String()
 	} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
-		logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
+		log.G(context.TODO()).Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
 	}
 
 	if config.BridgeConfig.FixedCIDR != "" {
@@ -1068,7 +1068,7 @@ func initBridgeDriver(controller *libnetwork.Controller, config *config.Config)
 func removeDefaultBridgeInterface() {
 	if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil {
 		if err := netlink.LinkDel(lnk); err != nil {
-			logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
+			log.G(context.TODO()).Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
 		}
 	}
 }
@@ -1187,10 +1187,10 @@ func setupRemappedRoot(config *config.Config) (idtools.IdentityMapping, error) {
 		if username == "root" {
 			// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
 			// effectively
-			logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
+			log.G(context.TODO()).Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
 			return idtools.IdentityMapping{}, nil
 		}
-		logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username)
+		log.G(context.TODO()).Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username)
 		// update remapped root setting now that we have resolved them to actual names
 		config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
 
@@ -1235,7 +1235,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools
 	// `chdir()` to work for containers namespaced to that uid/gid)
 	if config.RemappedRoot != "" {
 		config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID))
-		logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
+		log.G(context.TODO()).Debugf("Creating user namespaced daemon root: %s", config.Root)
 		// Create the root directory if it doesn't exist
 		if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil {
 			return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
@@ -1257,7 +1257,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools
 	}
 
 	if err := setupDaemonRootPropagation(config); err != nil {
-		logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
+		log.G(context.TODO()).WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
 	}
 	return nil
 }
@@ -1303,7 +1303,7 @@ func setupDaemonRootPropagation(cfg *config.Config) error {
 			return
 		}
 		if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) {
-			logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
+			log.G(context.TODO()).WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
 		}
 	}()
 
@@ -1427,7 +1427,7 @@ func setMayDetachMounts() error {
 		// unprivileged container. Ignore the error, but log
 		// it if we appear not to be in that situation.
 		if !userns.RunningInUserNS() {
-			logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
+			log.G(context.TODO()).Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
 		}
 		return nil
 	}

+ 6 - 6
daemon/daemon_windows.go

@@ -28,7 +28,7 @@ import (
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/runconfig"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
+	"github.com/containerd/containerd/log"
 	"golang.org/x/sys/windows"
 	"golang.org/x/sys/windows/svc/mgr"
 )
@@ -274,7 +274,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand
 
 				err = v.Delete()
 				if err != nil {
-					logrus.Errorf("Error occurred when removing network %v", err)
+					log.G(context.TODO()).Errorf("Error occurred when removing network %v", err)
 				}
 
 				_, err := daemon.netController.NewNetwork("nat", name, id,
@@ -284,7 +284,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand
 					libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
 				)
 				if err != nil {
-					logrus.Errorf("Error occurred when creating network %v", err)
+					log.G(context.TODO()).Errorf("Error occurred when creating network %v", err)
 				}
 				continue
 			}
@@ -293,7 +293,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand
 			if v.Info().Scope() != datastore.GlobalScope {
 				err = v.Delete()
 				if err != nil {
-					logrus.Errorf("Error occurred when removing network %v", err)
+					log.G(context.TODO()).Errorf("Error occurred when removing network %v", err)
 				}
 			}
 		}
@@ -392,7 +392,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand
 		)
 
 		if err != nil {
-			logrus.Errorf("Error occurred when creating network %v", err)
+			log.G(context.TODO()).Errorf("Error occurred when creating network %v", err)
 		}
 	}
 
@@ -544,7 +544,7 @@ func (daemon *Daemon) setDefaultIsolation(config *config.Config) error {
 		}
 	}
 
-	logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation)
+	log.G(context.TODO()).Infof("Windows default isolation mode: %s", daemon.defaultIsolation)
 	return nil
 }
 

+ 4 - 3
daemon/debugtrap_unix.go

@@ -3,11 +3,12 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
 	"os"
 	"os/signal"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/pkg/stack"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -18,9 +19,9 @@ func (daemon *Daemon) setupDumpStackTrap(root string) {
 		for range c {
 			path, err := stack.DumpToFile(root)
 			if err != nil {
-				logrus.WithError(err).Error("failed to write goroutines dump")
+				log.G(context.TODO()).WithError(err).Error("failed to write goroutines dump")
 			} else {
-				logrus.Infof("goroutine stacks written to %s", path)
+				log.G(context.TODO()).Infof("goroutine stacks written to %s", path)
 			}
 		}
 	}()

+ 7 - 6
daemon/debugtrap_windows.go

@@ -1,12 +1,13 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"unsafe"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/pkg/stack"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/windows"
 )
 
@@ -18,7 +19,7 @@ func (daemon *Daemon) setupDumpStackTrap(root string) {
 	ev, _ := windows.UTF16PtrFromString(event)
 	sd, err := windows.SecurityDescriptorFromString("D:P(A;;GA;;;BA)(A;;GA;;;SY)")
 	if err != nil {
-		logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error())
+		log.G(context.TODO()).Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error())
 		return
 	}
 	var sa windows.SecurityAttributes
@@ -27,18 +28,18 @@ func (daemon *Daemon) setupDumpStackTrap(root string) {
 	sa.SecurityDescriptor = sd
 	h, err := windows.CreateEvent(&sa, 0, 0, ev)
 	if h == 0 || err != nil {
-		logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error())
+		log.G(context.TODO()).Errorf("failed to create debug stackdump event %s: %s", event, err.Error())
 		return
 	}
 	go func() {
-		logrus.Debugf("Stackdump - waiting signal at %s", event)
+		log.G(context.TODO()).Debugf("Stackdump - waiting signal at %s", event)
 		for {
 			windows.WaitForSingleObject(h, windows.INFINITE)
 			path, err := stack.DumpToFile(root)
 			if err != nil {
-				logrus.WithError(err).Error("failed to write goroutines dump")
+				log.G(context.TODO()).WithError(err).Error("failed to write goroutines dump")
 			} else {
-				logrus.Infof("goroutine stacks written to %s", path)
+				log.G(context.TODO()).Infof("goroutine stacks written to %s", path)
 			}
 		}
 	}()

+ 4 - 4
daemon/delete.go

@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/container"
@@ -17,7 +18,6 @@ import (
 	"github.com/docker/docker/pkg/containerfs"
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // ContainerRm removes the container id from the filesystem. An error
@@ -77,7 +77,7 @@ func (daemon *Daemon) rmLink(cfg *config.Config, container *container.Container,
 	if parentContainer != nil {
 		daemon.linkIndex.unlink(name, container, parentContainer)
 		if err := daemon.updateNetwork(cfg, parentContainer); err != nil {
-			logrus.Debugf("Could not update network to remove link %s: %v", n, err)
+			log.G(context.TODO()).Debugf("Could not update network to remove link %s: %v", n, err)
 		}
 	}
 	return nil
@@ -129,7 +129,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, config ty
 	// container meta file got removed from disk, then a restart of
 	// docker should not make a dead container alive.
 	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
-		logrus.Errorf("Error saving dying container to disk: %v", err)
+		log.G(context.TODO()).Errorf("Error saving dying container to disk: %v", err)
 	}
 	container.Unlock()
 
@@ -173,7 +173,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, config ty
 	daemon.containers.Delete(container.ID)
 	daemon.containersReplica.Delete(container)
 	if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil {
-		logrus.Error(err)
+		log.G(context.TODO()).Error(err)
 	}
 	for _, name := range linkNames {
 		daemon.releaseName(name)

+ 6 - 6
daemon/events.go

@@ -6,6 +6,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types/events"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/container"
@@ -13,7 +14,6 @@ import (
 	"github.com/docker/docker/libnetwork"
 	gogotypes "github.com/gogo/protobuf/types"
 	swarmapi "github.com/moby/swarmkit/v2/api"
-	"github.com/sirupsen/logrus"
 )
 
 var (
@@ -127,7 +127,7 @@ func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStre
 			return
 		case message, ok := <-watchStream:
 			if !ok {
-				logrus.Debug("cluster event channel has stopped")
+				log.G(ctx).Debug("cluster event channel has stopped")
 				return
 			}
 			daemon.generateClusterEvent(message)
@@ -138,7 +138,7 @@ func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStre
 func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) {
 	for _, event := range msg.Events {
 		if event.Object == nil {
-			logrus.Errorf("event without object: %v", event)
+			log.G(context.TODO()).Errorf("event without object: %v", event)
 			continue
 		}
 		switch v := event.Object.GetObject().(type) {
@@ -153,7 +153,7 @@ func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) {
 		case *swarmapi.Object_Config:
 			daemon.logConfigEvent(event.Action, v.Config, event.OldObject.GetConfig())
 		default:
-			logrus.Warnf("unrecognized event: %v", event)
+			log.G(context.TODO()).Warnf("unrecognized event: %v", event)
 		}
 	}
 }
@@ -245,7 +245,7 @@ func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *
 				}
 			} else {
 				// This should not happen.
-				logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime())
+				log.G(context.TODO()).Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime())
 			}
 		}
 		// check replicated count change
@@ -259,7 +259,7 @@ func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *
 				}
 			} else {
 				// This should not happen.
-				logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode())
+				log.G(context.TODO()).Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode())
 			}
 		}
 		if service.UpdateStatus != nil {

+ 6 - 6
daemon/exec.go

@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/strslice"
@@ -21,7 +22,6 @@ import (
 	"github.com/moby/term"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 func (daemon *Daemon) registerExecCommand(container *container.Container, config *container.ExecConfig) {
@@ -174,7 +174,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
 	ec.Running = true
 	ec.Unlock()
 
-	logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
+	log.G(ctx).Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
 	attributes := map[string]string{
 		"execID": ec.ID,
 	}
@@ -188,7 +188,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
 			exitCode := 126
 			ec.ExitCode = &exitCode
 			if err := ec.CloseStreams(); err != nil {
-				logrus.Errorf("failed to cleanup exec %s streams: %s", ec.Container.ID, err)
+				log.G(ctx).Errorf("failed to cleanup exec %s streams: %s", ec.Container.ID, err)
 			}
 			ec.Unlock()
 		}
@@ -198,7 +198,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
 		r, w := io.Pipe()
 		go func() {
 			defer w.Close()
-			defer logrus.Debug("Closing buffered stdin pipe")
+			defer log.G(ctx).Debug("Closing buffered stdin pipe")
 			pools.Copy(w, options.Stdin)
 		}()
 		cStdin = r
@@ -295,7 +295,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
 
 	select {
 	case <-ctx.Done():
-		log := logrus.
+		log := log.G(ctx).
 			WithField("container", ec.Container.ID).
 			WithField("exec", ec.ID)
 		log.Debug("Sending KILL signal to container process")
@@ -339,7 +339,7 @@ func (daemon *Daemon) execCommandGC() {
 			}
 		}
 		if cleaned > 0 {
-			logrus.Debugf("clean %d unused exec commands", cleaned)
+			log.G(context.TODO()).Debugf("clean %d unused exec commands", cleaned)
 		}
 	}
 }

+ 4 - 3
daemon/graphdriver/btrfs/btrfs.go

@@ -23,6 +23,7 @@ static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btr
 import "C"
 
 import (
+	"context"
 	"fmt"
 	"math"
 	"os"
@@ -33,6 +34,7 @@ import (
 	"sync"
 	"unsafe"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/userns"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/containerfs"
@@ -42,7 +44,6 @@ import (
 	"github.com/moby/sys/mount"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -305,10 +306,10 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
 			_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE,
 				uintptr(unsafe.Pointer(&args)))
 			if errno != 0 {
-				logrus.WithField("storage-driver", "btrfs").Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error())
+				log.G(context.TODO()).WithField("storage-driver", "btrfs").Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error())
 			}
 		} else {
-			logrus.WithField("storage-driver", "btrfs").Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error())
+			log.G(context.TODO()).WithField("storage-driver", "btrfs").Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error())
 		}
 	}
 

+ 10 - 8
daemon/graphdriver/driver.go

@@ -1,17 +1,18 @@
 package graphdriver // import "github.com/docker/docker/daemon/graphdriver"
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"os"
 	"path/filepath"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"github.com/vbatts/tar-split/tar/storage"
 )
 
@@ -168,7 +169,7 @@ func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Drive
 	if err == nil {
 		return pluginDriver, nil
 	}
-	logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph")
+	log.G(context.TODO()).WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph")
 	return nil, ErrNotSupported
 }
 
@@ -177,7 +178,7 @@ func getBuiltinDriver(name, home string, options []string, idMap idtools.Identit
 	if initFunc, exists := drivers[name]; exists {
 		return initFunc(filepath.Join(home, name), options, idMap)
 	}
-	logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
+	log.G(context.TODO()).Errorf("Failed to built-in GetDriver graph %s %s", name, home)
 	return nil, ErrNotSupported
 }
 
@@ -191,8 +192,9 @@ type Options struct {
 
 // New creates the driver and initializes it at the specified root.
 func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
+	ctx := context.TODO()
 	if name != "" {
-		logrus.Infof("[graphdriver] trying configured driver: %s", name)
+		log.G(ctx).Infof("[graphdriver] trying configured driver: %s", name)
 		if err := checkRemoved(name); err != nil {
 			return nil, err
 		}
@@ -202,7 +204,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err
 	// Guess for prior driver
 	driversMap := scanPriorDrivers(config.Root)
 	priorityList := strings.Split(priority, ",")
-	logrus.Debugf("[graphdriver] priority list: %v", priorityList)
+	log.G(ctx).Debugf("[graphdriver] priority list: %v", priorityList)
 	for _, name := range priorityList {
 		if _, prior := driversMap[name]; prior {
 			// of the state found from prior drivers, check in order of our priority
@@ -213,7 +215,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err
 				// state, and now it is no longer supported/prereq/compatible, so
 				// something changed and needs attention. Otherwise the daemon's
 				// images would just "disappear".
-				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				log.G(ctx).Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
 				return nil, err
 			}
 
@@ -226,11 +228,11 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err
 				}
 
 				err = errors.Errorf("%s contains several valid graphdrivers: %s; cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
-				logrus.Errorf("[graphdriver] %v", err)
+				log.G(ctx).Errorf("[graphdriver] %v", err)
 				return nil, err
 			}
 
-			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			log.G(ctx).Infof("[graphdriver] using prior storage driver: %s", name)
 			return driver, nil
 		}
 	}

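New hoists a single ctx := context.TODO() instead of repeating the placeholder at every call site, which leaves one seam to replace once callers pass a real context. A hedged sketch of what that buys; the wrapper function and the storage-driver field below are hypothetical, not part of this PR.

package graphdriversketch

import (
	"context"

	"github.com/containerd/containerd/log"
)

// newWithLogging shows how a caller-supplied context could carry a scoped
// logger: if New accepted a context, its existing log.G(ctx) calls would
// emit entries tagged with the field attached here.
func newWithLogging(name string) context.Context {
	ctx := log.WithLogger(context.Background(),
		log.G(context.Background()).WithField("storage-driver", name))
	log.G(ctx).Infof("[graphdriver] trying configured driver: %s", name)
	return ctx
}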
+ 4 - 3
daemon/graphdriver/fsdiff.go

@@ -1,14 +1,15 @@
 package graphdriver // import "github.com/docker/docker/daemon/graphdriver"
 
 import (
+	"context"
 	"io"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/sirupsen/logrus"
 )
 
 var (
@@ -143,11 +144,11 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i
 	layerFs := layerRootFs
 	options := &archive.TarOptions{IDMap: gdw.IDMap, BestEffortXattrs: gdw.BestEffortXattrs}
 	start := time.Now().UTC()
-	logrus.WithField("id", id).Debug("Start untar layer")
+	log.G(context.TODO()).WithField("id", id).Debug("Start untar layer")
 	if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
 		return
 	}
-	logrus.WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
+	log.G(context.TODO()).WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
 
 	return
 }

+ 4 - 4
daemon/graphdriver/fuse-overlayfs/fuseoverlayfs.go

@@ -13,6 +13,7 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/userns"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver/overlayutils"
@@ -26,7 +27,6 @@ import (
 	"github.com/moby/sys/mount"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -66,7 +66,7 @@ type Driver struct {
 }
 
 var (
-	logger = logrus.WithField("storage-driver", driverName)
+	logger = log.G(context.TODO()).WithField("storage-driver", driverName)
 )
 
 func init() {
@@ -502,7 +502,7 @@ func fusermountU(mountpoint string) (unmounted bool) {
 	for _, v := range []string{"fusermount3", "fusermount"} {
 		err := exec.Command(v, "-u", mountpoint).Run()
 		if err != nil && !os.IsNotExist(err) {
-			logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
+			log.G(context.TODO()).Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
 		}
 		if err == nil {
 			unmounted = true
@@ -515,7 +515,7 @@ func fusermountU(mountpoint string) (unmounted bool) {
 		fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0)
 		if err == nil {
 			if err := unix.Syncfs(fd); err != nil {
-				logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err)
+				log.G(context.TODO()).Debugf("Error Syncfs(%s) - %v", mountpoint, err)
 			}
 			unix.Close(fd)
 		}

+ 2 - 2
daemon/graphdriver/overlay2/overlay.go

@@ -14,6 +14,7 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/continuity/fs"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver/overlayutils"
@@ -28,7 +29,6 @@ import (
 	"github.com/moby/locker"
 	"github.com/moby/sys/mount"
 	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -102,7 +102,7 @@ type Driver struct {
 }
 
 var (
-	logger                = logrus.WithField("storage-driver", "overlay2")
+	logger                = log.G(context.TODO()).WithField("storage-driver", "overlay2")
 	backingFs             = "<unknown>"
 	projectQuotaSupported = false
 

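fuse-overlayfs and overlay2 keep a package-level logger entry. Because log.G(context.TODO()) is evaluated once at package initialization, the entry is bound to the default logger, which reproduces the previous logrus.WithField behaviour; loggers attached to a request context later are not reflected in it. A minimal sketch of the pattern; the package name and call site are hypothetical.

package overlaysketch

import (
	"context"

	"github.com/containerd/containerd/log"
)

// Evaluated at init time: bound to the default logger, just like the old
// package variable logger = logrus.WithField("storage-driver", "overlay2").
var logger = log.G(context.TODO()).WithField("storage-driver", "overlay2")

func reportMountError(err error) {
	logger.WithError(err).Error("failed to mount overlay") // hypothetical call site
}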
+ 4 - 3
daemon/graphdriver/overlayutils/overlayutils.go

@@ -4,15 +4,16 @@
 package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils"
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path"
 	"path/filepath"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/userns"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -54,7 +55,7 @@ func SupportsOverlay(d string, checkMultipleLowers bool) error {
 	}
 	defer func() {
 		if err := os.RemoveAll(td); err != nil {
-			logrus.Warnf("Failed to remove check directory %v: %v", td, err)
+			log.G(context.TODO()).Warnf("Failed to remove check directory %v: %v", td, err)
 		}
 	}()
 
@@ -74,7 +75,7 @@ func SupportsOverlay(d string, checkMultipleLowers bool) error {
 		return errors.Wrap(err, "failed to mount overlay")
 	}
 	if err := unix.Unmount(mnt, 0); err != nil {
-		logrus.Warnf("Failed to unmount check directory %v: %v", mnt, err)
+		log.G(context.TODO()).Warnf("Failed to unmount check directory %v: %v", mnt, err)
 	}
 	return nil
 }

+ 6 - 5
daemon/graphdriver/overlayutils/userxattr.go

@@ -21,14 +21,15 @@
 package overlayutils
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/pkg/userns"
 	"github.com/docker/docker/pkg/parsers/kernel"
-	"github.com/sirupsen/logrus"
 )
 
 // NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option.
@@ -67,7 +68,7 @@ func NeedsUserXAttr(d string) (bool, error) {
 
 	tdRoot := filepath.Join(d, "userxattr-check")
 	if err := os.RemoveAll(tdRoot); err != nil {
-		logrus.WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
+		log.G(context.TODO()).WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
 	}
 
 	if err := os.MkdirAll(tdRoot, 0700); err != nil {
@@ -76,7 +77,7 @@ func NeedsUserXAttr(d string) (bool, error) {
 
 	defer func() {
 		if err := os.RemoveAll(tdRoot); err != nil {
-			logrus.WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
+			log.G(context.TODO()).WithError(err).Warnf("Failed to remove check directory %v", tdRoot)
 		}
 	}()
 
@@ -106,11 +107,11 @@ func NeedsUserXAttr(d string) (bool, error) {
 	if err := m.Mount(dest); err != nil {
 		// Probably the host is running Ubuntu/Debian kernel (< 5.11) with the userns patch but without the userxattr patch.
 		// Return false without error.
-		logrus.WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr")
+		log.G(context.TODO()).WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr")
 		return false, nil
 	}
 	if err := mount.UnmountAll(dest, 0); err != nil {
-		logrus.WithError(err).Warnf("Failed to unmount check directory %v", dest)
+		log.G(context.TODO()).WithError(err).Warnf("Failed to unmount check directory %v", dest)
 	}
 	return true, nil
 }

+ 4 - 2
daemon/graphdriver/vfs/quota_linux.go

@@ -1,8 +1,10 @@
 package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs"
 
 import (
+	"context"
+
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/quota"
-	"github.com/sirupsen/logrus"
 )
 
 type driverQuota struct {
@@ -14,7 +16,7 @@ func setupDriverQuota(driver *Driver) {
 	if quotaCtl, err := quota.NewControl(driver.home); err == nil {
 		driver.quotaCtl = quotaCtl
 	} else if err != quota.ErrQuotaNotSupported {
-		logrus.Warnf("Unable to setup quota: %v\n", err)
+		log.G(context.TODO()).Warnf("Unable to setup quota: %v\n", err)
 	}
 }
 

+ 16 - 15
daemon/graphdriver/windows/windows.go

@@ -7,6 +7,7 @@ import (
 	"archive/tar"
 	"bufio"
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -24,6 +25,7 @@ import (
 	"github.com/Microsoft/go-winio/vhd"
 	"github.com/Microsoft/hcsshim"
 	"github.com/Microsoft/hcsshim/osversion"
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/idtools"
@@ -33,7 +35,6 @@ import (
 	"github.com/docker/docker/pkg/system"
 	units "github.com/docker/go-units"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/windows"
 )
 
@@ -65,7 +66,7 @@ func init() {
 	// DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
 	// debugging issues in the re-exec codepath significantly easier.
 	if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
-		logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
+		log.G(context.TODO()).Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
 		noreexec = true
 	} else {
 		reexec.Register("docker-windows-write-layer", writeLayerReexec)
@@ -97,7 +98,7 @@ type Driver struct {
 
 // InitFilter returns a new Windows storage filter driver.
 func InitFilter(home string, options []string, _ idtools.IdentityMapping) (graphdriver.Driver, error) {
-	logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)
+	log.G(context.TODO()).Debugf("WindowsGraphDriver InitFilter at %s", home)
 
 	fsType, err := winiofs.GetFileSystemType(home)
 	if err != nil {
@@ -242,14 +243,14 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
 
 	if _, err := os.Lstat(d.dir(parent)); err != nil {
 		if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
-			logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
+			log.G(context.TODO()).Warnf("Failed to DestroyLayer %s: %s", id, err2)
 		}
 		return errors.Wrapf(err, "cannot create layer with missing parent %s", parent)
 	}
 
 	if err := d.setLayerChain(id, layerChain); err != nil {
 		if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
-			logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
+			log.G(context.TODO()).Warnf("Failed to DestroyLayer %s: %s", id, err2)
 		}
 		return err
 	}
@@ -352,7 +353,7 @@ func (d *Driver) Remove(id string) error {
 		}
 	}
 	if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
-		logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
+		log.G(context.TODO()).Errorf("Failed to DestroyLayer %s: %s", id, err)
 	}
 
 	return nil
@@ -365,7 +366,7 @@ func (d *Driver) GetLayerPath(id string) (string, error) {
 
 // Get returns the rootfs path for the id. This will mount the dir at its given path.
 func (d *Driver) Get(id, mountLabel string) (string, error) {
-	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
+	log.G(context.TODO()).Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
 	var dir string
 
 	rID, err := d.resolveID(id)
@@ -390,7 +391,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
 		d.ctr.Decrement(rID)
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
-			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
+			log.G(context.TODO()).Warnf("Failed to Deactivate %s: %s", id, err)
 		}
 		return "", err
 	}
@@ -399,10 +400,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	if err != nil {
 		d.ctr.Decrement(rID)
 		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
-			logrus.Warnf("Failed to Unprepare %s: %s", id, err)
+			log.G(context.TODO()).Warnf("Failed to Unprepare %s: %s", id, err)
 		}
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
-			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
+			log.G(context.TODO()).Warnf("Failed to Deactivate %s: %s", id, err)
 		}
 		return "", err
 	}
@@ -423,7 +424,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 
 // Put adds a new layer to the driver.
 func (d *Driver) Put(id string) error {
-	logrus.Debugf("WindowsGraphDriver Put() id %s", id)
+	log.G(context.TODO()).Debugf("WindowsGraphDriver Put() id %s", id)
 
 	rID, err := d.resolveID(id)
 	if err != nil {
@@ -467,9 +468,9 @@ func (d *Driver) Cleanup() error {
 	for _, item := range items {
 		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
 			if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
-				logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
+				log.G(context.TODO()).Warnf("Failed to cleanup %s: %s", item.Name(), err)
 			} else {
-				logrus.Infof("Cleaned up %s", item.Name())
+				log.G(context.TODO()).Infof("Cleaned up %s", item.Name())
 			}
 		}
 	}
@@ -497,7 +498,7 @@ func (d *Driver) Diff(id, _ string) (_ io.ReadCloser, err error) {
 	}
 	prepare := func() {
 		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
-			logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
+			log.G(context.TODO()).Warnf("Failed to Deactivate %s: %s", rID, err)
 		}
 	}
 
@@ -531,7 +532,7 @@ func (d *Driver) Changes(id, _ string) ([]archive.Change, error) {
 	}
 	defer func() {
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
-			logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
+			log.G(context.TODO()).Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
 		}
 	}()
 

+ 9 - 8
daemon/graphdriver/zfs/zfs.go

@@ -3,6 +3,7 @@
 package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs"
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"os/exec"
@@ -12,6 +13,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/parsers"
@@ -21,7 +23,6 @@ import (
 	"github.com/moby/sys/mountinfo"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -39,7 +40,7 @@ type Logger struct{}
 
 // Log wraps log message from ZFS driver with a prefix '[zfs]'.
 func (*Logger) Log(cmd []string) {
-	logrus.WithField("storage-driver", "zfs").Debugf("[zfs] %s", strings.Join(cmd, " "))
+	log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("[zfs] %s", strings.Join(cmd, " "))
 }
 
 // Init returns a new ZFS driver.
@@ -48,7 +49,7 @@ func (*Logger) Log(cmd []string) {
 func Init(base string, opt []string, idMap idtools.IdentityMapping) (graphdriver.Driver, error) {
 	var err error
 
-	logger := logrus.WithField("storage-driver", "zfs")
+	logger := log.G(context.TODO()).WithField("storage-driver", "zfs")
 
 	if _, err := exec.LookPath("zfs"); err != nil {
 		logger.Debugf("zfs command is not available: %v", err)
@@ -155,7 +156,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
 	}
 	for _, m := range mounts {
 		if err := unix.Stat(m.Mountpoint, &stat); err != nil {
-			logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
+			log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
 			continue // may fail on fuse file systems
 		}
 
@@ -372,10 +373,10 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
 		if retErr != nil {
 			if c := d.ctr.Decrement(mountpoint); c <= 0 {
 				if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil {
-					logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr)
+					log.G(context.TODO()).WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr)
 				}
 				if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) {
-					logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr)
+					log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr)
 				}
 			}
 		}
@@ -383,7 +384,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
 
 	filesystem := d.zfsPath(id)
 	options := label.FormatMountLabel("", mountLabel)
-	logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, options)
+	log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, options)
 
 	root := d.idMap.RootPair()
 	// Create the target directories if they don't exist
@@ -413,7 +414,7 @@ func (d *Driver) Put(id string) error {
 		return nil
 	}
 
-	logger := logrus.WithField("storage-driver", "zfs")
+	logger := log.G(context.TODO()).WithField("storage-driver", "zfs")
 
 	logger.Debugf(`unmount("%s")`, mountpoint)
 

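The Logger.Log method changed above is the adapter through which the go-zfs library reports every zfs command it executes; after this change those lines land on the containerd logger. A sketch of how such an adapter is hooked up, assuming go-zfs's Logger interface and SetLogger function and the github.com/mistifyio/go-zfs/v3 module path, none of which are shown in this diff.

package zfssketch

import (
	"context"
	"strings"

	"github.com/containerd/containerd/log"
	zfs "github.com/mistifyio/go-zfs/v3" // module path assumed, not shown in the diff
)

// commandLogger mirrors the driver's Logger type: anything exposing
// Log(cmd []string) can forward the zfs CLI invocations to the daemon log.
type commandLogger struct{}

func (*commandLogger) Log(cmd []string) {
	log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("[zfs] %s", strings.Join(cmd, " "))
}

func init() {
	zfs.SetLogger(&commandLogger{}) // assumed API: registers the adapter with go-zfs
}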
+ 3 - 2
daemon/graphdriver/zfs/zfs_freebsd.go

@@ -4,7 +4,9 @@ import (
+	"context"
 	"strings"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -16,7 +18,7 @@ func checkRootdirFs(rootdir string) error {
 
 	// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
 	if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
-		logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir)
+		log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir)
 		return graphdriver.ErrPrerequisites
 	}
 

+ 4 - 2
daemon/graphdriver/zfs/zfs_linux.go

@@ -1,8 +1,10 @@
 package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs"
 
 import (
+	"context"
+
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/sirupsen/logrus"
 )
 
 func checkRootdirFs(rootDir string) error {
@@ -16,7 +18,7 @@ func checkRootdirFs(rootDir string) error {
 	}
 
 	if fsMagic != graphdriver.FsMagicZfs {
-		logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root")
+		log.G(context.TODO()).WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root")
 		return graphdriver.ErrPrerequisites
 	}
 

+ 9 - 9
daemon/health.go

@@ -9,11 +9,11 @@ import (
 	"sync"
 	"time"
 
+	"github.com/containerd/containerd/log"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/container"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -130,7 +130,7 @@ func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container
 	select {
 	case <-tm.C:
 		cancelProbe()
-		logrus.WithContext(ctx).Debugf("Health check for container %s taking too long", cntr.ID)
+		log.G(ctx).WithContext(ctx).Debugf("Health check for container %s taking too long", cntr.ID)
 		// Wait for probe to exit (it might take some time to call containerd to kill
 		// the process and we don't want dying probes to pile up).
 		<-execErr
@@ -235,7 +235,7 @@ func handleProbeResult(d *Daemon, c *container.Container, result *types.Healthch
 	if err := c.CheckpointTo(d.containersReplica); err != nil {
 		// queries will be inconsistent until the next probe runs or other state mutations
 		// checkpoint the container
-		logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err)
+		log.G(context.TODO()).Errorf("Error replicating health state for container %s: %v", c.ID, err)
 	}
 
 	current := h.Status()
@@ -257,10 +257,10 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 
 		select {
 		case <-stop:
-			logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID)
+			log.G(context.TODO()).Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID)
 			return
 		case <-intervalTimer.C:
-			logrus.Debugf("Running health check for container %s ...", c.ID)
+			log.G(context.TODO()).Debugf("Running health check for container %s ...", c.ID)
 			startTime := time.Now()
 			ctx, cancelProbe := context.WithCancel(context.Background())
 			results := make(chan *types.HealthcheckResult, 1)
@@ -269,7 +269,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 				result, err := probe.run(ctx, d, c)
 				if err != nil {
 					healthChecksFailedCounter.Inc()
-					logrus.Warnf("Health check for container %s error: %v", c.ID, err)
+					log.G(ctx).Warnf("Health check for container %s error: %v", c.ID, err)
 					results <- &types.HealthcheckResult{
 						ExitCode: -1,
 						Output:   err.Error(),
@@ -278,14 +278,14 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 					}
 				} else {
 					result.Start = startTime
-					logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode)
+					log.G(ctx).Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode)
 					results <- result
 				}
 				close(results)
 			}()
 			select {
 			case <-stop:
-				logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID)
+				log.G(ctx).Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID)
 				cancelProbe()
 				// Wait for probe to exit (it might take a while to respond to the TERM
 				// signal and we don't want dying probes to pile up).
@@ -314,7 +314,7 @@ func getProbe(c *container.Container) probe {
 	case "NONE":
 		return nil
 	default:
-		logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID)
+		log.G(context.TODO()).Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID)
 		return nil
 	}
 }

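Inside the probe goroutine the calls switch from a throwaway context to the probe's own ctx, so an entry attached to that context would be reused for the result and error messages. A hedged sketch of how a container-scoped logger could be attached when the probe context is created; the daemon does not necessarily do this, and the field name is illustrative.

package healthsketch

import (
	"context"

	"github.com/containerd/containerd/log"
)

// probeContext returns a cancellable context carrying a container-scoped
// logger; every log.G(ctx) call inside the probe then emits container=<id>.
func probeContext(containerID string) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("container", containerID))
	return ctx, cancel
}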
+ 2 - 2
daemon/images/cache.go

@@ -3,11 +3,11 @@ package images // import "github.com/docker/docker/daemon/images"
 import (
 	"context"
 
+	"github.com/containerd/containerd/log"
 	imagetypes "github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/image/cache"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // MakeImageCache creates a stateful image cache.
@@ -24,7 +24,7 @@ func (i *ImageService) MakeImageCache(ctx context.Context, sourceRefs []string)
 			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 				return nil, err
 			}
-			logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err)
+			log.G(ctx).Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err)
 			continue
 		}
 		cache.Populate(img)

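MakeImageCache already receives a request context, so the warning simply moves onto that context's logger; cancellation still aborts the whole loop while a per-reference failure is logged and skipped. A small sketch of that shape; the populate helper and its lookup callback are hypothetical.

package cachesketch

import (
	"context"
	"errors"

	"github.com/containerd/containerd/log"
)

// populate aborts on cancellation but logs and skips any other per-item
// error, matching the lookup loop in MakeImageCache above.
func populate(ctx context.Context, refs []string, lookup func(context.Context, string) error) error {
	for _, ref := range refs {
		err := lookup(ctx, ref)
		if err == nil {
			continue
		}
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			return err
		}
		log.G(ctx).Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err)
	}
	return nil
}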
Some files were not shown because too many files changed in this diff