From 74da6a6363d9d2991ea51ba44061a31a17b855cf Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 23 Jun 2023 00:33:17 +0000 Subject: [PATCH] Switch all logging to use containerd log pkg This unifies our logging and allows us to propagate logging and trace contexts together. Signed-off-by: Brian Goff --- api/server/httpstatus/status.go | 6 +- api/server/middleware/cors.go | 4 +- api/server/middleware/debug.go | 8 +- api/server/router/build/build_routes.go | 4 +- .../router/container/container_routes.go | 10 +- api/server/router/container/exec.go | 6 +- api/server/router/grpc/grpc.go | 4 +- api/server/router/swarm/cluster_routes.go | 33 +++--- api/server/router/system/system_routes.go | 6 +- api/server/router/volume/volume_routes.go | 6 +- api/server/server.go | 8 +- .../adapters/containerimage/pull.go | 8 +- .../adapters/snapshot/leasemanager.go | 4 +- builder/builder-next/executor_unix.go | 10 +- .../exporter/mobyexporter/writer.go | 4 +- builder/builder-next/worker/worker.go | 8 +- builder/dockerfile/builder.go | 6 +- builder/dockerfile/containerbackend.go | 6 +- builder/dockerfile/imagecontext.go | 4 +- builder/dockerfile/imageprobe.go | 6 +- builder/dockerfile/internals.go | 4 +- builder/remotecontext/detect.go | 5 +- builder/remotecontext/git.go | 7 +- cmd/dockerd/daemon.go | 60 +++++----- cmd/dockerd/daemon_unix.go | 4 +- cmd/dockerd/daemon_windows.go | 8 +- cmd/dockerd/docker_windows.go | 5 +- cmd/dockerd/grpclog.go | 5 +- cmd/dockerd/metrics.go | 7 +- cmd/dockerd/service_windows.go | 12 +- cmd/dockerd/trap/trap.go | 7 +- container/container.go | 8 +- container/container_unix.go | 21 ++-- container/exec.go | 5 +- container/health.go | 9 +- container/monitor.go | 7 +- container/stream/attach.go | 16 +-- container/stream/streams.go | 8 +- container/view.go | 7 +- daemon/attach.go | 10 +- daemon/cdi.go | 7 +- daemon/cluster/cluster.go | 10 +- .../cluster/controllers/plugin/controller.go | 3 +- daemon/cluster/convert/container.go | 7 +- 
daemon/cluster/executor/container/adapter.go | 2 +- .../cluster/executor/container/container.go | 5 +- daemon/cluster/executor/container/executor.go | 6 +- daemon/cluster/networks.go | 10 +- daemon/cluster/noderunner.go | 10 +- daemon/cluster/services.go | 18 +-- daemon/cluster/swarm.go | 8 +- daemon/config/config.go | 4 +- daemon/configs.go | 6 +- daemon/container.go | 7 +- daemon/container_operations.go | 21 ++-- daemon/container_operations_unix.go | 22 ++-- daemon/container_operations_windows.go | 18 +-- daemon/containerd/image.go | 8 +- daemon/containerd/image_builder.go | 12 +- daemon/containerd/image_changes.go | 4 +- daemon/containerd/image_children.go | 5 +- daemon/containerd/image_commit.go | 10 +- daemon/containerd/image_delete.go | 8 +- daemon/containerd/image_exporter.go | 15 +-- daemon/containerd/image_import.go | 3 +- daemon/containerd/image_list.go | 7 +- daemon/containerd/image_prune.go | 9 +- daemon/containerd/image_pull.go | 3 +- daemon/containerd/image_push.go | 8 +- daemon/containerd/image_tag.go | 3 +- daemon/containerd/mount.go | 6 +- daemon/containerd/progress.go | 6 +- daemon/containerd/resolver.go | 3 +- daemon/containerd/service.go | 3 +- daemon/containerfs_linux.go | 4 +- daemon/create.go | 6 +- daemon/create_unix.go | 6 +- daemon/daemon.go | 76 +++++++------ daemon/daemon_linux.go | 13 ++- daemon/daemon_unix.go | 36 +++--- daemon/daemon_windows.go | 12 +- daemon/debugtrap_unix.go | 7 +- daemon/debugtrap_windows.go | 13 ++- daemon/delete.go | 8 +- daemon/events.go | 12 +- daemon/exec.go | 12 +- daemon/graphdriver/btrfs/btrfs.go | 7 +- daemon/graphdriver/driver.go | 18 +-- daemon/graphdriver/fsdiff.go | 7 +- .../fuse-overlayfs/fuseoverlayfs.go | 8 +- daemon/graphdriver/overlay2/overlay.go | 4 +- .../graphdriver/overlayutils/overlayutils.go | 7 +- daemon/graphdriver/overlayutils/userxattr.go | 11 +- daemon/graphdriver/vfs/quota_linux.go | 6 +- daemon/graphdriver/windows/windows.go | 31 +++--- daemon/graphdriver/zfs/zfs.go | 17 +-- 
daemon/graphdriver/zfs/zfs_freebsd.go | 4 +- daemon/graphdriver/zfs/zfs_linux.go | 6 +- daemon/health.go | 18 +-- daemon/images/cache.go | 4 +- daemon/images/image.go | 4 +- daemon/images/image_builder.go | 4 +- daemon/images/image_exporter.go | 4 +- daemon/images/image_prune.go | 6 +- daemon/images/image_pull.go | 4 +- daemon/images/image_unix.go | 6 +- daemon/images/mount.go | 6 +- daemon/info.go | 15 +-- daemon/info_unix.go | 30 ++--- daemon/kill.go | 12 +- daemon/list.go | 4 +- daemon/listeners/listeners_linux.go | 7 +- daemon/logger/adapter.go | 7 +- daemon/logger/awslogs/cloudwatchlogs.go | 32 +++--- daemon/logger/copier.go | 5 +- daemon/logger/etwlogs/etwlogs_windows.go | 9 +- daemon/logger/fluentd/fluentd.go | 5 +- daemon/logger/gcplogs/gcplogging.go | 6 +- daemon/logger/journald/read.go | 8 +- daemon/logger/logentries/logentries.go | 5 +- daemon/logger/logger_error.go | 6 +- .../logger/loggerutils/cache/local_cache.go | 5 +- daemon/logger/loggerutils/follow.go | 4 +- daemon/logger/loggerutils/logfile.go | 18 +-- daemon/logger/splunk/splunk.go | 12 +- daemon/logs.go | 7 +- daemon/metrics.go | 7 +- daemon/metrics_unix.go | 11 +- daemon/monitor.go | 21 ++-- daemon/names.go | 5 +- daemon/network.go | 17 +-- daemon/oci_linux.go | 10 +- daemon/oci_windows.go | 6 +- daemon/pause.go | 4 +- daemon/prune.go | 12 +- daemon/reload.go | 14 ++- daemon/rename.go | 5 +- daemon/runtime_unix.go | 7 +- daemon/seccomp_linux.go | 4 +- daemon/secrets.go | 6 +- daemon/snapshotter/mount.go | 11 +- daemon/start.go | 22 ++-- daemon/stats/collector.go | 9 +- daemon/stop.go | 8 +- daemon/unpause.go | 4 +- daemon/volumes.go | 4 +- distribution/errors.go | 5 +- distribution/manifest.go | 5 +- distribution/pull.go | 12 +- distribution/pull_v2.go | 48 ++++---- distribution/pull_v2_unix.go | 4 +- distribution/pull_v2_windows.go | 14 +-- distribution/push.go | 10 +- distribution/push_v2.go | 40 +++---- distribution/registry_unit_test.go | 6 +- distribution/utils/progress.go | 7 +- 
distribution/xfer/download.go | 8 +- distribution/xfer/upload.go | 6 +- image/fs.go | 5 +- image/rootfs.go | 5 +- image/store.go | 14 ++- image/tarexport/load.go | 9 +- image/v1/imagev1.go | 5 +- integration-cli/events_utils_test.go | 5 +- layer/filestore.go | 17 +-- layer/layer.go | 7 +- layer/layer_store.go | 37 +++--- layer/migration.go | 7 +- libcontainerd/local/local_windows.go | 6 +- libcontainerd/remote/client.go | 5 +- libcontainerd/remote/client_linux.go | 3 +- libcontainerd/replace.go | 4 +- libcontainerd/supervisor/remote_daemon.go | 3 +- libnetwork/agent.go | 91 +++++++-------- libnetwork/cmd/diagnostic/main.go | 65 +++++------ .../cmd/networkdb-test/dbclient/ndbClient.go | 105 +++++++++--------- .../cmd/networkdb-test/dbserver/ndbServer.go | 18 +-- .../networkdb-test/dummyclient/dummyClient.go | 16 +-- libnetwork/cmd/networkdb-test/testMain.go | 10 +- libnetwork/config/config.go | 17 +-- libnetwork/controller.go | 33 +++--- libnetwork/default_gateway.go | 5 +- libnetwork/diagnostic/server.go | 19 ++-- libnetwork/drivers/bridge/bridge.go | 47 ++++---- libnetwork/drivers/bridge/bridge_store.go | 29 ++--- libnetwork/drivers/bridge/interface.go | 5 +- libnetwork/drivers/bridge/link.go | 5 +- libnetwork/drivers/bridge/port_mapping.go | 13 ++- .../bridge/setup_bridgenetfiltering.go | 7 +- libnetwork/drivers/bridge/setup_device.go | 13 ++- .../drivers/bridge/setup_ip_forwarding.go | 15 +-- libnetwork/drivers/bridge/setup_ip_tables.go | 17 +-- libnetwork/drivers/bridge/setup_ipv4.go | 5 +- libnetwork/drivers/bridge/setup_ipv6.go | 11 +- libnetwork/drivers/bridge/setup_verify.go | 5 +- libnetwork/drivers/ipvlan/ipvlan_endpoint.go | 11 +- libnetwork/drivers/ipvlan/ipvlan_joinleave.go | 15 +-- libnetwork/drivers/ipvlan/ipvlan_network.go | 17 +-- libnetwork/drivers/ipvlan/ipvlan_setup.go | 9 +- libnetwork/drivers/ipvlan/ipvlan_state.go | 5 +- libnetwork/drivers/ipvlan/ipvlan_store.go | 17 +-- .../drivers/macvlan/macvlan_endpoint.go | 11 +- 
.../drivers/macvlan/macvlan_joinleave.go | 11 +- libnetwork/drivers/macvlan/macvlan_network.go | 17 +-- libnetwork/drivers/macvlan/macvlan_setup.go | 9 +- libnetwork/drivers/macvlan/macvlan_state.go | 5 +- libnetwork/drivers/macvlan/macvlan_store.go | 17 +-- libnetwork/drivers/overlay/encryption.go | 69 ++++++------ libnetwork/drivers/overlay/joinleave.go | 23 ++-- libnetwork/drivers/overlay/ov_endpoint.go | 7 +- libnetwork/drivers/overlay/ov_network.go | 30 ++--- libnetwork/drivers/overlay/ov_utils.go | 5 +- libnetwork/drivers/overlay/overlay.go | 5 +- .../drivers/overlay/ovmanager/ovmanager.go | 5 +- libnetwork/drivers/overlay/peerdb.go | 27 ++--- libnetwork/drivers/remote/driver.go | 7 +- .../windows/overlay/joinleave_windows.go | 17 +-- .../windows/overlay/ov_endpoint_windows.go | 9 +- .../windows/overlay/ov_network_windows.go | 13 ++- .../windows/overlay/overlay_windows.go | 9 +- .../drivers/windows/overlay/peerdb_windows.go | 9 +- libnetwork/drivers/windows/port_mapping.go | 9 +- libnetwork/drivers/windows/windows.go | 21 ++-- libnetwork/drivers/windows/windows_store.go | 25 +++-- libnetwork/endpoint.go | 81 +++++++------- libnetwork/firewall_linux.go | 10 +- libnetwork/ipam/allocator.go | 15 +-- libnetwork/ipams/remote/remote.go | 11 +- libnetwork/ipams/windowsipam/windowsipam.go | 11 +- libnetwork/iptables/conntrack.go | 19 ++-- libnetwork/iptables/firewalld.go | 15 +-- libnetwork/iptables/iptables.go | 17 +-- libnetwork/libnetwork_linux_test.go | 7 +- libnetwork/network.go | 71 ++++++------ libnetwork/network_windows.go | 15 +-- libnetwork/networkdb/cluster.go | 66 +++++------ libnetwork/networkdb/delegate.go | 49 ++++---- libnetwork/networkdb/event_delegate.go | 17 +-- libnetwork/networkdb/networkdb.go | 14 +-- libnetwork/networkdb/networkdb_test.go | 9 +- libnetwork/networkdb/networkdbdiagnostic.go | 24 ++-- libnetwork/networkdb/nodemgmt.go | 11 +- libnetwork/ns/init_linux.go | 13 ++- libnetwork/osl/interface_linux.go | 13 ++- 
libnetwork/osl/kernel/knobs_linux.go | 9 +- libnetwork/osl/kernel/knobs_linux_test.go | 5 +- libnetwork/osl/namespace_linux.go | 21 ++-- libnetwork/osl/neigh_linux.go | 13 ++- libnetwork/portallocator/portallocator.go | 5 +- libnetwork/portmapper/mapper.go | 13 ++- libnetwork/resolvconf/resolvconf.go | 9 +- libnetwork/resolver.go | 3 +- libnetwork/resolver_test.go | 4 +- libnetwork/sandbox.go | 57 +++++----- libnetwork/sandbox_dns_unix.go | 19 ++-- libnetwork/sandbox_externalkey_unix.go | 9 +- libnetwork/sandbox_store.go | 21 ++-- libnetwork/service_common.go | 41 +++---- libnetwork/service_linux.go | 59 +++++----- libnetwork/service_windows.go | 19 ++-- libnetwork/store.go | 21 ++-- oci/caps/utils_linux.go | 5 +- pkg/archive/archive.go | 29 ++--- pkg/archive/changes.go | 13 ++- pkg/archive/copy.go | 5 +- pkg/archive/diff.go | 5 +- pkg/archive/example_changes.go | 6 +- pkg/authorization/authz.go | 9 +- pkg/authorization/middleware.go | 10 +- pkg/authorization/response.go | 5 +- pkg/fileutils/fileutils_unix.go | 5 +- pkg/loopback/attach_loopback.go | 21 ++-- pkg/loopback/loopback.go | 7 +- pkg/parsers/kernel/kernel_unix.go | 6 +- pkg/platform/platform.go | 6 +- pkg/plugins/client.go | 8 +- pkg/plugins/plugins.go | 9 +- pkg/rootless/specconv/specconv_linux.go | 7 +- pkg/sysinfo/cgroup2_linux.go | 7 +- pkg/sysinfo/sysinfo_linux.go | 5 +- plugin/backend_linux.go | 12 +- plugin/executor/containerd/containerd.go | 7 +- plugin/fetch_linux.go | 8 +- plugin/manager.go | 15 +-- plugin/manager_linux.go | 30 ++--- plugin/registry.go | 6 +- plugin/store.go | 5 +- quota/projectquota.go | 7 +- registry/auth.go | 5 +- registry/endpoint_v1.go | 15 +-- registry/registry.go | 11 +- registry/registry_mock_test.go | 5 +- registry/resumable/resumablerequestreader.go | 5 +- registry/search.go | 4 +- registry/service.go | 4 +- registry/session.go | 7 +- volume/drivers/adapter.go | 7 +- volume/drivers/extpoint.go | 5 +- volume/local/local.go | 7 +- volume/service/convert.go | 4 +- 
volume/service/db.go | 5 +- volume/service/restore.go | 8 +- volume/service/service.go | 6 +- volume/service/store.go | 20 ++-- 304 files changed, 2062 insertions(+), 1826 deletions(-) diff --git a/api/server/httpstatus/status.go b/api/server/httpstatus/status.go index 651e301dc1..456ec6e88b 100644 --- a/api/server/httpstatus/status.go +++ b/api/server/httpstatus/status.go @@ -1,10 +1,12 @@ package httpstatus // import "github.com/docker/docker/api/server/httpstatus" import ( + "context" "fmt" "net/http" cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/errdefs" "github.com/sirupsen/logrus" @@ -19,7 +21,7 @@ type causer interface { // FromError retrieves status code from error message. func FromError(err error) int { if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + log.G(context.TODO()).WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") return http.StatusInternalServerError } @@ -65,7 +67,7 @@ func FromError(err error) int { return FromError(e.Cause()) } - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "module": "api", "error_type": fmt.Sprintf("%T", err), }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) diff --git a/api/server/middleware/cors.go b/api/server/middleware/cors.go index 79bed14564..3cb754f1bb 100644 --- a/api/server/middleware/cors.go +++ b/api/server/middleware/cors.go @@ -4,8 +4,8 @@ import ( "context" "net/http" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" ) // CORSMiddleware injects CORS headers to each request @@ -29,7 +29,7 @@ func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.Res corsHeaders = "*" } - logrus.Debugf("CORS header is enabled and set to: %s", 
corsHeaders) + log.G(ctx).Debugf("CORS header is enabled and set to: %s", corsHeaders) w.Header().Add("Access-Control-Allow-Origin", corsHeaders) w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, "+registry.AuthHeader) w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") diff --git a/api/server/middleware/debug.go b/api/server/middleware/debug.go index 1ec62602db..7e9d660121 100644 --- a/api/server/middleware/debug.go +++ b/api/server/middleware/debug.go @@ -8,15 +8,15 @@ import ( "net/http" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/pkg/ioutils" - "github.com/sirupsen/logrus" ) // DebugRequestMiddleware dumps the request to logger func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + log.G(ctx).Debugf("Calling %s %s", r.Method, r.RequestURI) if r.Method != http.MethodPost { return handler(ctx, w, r, vars) @@ -44,9 +44,9 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri maskSecretKeys(postForm) formStr, errMarshal := json.Marshal(postForm) if errMarshal == nil { - logrus.Debugf("form data: %s", string(formStr)) + log.G(ctx).Debugf("form data: %s", string(formStr)) } else { - logrus.Debugf("form data: %q", postForm) + log.G(ctx).Debugf("form data: %q", postForm) } } diff --git a/api/server/router/build/build_routes.go b/api/server/router/build/build_routes.go index a227a1d779..50fb106357 100644 --- a/api/server/router/build/build_routes.go +++ b/api/server/router/build/build_routes.go @@ -14,6 +14,7 @@ import ( "strings" "sync" + 
"github.com/containerd/containerd/log" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -26,7 +27,6 @@ import ( "github.com/docker/docker/pkg/streamformatter" units "github.com/docker/go-units" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type invalidParam struct { @@ -248,7 +248,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r * } _, err = output.Write(streamformatter.FormatError(err)) if err != nil { - logrus.Warnf("could not write error response: %v", err) + log.G(ctx).Warnf("could not write error response: %v", err) } return nil } diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go index 1d14de8b42..c6458cfa8e 100644 --- a/api/server/router/container/container_routes.go +++ b/api/server/router/container/container_routes.go @@ -9,6 +9,7 @@ import ( "runtime" "strconv" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/server/httpstatus" "github.com/docker/docker/api/server/httputils" @@ -23,7 +24,6 @@ import ( "github.com/docker/docker/pkg/ioutils" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/net/websocket" ) @@ -706,11 +706,11 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo } if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { - logrus.WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path) + log.G(ctx).WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path) // Remember to close stream if error happens conn, _, errHijack := hijacker.Hijack() if errHijack != nil { - logrus.WithError(err).Errorf("Handler for %s %s: unable to close stream; error when hijacking connection", r.Method, r.URL.Path) + 
log.G(ctx).WithError(err).Errorf("Handler for %s %s: unable to close stream; error when hijacking connection", r.Method, r.URL.Path) } else { statusCode := httpstatus.FromError(err) statusText := http.StatusText(statusCode) @@ -780,9 +780,9 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons select { case <-started: if err != nil { - logrus.Errorf("Error attaching websocket: %s", err) + log.G(ctx).Errorf("Error attaching websocket: %s", err) } else { - logrus.Debug("websocket connection was closed by client") + log.G(ctx).Debug("websocket connection was closed by client") } return nil default: diff --git a/api/server/router/container/exec.go b/api/server/router/container/exec.go index b86af42d7f..947216d943 100644 --- a/api/server/router/container/exec.go +++ b/api/server/router/container/exec.go @@ -7,13 +7,13 @@ import ( "net/http" "strconv" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/stdcopy" - "github.com/sirupsen/logrus" ) func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { @@ -56,7 +56,7 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re // Register an instance of Exec in container. 
id, err := s.backend.ContainerExecCreate(vars["name"], execConfig) if err != nil { - logrus.Errorf("Error setting up exec command in container %s: %v", vars["name"], err) + log.G(ctx).Errorf("Error setting up exec command in container %s: %v", vars["name"], err) return err } @@ -154,7 +154,7 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res return err } stdout.Write([]byte(err.Error() + "\r\n")) - logrus.Errorf("Error running exec %s in container: %v", execName, err) + log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err) } return nil } diff --git a/api/server/router/grpc/grpc.go b/api/server/router/grpc/grpc.go index 7a1f4e5944..041fc3529b 100644 --- a/api/server/router/grpc/grpc.go +++ b/api/server/router/grpc/grpc.go @@ -4,11 +4,11 @@ import ( "context" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/server/router" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/tracing/detect" - "github.com/sirupsen/logrus" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -33,7 +33,7 @@ var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceCon func NewRouter(backends ...Backend) router.Router { tp, err := detect.TracerProvider() if err != nil { - logrus.WithError(err).Error("failed to detect trace provider") + log.G(context.TODO()).WithError(err).Error("failed to detect trace provider") } opts := []grpc.ServerOption{grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor), grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor)} diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go index f4469d1943..b077b51590 100644 --- a/api/server/router/swarm/cluster_routes.go +++ b/api/server/router/swarm/cluster_routes.go @@ -6,6 +6,7 @@ 
import ( "net/http" "strconv" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/server/httputils" basictypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -36,7 +37,7 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r } nodeID, err := sr.backend.Init(req) if err != nil { - logrus.WithContext(ctx).WithError(err).Debug("Error initializing swarm") + log.G(ctx).WithContext(ctx).WithError(err).Debug("Error initializing swarm") return err } return httputils.WriteJSON(w, http.StatusOK, nodeID) @@ -62,7 +63,7 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { swarm, err := sr.backend.Inspect() if err != nil { - logrus.WithContext(ctx).WithError(err).Debug("Error getting swarm") + log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting swarm") return err } @@ -114,7 +115,7 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, } if err := sr.backend.Update(version, swarm, flags); err != nil { - logrus.WithContext(ctx).WithError(err).Debug("Error configuring swarm") + log.G(ctx).WithContext(ctx).WithError(err).Debug("Error configuring swarm") return err } return nil @@ -127,7 +128,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, } if err := sr.backend.UnlockSwarm(req); err != nil { - logrus.WithContext(ctx).WithError(err).Debug("Error unlocking swarm") + log.G(ctx).WithContext(ctx).WithError(err).Debug("Error unlocking swarm") return err } return nil @@ -136,7 +137,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { unlockKey, err := sr.backend.GetUnlockKey() if err != nil { - 
logrus.WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key") + log.G(ctx).WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key") return err } @@ -168,7 +169,7 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter, Status: status}) if err != nil { - logrus.WithContext(ctx).WithError(err).Debug("Error getting services") + log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting services") return err } @@ -194,7 +195,7 @@ func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r service, err := sr.backend.GetService(vars["id"], insertDefaults) if err != nil { - logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{ "error": err, "service-id": vars["id"], }).Debug("Error getting service") @@ -221,7 +222,7 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, } resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry) if err != nil { - logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "error": err, "service-name": service.Name, }).Debug("Error creating service") @@ -260,7 +261,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry) if err != nil { - logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{ "error": err, "service-id": vars["id"], }).Debug("Error updating service") @@ -271,7 +272,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := sr.backend.RemoveService(vars["id"]); err != nil { - 
logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{ "error": err, "service-id": vars["id"], }).Debug("Error removing service") @@ -315,7 +316,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) if err != nil { - logrus.WithContext(ctx).WithError(err).Debug("Error getting nodes") + log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting nodes") return err } @@ -325,7 +326,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { node, err := sr.backend.GetNode(vars["id"]) if err != nil { - logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{ "error": err, "node-id": vars["id"], }).Debug("Error getting node") @@ -349,7 +350,7 @@ func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r } if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { - logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{ "error": err, "node-id": vars["id"], }).Debug("Error updating node") @@ -366,7 +367,7 @@ func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r force := httputils.BoolValue(r, "force") if err := sr.backend.RemoveNode(vars["id"], force); err != nil { - logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{ "error": err, "node-id": vars["id"], }).Debug("Error removing node") @@ -386,7 +387,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) if err != nil { - logrus.WithContext(ctx).WithError(err).Debug("Error getting tasks") + 
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting tasks") return err } @@ -396,7 +397,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { task, err := sr.backend.GetTask(vars["id"]) if err != nil { - logrus.WithContext(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithContext(ctx).WithFields(logrus.Fields{ "error": err, "task-id": vars["id"], }).Debug("Error getting task") diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go index 750c6e558c..56839b883c 100644 --- a/api/server/router/system/system_routes.go +++ b/api/server/router/system/system_routes.go @@ -7,6 +7,7 @@ import ( "net/http" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" @@ -18,7 +19,6 @@ import ( "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -279,7 +279,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r * case ev := <-l: jev, ok := ev.(events.Message) if !ok { - logrus.Warnf("unexpected event message: %q", ev) + log.G(ctx).Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { @@ -288,7 +288,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r * case <-timeout: return nil case <-ctx.Done(): - logrus.Debug("Client context cancelled, stop sending events") + log.G(ctx).Debug("Client context cancelled, stop sending events") return nil } } diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go index 9bbe13687f..0d16c7e63d 100644 --- a/api/server/router/volume/volume_routes.go +++ 
b/api/server/router/volume/volume_routes.go @@ -6,6 +6,7 @@ import ( "net/http" "strconv" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/versions" @@ -13,7 +14,6 @@ import ( "github.com/docker/docker/errdefs" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -116,10 +116,10 @@ func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWri // Instead, we will allow creating a volume with a duplicate name, which // should not break anything. if req.ClusterVolumeSpec != nil && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) { - logrus.Debug("using cluster volume") + log.G(ctx).Debug("using cluster volume") vol, err = v.cluster.CreateVolume(req) } else { - logrus.Debug("using regular volume") + log.G(ctx).Debug("using regular volume") vol, err = v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels)) } diff --git a/api/server/server.go b/api/server/server.go index fdf0a8ef6d..c1abfbee51 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -4,6 +4,7 @@ import ( "context" "net/http" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/server/httpstatus" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/middleware" @@ -11,7 +12,6 @@ import ( "github.com/docker/docker/api/server/router/debug" "github.com/docker/docker/dockerversion" "github.com/gorilla/mux" - "github.com/sirupsen/logrus" ) // versionMatcher defines a variable matcher to be parsed by the router @@ -53,7 +53,7 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { if err := handlerFunc(ctx, w, r, vars); err != nil { statusCode := httpstatus.FromError(err) if statusCode >= 500 { - logrus.Errorf("Handler for %s %s returned error: %v", 
r.Method, r.URL.Path, err) + log.G(ctx).Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) } makeErrorHandler(err)(w, r) } @@ -72,12 +72,12 @@ func (pageNotFoundError) NotFound() {} func (s *Server) CreateMux(routers ...router.Router) *mux.Router { m := mux.NewRouter() - logrus.Debug("Registering routers") + log.G(context.TODO()).Debug("Registering routers") for _, apiRouter := range routers { for _, r := range apiRouter.Routes() { f := s.makeHTTPHandler(r.Handler()) - logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + log.G(context.TODO()).Debugf("Registering %s, %s", r.Method(), r.Path()) m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) m.Path(r.Path()).Methods(r.Method()).Handler(f) } diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index f6a9539365..cd526f1995 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -14,6 +14,7 @@ import ( "github.com/containerd/containerd/gc" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" ctdreference "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" @@ -42,7 +43,6 @@ import ( "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/time/rate" ) @@ -147,7 +147,7 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re img, err := is.resolveLocal(ref) if err == nil { if opt.Platform != nil && !platformMatches(img, opt.Platform) { - logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote", + log.G(ctx).WithField("ref", ref).Debugf("Requested build platform %s does not match 
local image platform %s, checking remote", path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant), path.Join(img.OS, img.Architecture, img.Variant), ) @@ -245,7 +245,7 @@ func (p *puller) resolveLocal() { img, err := p.is.resolveLocal(ref) if err == nil { if !platformMatches(img, &p.platform) { - logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving", + log.G(context.TODO()).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving", path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant), path.Join(img.OS, img.Architecture, img.Variant), ) @@ -828,7 +828,7 @@ func cacheKeyFromConfig(dt []byte) digest.Digest { var img ocispec.Image err := json.Unmarshal(dt, &img) if err != nil { - logrus.WithError(err).Errorf("failed to unmarshal image config for cache key %v", err) + log.G(context.TODO()).WithError(err).Errorf("failed to unmarshal image config for cache key %v", err) return digest.FromBytes(dt) } if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 { diff --git a/builder/builder-next/adapters/snapshot/leasemanager.go b/builder/builder-next/adapters/snapshot/leasemanager.go index 713af013a1..7cdfca279a 100644 --- a/builder/builder-next/adapters/snapshot/leasemanager.go +++ b/builder/builder-next/adapters/snapshot/leasemanager.go @@ -5,7 +5,7 @@ import ( "sync" "github.com/containerd/containerd/leases" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" bolt "go.etcd.io/bbolt" ) @@ -126,7 +126,7 @@ func (l *sLM) delRef(lID, sID string) { if len(leases) == 0 { delete(l.bySnapshot, sID) if err := l.s.remove(context.TODO(), sID); err != nil { - logrus.Warnf("failed to remove snapshot %v", sID) + log.G(context.TODO()).Warnf("failed to remove snapshot %v", sID) } } } diff --git a/builder/builder-next/executor_unix.go b/builder/builder-next/executor_unix.go index 60c4f8ddf1..32a14aae6a 
100644 --- a/builder/builder-next/executor_unix.go +++ b/builder/builder-next/executor_unix.go @@ -9,6 +9,7 @@ import ( "strconv" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libnetwork" "github.com/docker/docker/pkg/idtools" @@ -20,7 +21,6 @@ import ( "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/network" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" ) const networkName = "bridge" @@ -39,7 +39,7 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi for _, fi := range fis { fp := filepath.Join(netRoot, fi.Name()) if err := os.RemoveAll(fp); err != nil { - logrus.WithError(err).Errorf("failed to delete old network state: %v", fp) + log.G(context.TODO()).WithError(err).Errorf("failed to delete old network state: %v", fp) } } } @@ -124,7 +124,7 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n libnetwork.Network) { func (iface *lnInterface) Set(s *specs.Spec) error { <-iface.ready if iface.err != nil { - logrus.WithError(iface.err).Error("failed to set networking spec") + log.G(context.TODO()).WithError(iface.err).Error("failed to set networking spec") return iface.err } shortNetCtlrID := stringid.TruncateID(iface.provider.Controller.ID()) @@ -143,10 +143,10 @@ func (iface *lnInterface) Close() error { if iface.sbx != nil { go func() { if err := iface.sbx.Delete(); err != nil { - logrus.WithError(err).Errorf("failed to delete builder network sandbox") + log.G(context.TODO()).WithError(err).Errorf("failed to delete builder network sandbox") } if err := os.RemoveAll(filepath.Join(iface.provider.Root, iface.sbx.ContainerID())); err != nil { - logrus.WithError(err).Errorf("failed to delete builder sandbox directory") + log.G(context.TODO()).WithError(err).Errorf("failed to delete builder sandbox directory") } }() } diff --git a/builder/builder-next/exporter/mobyexporter/writer.go 
b/builder/builder-next/exporter/mobyexporter/writer.go index 7ad04858f5..e0e5618f5d 100644 --- a/builder/builder-next/exporter/mobyexporter/writer.go +++ b/builder/builder-next/exporter/mobyexporter/writer.go @@ -5,6 +5,7 @@ import ( "encoding/json" "time" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/util/progress" @@ -12,7 +13,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func emptyImageConfig() ([]byte, error) { @@ -97,7 +97,7 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History, if historyLayers > len(diffs) { // this case shouldn't happen but if it does force set history layers empty // from the bottom - logrus.Warn("invalid image config with unaccounted layers") + log.G(context.TODO()).Warn("invalid image config with unaccounted layers") historyCopy := make([]ocispec.History, 0, len(history)) var l int for _, h := range history { diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go index dccb42742c..b41604444d 100644 --- a/builder/builder-next/worker/worker.go +++ b/builder/builder-next/worker/worker.go @@ -10,6 +10,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" "github.com/docker/docker/builder/builder-next/adapters/containerimage" @@ -46,7 +47,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -110,7 +110,7 @@ func NewWorker(opt Opt) (*Worker, error) { if err == nil { sm.Register(gs) } else { - 
logrus.Warnf("Could not register builder git source: %s", err) + log.G(context.TODO()).Warnf("Could not register builder git source: %s", err) } hs, err := http.NewSource(http.Opt{ @@ -120,7 +120,7 @@ func NewWorker(opt Opt) (*Worker, error) { if err == nil { sm.Register(hs) } else { - logrus.Warnf("Could not register builder http source: %s", err) + log.G(context.TODO()).Warnf("Could not register builder http source: %s", err) } ss, err := local.NewSource(local.Opt{ @@ -129,7 +129,7 @@ func NewWorker(opt Opt) (*Worker, error) { if err == nil { sm.Register(ss) } else { - logrus.Warnf("Could not register builder local source: %s", err) + log.G(context.TODO()).Warnf("Could not register builder local source: %s", err) } return &Worker{ diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index 04cc07337e..bbdf658592 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -8,6 +8,7 @@ import ( "sort" "strings" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -23,7 +24,6 @@ import ( "github.com/moby/buildkit/frontend/dockerfile/shell" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/syncmap" ) @@ -76,7 +76,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) ( defer func() { if source != nil { if err := source.Close(); err != nil { - logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + log.G(ctx).Debugf("[BUILDER] failed to remove temporary context: %v", err) } } }() @@ -283,7 +283,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseR for _, cmd := range stage.Commands { select { case <-ctx.Done(): - logrus.Debug("Builder: build cancelled!") + log.G(ctx).Debug("Builder: build cancelled!") fmt.Fprint(b.Stdout, "Build cancelled\n") 
buildsFailed.WithValues(metricsBuildCanceled).Inc() return nil, errors.New("Build cancelled") diff --git a/builder/dockerfile/containerbackend.go b/builder/dockerfile/containerbackend.go index ebb261a1c4..5fcac4467b 100644 --- a/builder/dockerfile/containerbackend.go +++ b/builder/dockerfile/containerbackend.go @@ -5,13 +5,13 @@ import ( "fmt" "io" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" containerpkg "github.com/docker/docker/container" "github.com/docker/docker/pkg/stringid" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type containerManager struct { @@ -60,7 +60,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i go func() { select { case <-ctx.Done(): - logrus.Debugln("Build cancelled, killing and removing container:", cID) + log.G(ctx).Debugln("Build cancelled, killing and removing container:", cID) c.backend.ContainerKill(cID, "") c.removeContainer(cID, stdout) cancelErrCh <- errCancelled @@ -102,7 +102,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i func logCancellationError(cancelErrCh chan error, msg string) { if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg) + log.G(context.TODO()).Debugf("Build cancelled (%v): %s", cancelErr, msg) } } diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go index 7dada66596..e376727145 100644 --- a/builder/dockerfile/imagecontext.go +++ b/builder/dockerfile/imagecontext.go @@ -4,13 +4,13 @@ import ( "context" "runtime" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" dockerimage "github.com/docker/docker/image" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - 
"github.com/sirupsen/logrus" ) type getAndMountFunc func(context.Context, string, bool, *ocispec.Platform) (builder.Image, builder.ROLayer, error) @@ -64,7 +64,7 @@ func (m *imageSources) Get(ctx context.Context, idOrRef string, localOnly bool, func (m *imageSources) Unmount() (retErr error) { for _, im := range m.mounts { if err := im.unmount(); err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) retErr = err } } diff --git a/builder/dockerfile/imageprobe.go b/builder/dockerfile/imageprobe.go index 5ef622193b..886e2a2249 100644 --- a/builder/dockerfile/imageprobe.go +++ b/builder/dockerfile/imageprobe.go @@ -3,9 +3,9 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "context" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" - "github.com/sirupsen/logrus" ) // ImageProber exposes an Image cache to the Builder. It supports resetting a @@ -60,11 +60,11 @@ func (c *imageProber) Probe(parentID string, runConfig *container.Config) (strin return "", err } if len(cacheID) == 0 { - logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + log.G(context.TODO()).Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) c.cacheBusted = true return "", nil } - logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + log.G(context.TODO()).Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) return cacheID, nil } diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index 9d103f6b44..2b9ba77f8c 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -10,6 +10,7 @@ import ( "fmt" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" @@ -21,7 +22,6 @@ import ( "github.com/docker/go-connections/nat" ocispec "github.com/opencontainers/image-spec/specs-go/v1" 
"github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func (b *Builder) getArchiver() *archive.Archiver { @@ -348,7 +348,7 @@ func (b *Builder) probeAndCreate(ctx context.Context, dispatchState *dispatchSta } func (b *Builder) create(ctx context.Context, runConfig *container.Config) (string, error) { - logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + log.G(ctx).Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) hostConfig := hostConfigFromOptions(b.options) container, err := b.containerManager.Create(ctx, runConfig, hostConfig) diff --git a/builder/remotecontext/detect.go b/builder/remotecontext/detect.go index 38d4fed321..1c3d6fa059 100644 --- a/builder/remotecontext/detect.go +++ b/builder/remotecontext/detect.go @@ -2,12 +2,14 @@ package remotecontext // import "github.com/docker/docker/builder/remotecontext" import ( "bufio" + "context" "fmt" "io" "os" "runtime" "strings" + "github.com/containerd/containerd/log" "github.com/containerd/continuity/driver" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" @@ -18,7 +20,6 @@ import ( "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/patternmatcher" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ClientSessionRemote is identifier for client-session context transport @@ -133,7 +134,7 @@ func removeDockerfile(c modifiableContext, filesToRemove ...string) error { for _, fileToRemove := range filesToRemove { if rm, _ := patternmatcher.MatchesOrParentMatches(fileToRemove, excludes); rm { if err := c.Remove(fileToRemove); err != nil { - logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + log.G(context.TODO()).Errorf("failed to remove %s: %v", fileToRemove, err) } } } diff --git a/builder/remotecontext/git.go b/builder/remotecontext/git.go index 85efba24f3..f083179fa9 100644 --- a/builder/remotecontext/git.go +++ b/builder/remotecontext/git.go @@ -1,12 +1,13 @@ package remotecontext // import 
"github.com/docker/docker/builder/remotecontext" import ( + "context" "os" + "github.com/containerd/containerd/log" "github.com/docker/docker/builder" "github.com/docker/docker/builder/remotecontext/git" "github.com/docker/docker/pkg/archive" - "github.com/sirupsen/logrus" ) // MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. @@ -24,11 +25,11 @@ func MakeGitContext(gitURL string) (builder.Source, error) { defer func() { err := c.Close() if err != nil { - logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") + log.G(context.TODO()).WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") } err = os.RemoveAll(root) if err != nil { - logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") + log.G(context.TODO()).WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") } }() return FromArchive(c) diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index 00d0a5d1c6..fd7b50f5b4 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -16,6 +16,7 @@ import ( "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" containerddefaults "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/log" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" buildbackend "github.com/docker/docker/api/server/backend/build" @@ -83,6 +84,8 @@ func NewDaemonCli() *DaemonCli { } func (cli *DaemonCli) start(opts *daemonOptions) (err error) { + ctx := context.TODO() + if cli.Config, err = loadDaemonCliConfig(opts); err != nil { return err } @@ -101,7 +104,7 
@@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { configureProxyEnv(cli.Config) configureDaemonLogs(cli.Config) - logrus.Info("Starting up") + log.G(ctx).Info("Starting up") cli.configFile = &opts.configFile cli.flags = opts.flags @@ -111,14 +114,14 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { } if cli.Config.Experimental { - logrus.Warn("Running experimental build") + log.G(ctx).Warn("Running experimental build") } if cli.Config.IsRootless() { - logrus.Warn("Running in rootless mode. This mode has feature limitations.") + log.G(ctx).Warn("Running in rootless mode. This mode has feature limitations.") } if rootless.RunningWithRootlessKit() { - logrus.Info("Running with RootlessKit integration") + log.G(ctx).Info("Running with RootlessKit integration") if !cli.Config.IsRootless() { return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit") } @@ -155,7 +158,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile) defer func() { if err := os.Remove(cli.Pidfile); err != nil { - logrus.Error(err) + log.G(ctx).Error(err) } }() } @@ -164,7 +167,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset - logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") + log.G(ctx).WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR") } } @@ -199,7 +202,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { <-cli.apiShutdown err := httpServer.Shutdown(apiShutdownCtx) if err != nil { - logrus.WithError(err).Error("Error shutting down http server") + log.G(ctx).WithError(err).Error("Error shutting down http server") 
} close(apiShutdownDone) }() @@ -217,7 +220,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { // e.g. because the daemon failed to start. // Stop the HTTP server with no grace period. if closeErr := httpServer.Close(); closeErr != nil { - logrus.WithError(closeErr).Error("Error closing http server") + log.G(ctx).WithError(closeErr).Error("Error closing http server") } } }() @@ -262,7 +265,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { c, err := createAndStartCluster(cli, d) if err != nil { - logrus.Fatalf("Error starting cluster component: %v", err) + log.G(ctx).Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint @@ -270,7 +273,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { // initialized the cluster. d.RestartSwarmContainers() - logrus.Info("Daemon has completed initialization") + log.G(ctx).Info("Daemon has completed initialization") routerCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -300,9 +303,9 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { apiWG.Add(1) go func(ls net.Listener) { defer apiWG.Done() - logrus.Infof("API listen on %s", ls.Addr()) + log.G(ctx).Infof("API listen on %s", ls.Addr()) if err := httpServer.Serve(ls); err != http.ErrServerClosed { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "listener": ls.Addr(), }).Error("ServeAPI error") @@ -330,7 +333,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { return errors.Wrap(err, "shutting down due to ServeAPI error") } - logrus.Info("Daemon shutdown complete") + log.G(ctx).Info("Daemon shutdown complete") return nil } @@ -396,14 +399,15 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem } func (cli *DaemonCli) reloadConfig() { + ctx := context.TODO() reload := func(c *config.Config) { if err := 
validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { - logrus.Fatalf("Error validating authorization plugin: %v", err) + log.G(ctx).Fatalf("Error validating authorization plugin: %v", err) return } if err := cli.d.Reload(c); err != nil { - logrus.Errorf("Error reconfiguring the daemon: %v", err) + log.G(ctx).Errorf("Error reconfiguring the daemon: %v", err) return } @@ -424,7 +428,7 @@ func (cli *DaemonCli) reloadConfig() { } if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { - logrus.Error(err) + log.G(ctx).Error(err) } } @@ -457,9 +461,9 @@ func shutdownDaemon(ctx context.Context, d *daemon.Daemon) { <-ctx.Done() if errors.Is(ctx.Err(), context.DeadlineExceeded) { - logrus.Error("Force shutdown daemon") + log.G(ctx).Error("Force shutdown daemon") } else { - logrus.Debug("Clean shutdown succeeded") + log.G(ctx).Debug("Clean shutdown succeeded") } } @@ -724,6 +728,8 @@ func checkTLSAuthOK(c *config.Config) bool { } func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, []string, error) { + ctx := context.TODO() + if len(cfg.Hosts) == 0 { return nil, nil, errors.New("no hosts configured") } @@ -742,8 +748,8 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [ // It's a bad idea to bind to TCP without tlsverify. authEnabled := tlsConfig != nil && tlsConfig.ClientAuth == tls.RequireAndVerifyClientCert if proto == "tcp" && !authEnabled { - logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") - logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. 
Be safe out there!") + log.G(ctx).WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.") + log.G(ctx).WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!") time.Sleep(time.Second) // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot" @@ -761,17 +767,17 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [ if ip == nil { ipA, err := net.ResolveIPAddr("ip", ipAddr) if err != nil { - logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") + log.G(ctx).WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address") } if ipA != nil { ip = ipA.IP } } if ip == nil || !ip.IsLoopback() { - logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message") - logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") - logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) - logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") + log.G(ctx).WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. 
Startup is intentionally being slowed down to show this message") + log.G(ctx).WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network") + log.G(ctx).WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify) + log.G(ctx).WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release") time.Sleep(15 * time.Second) } @@ -788,7 +794,7 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [ if err != nil { return nil, nil, err } - logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + log.G(ctx).Debugf("Listener created for HTTP on %s (%s)", proto, addr) hosts = append(hosts, addr) lss = append(lss, ls...) } @@ -885,7 +891,7 @@ func configureProxyEnv(conf *config.Config) { func overrideProxyEnv(name, val string) { if oldVal := os.Getenv(name); oldVal != "" && oldVal != val { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "name": name, "old-value": config.MaskCredentials(oldVal), "new-value": config.MaskCredentials(val), diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go index f9ab3dcc4e..44ca7124c6 100644 --- a/cmd/dockerd/daemon_unix.go +++ b/cmd/dockerd/daemon_unix.go @@ -11,13 +11,13 @@ import ( "strconv" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libcontainerd/supervisor" "github.com/docker/docker/libnetwork/portallocator" "github.com/docker/docker/pkg/homedir" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -132,7 +132,7 @@ func (cli *DaemonCli) initContainerd(ctx context.Context) (func(time.Duration) e return nil, nil } - 
logrus.Info("containerd not running, starting managed containerd") + log.G(ctx).Info("containerd not running, starting managed containerd") opts, err := cli.getContainerdDaemonOpts() if err != nil { return nil, errors.Wrap(err, "failed to generate containerd options") diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go index f139b926b3..4e16714442 100644 --- a/cmd/dockerd/daemon_windows.go +++ b/cmd/dockerd/daemon_windows.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/daemon/config" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "golang.org/x/sys/windows" ) @@ -28,7 +28,7 @@ func preNotifyReady() { if service != nil { err := service.started() if err != nil { - logrus.Fatal(err) + log.G(context.TODO()).Fatal(err) } } } @@ -45,7 +45,7 @@ func notifyStopping() { func notifyShutdown(err error) { if service != nil { if err != nil { - logrus.Fatal(err) + log.G(context.TODO()).Fatal(err) } service.stopped(err) } @@ -60,7 +60,7 @@ func (cli *DaemonCli) setupConfigReloadTrap() { event := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) ev, _ := windows.UTF16PtrFromString(event) if h, _ := windows.CreateEvent(&sa, 0, 0, ev); h != 0 { - logrus.Debugf("Config reload - waiting signal at %s", event) + log.G(context.TODO()).Debugf("Config reload - waiting signal at %s", event) for { windows.WaitForSingleObject(h, windows.INFINITE) cli.reloadConfig() diff --git a/cmd/dockerd/docker_windows.go b/cmd/dockerd/docker_windows.go index 815c036896..6a2bd0997b 100644 --- a/cmd/dockerd/docker_windows.go +++ b/cmd/dockerd/docker_windows.go @@ -1,11 +1,10 @@ package main import ( - "io" - "path/filepath" - "github.com/Microsoft/go-winio/pkg/etwlogrus" "github.com/sirupsen/logrus" + "io" + "path/filepath" ) func runDaemon(opts *daemonOptions) error { diff --git a/cmd/dockerd/grpclog.go b/cmd/dockerd/grpclog.go index 2d726c7f76..be3f0b550c 100644 --- a/cmd/dockerd/grpclog.go +++ 
b/cmd/dockerd/grpclog.go @@ -1,6 +1,9 @@ package main import ( + "context" + + "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" "google.golang.org/grpc/grpclog" ) @@ -12,6 +15,6 @@ import ( // warn => debug // error => warn func configureGRPCLog() { - l := logrus.WithField("library", "grpc") + l := log.G(context.TODO()).WithField("library", "grpc") grpclog.SetLoggerV2(grpclog.NewLoggerV2(l.WriterLevel(logrus.TraceLevel), l.WriterLevel(logrus.DebugLevel), l.WriterLevel(logrus.WarnLevel))) } diff --git a/cmd/dockerd/metrics.go b/cmd/dockerd/metrics.go index a13a5d2670..5ef0590f95 100644 --- a/cmd/dockerd/metrics.go +++ b/cmd/dockerd/metrics.go @@ -1,13 +1,14 @@ package main import ( + "context" "net" "net/http" "strings" "time" + "github.com/containerd/containerd/log" metrics "github.com/docker/go-metrics" - "github.com/sirupsen/logrus" ) func startMetricsServer(addr string) error { @@ -24,13 +25,13 @@ func startMetricsServer(addr string) error { mux := http.NewServeMux() mux.Handle("/metrics", metrics.Handler()) go func() { - logrus.Infof("metrics API listening on %s", l.Addr()) + log.G(context.TODO()).Infof("metrics API listening on %s", l.Addr()) srv := &http.Server{ Handler: mux, ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout. 
} if err := srv.Serve(l); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Error("error serving metrics API") + log.G(context.TODO()).WithError(err).Error("error serving metrics API") } }() return nil diff --git a/cmd/dockerd/service_windows.go b/cmd/dockerd/service_windows.go index 4510d79f88..6bd63b8cfa 100644 --- a/cmd/dockerd/service_windows.go +++ b/cmd/dockerd/service_windows.go @@ -2,16 +2,18 @@ package main import ( "bytes" + "context" "errors" "fmt" "io" - "log" "os" "os/exec" "path/filepath" "time" "github.com/sirupsen/logrus" + + "github.com/containerd/containerd/log" "github.com/spf13/pflag" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" @@ -295,7 +297,7 @@ func (h *handler) started() error { } func (h *handler) stopped(err error) { - logrus.Debugf("Stopping service: %v", err) + log.G(context.TODO()).Debugf("Stopping service: %v", err) h.tosvc <- err != nil <-h.fromsvc } @@ -308,12 +310,12 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S // Wait for initialization to complete. 
failed := <-h.tosvc if failed { - logrus.Debug("Aborting service start due to failure during initialization") + log.G(context.TODO()).Debug("Aborting service start due to failure during initialization") return true, 1 } s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} - logrus.Debug("Service running") + log.G(context.TODO()).Debug("Service running") Loop: for { select { @@ -380,7 +382,7 @@ func initPanicFile(path string) error { os.Stderr = os.NewFile(panicFile.Fd(), "/dev/stderr") // Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether - log.SetOutput(os.Stderr) + logrus.SetOutput(os.Stderr) return nil } diff --git a/cmd/dockerd/trap/trap.go b/cmd/dockerd/trap/trap.go index 4d9ac521e7..ce24a2d709 100644 --- a/cmd/dockerd/trap/trap.go +++ b/cmd/dockerd/trap/trap.go @@ -1,11 +1,12 @@ package trap // import "github.com/docker/docker/cmd/dockerd/trap" import ( + "context" "os" "os/signal" "syscall" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) const ( @@ -29,7 +30,7 @@ func Trap(cleanup func()) { go func() { var interruptCount int for sig := range c { - logrus.Infof("Processing signal '%v'", sig) + log.G(context.TODO()).Infof("Processing signal '%v'", sig) if interruptCount < forceQuitCount { interruptCount++ // Initiate the cleanup only once @@ -39,7 +40,7 @@ func Trap(cleanup func()) { continue } - logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + log.G(context.TODO()).Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") os.Exit(128 + int(sig.(syscall.Signal))) } }() diff --git a/container/container.go b/container/container.go index e04696bd65..47a4af4018 100644 --- a/container/container.go +++ b/container/container.go @@ -15,6 +15,7 @@ import ( "time" "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/log" 
containertypes "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" swarmtypes "github.com/docker/docker/api/types/swarm" @@ -41,7 +42,6 @@ import ( "github.com/moby/sys/symlink" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -317,7 +317,7 @@ func (container *Container) GetResourcePath(path string) (string, error) { // from the error being propagated all the way back to the client. This makes // debugging significantly easier and clearly indicates the error comes from the daemon. if e != nil { - logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS, path, e) + log.G(context.TODO()).Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS, path, e) } return r, e } @@ -432,7 +432,7 @@ func (container *Container) StartLogger() (logger.Logger, error) { } if !container.LocalLogCacheMeta.HaveNotifyEnabled { - logrus.WithField("container", container.ID).WithField("driver", container.HostConfig.LogConfig.Type).Info("Configured log driver does not support reads, enabling local file cache for container logs") + log.G(context.TODO()).WithField("container", container.ID).WithField("driver", container.HostConfig.LogConfig.Type).Info("Configured log driver does not support reads, enabling local file cache for container logs") container.LocalLogCacheMeta.HaveNotifyEnabled = true } info.LogPath = logPath @@ -673,7 +673,7 @@ func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { if container.StreamConfig.Stdin() == nil && !container.Config.Tty { if iop.Stdin != nil { if err := iop.Stdin.Close(); err != nil { - logrus.Warnf("error closing stdin: %+v", err) + log.G(context.TODO()).Warnf("error closing stdin: %+v", err) } } } diff --git a/container/container_unix.go b/container/container_unix.go index ae771e2d4d..6509c0a99b 100644 --- a/container/container_unix.go +++ 
b/container/container_unix.go @@ -3,10 +3,12 @@ package container // import "github.com/docker/docker/container" import ( + "context" "os" "path/filepath" "syscall" + "github.com/containerd/containerd/log" "github.com/containerd/continuity/fs" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -18,7 +20,6 @@ import ( "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -64,12 +65,14 @@ func (container *Container) BuildHostnameFile() error { // NetworkMounts returns the list of network mounts. func (container *Container) NetworkMounts() []Mount { + ctx := context.TODO() + var mounts []Mount shared := container.HostConfig.NetworkMode.IsContainer() parser := volumemounts.NewParser() if container.ResolvConfPath != "" { if _, err := os.Stat(container.ResolvConfPath); err != nil { - logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + log.G(ctx).Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) } else { writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { @@ -87,7 +90,7 @@ func (container *Container) NetworkMounts() []Mount { } if container.HostnamePath != "" { if _, err := os.Stat(container.HostnamePath); err != nil { - logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + log.G(ctx).Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) } else { writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hostname"]; exists { @@ -105,7 +108,7 @@ func (container *Container) NetworkMounts() []Mount { } if container.HostsPath != "" { if _, err := os.Stat(container.HostsPath); 
err != nil { - logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + log.G(ctx).Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) } else { writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hosts"]; exists { @@ -146,7 +149,7 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st defer func() { if err := v.Unmount(id); err != nil { - logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + log.G(context.TODO()).Warnf("error while unmounting volume %s: %v", v.Name(), err) } }() if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) { @@ -363,13 +366,15 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi // unmounts each volume normally. // This is used from daemon/archive for `docker cp` func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + ctx := context.TODO() + networkMounts := container.NetworkMounts() mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts)) for _, mntPoint := range container.MountPoints { dest, err := container.GetResourcePath(mntPoint.Destination) if err != nil { - logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err) + log.G(ctx).Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err) continue } mountPaths = append(mountPaths, dest) @@ -378,7 +383,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st for _, m := range networkMounts { dest, err := container.GetResourcePath(m.Destination) if err != nil { - logrus.Warnf("Failed to get volume destination path for 
container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err) + log.G(ctx).Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err) continue } mountPaths = append(mountPaths, dest) @@ -386,7 +391,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st for _, mountPath := range mountPaths { if err := mount.Unmount(mountPath); err != nil { - logrus.WithError(err).WithField("container", container.ID). + log.G(ctx).WithError(err).WithField("container", container.ID). Warn("Unable to unmount") } } diff --git a/container/exec.go b/container/exec.go index 18e86c6a4f..13d83468e0 100644 --- a/container/exec.go +++ b/container/exec.go @@ -1,14 +1,15 @@ package container // import "github.com/docker/docker/container" import ( + "context" "runtime" "sync" "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/log" "github.com/docker/docker/container/stream" "github.com/docker/docker/libcontainerd/types" "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" ) // ExecConfig holds the configurations for execs. 
The Daemon keeps @@ -55,7 +56,7 @@ func (c *ExecConfig) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { if iop.Stdin != nil { if err := iop.Stdin.Close(); err != nil { - logrus.Errorf("error closing exec stdin: %+v", err) + log.G(context.TODO()).Errorf("error closing exec stdin: %+v", err) } } } diff --git a/container/health.go b/container/health.go index 3e93142b98..97faa28ccf 100644 --- a/container/health.go +++ b/container/health.go @@ -1,10 +1,11 @@ package container // import "github.com/docker/docker/container" import ( + "context" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" - "github.com/sirupsen/logrus" ) // Health holds the current container health-check state @@ -59,7 +60,7 @@ func (s *Health) OpenMonitorChannel() chan struct{} { defer s.mu.Unlock() if s.stop == nil { - logrus.Debug("OpenMonitorChannel") + log.G(context.TODO()).Debug("OpenMonitorChannel") s.stop = make(chan struct{}) return s.stop } @@ -72,11 +73,11 @@ func (s *Health) CloseMonitorChannel() { defer s.mu.Unlock() if s.stop != nil { - logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + log.G(context.TODO()).Debug("CloseMonitorChannel: waiting for probe to stop") close(s.stop) s.stop = nil // unhealthy when the monitor has stopped for compatibility reasons s.Health.Status = types.Unhealthy - logrus.Debug("CloseMonitorChannel done") + log.G(context.TODO()).Debug("CloseMonitorChannel done") } } diff --git a/container/monitor.go b/container/monitor.go index ff4b3439e5..1e2922f60f 100644 --- a/container/monitor.go +++ b/container/monitor.go @@ -1,9 +1,10 @@ package container // import "github.com/docker/docker/container" import ( + "context" "time" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) const ( @@ -18,7 +19,7 @@ func (container *Container) Reset(lock bool) { } if err := container.CloseStreams(); err != nil { - logrus.Errorf("%s: 
%s", container.ID, err) + log.G(context.TODO()).Errorf("%s: %s", container.ID, err) } // Re-create a brand new stdin pipe once the container exited @@ -38,7 +39,7 @@ func (container *Container) Reset(lock bool) { defer timer.Stop() select { case <-timer.C: - logrus.Warn("Logger didn't exit in time: logs may be truncated") + log.G(context.TODO()).Warn("Logger didn't exit in time: logs may be truncated") case <-exit: } } diff --git a/container/stream/attach.go b/container/stream/attach.go index 0269a226b1..6480abc102 100644 --- a/container/stream/attach.go +++ b/container/stream/attach.go @@ -4,10 +4,10 @@ import ( "context" "io" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/pools" "github.com/moby/term" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -63,8 +63,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro // Connect stdin of container to the attach stdin stream. if cfg.Stdin != nil { group.Go(func() error { - logrus.Debug("attach: stdin: begin") - defer logrus.Debug("attach: stdin: end") + log.G(ctx).Debug("attach: stdin: begin") + defer log.G(ctx).Debug("attach: stdin: end") defer func() { if cfg.CloseStdin && !cfg.TTY { @@ -90,7 +90,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro err = nil } if err != nil { - logrus.WithError(err).Debug("error on attach stdin") + log.G(ctx).WithError(err).Debug("error on attach stdin") return errors.Wrap(err, "error on attach stdin") } return nil @@ -98,8 +98,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro } attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error { - logrus.Debugf("attach: %s: begin", name) - defer logrus.Debugf("attach: %s: end", name) + log.G(ctx).Debugf("attach: %s: begin", name) + defer log.G(ctx).Debugf("attach: %s: end", name) defer func() { // Make sure stdin gets closed if cfg.Stdin != nil { @@ 
-113,7 +113,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro err = nil } if err != nil { - logrus.WithError(err).Debugf("attach: %s", name) + log.G(ctx).WithError(err).Debugf("attach: %s", name) return errors.Wrapf(err, "error attaching %s stream", name) } return nil @@ -132,7 +132,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro errs := make(chan error, 1) go func() { - defer logrus.Debug("attach done") + defer log.G(ctx).Debug("attach done") groupErr := make(chan error, 1) go func() { groupErr <- group.Wait() diff --git a/container/stream/streams.go b/container/stream/streams.go index 83e6ded611..e7446ed116 100644 --- a/container/stream/streams.go +++ b/container/stream/streams.go @@ -8,10 +8,10 @@ import ( "sync" "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" - "github.com/sirupsen/logrus" ) // Config holds information about I/O streams managed together. 
@@ -116,12 +116,14 @@ func (c *Config) CloseStreams() error { // CopyToPipe connects streamconfig with a libcontainerd.IOPipe func (c *Config) CopyToPipe(iop *cio.DirectIO) { + ctx := context.TODO() + c.dio = iop copyFunc := func(w io.Writer, r io.ReadCloser) { c.wg.Add(1) go func() { if _, err := pools.Copy(w, r); err != nil { - logrus.Errorf("stream copy error: %v", err) + log.G(ctx).Errorf("stream copy error: %v", err) } r.Close() c.wg.Done() @@ -140,7 +142,7 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) { go func() { pools.Copy(iop.Stdin, stdin) if err := iop.Stdin.Close(); err != nil { - logrus.Warnf("failed to close stdin: %v", err) + log.G(ctx).Warnf("failed to close stdin: %v", err) } }() } diff --git a/container/view.go b/container/view.go index 9d300cb4ce..1174a01e88 100644 --- a/container/view.go +++ b/container/view.go @@ -2,17 +2,18 @@ package container // import "github.com/docker/docker/container" import ( "bytes" + "context" "errors" "fmt" "strings" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" "github.com/docker/docker/errdefs" "github.com/docker/go-connections/nat" memdb "github.com/hashicorp/go-memdb" - "github.com/sirupsen/logrus" ) const ( @@ -387,7 +388,7 @@ func (v *View) transform(container *Container) *Snapshot { for port, bindings := range container.NetworkSettings.Ports { p, err := nat.ParsePort(port.Port()) if err != nil { - logrus.WithError(err).Warn("invalid port map") + log.G(context.TODO()).WithError(err).Warn("invalid port map") continue } if len(bindings) == 0 { @@ -400,7 +401,7 @@ func (v *View) transform(container *Container) *Snapshot { for _, binding := range bindings { h, err := nat.ParsePort(binding.HostPort) if err != nil { - logrus.WithError(err).Warn("invalid host port map") + log.G(context.TODO()).WithError(err).Warn("invalid host port map") continue } snapshot.Ports = append(snapshot.Ports, types.Port{ diff --git a/daemon/attach.go 
b/daemon/attach.go index b33d2ee86f..cbdf4b9e41 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -5,6 +5,7 @@ import ( "fmt" "io" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/container" "github.com/docker/docker/container/stream" @@ -13,7 +14,6 @@ import ( "github.com/docker/docker/pkg/stdcopy" "github.com/moby/term" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. @@ -115,7 +115,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach if logCreated { defer func() { if err = logDriver.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) + log.G(context.TODO()).Errorf("Error closing logger: %v", err) } }() } @@ -140,7 +140,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach cfg.Stderr.Write(msg.Line) } case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) + log.G(context.TODO()).Errorf("Error streaming logs: %v", err) break LogLoop } } @@ -156,7 +156,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach r, w := io.Pipe() go func(stdin io.ReadCloser) { defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") + defer log.G(context.TODO()).Debug("Closing buffered stdin pipe") io.Copy(w, stdin) }(cfg.Stdin) cfg.Stdin = r @@ -181,7 +181,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach if errors.Is(err, context.Canceled) || errors.As(err, &ierr) { daemon.LogContainerEvent(c, "detach") } else { - logrus.Errorf("attach failed with error: %v", err) + log.G(ctx).Errorf("attach failed with error: %v", err) } } diff --git a/daemon/cdi.go b/daemon/cdi.go index 2b80cfe1e1..cf77b978e0 100644 --- a/daemon/cdi.go +++ b/daemon/cdi.go @@ -1,14 +1,15 @@ package daemon import ( + "context" "fmt" 
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" "github.com/hashicorp/go-multierror" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type cdiHandler struct { @@ -20,7 +21,7 @@ type cdiHandler struct { func RegisterCDIDriver(opts ...cdi.Option) { cache, err := cdi.NewCache(opts...) if err != nil { - logrus.WithError(err).Error("CDI registry initialization failed") + log.G(context.TODO()).WithError(err).Error("CDI registry initialization failed") // We create a spec updater that always returns an error. // This error will be returned only when a CDI device is requested. // This ensures that daemon startup is not blocked by a CDI registry initialization failure. @@ -66,7 +67,7 @@ func (c *cdiHandler) injectCDIDevices(s *specs.Spec, dev *deviceInstance) error // We log the errors that may have been generated while refreshing the CDI registry. // These may be due to malformed specifications or device name conflicts that could be // the cause of an injection failure. 
- logrus.WithError(rerrs).Warning("Refreshing the CDI registry generated errors") + log.G(context.TODO()).WithError(rerrs).Warning("Refreshing the CDI registry generated errors") } return fmt.Errorf("CDI device injection failed: %w", err) diff --git a/daemon/cluster/cluster.go b/daemon/cluster/cluster.go index c39001c51c..1ccfb9a4e9 100644 --- a/daemon/cluster/cluster.go +++ b/daemon/cluster/cluster.go @@ -49,6 +49,7 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/network" types "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/cluster/controllers/plugin" @@ -58,7 +59,6 @@ import ( swarmapi "github.com/moby/swarmkit/v2/api" swarmnode "github.com/moby/swarmkit/v2/node" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "google.golang.org/grpc" ) @@ -193,10 +193,10 @@ func (c *Cluster) Start() error { select { case <-timer.C: - logrus.Error("swarm component could not be started before timeout was reached") + log.G(context.TODO()).Error("swarm component could not be started before timeout was reached") case err := <-nr.Ready(): if err != nil { - logrus.WithError(err).Error("swarm component could not be started") + log.G(context.TODO()).WithError(err).Error("swarm component could not be started") return nil } } @@ -386,13 +386,13 @@ func (c *Cluster) Cleanup() { if err == nil { singlenode := active && isLastManager(reachable, unreachable) if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { - logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable) + log.G(context.TODO()).Errorf("Leaving cluster with %v managers left out of %v. 
Raft quorum will be lost.", reachable-1, reachable+unreachable) } } } if err := node.Stop(); err != nil { - logrus.Errorf("failed to shut down cluster node: %v", err) + log.G(context.TODO()).Errorf("failed to shut down cluster node: %v", err) stack.Dump() } diff --git a/daemon/cluster/controllers/plugin/controller.go b/daemon/cluster/controllers/plugin/controller.go index b51cf86cd9..dd7424ef04 100644 --- a/daemon/cluster/controllers/plugin/controller.go +++ b/daemon/cluster/controllers/plugin/controller.go @@ -5,6 +5,7 @@ import ( "io" "net/http" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" @@ -61,7 +62,7 @@ func NewController(backend Backend, t *api.Task) (*Controller, error) { backend: backend, spec: spec, serviceID: t.ServiceID, - logger: logrus.WithFields(logrus.Fields{ + logger: log.G(context.TODO()).WithFields(logrus.Fields{ "controller": "plugin", "task": t.ID, "plugin": spec.Name, diff --git a/daemon/cluster/convert/container.go b/daemon/cluster/convert/container.go index 2db29b6744..fb88eefe37 100644 --- a/daemon/cluster/convert/container.go +++ b/daemon/cluster/convert/container.go @@ -1,9 +1,11 @@ package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( + "context" "fmt" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" types "github.com/docker/docker/api/types/swarm" @@ -11,7 +13,6 @@ import ( gogotypes "github.com/gogo/protobuf/types" swarmapi "github.com/moby/swarmkit/v2/api" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec { @@ -168,7 +169,7 @@ func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretRef target := s.GetFile() if target == nil { // not a file target - logrus.Warnf("secret target not a 
file: secret=%s", s.SecretID) + log.G(context.TODO()).Warnf("secret target not a file: secret=%s", s.SecretID) continue } refs = append(refs, &types.SecretReference{ @@ -240,7 +241,7 @@ func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigRef } } else { // not a file target - logrus.Warnf("config target not known: config=%s", s.ConfigID) + log.G(context.TODO()).Warnf("config target not known: config=%s", s.ConfigID) continue } refs = append(refs, r) diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go index 28fcaaeb7f..1f01a3d176 100644 --- a/daemon/cluster/executor/container/adapter.go +++ b/daemon/cluster/executor/container/adapter.go @@ -95,7 +95,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { authConfig := &registry.AuthConfig{} if encodedAuthConfig != "" { if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) + log.G(ctx).Warnf("invalid authconfig: %v", err) } } diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go index ac49aeb130..945ca3b291 100644 --- a/daemon/cluster/executor/container/container.go +++ b/daemon/cluster/executor/container/container.go @@ -1,12 +1,14 @@ package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( + "context" "errors" "fmt" "net" "strconv" "strings" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" enginecontainer "github.com/docker/docker/api/types/container" @@ -26,7 +28,6 @@ import ( "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/api/genericresource" "github.com/moby/swarmkit/v2/template" - "github.com/sirupsen/logrus" ) const ( @@ -594,7 +595,7 @@ func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { return 
nil } - logrus.Debugf("Creating service config in agent for t = %+v", c.task) + log.G(context.TODO()).Debugf("Creating service config in agent for t = %+v", c.task) svcCfg := &clustertypes.ServiceConfig{ Name: c.task.ServiceAnnotations.Name, Aliases: make(map[string][]string), diff --git a/daemon/cluster/executor/container/executor.go b/daemon/cluster/executor/container/executor.go index 8af800aa3c..2cec252e73 100644 --- a/daemon/cluster/executor/container/executor.go +++ b/daemon/cluster/executor/container/executor.go @@ -160,7 +160,7 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error { if na == nil || na.Network == nil || len(na.Addresses) == 0 { // this should not happen, but we got a panic here and don't have a // good idea about what the underlying data structure looks like. - logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)). + log.G(ctx).WithField("NetworkAttachment", fmt.Sprintf("%#v", na)). Warnf("skipping nil or malformed node network attachment entry") continue } @@ -192,7 +192,7 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error { // same thing as above, check sanity of the attachments so we don't // get a panic. if na == nil || na.Network == nil || len(na.Addresses) == 0 { - logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)). + log.G(ctx).WithField("NetworkAttachment", fmt.Sprintf("%#v", na)). 
Warnf("skipping nil or malformed node network attachment entry") continue } @@ -301,7 +301,7 @@ func (e *executor) Controller(t *api.Task) (exec.Controller, error) { var ctlr exec.Controller switch r := t.Spec.GetRuntime().(type) { case *api.TaskSpec_Generic: - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "kind": r.Generic.Kind, "type_url": r.Generic.Payload.TypeUrl, }).Debug("custom runtime requested") diff --git a/daemon/cluster/networks.go b/daemon/cluster/networks.go index 65fd9735cd..1bfbe4361e 100644 --- a/daemon/cluster/networks.go +++ b/daemon/cluster/networks.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/containerd/containerd/log" apitypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" @@ -14,7 +15,6 @@ import ( "github.com/docker/docker/runconfig" swarmapi "github.com/moby/swarmkit/v2/api" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // GetNetworks returns all current cluster managed networks. 
@@ -127,7 +127,7 @@ func (c *Cluster) UpdateAttachment(target, containerID string, config *network.N return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) } if attacher.inProgress { - logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID) + log.G(context.TODO()).Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID) c.mu.Unlock() return nil } @@ -219,13 +219,13 @@ func (c *Cluster) AttachNetwork(target string, containerID string, addresses []s close(attachCompleteCh) c.mu.Unlock() - logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID) + log.G(ctx).Debugf("Successfully attached to network %s with task id %s", target, taskID) release := func() { ctx, cancel := c.getRequestContext() defer cancel() if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil { - logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v", + log.G(ctx).Errorf("Failed remove network attachment %s to network %s on allocation failure: %v", taskID, target, err) } } @@ -242,7 +242,7 @@ func (c *Cluster) AttachNetwork(target string, containerID string, addresses []s c.attachers[aKey].config = config c.mu.Unlock() - logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID) + log.G(ctx).Debugf("Successfully allocated resources on network %s for task id %s", target, taskID) return config, nil } diff --git a/daemon/cluster/noderunner.go b/daemon/cluster/noderunner.go index 38c41e80be..11f5b565a0 100644 --- a/daemon/cluster/noderunner.go +++ b/daemon/cluster/noderunner.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" types "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/cluster/executor/container" lncluster "github.com/docker/docker/libnetwork/cluster" @@ 
-15,7 +16,6 @@ import ( swarmallocator "github.com/moby/swarmkit/v2/manager/allocator/cnmallocator" swarmnode "github.com/moby/swarmkit/v2/node" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -231,7 +231,7 @@ func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientCo IncludeOldObject: true, }) if err != nil { - logrus.WithError(err).Error("failed to watch cluster store") + log.G(ctx).WithError(err).Error("failed to watch cluster store") return } for { @@ -240,7 +240,7 @@ func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientCo // store watch is broken errStatus, ok := status.FromError(err) if !ok || errStatus.Code() != codes.Canceled { - logrus.WithError(err).Error("failed to receive changes from store watch API") + log.G(ctx).WithError(err).Error("failed to receive changes from store watch API") } return } @@ -271,7 +271,7 @@ func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) { err := detectLockedError(node.Err(context.Background())) if err != nil { - logrus.Errorf("cluster exited with error: %v", err) + log.G(context.TODO()).Errorf("cluster exited with error: %v", err) } n.mu.Lock() n.swarmNode = nil @@ -352,7 +352,7 @@ func (n *nodeRunner) enableReconnectWatcher() { if n.reconnectDelay > maxReconnectDelay { n.reconnectDelay = maxReconnectDelay } - logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) + log.G(context.TODO()).Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) n.cancelReconnect = cancel diff --git a/daemon/cluster/services.go b/daemon/cluster/services.go index 12af0cbf4f..dcf8e3fe9b 100644 --- a/daemon/cluster/services.go +++ b/daemon/cluster/services.go @@ -11,6 +11,7 @@ import ( "strings" "time" + 
"github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -23,7 +24,6 @@ import ( gogotypes "github.com/gogo/protobuf/types" swarmapi "github.com/moby/swarmkit/v2/api" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "google.golang.org/grpc" ) @@ -234,7 +234,7 @@ func (c *Cluster) CreateService(s swarm.ServiceSpec, encodedAuth string, queryRe authReader := strings.NewReader(encodedAuth) dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader)) if err := dec.Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) + log.G(ctx).Warnf("invalid authconfig: %v", err) } } @@ -245,14 +245,14 @@ func (c *Cluster) CreateService(s swarm.ServiceSpec, encodedAuth string, queryRe if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + log.G(ctx).Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) // warning in the client response should be concise resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) } else if ctnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + log.G(ctx).Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) ctnr.Image = digestImage } else { - logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + log.G(ctx).Debugf("creating service using supplied digest reference %s", ctnr.Image) } // Replace the context with a fresh one. 
@@ -349,7 +349,7 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec swa authConfig := ®istry.AuthConfig{} if encodedAuth != "" { if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) + log.G(ctx).Warnf("invalid authconfig: %v", err) } } @@ -360,14 +360,14 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec swa if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + log.G(ctx).Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) // warning in the client response should be concise resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) } else if newCtnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + log.G(ctx).Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) newCtnr.Image = digestImage } else { - logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + log.G(ctx).Debugf("updating service using supplied digest reference %s", newCtnr.Image) } // Replace the context with a fresh one. 
diff --git a/daemon/cluster/swarm.go b/daemon/cluster/swarm.go index f358f89cc9..5681f29d82 100644 --- a/daemon/cluster/swarm.go +++ b/daemon/cluster/swarm.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/containerd/containerd/log" apitypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" types "github.com/docker/docker/api/types/swarm" @@ -18,7 +19,6 @@ import ( "github.com/moby/swarmkit/v2/manager/encryption" swarmnode "github.com/moby/swarmkit/v2/node" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "google.golang.org/grpc" ) @@ -87,7 +87,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) { if !found { ip, err := c.resolveSystemAddr() if err != nil { - logrus.Warnf("Could not find a local address: %v", err) + log.G(context.TODO()).Warnf("Could not find a local address: %v", err) return "", errMustSpecifyListenAddr } localAddr = ip.String() @@ -398,7 +398,7 @@ func (c *Cluster) Leave(ctx context.Context, force bool) error { } // release readers in here if err := nr.Stop(); err != nil { - logrus.Errorf("failed to shut down cluster node: %v", err) + log.G(ctx).Errorf("failed to shut down cluster node: %v", err) stack.Dump() return err } @@ -414,7 +414,7 @@ func (c *Cluster) Leave(ctx context.Context, force bool) error { } for _, id := range nodeContainers { if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { - logrus.Errorf("error removing %v: %v", id, err) + log.G(ctx).Errorf("error removing %v: %v", id, err) } } } diff --git a/daemon/config/config.go b/daemon/config/config.go index e4d6271fef..7e1cdf99a8 100644 --- a/daemon/config/config.go +++ b/daemon/config/config.go @@ -2,6 +2,7 @@ package config // import "github.com/docker/docker/daemon/config" import ( "bytes" + "context" "encoding/json" "fmt" "net" @@ -14,6 +15,7 @@ import ( "golang.org/x/text/transform" "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" + 
"github.com/containerd/containerd/log" "github.com/docker/docker/opts" "github.com/docker/docker/registry" "github.com/imdario/mergo" @@ -324,7 +326,7 @@ func GetConflictFreeLabels(labels []string) ([]string, error) { // Reload reads the configuration in the host and reloads the daemon and server. func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { - logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + log.G(context.TODO()).Infof("Got signal to reload configuration, reloading from: %s", configFile) newConfig, err := getConflictFreeConfiguration(configFile, flags) if err != nil { if flags.Changed("config-file") || !os.IsNotExist(err) { diff --git a/daemon/configs.go b/daemon/configs.go index 4fd0d2272c..63546d7e0a 100644 --- a/daemon/configs.go +++ b/daemon/configs.go @@ -1,14 +1,16 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" + + "github.com/containerd/containerd/log" swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/sirupsen/logrus" ) // SetContainerConfigReferences sets the container config references needed func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error { if !configsSupported() && len(refs) > 0 { - logrus.Warn("configs are not supported on this platform") + log.G(context.TODO()).Warn("configs are not supported on this platform") return nil } diff --git a/daemon/container.go b/daemon/container.go index d189670545..0e928b9b19 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -1,12 +1,14 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "os" "path/filepath" "runtime" "time" + "github.com/containerd/containerd/log" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/container" @@ -23,7 +25,6 @@ import ( "github.com/moby/sys/signal" 
"github.com/opencontainers/selinux/go-selinux" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // GetContainer looks for a container using the provided information, which could be @@ -59,7 +60,7 @@ func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, e // or consistent w.r.t. the live daemon.containers Store so // while reaching this code path may be indicative of a bug, // it is not _necessarily_ the case. - logrus.WithField("prefixOrName", prefixOrName). + log.G(context.TODO()).WithField("prefixOrName", prefixOrName). WithField("id", containerID). Debugf("daemon.GetContainer: container is known to daemon.containersReplica but not daemon.containers") return nil, containerNotFound(prefixOrName) @@ -247,7 +248,7 @@ func (daemon *Daemon) verifyContainerSettings(daemonCfg *configStore, hostConfig // Now do platform-specific verification warnings, err = verifyPlatformContainerSettings(daemon, daemonCfg, hostConfig, update) for _, w := range warnings { - logrus.Warn(w) + log.G(context.TODO()).Warn(w) } return warnings, err } diff --git a/daemon/container_operations.go b/daemon/container_operations.go index f69646e190..8ea7df60a3 100644 --- a/daemon/container_operations.go +++ b/daemon/container_operations.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "errors" "fmt" "net" @@ -9,6 +10,7 @@ import ( "strings" "time" + "github.com/containerd/containerd/log" containertypes "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/container" @@ -24,7 +26,6 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/nat" - "github.com/sirupsen/logrus" ) func (daemon *Daemon) getDNSSearchSettings(cfg *config.Config, container *container.Container) []string { @@ -224,7 +225,7 @@ func (daemon *Daemon) buildSandboxOptions(cfg *config.Config, container *contain 
} _, alias = path.Split(alias) - logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) + log.G(context.TODO()).Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( parent.ID, alias, @@ -415,7 +416,7 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN if err != nil { if daemon.clusterProvider != nil { if err := daemon.clusterProvider.DetachNetwork(id, container.ID); err != nil { - logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) + log.G(context.TODO()).Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) } } @@ -532,7 +533,7 @@ func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.C // Cleanup any stale sandbox left over due to ungraceful daemon shutdown if err := controller.SandboxDestroy(container.ID); err != nil { - logrus.WithError(err).Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) + log.G(context.TODO()).WithError(err).Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) } if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { @@ -780,7 +781,7 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container. 
defer func() { if err != nil { if e := ep.Delete(false); e != nil { - logrus.Warnf("Could not rollback container connection to network %s", idOrName) + log.G(context.TODO()).Warnf("Could not rollback container connection to network %s", idOrName) } } }() @@ -935,9 +936,9 @@ func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n li func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) { if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed { if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil { - logrus.Warnf("error detaching from network %s: %v", network.Name(), err) + log.G(context.TODO()).Warnf("error detaching from network %s: %v", network.Name(), err) if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil { - logrus.Warnf("error detaching from network %s: %v", network.ID(), err) + log.G(context.TODO()).Warnf("error detaching from network %s: %v", network.ID(), err) } } } @@ -1033,12 +1034,12 @@ func (daemon *Daemon) releaseNetwork(container *container.Container) { sb, err := daemon.netController.SandboxByID(sid) if err != nil { - logrus.Warnf("error locating sandbox id %s: %v", sid, err) + log.G(context.TODO()).Warnf("error locating sandbox id %s: %v", sid, err) return } if err := sb.Delete(); err != nil { - logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) + log.G(context.TODO()).Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) } for _, nw := range networks { @@ -1149,7 +1150,7 @@ func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) er sb := daemon.getNetworkSandbox(ctr) if sb == nil { // If the network sandbox is not found, then there is nothing to deactivate - logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", 
containerName) + log.G(context.TODO()).Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName) return nil } return sb.DisableService() diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go index 961b87cd5c..2db1f7188b 100644 --- a/daemon/container_operations_unix.go +++ b/daemon/container_operations_unix.go @@ -3,12 +3,14 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "os" "path/filepath" "strconv" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/links" @@ -184,7 +186,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { for _, s := range c.SecretReferences { // TODO (ehazlett): use type switch when more are supported if s.File == nil { - logrus.Error("secret target type is not a file target") + log.G(context.TODO()).Error("secret target type is not a file target") continue } @@ -198,7 +200,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { return errors.Wrap(err, "error creating secret mount path") } - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "name": s.File.Name, "path": fPath, }).Debug("injecting secret") @@ -234,7 +236,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { // a valid type of config so we should not error when we encounter // one. 
if configRef.Runtime == nil { - logrus.Error("config target type is not a file or runtime target") + log.G(context.TODO()).Error("config target type is not a file or runtime target") } // However, in any case, this isn't a file config, so we have no // further work to do @@ -249,7 +251,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { return errors.Wrap(err, "error creating config mount path") } - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "name": configRef.File.Name, "path": fPath, }).Debug("injecting config") @@ -309,7 +311,7 @@ func (daemon *Daemon) remountSecretDir(c *container.Container) error { return errors.Wrap(err, "error getting container secrets path") } if err := label.Relabel(dir, c.MountLabel, false); err != nil { - logrus.WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label") + log.G(context.TODO()).WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label") } rootIDs := daemon.idMapping.RootPair() tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) @@ -325,13 +327,13 @@ func (daemon *Daemon) remountSecretDir(c *container.Container) error { func (daemon *Daemon) cleanupSecretDir(c *container.Container) { dir, err := c.SecretMountPath() if err != nil { - logrus.WithError(err).WithField("container", c.ID).Warn("error getting secrets mount path for container") + log.G(context.TODO()).WithError(err).WithField("container", c.ID).Warn("error getting secrets mount path for container") } if err := mount.RecursiveUnmount(dir); err != nil { - logrus.WithField("dir", dir).WithError(err).Warn("Error while attempting to unmount dir, this may prevent removal of container.") + log.G(context.TODO()).WithField("dir", dir).WithError(err).Warn("Error while attempting to unmount dir, this may prevent removal of container.") } if err := os.RemoveAll(dir); err != nil { - logrus.WithField("dir", 
dir).WithError(err).Error("Error removing dir.") + log.G(context.TODO()).WithField("dir", dir).WithError(err).Error("Error removing dir.") } } @@ -347,7 +349,7 @@ func killProcessDirectly(container *container.Container) error { return errdefs.System(err) } err = errNoSuchProcess{pid, syscall.SIGKILL} - logrus.WithError(err).WithField("container", container.ID).Debug("no such process") + log.G(context.TODO()).WithError(err).WithField("container", container.ID).Debug("no such process") return err } @@ -356,7 +358,7 @@ func killProcessDirectly(container *container.Container) error { // Since we can not kill a zombie pid, add zombie check here isZombie, err := process.Zombie(pid) if err != nil { - logrus.WithError(err).WithField("container", container.ID).Warn("Container state is invalid") + log.G(context.TODO()).WithError(err).WithField("container", container.ID).Warn("Container state is invalid") return err } if isZombie { diff --git a/daemon/container_operations_windows.go b/daemon/container_operations_windows.go index 7e8324f729..f122038ed8 100644 --- a/daemon/container_operations_windows.go +++ b/daemon/container_operations_windows.go @@ -1,9 +1,11 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "os" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libnetwork" @@ -22,7 +24,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { } localPath := c.ConfigsDirPath() - logrus.Debugf("configs: setting up config dir: %s", localPath) + log.G(context.TODO()).Debugf("configs: setting up config dir: %s", localPath) // create local config root if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil { @@ -32,7 +34,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { defer func() { if setupErr != nil { if err := os.RemoveAll(localPath); err != 
nil { - logrus.Errorf("error cleaning up config dir: %s", err) + log.G(context.TODO()).Errorf("error cleaning up config dir: %s", err) } } }() @@ -48,7 +50,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { // a valid type of config so we should not error when we encounter // one. if configRef.Runtime == nil { - logrus.Error("config target type is not a file or runtime target") + log.G(context.TODO()).Error("config target type is not a file or runtime target") } // However, in any case, this isn't a file config, so we have no // further work to do @@ -59,7 +61,7 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { if err != nil { return errors.Wrap(err, "error getting config file path for container") } - log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) log.Debug("injecting config") config, err := c.DependencyStore.Configs().Get(configRef.ConfigID) @@ -97,7 +99,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { if err != nil { return err } - logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + log.G(context.TODO()).Debugf("secrets: setting up secret dir: %s", localMountPath) // create local secret root if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil { @@ -107,7 +109,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { defer func() { if setupErr != nil { if err := os.RemoveAll(localMountPath); err != nil { - logrus.Errorf("error cleaning up secret mount: %s", err) + log.G(context.TODO()).Errorf("error cleaning up secret mount: %s", err) } } }() @@ -119,7 +121,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { for _, s := range c.SecretReferences { // TODO (ehazlett): use type switch when more are 
supported if s.File == nil { - logrus.Error("secret target type is not a file target") + log.G(context.TODO()).Error("secret target type is not a file target") continue } @@ -129,7 +131,7 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { if err != nil { return err } - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "name": s.File.Name, "path": fPath, }).Debug("injecting secret") diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index df46b25963..418c5dbb7d 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -12,6 +12,7 @@ import ( "github.com/containerd/containerd/content" cerrdefs "github.com/containerd/containerd/errdefs" containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" cplatforms "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" containertypes "github.com/docker/docker/api/types/container" @@ -25,7 +26,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -113,7 +113,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima // This is unexpected - dangling image should be deleted // as soon as another image with the same target is created. // Log a warning, but don't error out the whole operation. - logrus.WithField("refs", tagged).Warn("multiple images have the same target, but one of them is still dangling") + log.G(ctx).WithField("refs", tagged).Warn("multiple images have the same target, but one of them is still dangling") } continue } @@ -122,7 +122,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima if err != nil { // This is inconsistent with `docker image ls` which will // still include the malformed name in RepoTags. 
- logrus.WithField("name", name).WithError(err).Error("failed to parse image name as reference") + log.G(ctx).WithField("name", name).WithError(err).Error("failed to parse image name as reference") continue } refs = append(refs, name) @@ -132,7 +132,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima // This could only happen if digest is invalid, but considering that // we get it from the Descriptor it's highly unlikely. // Log error just in case. - logrus.WithError(err).Error("failed to create digested reference") + log.G(ctx).WithError(err).Error("failed to create digested reference") continue } refs = append(refs, digested) diff --git a/daemon/containerd/image_builder.go b/daemon/containerd/image_builder.go index 3ee8aa7642..a27b102c85 100644 --- a/daemon/containerd/image_builder.go +++ b/daemon/containerd/image_builder.go @@ -22,6 +22,7 @@ import ( // "github.com/docker/docker/api/types/container" containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/image" "github.com/docker/docker/builder" "github.com/docker/docker/errdefs" @@ -34,7 +35,6 @@ import ( "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) // GetImageAndReleasableLayer returns an image and releaseable layer for a @@ -154,7 +154,7 @@ This is most likely caused by a bug in the build system that created the fetched Please notify the image author to correct the configuration.`, platforms.Format(p), platforms.Format(imgPlat), name, ) - logrus.WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to an image whose configuration does not match the platform in the manifest.") + log.G(ctx).WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to 
an image whose configuration does not match the platform in the manifest.") } } else { return nil, err @@ -257,11 +257,11 @@ func (rl *rolayer) Release() error { return nil } if err := mount.UnmountAll(rl.root, 0); err != nil { - logrus.WithError(err).WithField("root", rl.root).Error("failed to unmount ROLayer") + log.G(context.TODO()).WithError(err).WithField("root", rl.root).Error("failed to unmount ROLayer") return err } if err := os.Remove(rl.root); err != nil { - logrus.WithError(err).WithField("dir", rl.root).Error("failed to remove mount temp dir") + log.G(context.TODO()).WithError(err).WithField("dir", rl.root).Error("failed to remove mount temp dir") return err } rl.root = "" @@ -370,11 +370,11 @@ func (rw *rwlayer) Release() error { return nil } if err := mount.UnmountAll(rw.root, 0); err != nil { - logrus.WithError(err).WithField("root", rw.root).Error("failed to unmount ROLayer") + log.G(context.TODO()).WithError(err).WithField("root", rw.root).Error("failed to unmount ROLayer") return err } if err := os.Remove(rw.root); err != nil { - logrus.WithError(err).WithField("dir", rw.root).Error("failed to remove mount temp dir") + log.G(context.TODO()).WithError(err).WithField("dir", rw.root).Error("failed to remove mount temp dir") return err } rw.root = "" diff --git a/daemon/containerd/image_changes.go b/daemon/containerd/image_changes.go index 94c56cf616..8ddd3fdff1 100644 --- a/daemon/containerd/image_changes.go +++ b/daemon/containerd/image_changes.go @@ -5,13 +5,13 @@ import ( "encoding/json" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/docker/docker/container" "github.com/docker/docker/pkg/archive" "github.com/google/uuid" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) func (i *ImageService) Changes(ctx context.Context, container *container.Container) ([]archive.Change, 
error) { @@ -54,7 +54,7 @@ func (i *ImageService) Changes(ctx context.Context, container *container.Contain } defer func() { if err := snapshotter.Remove(ctx, rnd.String()); err != nil { - logrus.WithError(err).WithField("key", rnd.String()).Warn("remove temporary snapshot") + log.G(ctx).WithError(err).WithField("key", rnd.String()).Warn("remove temporary snapshot") } }() diff --git a/daemon/containerd/image_children.go b/daemon/containerd/image_children.go index ba8c939549..d7cfe3369a 100644 --- a/daemon/containerd/image_children.go +++ b/daemon/containerd/image_children.go @@ -6,6 +6,7 @@ import ( "github.com/containerd/containerd/content" cerrdefs "github.com/containerd/containerd/errdefs" containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" @@ -35,7 +36,7 @@ func (i *ImageService) Children(ctx context.Context, id image.ID) ([]image.ID, e rootfs, err := platformRootfs(ctx, cs, target, platform) if err != nil { if !cerrdefs.IsNotFound(err) { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "image": target.Digest, "platform": platform, @@ -59,7 +60,7 @@ func (i *ImageService) Children(ctx context.Context, id image.ID) ([]image.ID, e rootfs, err := platformRootfs(ctx, cs, img.Target, platform) if err != nil { if !cerrdefs.IsNotFound(err) { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "image": img.Target.Digest, "platform": platform, diff --git a/daemon/containerd/image_commit.go b/daemon/containerd/image_commit.go index 56941ee586..b46e0321dc 100644 --- a/daemon/containerd/image_commit.go +++ b/daemon/containerd/image_commit.go @@ -16,6 +16,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + 
"github.com/containerd/containerd/log" "github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/snapshots" "github.com/docker/docker/api/types/backend" @@ -24,7 +25,6 @@ import ( "github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) /* @@ -122,14 +122,14 @@ func generateCommitImageConfig(baseConfig ocispec.Image, diffID digest.Digest, o arch := baseConfig.Architecture if arch == "" { arch = runtime.GOARCH - logrus.Warnf("assuming arch=%q", arch) + log.G(context.TODO()).Warnf("assuming arch=%q", arch) } os := baseConfig.OS if os == "" { os = runtime.GOOS - logrus.Warnf("assuming os=%q", os) + log.G(context.TODO()).Warnf("assuming os=%q", os) } - logrus.Debugf("generateCommitImageConfig(): arch=%q, os=%q", arch, os) + log.G(context.TODO()).Debugf("generateCommitImageConfig(): arch=%q, os=%q", arch, os) return ocispec.Image{ Platform: ocispec.Platform{ Architecture: arch, @@ -262,7 +262,7 @@ func applyDiffLayer(ctx context.Context, name string, baseImg ocispec.Image, sn // NOTE: the snapshotter should be hold by lease. Even // if the cleanup fails, the containerd gc can delete it. 
if err := sn.Remove(ctx, key); err != nil { - logrus.Warnf("failed to cleanup aborted apply %s: %s", key, err) + log.G(ctx).Warnf("failed to cleanup aborted apply %s: %s", key, err) } } }() diff --git a/daemon/containerd/image_delete.go b/daemon/containerd/image_delete.go index 7566cdbd9b..3907a6a85e 100644 --- a/daemon/containerd/image_delete.go +++ b/daemon/containerd/image_delete.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/container" @@ -14,7 +15,6 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) // ImageDelete deletes the image referenced by the given imageRef from this @@ -135,7 +135,7 @@ func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, p } defer func() { if err := i.unleaseSnapshotsFromDeletedConfigs(context.Background(), possiblyDeletedConfigs); err != nil { - logrus.WithError(err).Warn("failed to unlease snapshots") + log.G(ctx).WithError(err).Warn("failed to unlease snapshots") } }() @@ -145,7 +145,7 @@ func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, p if prune { parents, err = i.parents(ctx, image.ID(imgID)) if err != nil { - logrus.WithError(err).Warn("failed to get image parents") + log.G(ctx).WithError(err).Warn("failed to get image parents") } sortParentsByAffinity(parents) } @@ -168,7 +168,7 @@ func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, p } err = i.imageDeleteHelper(ctx, parent.img, &records, false) if err != nil { - logrus.WithError(err).Warn("failed to remove image parent") + log.G(ctx).WithError(err).Warn("failed to remove image parent") break } parentID := parent.img.Target.Digest.String() diff --git a/daemon/containerd/image_exporter.go 
b/daemon/containerd/image_exporter.go index 2eec15c7b8..eeb6392d02 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -9,6 +9,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/archive" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" cplatforms "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" @@ -84,14 +85,14 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea ref = reference.TagNameOnly(ref) opts = append(opts, archive.WithManifest(target, ref.String())) - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "target": target, "name": ref.String(), }).Debug("export image") } else { opts = append(opts, archive.WithManifest(target)) - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "target": target, }).Debug("export image without name") } @@ -122,7 +123,7 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt imgs, err := i.client.Import(ctx, inTar, opts...) 
if err != nil { - logrus.WithError(err).Debug("failed to import image to containerd") + log.G(ctx).WithError(err).Debug("failed to import image to containerd") return errdefs.System(err) } @@ -140,7 +141,7 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt } err = i.walkImageManifests(ctx, img, func(platformImg *ImageManifest) error { - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "image": name, "manifest": platformImg.Target().Digest, }) @@ -213,16 +214,16 @@ func (i *ImageService) getBestDescriptorForExport(ctx context.Context, indexDesc available, _, _, missing, err := containerdimages.Check(ctx, store, mfst, nil) if err != nil { hasMissingManifests = true - logrus.WithField("manifest", mfst.Digest).Warn("failed to check manifest's blob availability, won't export") + log.G(ctx).WithField("manifest", mfst.Digest).Warn("failed to check manifest's blob availability, won't export") continue } if available && len(missing) == 0 { presentManifests = append(presentManifests, mfst) - logrus.WithField("manifest", mfst.Digest).Debug("manifest content present, will export") + log.G(ctx).WithField("manifest", mfst.Digest).Debug("manifest content present, will export") } else { hasMissingManifests = true - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "manifest": mfst.Digest, "missing": missing, }).Debug("manifest is missing, won't export") diff --git a/daemon/containerd/image_import.go b/daemon/containerd/image_import.go index e4716642df..7738b6e203 100644 --- a/daemon/containerd/image_import.go +++ b/daemon/containerd/image_import.go @@ -12,6 +12,7 @@ import ( "github.com/containerd/containerd/content" cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/container" @@ 
-40,7 +41,7 @@ func (i *ImageService) ImportImage(ctx context.Context, ref reference.Named, pla if ref != nil { refString = ref.String() } - logger := logrus.WithField("ref", refString) + logger := log.G(ctx).WithField("ref", refString) ctx, release, err := i.client.WithLease(ctx) if err != nil { diff --git a/daemon/containerd/image_list.go b/daemon/containerd/image_list.go index 0e53af57b2..6048f03a19 100644 --- a/daemon/containerd/image_list.go +++ b/daemon/containerd/image_list.go @@ -10,6 +10,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -91,7 +92,7 @@ func (i *ImageService) Images(ctx context.Context, opts types.ImageListOptions) available, err := img.CheckContentAvailable(ctx) if err != nil { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "manifest": img.Target(), "image": img.Name(), @@ -182,7 +183,7 @@ func (i *ImageService) singlePlatformImage(ctx context.Context, contentStore con rawImg := image.Metadata() target := rawImg.Target.Digest - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "name": rawImg.Name, "digest": target, }) @@ -434,7 +435,7 @@ func setupLabelFilter(store content.Store, fltrs filters.Args) (func(image image return true } if err != nil { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "image": image.Name, "checks": checks, diff --git a/daemon/containerd/image_prune.go b/daemon/containerd/image_prune.go index d4c2052a13..7864cda4a0 100644 --- a/daemon/containerd/image_prune.go +++ b/daemon/containerd/image_prune.go @@ -5,6 +5,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" containerdimages 
"github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -79,7 +80,7 @@ func (i *ImageService) pruneUnused(ctx context.Context, filterFunc imageFilterFu // Apply filters for name, img := range imagesToPrune { filteredOut := !filterFunc(img) - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "image": name, "filteredOut": filteredOut, }).Debug("filtering image") @@ -99,7 +100,7 @@ func (i *ImageService) pruneUnused(ctx context.Context, filterFunc imageFilterFu // Warning: This doesn't handle truncated ids: // `docker run 124c7d2` will have Image="124c7d270790" ref, err := reference.ParseNormalizedNamed(ctr.Config.Image) - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "ctr": ctr.ID, "image": ref, "nameParseErr": err, @@ -121,7 +122,7 @@ func (i *ImageService) pruneUnused(ctx context.Context, filterFunc imageFilterFu }() for _, img := range imagesToPrune { - logrus.WithField("image", img).Debug("pruning image") + log.G(ctx).WithField("image", img).Debug("pruning image") blobs := []ocispec.Descriptor{} @@ -207,7 +208,7 @@ func (i *ImageService) unleaseSnapshotsFromDeletedConfigs(ctx context.Context, p info, err := store.Info(ctx, cfgDigest) if err != nil { if cerrdefs.IsNotFound(err) { - logrus.WithField("config", cfgDigest).Debug("config already gone") + log.G(ctx).WithField("config", cfgDigest).Debug("config already gone") } else { errs = multierror.Append(errs, err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index 8d2c873143..d8c1626335 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -7,6 +7,7 @@ import ( "github.com/containerd/containerd" cerrdefs "github.com/containerd/containerd/errdefs" 
"github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/snapshotters" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" @@ -75,7 +76,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, return err } - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "digest": img.Target().Digest, "remote": ref.String(), }) diff --git a/daemon/containerd/image_push.go b/daemon/containerd/image_push.go index f0a38ef63b..feaa8356d8 100644 --- a/daemon/containerd/image_push.go +++ b/daemon/containerd/image_push.go @@ -11,6 +11,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" @@ -21,7 +22,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -48,7 +48,7 @@ func (i *ImageService) PushImage(ctx context.Context, targetRef reference.Named, defer func() { err := release(leasedCtx) if err != nil && !cerrdefs.IsNotFound(err) { - logrus.WithField("image", targetRef).WithError(err).Error("failed to delete lease created for push") + log.G(ctx).WithField("image", targetRef).WithError(err).Error("failed to delete lease created for push") } }() @@ -136,7 +136,7 @@ func (i *ImageService) PushImage(ctx context.Context, targetRef reference.Named, if err := containerdimages.Dispatch(ctx, appendSource, nil, target); err != nil { // Shouldn't happen, but even if it would fail, then make it only a warning // because it doesn't affect the pushed data. 
- logrus.WithError(err).Warn("failed to append distribution source labels to pushed content") + log.G(ctx).WithError(err).Warn("failed to append distribution source labels to pushed content") } } @@ -157,7 +157,7 @@ func findMissingMountable(ctx context.Context, store content.Store, queue *jobs, if !errdefs.IsNotFound(err) { return nil, err } - logrus.WithField("target", target).Debug("distribution source label not found") + log.G(ctx).WithField("target", target).Debug("distribution source label not found") return mountableBlobs, nil } diff --git a/daemon/containerd/image_tag.go b/daemon/containerd/image_tag.go index 24dcb20a80..79ce2895ce 100644 --- a/daemon/containerd/image_tag.go +++ b/daemon/containerd/image_tag.go @@ -5,6 +5,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" @@ -54,7 +55,7 @@ func (i *ImageService) TagImage(ctx context.Context, imageID image.ID, newTag re } } - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "imageID": imageID.String(), "tag": newTag.String(), }) diff --git a/daemon/containerd/mount.go b/daemon/containerd/mount.go index 6f36364f7c..edd63776eb 100644 --- a/daemon/containerd/mount.go +++ b/daemon/containerd/mount.go @@ -4,8 +4,8 @@ import ( "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" - "github.com/sirupsen/logrus" ) // Mount mounts the container filesystem in a temporary location, use defer imageService.Unmount @@ -22,7 +22,7 @@ func (i *ImageService) Mount(ctx context.Context, container *container.Container return fmt.Errorf("failed to mount %s: %w", root, err) } - logrus.WithField("container", container.ID).Debugf("container mounted via snapshotter: %v", root) + log.G(ctx).WithField("container", 
container.ID).Debugf("container mounted via snapshotter: %v", root) container.BaseFS = root return nil @@ -33,7 +33,7 @@ func (i *ImageService) Unmount(ctx context.Context, container *container.Contain root := container.BaseFS if err := i.refCountMounter.Unmount(root); err != nil { - logrus.WithField("container", container.ID).WithError(err).Error("error unmounting container") + log.G(ctx).WithField("container", container.ID).WithError(err).Error("error unmounting container") return fmt.Errorf("failed to unmount %s: %w", root, err) } diff --git a/daemon/containerd/progress.go b/daemon/containerd/progress.go index 80ea23b6ac..bb8eed82a6 100644 --- a/daemon/containerd/progress.go +++ b/daemon/containerd/progress.go @@ -8,13 +8,13 @@ import ( "github.com/containerd/containerd/content" cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) type progressUpdater interface { @@ -48,7 +48,7 @@ func (j *jobs) showProgress(ctx context.Context, out progress.Output, updater pr case <-ticker.C: if err := updater.UpdateProgress(ctx, j, out, start); err != nil { if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - logrus.WithError(err).Error("Updating progress failed") + log.G(ctx).WithError(err).Error("Updating progress failed") } } case <-ctx.Done(): @@ -114,7 +114,7 @@ func (p pullProgress) UpdateProgress(ctx context.Context, ongoing *jobs, out pro if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return err } - logrus.WithError(err).Error("status check failed") + log.G(ctx).WithError(err).Error("status check failed") return nil } pulling := make(map[string]content.Status, 
len(actives)) diff --git a/daemon/containerd/resolver.go b/daemon/containerd/resolver.go index 85f115f83a..3ed3d19625 100644 --- a/daemon/containerd/resolver.go +++ b/daemon/containerd/resolver.go @@ -6,6 +6,7 @@ import ( "errors" "net/http" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/version" @@ -63,7 +64,7 @@ func authorizationCredsFromAuthConfig(authConfig registrytypes.AuthConfig) docke return docker.WithAuthCreds(func(host string) (string, string, error) { if cfgHost != host { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "host": host, "cfgHost": cfgHost, }).Warn("Host doesn't match") diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 7699ad7109..6b9fa26afb 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -7,6 +7,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/snapshots" @@ -177,7 +178,7 @@ func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID st mfst, err := i.GetImageManifest(ctx, ctr.Config.Image, imagetypes.GetImageOpts{}) if err != nil { // Log error, don't error out whole operation. 
- logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "container": containerID, }).Warn("empty ImageManifest, can't calculate base image size") diff --git a/daemon/containerfs_linux.go b/daemon/containerfs_linux.go index 747112fd15..78926afdd9 100644 --- a/daemon/containerfs_linux.go +++ b/daemon/containerfs_linux.go @@ -9,10 +9,10 @@ import ( "runtime" "strings" + "github.com/containerd/containerd/log" "github.com/hashicorp/go-multierror" "github.com/moby/sys/mount" "github.com/moby/sys/symlink" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" "github.com/docker/docker/api/types" @@ -136,7 +136,7 @@ func (daemon *Daemon) openContainerFS(container *container.Container) (_ *contai if m.ReadOnlyForceRecursive { return err } else { - logrus.WithError(err).Debugf("Failed to make %q recursively read-only", dest) + log.G(context.TODO()).WithError(err).Debugf("Failed to make %q recursively read-only", dest) } } } diff --git a/daemon/create.go b/daemon/create.go index 1751cb6012..153e7e2511 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -23,7 +24,6 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux" "github.com/pkg/errors" - "github.com/sirupsen/logrus" archvariant "github.com/tonistiigi/go-archvariant" ) @@ -135,7 +135,7 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts if daemon.UsesSnapshotter() { imgManifest, err = daemon.imageService.GetImageManifest(ctx, opts.params.Config.Image, imagetypes.GetImageOpts{Platform: opts.params.Platform}) if err != nil { - logrus.WithError(err).Error("failed to find image manifest") + log.G(ctx).WithError(err).Error("failed to find image manifest") return nil, 
err } } @@ -171,7 +171,7 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts RemoveVolume: true, }) if err != nil { - logrus.WithError(err).Error("failed to cleanup container on create error") + log.G(ctx).WithError(err).Error("failed to cleanup container on create error") } } }() diff --git a/daemon/create_unix.go b/daemon/create_unix.go index 46ebc32555..5a664333e5 100644 --- a/daemon/create_unix.go +++ b/daemon/create_unix.go @@ -8,13 +8,13 @@ import ( "os" "path/filepath" + "github.com/containerd/containerd/log" containertypes "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/container" "github.com/docker/docker/oci" volumeopts "github.com/docker/docker/volume/service/opts" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" ) // createContainerOSSpecificSettings performs host-OS specific container create functionality @@ -45,7 +45,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con // Skip volumes for which we already have something mounted on that // destination because of a --volume-from. if container.HasMountFor(destination) { - logrus.WithField("container", container.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume") + log.G(context.TODO()).WithField("container", container.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume") // Not an error, this could easily have come from the image config. 
continue } @@ -85,7 +85,7 @@ func (daemon *Daemon) populateVolumes(c *container.Container) error { continue } - logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + log.G(context.TODO()).Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { return err } diff --git a/daemon/daemon.go b/daemon/daemon.go index bd20e158f0..b58a13b1e7 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -19,8 +19,11 @@ import ( "sync/atomic" "time" + "github.com/sirupsen/logrus" + "github.com/containerd/containerd" "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/pkg/userns" "github.com/containerd/containerd/remotes/docker" @@ -69,7 +72,6 @@ import ( resolverconfig "github.com/moby/buildkit/util/resolver/config" "github.com/moby/locker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "go.etcd.io/bbolt" "golang.org/x/sync/semaphore" "google.golang.org/grpc" @@ -258,7 +260,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { var mapLock sync.Mutex containers := make(map[string]*container.Container) - logrus.Info("Loading containers: start.") + log.G(context.TODO()).Info("Loading containers: start.") dir, err := os.ReadDir(daemon.repository) if err != nil { @@ -283,7 +285,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { _ = sem.Acquire(context.Background(), 1) defer sem.Release(1) - log := logrus.WithField("container", id) + log := log.G(context.TODO()).WithField("container", id) c, err := daemon.load(id) if err != nil { @@ -326,7 +328,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { _ = sem.Acquire(context.Background(), 1) defer sem.Release(1) - log := logrus.WithField("container", c.ID) + log := log.G(context.TODO()).WithField("container", c.ID) if err := daemon.registerName(c); err != nil { 
log.WithError(err).Errorf("failed to register container name: %s", c.Name) @@ -353,7 +355,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { _ = sem.Acquire(context.Background(), 1) defer sem.Release(1) - log := logrus.WithField("container", c.ID) + log := log.G(context.TODO()).WithField("container", c.ID) if err := daemon.checkpointAndSave(c); err != nil { log.WithError(err).Error("error saving backported mountspec to disk") @@ -541,7 +543,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { _ = sem.Acquire(context.Background(), 1) if err := daemon.registerLinks(c, c.HostConfig); err != nil { - logrus.WithField("container", c.ID).WithError(err).Error("failed to register link for container") + log.G(context.TODO()).WithField("container", c.ID).WithError(err).Error("failed to register link for container") } sem.Release(1) @@ -555,7 +557,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { go func(c *container.Container, chNotify chan struct{}) { _ = sem.Acquire(context.Background(), 1) - log := logrus.WithField("container", c.ID) + log := log.G(context.TODO()).WithField("container", c.ID) log.Debug("starting container") @@ -594,7 +596,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { _ = sem.Acquire(context.Background(), 1) if err := daemon.containerRm(&cfg.Config, cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { - logrus.WithField("container", cid).WithError(err).Error("failed to remove container") + log.G(context.TODO()).WithField("container", cid).WithError(err).Error("failed to remove container") } sem.Release(1) @@ -625,7 +627,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { _ = sem.Acquire(context.Background(), 1) if err := daemon.prepareMountPoints(c); err != nil { - logrus.WithField("container", c.ID).WithError(err).Error("failed to prepare mountpoints for container") + log.G(context.TODO()).WithField("container", c.ID).WithError(err).Error("failed to prepare mountpoints for 
container") } sem.Release(1) @@ -634,7 +636,7 @@ func (daemon *Daemon) restore(cfg *configStore) error { } group.Wait() - logrus.Info("Loading containers: done.") + log.G(context.TODO()).Info("Loading containers: done.") return nil } @@ -671,7 +673,7 @@ func (daemon *Daemon) restartSwarmContainers(ctx context.Context, cfg *configSto } if err := daemon.containerStart(ctx, cfg, c, "", "", true); err != nil { - logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container") + log.G(ctx).WithField("container", c.ID).WithError(err).Error("failed to start swarm container") } sem.Release(1) @@ -697,7 +699,7 @@ func (daemon *Daemon) registerLink(parent, child *container.Container, alias str fullName := path.Join(parent.Name, alias) if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil { if errors.Is(err, container.ErrNameReserved) { - logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + log.G(context.TODO()).Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) return nil } return err @@ -735,10 +737,10 @@ func (daemon *Daemon) DaemonLeavesCluster() { select { case <-done: case <-timeout.C: - logrus.Warn("timeout while waiting for ingress network removal") + log.G(context.TODO()).Warn("timeout while waiting for ingress network removal") } } else { - logrus.Warnf("failed to initiate ingress network removal: %v", err) + log.G(context.TODO()).Warnf("failed to initiate ingress network removal: %v", err) } daemon.attachmentStore.ClearAttachments() @@ -773,7 +775,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // Ensure that we have a correct root key limit for launching containers. 
if err := modifyRootKeyLimit(); err != nil { - logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) + log.G(ctx).Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) } // Ensure we have compatible and valid configuration options @@ -793,7 +795,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S } rootIDs := idMapping.RootPair() if err := setMayDetachMounts(); err != nil { - logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") + log.G(ctx).WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } // set up the tmpDir to use a canonical path @@ -846,7 +848,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S if err != nil { // Use a fresh context here. Passed context could be cancelled. if err := d.Shutdown(context.Background()); err != nil { - logrus.Error(err) + log.G(ctx).Error(err) } } }() @@ -872,12 +874,12 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S } if err := configureMaxThreads(&configStore.Config); err != nil { - logrus.Warnf("Failed to configure golang's threads limit: %v", err) + log.G(ctx).Warnf("Failed to configure golang's threads limit: %v", err) } // ensureDefaultAppArmorProfile does nothing if apparmor is disabled if err := ensureDefaultAppArmorProfile(); err != nil { - logrus.Errorf(err.Error()) + log.G(ctx).Errorf(err.Error()) } daemonRepo := filepath.Join(configStore.Root, "containers") @@ -988,7 +990,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S if err != nil { return nil, errors.Wrap(err, "failed to set log opts") } - logrus.Debugf("Using default logging driver %s", d.defaultLogConfig.Type) + log.G(ctx).Debugf("Using default logging driver %s", d.defaultLogConfig.Type) d.volumes, err = volumesservice.NewVolumeService(configStore.Root, d.PluginStore, 
rootIDs, d) if err != nil { @@ -1033,16 +1035,16 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S if isWindows { driverName = "windowsfilter" } else if driverName != "" { - logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) + log.G(ctx).Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) } else { driverName = configStore.GraphDriver } if d.UsesSnapshotter() { if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" { - logrus.Warn("Enabling containerd snapshotter through the $TEST_INTEGRATION_USE_SNAPSHOTTER environment variable. This should only be used for testing.") + log.G(ctx).Warn("Enabling containerd snapshotter through the $TEST_INTEGRATION_USE_SNAPSHOTTER environment variable. This should only be used for testing.") } - logrus.Info("Starting daemon with containerd snapshotter integration enabled") + log.G(ctx).Info("Starting daemon with containerd snapshotter integration enabled") // FIXME(thaJeztah): implement automatic snapshotter-selection similar to graph-driver selection; see https://github.com/moby/moby/issues/44076 if driverName == "" { @@ -1150,9 +1152,9 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // if migration is called from daemon/images. layerStore might move as well. 
d.imageService = images.NewImageService(imgSvcConfig) - logrus.Debugf("Max Concurrent Downloads: %d", imgSvcConfig.MaxConcurrentDownloads) - logrus.Debugf("Max Concurrent Uploads: %d", imgSvcConfig.MaxConcurrentUploads) - logrus.Debugf("Max Download Attempts: %d", imgSvcConfig.MaxDownloadAttempts) + log.G(ctx).Debugf("Max Concurrent Downloads: %d", imgSvcConfig.MaxConcurrentDownloads) + log.G(ctx).Debugf("Max Concurrent Uploads: %d", imgSvcConfig.MaxConcurrentUploads) + log.G(ctx).Debugf("Max Download Attempts: %d", imgSvcConfig.MaxDownloadAttempts) } go d.execCommandGC() @@ -1168,7 +1170,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S info := d.SystemInfo() for _, w := range info.Warnings { - logrus.Warn(w) + log.G(ctx).Warn(w) } engineInfo.WithValues( @@ -1185,7 +1187,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S engineCpus.Set(float64(info.NCPU)) engineMemory.Set(float64(info.MemTotal)) - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "version": dockerversion.Version, "commit": dockerversion.GitCommit, "graphdriver": d.ImageService().StorageDriver(), @@ -1265,13 +1267,13 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error { } if daemon.containers != nil { - logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout) - logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg)) + log.G(ctx).Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout) + log.G(ctx).Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg)) daemon.containers.ApplyAll(func(c *container.Container) { if !c.IsRunning() { return } - log := logrus.WithField("container", c.ID) + log := log.G(ctx).WithField("container", c.ID) log.Debug("shutting down container") if err := daemon.shutdownContainer(c); err != nil 
{ log.WithError(err).Error("failed to shut down container") @@ -1286,19 +1288,19 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error { if daemon.volumes != nil { if err := daemon.volumes.Shutdown(); err != nil { - logrus.Errorf("Error shutting down volume store: %v", err) + log.G(ctx).Errorf("Error shutting down volume store: %v", err) } } if daemon.imageService != nil { if err := daemon.imageService.Cleanup(); err != nil { - logrus.Error(err) + log.G(ctx).Error(err) } } // If we are part of a cluster, clean up cluster's stuff if daemon.clusterProvider != nil { - logrus.Debugf("start clean shutdown of cluster resources...") + log.G(ctx).Debugf("start clean shutdown of cluster resources...") daemon.DaemonLeavesCluster() } @@ -1368,13 +1370,13 @@ func prepareTempDir(rootDir string) (string, error) { if err := os.Rename(tmpDir, newName); err == nil { go func() { if err := os.RemoveAll(newName); err != nil { - logrus.Warnf("failed to delete old tmp directory: %s", newName) + log.G(context.TODO()).Warnf("failed to delete old tmp directory: %s", newName) } }() } else if !os.IsNotExist(err) { - logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err) + log.G(context.TODO()).Warnf("failed to rename %s for background deletion: %s. 
Deleting synchronously", tmpDir, err) if err := os.RemoveAll(tmpDir); err != nil { - logrus.Warnf("failed to delete old tmp directory: %s", tmpDir) + log.G(context.TODO()).Warnf("failed to delete old tmp directory: %s", tmpDir) } } } diff --git a/daemon/daemon_linux.go b/daemon/daemon_linux.go index f046ebee18..f2fec32f21 100644 --- a/daemon/daemon_linux.go +++ b/daemon/daemon_linux.go @@ -2,6 +2,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( "bufio" + "context" "fmt" "io" "net" @@ -10,13 +11,13 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/resolvconf" "github.com/moby/sys/mount" "github.com/moby/sys/mountinfo" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) @@ -30,7 +31,7 @@ func getPluginExecRoot(_ *config.Config) string { } func (daemon *Daemon) cleanupMountsByID(id string) error { - logrus.Debugf("Cleaning up old mountid %s: start.", id) + log.G(context.TODO()).Debugf("Cleaning up old mountid %s: start.", id) f, err := os.Open("/proc/self/mountinfo") if err != nil { return err @@ -54,7 +55,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u for _, p := range regexps { if p.MatchString(mnt) { if err := unmount(mnt); err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) errs = append(errs, err.Error()) } } @@ -71,7 +72,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errs, "\n")) } - logrus.Debugf("Cleaning up old mountid %v: done.", id) + log.G(context.TODO()).Debugf("Cleaning up old mountid %v: done.", id) return nil } @@ -105,7 +106,7 @@ func (daemon *Daemon) cleanupMounts(cfg *config.Config) error { return nil } - logrus.WithField("mountpoint", daemon.root).Debug("unmounting daemon 
root") + log.G(context.TODO()).WithField("mountpoint", daemon.root).Debug("unmounting daemon root") if err := mount.Unmount(daemon.root); err != nil { return err } @@ -214,7 +215,7 @@ func kernelSupportsRecursivelyReadOnly() error { } } if umErr != nil { - logrus.WithError(umErr).Warnf("Failed to unmount %q", tmpMnt) + log.G(context.TODO()).WithError(umErr).Warnf("Failed to unmount %q", tmpMnt) } }() attr := &unix.MountAttr{ diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go index f1fb95a1b0..113f5684f7 100644 --- a/daemon/daemon_unix.go +++ b/daemon/daemon_unix.go @@ -18,6 +18,7 @@ import ( "time" "github.com/containerd/cgroups/v3" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" @@ -45,7 +46,6 @@ import ( "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) @@ -222,7 +222,7 @@ func parseSecurityOpt(securityOptions *container.SecurityOptions, config *contai k, v, ok = strings.Cut(opt, "=") } else if strings.Contains(opt, ":") { k, v, ok = strings.Cut(opt, ":") - logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") + log.G(context.TODO()).Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } if !ok { return fmt.Errorf("invalid --security-opt 1: %q", opt) @@ -284,7 +284,7 @@ func adjustParallelLimit(n int, limit int) int { // ulimits to the largest possible value for dockerd). 
var rlim unix.Rlimit if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { - logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) + log.G(context.TODO()).Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err) return limit } softRlimit := int(rlim.Cur) @@ -299,7 +299,7 @@ func adjustParallelLimit(n int, limit int) int { return limit } - logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) + log.G(context.TODO()).Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit) return softRlimit / overhead } @@ -309,10 +309,10 @@ func (daemon *Daemon) adaptContainerSettings(daemonCfg *config.Config, hostConfi if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + log.G(context.TODO()).Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + log.G(context.TODO()).Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } @@ -781,7 +781,7 @@ func configureMaxThreads(config *config.Config) error { } maxThreads := (mtint / 100) * 90 debug.SetMaxThreads(maxThreads) - logrus.Debugf("Golang's threads limit set to %d", maxThreads) + log.G(context.TODO()).Debugf("Golang's threads limit set to %d", maxThreads) return nil } @@ -809,7 +809,7 @@ func 
overlaySupportsSelinux() (bool, error) { func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinux.GetEnabled() { - logrus.Warn("Docker could not enable SELinux on the host system") + log.G(context.TODO()).Warn("Docker could not enable SELinux on the host system") return nil } @@ -822,7 +822,7 @@ func configureKernelSecuritySupport(config *config.Config, driverName string) er } if !supported { - logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) + log.G(context.TODO()).Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) } } } else { @@ -846,7 +846,7 @@ func (daemon *Daemon) initNetworkController(cfg *config.Config, activeSandboxes } if len(activeSandboxes) > 0 { - logrus.Info("there are running containers, updated network configuration will not take affect") + log.G(context.TODO()).Info("there are running containers, updated network configuration will not take affect") } else if err := configureNetworking(daemon.netController, cfg); err != nil { return err } @@ -983,7 +983,7 @@ func initBridgeDriver(controller *libnetwork.Controller, config *config.Config) ipamV4Conf.PreferredPool = ipNet.String() ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { - logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + log.G(context.TODO()).Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.BridgeConfig.FixedCIDR != "" { @@ -1068,7 +1068,7 @@ func initBridgeDriver(controller *libnetwork.Controller, config *config.Config) func removeDefaultBridgeInterface() { if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { if err := netlink.LinkDel(lnk); err != nil { - logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) + log.G(context.TODO()).Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) } } } @@ -1187,10 +1187,10 @@ func setupRemappedRoot(config *config.Config) (idtools.IdentityMapping, error) { if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively - logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + log.G(context.TODO()).Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") return idtools.IdentityMapping{}, nil } - logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) + log.G(context.TODO()).Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) @@ -1235,7 +1235,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID)) - logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + log.G(context.TODO()).Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist if err := idtools.MkdirAllAndChown(config.Root, 0710, id); 
err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) @@ -1257,7 +1257,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools } if err := setupDaemonRootPropagation(config); err != nil { - logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") + log.G(context.TODO()).WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") } return nil } @@ -1303,7 +1303,7 @@ func setupDaemonRootPropagation(cfg *config.Config) error { return } if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { - logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") + log.G(context.TODO()).WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") } }() @@ -1427,7 +1427,7 @@ func setMayDetachMounts() error { // unprivileged container. Ignore the error, but log // it if we appear not to be in that situation. 
if !userns.RunningInUserNS() { - logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") + log.G(context.TODO()).Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") } return nil } diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go index 40e73d13b1..5800deac78 100644 --- a/daemon/daemon_windows.go +++ b/daemon/daemon_windows.go @@ -28,7 +28,7 @@ import ( "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) @@ -274,7 +274,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand err = v.Delete() if err != nil { - logrus.Errorf("Error occurred when removing network %v", err) + log.G(context.TODO()).Errorf("Error occurred when removing network %v", err) } _, err := daemon.netController.NewNetwork("nat", name, id, @@ -284,7 +284,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { - logrus.Errorf("Error occurred when creating network %v", err) + log.G(context.TODO()).Errorf("Error occurred when creating network %v", err) } continue } @@ -293,7 +293,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { - logrus.Errorf("Error occurred when removing network %v", err) + log.G(context.TODO()).Errorf("Error occurred when removing network %v", err) } } } @@ -392,7 +392,7 @@ func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSand ) if err != nil { - logrus.Errorf("Error occurred when creating network %v", err) + log.G(context.TODO()).Errorf("Error occurred when creating network %v", err) } } @@ -544,7 +544,7 @@ func (daemon *Daemon) 
setDefaultIsolation(config *config.Config) error { } } - logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) + log.G(context.TODO()).Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } diff --git a/daemon/debugtrap_unix.go b/daemon/debugtrap_unix.go index 467e5118a2..35d3e5a554 100644 --- a/daemon/debugtrap_unix.go +++ b/daemon/debugtrap_unix.go @@ -3,11 +3,12 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "os" "os/signal" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/stack" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -18,9 +19,9 @@ func (daemon *Daemon) setupDumpStackTrap(root string) { for range c { path, err := stack.DumpToFile(root) if err != nil { - logrus.WithError(err).Error("failed to write goroutines dump") + log.G(context.TODO()).WithError(err).Error("failed to write goroutines dump") } else { - logrus.Infof("goroutine stacks written to %s", path) + log.G(context.TODO()).Infof("goroutine stacks written to %s", path) } } }() diff --git a/daemon/debugtrap_windows.go b/daemon/debugtrap_windows.go index 56e505e49b..64eba67975 100644 --- a/daemon/debugtrap_windows.go +++ b/daemon/debugtrap_windows.go @@ -1,12 +1,13 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "os" "unsafe" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/stack" - "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -18,7 +19,7 @@ func (daemon *Daemon) setupDumpStackTrap(root string) { ev, _ := windows.UTF16PtrFromString(event) sd, err := windows.SecurityDescriptorFromString("D:P(A;;GA;;;BA)(A;;GA;;;SY)") if err != nil { - logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error()) + log.G(context.TODO()).Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error()) return } var sa windows.SecurityAttributes @@ 
-27,18 +28,18 @@ func (daemon *Daemon) setupDumpStackTrap(root string) { sa.SecurityDescriptor = sd h, err := windows.CreateEvent(&sa, 0, 0, ev) if h == 0 || err != nil { - logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error()) + log.G(context.TODO()).Errorf("failed to create debug stackdump event %s: %s", event, err.Error()) return } go func() { - logrus.Debugf("Stackdump - waiting signal at %s", event) + log.G(context.TODO()).Debugf("Stackdump - waiting signal at %s", event) for { windows.WaitForSingleObject(h, windows.INFINITE) path, err := stack.DumpToFile(root) if err != nil { - logrus.WithError(err).Error("failed to write goroutines dump") + log.G(context.TODO()).WithError(err).Error("failed to write goroutines dump") } else { - logrus.Infof("goroutine stacks written to %s", path) + log.G(context.TODO()).Infof("goroutine stacks written to %s", path) } } }() diff --git a/daemon/delete.go b/daemon/delete.go index 6bb7fdf03e..5a230d13c5 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -9,6 +9,7 @@ import ( "time" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" @@ -17,7 +18,6 @@ import ( "github.com/docker/docker/pkg/containerfs" "github.com/opencontainers/selinux/go-selinux" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ContainerRm removes the container id from the filesystem. 
An error @@ -77,7 +77,7 @@ func (daemon *Daemon) rmLink(cfg *config.Config, container *container.Container, if parentContainer != nil { daemon.linkIndex.unlink(name, container, parentContainer) if err := daemon.updateNetwork(cfg, parentContainer); err != nil { - logrus.Debugf("Could not update network to remove link %s: %v", n, err) + log.G(context.TODO()).Debugf("Could not update network to remove link %s: %v", n, err) } } return nil @@ -129,7 +129,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, config ty // container meta file got removed from disk, then a restart of // docker should not make a dead container alive. if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) { - logrus.Errorf("Error saving dying container to disk: %v", err) + log.G(context.TODO()).Errorf("Error saving dying container to disk: %v", err) } container.Unlock() @@ -173,7 +173,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, config ty daemon.containers.Delete(container.ID) daemon.containersReplica.Delete(container) if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) } for _, name := range linkNames { daemon.releaseName(name) diff --git a/daemon/events.go b/daemon/events.go index 1812c0eebf..684ddff949 100644 --- a/daemon/events.go +++ b/daemon/events.go @@ -6,6 +6,7 @@ import ( "strings" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/container" @@ -13,7 +14,6 @@ import ( "github.com/docker/docker/libnetwork" gogotypes "github.com/gogo/protobuf/types" swarmapi "github.com/moby/swarmkit/v2/api" - "github.com/sirupsen/logrus" ) var ( @@ -127,7 +127,7 @@ func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStre return case message, ok := <-watchStream: if !ok { - 
logrus.Debug("cluster event channel has stopped") + log.G(ctx).Debug("cluster event channel has stopped") return } daemon.generateClusterEvent(message) @@ -138,7 +138,7 @@ func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStre func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) { for _, event := range msg.Events { if event.Object == nil { - logrus.Errorf("event without object: %v", event) + log.G(context.TODO()).Errorf("event without object: %v", event) continue } switch v := event.Object.GetObject().(type) { @@ -153,7 +153,7 @@ func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) { case *swarmapi.Object_Config: daemon.logConfigEvent(event.Action, v.Config, event.OldObject.GetConfig()) default: - logrus.Warnf("unrecognized event: %v", event) + log.G(context.TODO()).Warnf("unrecognized event: %v", event) } } } @@ -245,7 +245,7 @@ func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service * } } else { // This should not happen. - logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) + log.G(context.TODO()).Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) } } // check replicated count change @@ -259,7 +259,7 @@ func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service * } } else { // This should not happen. 
- logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) + log.G(context.TODO()).Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) } } if service.UpdateStatus != nil { diff --git a/daemon/exec.go b/daemon/exec.go index f458ae7f50..e62b67bd37 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -10,6 +10,7 @@ import ( "time" "github.com/containerd/containerd" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" @@ -21,7 +22,6 @@ import ( "github.com/moby/term" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func (daemon *Daemon) registerExecCommand(container *container.Container, config *container.ExecConfig) { @@ -174,7 +174,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio ec.Running = true ec.Unlock() - logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID) + log.G(ctx).Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID) attributes := map[string]string{ "execID": ec.ID, } @@ -188,7 +188,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio exitCode := 126 ec.ExitCode = &exitCode if err := ec.CloseStreams(); err != nil { - logrus.Errorf("failed to cleanup exec %s streams: %s", ec.Container.ID, err) + log.G(ctx).Errorf("failed to cleanup exec %s streams: %s", ec.Container.ID, err) } ec.Unlock() } @@ -198,7 +198,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio r, w := io.Pipe() go func() { defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") + defer log.G(ctx).Debug("Closing buffered stdin pipe") pools.Copy(w, options.Stdin) }() cStdin = r @@ 
-295,7 +295,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio select { case <-ctx.Done(): - log := logrus. + log := log.G(ctx). WithField("container", ec.Container.ID). WithField("exec", ec.ID) log.Debug("Sending KILL signal to container process") @@ -339,7 +339,7 @@ func (daemon *Daemon) execCommandGC() { } } if cleaned > 0 { - logrus.Debugf("clean %d unused exec commands", cleaned) + log.G(context.TODO()).Debugf("clean %d unused exec commands", cleaned) } } } diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index fe349bcaaa..1b82b01c1a 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -23,6 +23,7 @@ static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btr import "C" import ( + "context" "fmt" "math" "os" @@ -33,6 +34,7 @@ import ( "sync" "unsafe" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/containerfs" @@ -42,7 +44,6 @@ import ( "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -305,10 +306,10 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { - logrus.WithField("storage-driver", "btrfs").Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + log.G(context.TODO()).WithField("storage-driver", "btrfs").Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) } } else { - logrus.WithField("storage-driver", "btrfs").Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + log.G(context.TODO()).WithField("storage-driver", "btrfs").Errorf("Failed to lookup btrfs qgroup for 
%s: %v", fullPath, err.Error()) } } diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index 37ddb9dde7..ce64fb1ddc 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -1,17 +1,18 @@ package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( + "context" "fmt" "io" "os" "path/filepath" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" ) @@ -168,7 +169,7 @@ func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Drive if err == nil { return pluginDriver, nil } - logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + log.G(context.TODO()).WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") return nil, ErrNotSupported } @@ -177,7 +178,7 @@ func getBuiltinDriver(name, home string, options []string, idMap idtools.Identit if initFunc, exists := drivers[name]; exists { return initFunc(filepath.Join(home, name), options, idMap) } - logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + log.G(context.TODO()).Errorf("Failed to built-in GetDriver graph %s %s", name, home) return nil, ErrNotSupported } @@ -191,8 +192,9 @@ type Options struct { // New creates the driver and initializes it at the specified root. 
func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + ctx := context.TODO() if name != "" { - logrus.Infof("[graphdriver] trying configured driver: %s", name) + log.G(ctx).Infof("[graphdriver] trying configured driver: %s", name) if err := checkRemoved(name); err != nil { return nil, err } @@ -202,7 +204,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err // Guess for prior driver driversMap := scanPriorDrivers(config.Root) priorityList := strings.Split(priority, ",") - logrus.Debugf("[graphdriver] priority list: %v", priorityList) + log.G(ctx).Debugf("[graphdriver] priority list: %v", priorityList) for _, name := range priorityList { if _, prior := driversMap[name]; prior { // of the state found from prior drivers, check in order of our priority @@ -213,7 +215,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err // state, and now it is no longer supported/prereq/compatible, so // something changed and needs attention. Otherwise the daemon's // images would just "disappear". 
- logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + log.G(ctx).Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) return nil, err } @@ -226,11 +228,11 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err } err = errors.Errorf("%s contains several valid graphdrivers: %s; cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) - logrus.Errorf("[graphdriver] %v", err) + log.G(ctx).Errorf("[graphdriver] %v", err) return nil, err } - logrus.Infof("[graphdriver] using prior storage driver: %s", name) + log.G(ctx).Infof("[graphdriver] using prior storage driver: %s", name) return driver, nil } } diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go index 51dc0aa1b8..a43c5203b0 100644 --- a/daemon/graphdriver/fsdiff.go +++ b/daemon/graphdriver/fsdiff.go @@ -1,14 +1,15 @@ package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( + "context" "io" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" - "github.com/sirupsen/logrus" ) var ( @@ -143,11 +144,11 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i layerFs := layerRootFs options := &archive.TarOptions{IDMap: gdw.IDMap, BestEffortXattrs: gdw.BestEffortXattrs} start := time.Now().UTC() - logrus.WithField("id", id).Debug("Start untar layer") + log.G(context.TODO()).WithField("id", id).Debug("Start untar layer") if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { return } - logrus.WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + log.G(context.TODO()).WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) return } diff --git a/daemon/graphdriver/fuse-overlayfs/fuseoverlayfs.go 
b/daemon/graphdriver/fuse-overlayfs/fuseoverlayfs.go index 431b1e1198..176fd1bb8e 100644 --- a/daemon/graphdriver/fuse-overlayfs/fuseoverlayfs.go +++ b/daemon/graphdriver/fuse-overlayfs/fuseoverlayfs.go @@ -13,6 +13,7 @@ import ( "path/filepath" "strings" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" @@ -26,7 +27,6 @@ import ( "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -66,7 +66,7 @@ type Driver struct { } var ( - logger = logrus.WithField("storage-driver", driverName) + logger = log.G(context.TODO()).WithField("storage-driver", driverName) ) func init() { @@ -502,7 +502,7 @@ func fusermountU(mountpoint string) (unmounted bool) { for _, v := range []string{"fusermount3", "fusermount"} { err := exec.Command(v, "-u", mountpoint).Run() if err != nil && !os.IsNotExist(err) { - logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) + log.G(context.TODO()).Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) } if err == nil { unmounted = true @@ -515,7 +515,7 @@ func fusermountU(mountpoint string) (unmounted bool) { fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0) if err == nil { if err := unix.Syncfs(fd); err != nil { - logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) + log.G(context.TODO()).Debugf("Error Syncfs(%s) - %v", mountpoint, err) } unix.Close(fd) } diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go index cfc93cb113..663625fb84 100644 --- a/daemon/graphdriver/overlay2/overlay.go +++ b/daemon/graphdriver/overlay2/overlay.go @@ -14,6 +14,7 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/containerd/continuity/fs" "github.com/docker/docker/daemon/graphdriver" 
"github.com/docker/docker/daemon/graphdriver/overlayutils" @@ -28,7 +29,6 @@ import ( "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -102,7 +102,7 @@ type Driver struct { } var ( - logger = logrus.WithField("storage-driver", "overlay2") + logger = log.G(context.TODO()).WithField("storage-driver", "overlay2") backingFs = "" projectQuotaSupported = false diff --git a/daemon/graphdriver/overlayutils/overlayutils.go b/daemon/graphdriver/overlayutils/overlayutils.go index 85eb96016a..4235be3d6e 100644 --- a/daemon/graphdriver/overlayutils/overlayutils.go +++ b/daemon/graphdriver/overlayutils/overlayutils.go @@ -4,15 +4,16 @@ package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils" import ( + "context" "fmt" "os" "path" "path/filepath" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/daemon/graphdriver" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -54,7 +55,7 @@ func SupportsOverlay(d string, checkMultipleLowers bool) error { } defer func() { if err := os.RemoveAll(td); err != nil { - logrus.Warnf("Failed to remove check directory %v: %v", td, err) + log.G(context.TODO()).Warnf("Failed to remove check directory %v: %v", td, err) } }() @@ -74,7 +75,7 @@ func SupportsOverlay(d string, checkMultipleLowers bool) error { return errors.Wrap(err, "failed to mount overlay") } if err := unix.Unmount(mnt, 0); err != nil { - logrus.Warnf("Failed to unmount check directory %v: %v", mnt, err) + log.G(context.TODO()).Warnf("Failed to unmount check directory %v: %v", mnt, err) } return nil } diff --git a/daemon/graphdriver/overlayutils/userxattr.go b/daemon/graphdriver/overlayutils/userxattr.go index f5176c450d..173007cda9 100644 --- a/daemon/graphdriver/overlayutils/userxattr.go +++ b/daemon/graphdriver/overlayutils/userxattr.go @@ 
-21,14 +21,15 @@ package overlayutils import ( + "context" "fmt" "os" "path/filepath" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/parsers/kernel" - "github.com/sirupsen/logrus" ) // NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option. @@ -67,7 +68,7 @@ func NeedsUserXAttr(d string) (bool, error) { tdRoot := filepath.Join(d, "userxattr-check") if err := os.RemoveAll(tdRoot); err != nil { - logrus.WithError(err).Warnf("Failed to remove check directory %v", tdRoot) + log.G(context.TODO()).WithError(err).Warnf("Failed to remove check directory %v", tdRoot) } if err := os.MkdirAll(tdRoot, 0700); err != nil { @@ -76,7 +77,7 @@ func NeedsUserXAttr(d string) (bool, error) { defer func() { if err := os.RemoveAll(tdRoot); err != nil { - logrus.WithError(err).Warnf("Failed to remove check directory %v", tdRoot) + log.G(context.TODO()).WithError(err).Warnf("Failed to remove check directory %v", tdRoot) } }() @@ -106,11 +107,11 @@ func NeedsUserXAttr(d string) (bool, error) { if err := m.Mount(dest); err != nil { // Probably the host is running Ubuntu/Debian kernel (< 5.11) with the userns patch but without the userxattr patch. // Return false without error. 
- logrus.WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr") + log.G(context.TODO()).WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr") return false, nil } if err := mount.UnmountAll(dest, 0); err != nil { - logrus.WithError(err).Warnf("Failed to unmount check directory %v", dest) + log.G(context.TODO()).WithError(err).Warnf("Failed to unmount check directory %v", dest) } return true, nil } diff --git a/daemon/graphdriver/vfs/quota_linux.go b/daemon/graphdriver/vfs/quota_linux.go index 0a5caa754b..d344bafc3d 100644 --- a/daemon/graphdriver/vfs/quota_linux.go +++ b/daemon/graphdriver/vfs/quota_linux.go @@ -1,8 +1,10 @@ package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" import ( + "context" + + "github.com/containerd/containerd/log" "github.com/docker/docker/quota" - "github.com/sirupsen/logrus" ) type driverQuota struct { @@ -14,7 +16,7 @@ func setupDriverQuota(driver *Driver) { if quotaCtl, err := quota.NewControl(driver.home); err == nil { driver.quotaCtl = quotaCtl } else if err != quota.ErrQuotaNotSupported { - logrus.Warnf("Unable to setup quota: %v\n", err) + log.G(context.TODO()).Warnf("Unable to setup quota: %v\n", err) } } diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go index 835af9d718..4090f8af0f 100644 --- a/daemon/graphdriver/windows/windows.go +++ b/daemon/graphdriver/windows/windows.go @@ -7,6 +7,7 @@ import ( "archive/tar" "bufio" "bytes" + "context" "encoding/json" "fmt" "io" @@ -24,6 +25,7 @@ import ( "github.com/Microsoft/go-winio/vhd" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/idtools" @@ -33,7 +35,6 @@ import ( "github.com/docker/docker/pkg/system" units 
"github.com/docker/go-units" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -65,7 +66,7 @@ func init() { // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes // debugging issues in the re-exec codepath significantly easier. if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { - logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + log.G(context.TODO()).Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") noreexec = true } else { reexec.Register("docker-windows-write-layer", writeLayerReexec) @@ -97,7 +98,7 @@ type Driver struct { // InitFilter returns a new Windows storage filter driver. func InitFilter(home string, options []string, _ idtools.IdentityMapping) (graphdriver.Driver, error) { - logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + log.G(context.TODO()).Debugf("WindowsGraphDriver InitFilter at %s", home) fsType, err := winiofs.GetFileSystemType(home) if err != nil { @@ -242,14 +243,14 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt if _, err := os.Lstat(d.dir(parent)); err != nil { if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + log.G(context.TODO()).Warnf("Failed to DestroyLayer %s: %s", id, err2) } return errors.Wrapf(err, "cannot create layer with missing parent %s", parent) } if err := d.setLayerChain(id, layerChain); err != nil { if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + log.G(context.TODO()).Warnf("Failed to DestroyLayer %s: %s", id, err2) } return err } @@ -352,7 +353,7 @@ func (d *Driver) Remove(id string) error { } } if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { - logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + log.G(context.TODO()).Errorf("Failed to DestroyLayer %s: %s", id, 
err) } return nil @@ -365,7 +366,7 @@ func (d *Driver) GetLayerPath(id string) (string, error) { // Get returns the rootfs path for the id. This will mount the dir at its given path. func (d *Driver) Get(id, mountLabel string) (string, error) { - logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) + log.G(context.TODO()).Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) var dir string rID, err := d.resolveID(id) @@ -390,7 +391,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { d.ctr.Decrement(rID) if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) + log.G(context.TODO()).Warnf("Failed to Deactivate %s: %s", id, err) } return "", err } @@ -399,10 +400,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { if err != nil { d.ctr.Decrement(rID) if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - logrus.Warnf("Failed to Unprepare %s: %s", id, err) + log.G(context.TODO()).Warnf("Failed to Unprepare %s: %s", id, err) } if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) + log.G(context.TODO()).Warnf("Failed to Deactivate %s: %s", id, err) } return "", err } @@ -423,7 +424,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Put adds a new layer to the driver. 
func (d *Driver) Put(id string) error { - logrus.Debugf("WindowsGraphDriver Put() id %s", id) + log.G(context.TODO()).Debugf("WindowsGraphDriver Put() id %s", id) rID, err := d.resolveID(id) if err != nil { @@ -467,9 +468,9 @@ func (d *Driver) Cleanup() error { for _, item := range items { if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { - logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) + log.G(context.TODO()).Warnf("Failed to cleanup %s: %s", item.Name(), err) } else { - logrus.Infof("Cleaned up %s", item.Name()) + log.G(context.TODO()).Infof("Cleaned up %s", item.Name()) } } } @@ -497,7 +498,7 @@ func (d *Driver) Diff(id, _ string) (_ io.ReadCloser, err error) { } prepare := func() { if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + log.G(context.TODO()).Warnf("Failed to Deactivate %s: %s", rID, err) } } @@ -531,7 +532,7 @@ func (d *Driver) Changes(id, _ string) ([]archive.Change, error) { } defer func() { if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + log.G(context.TODO()).Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) } }() diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go index 9aa0ee3911..c1c426572d 100644 --- a/daemon/graphdriver/zfs/zfs.go +++ b/daemon/graphdriver/zfs/zfs.go @@ -3,6 +3,7 @@ package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" import ( + "context" "fmt" "os" "os/exec" @@ -12,6 +13,7 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" @@ -21,7 +23,6 @@ import ( "github.com/moby/sys/mountinfo" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - 
"github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -39,7 +40,7 @@ type Logger struct{} // Log wraps log message from ZFS driver with a prefix '[zfs]'. func (*Logger) Log(cmd []string) { - logrus.WithField("storage-driver", "zfs").Debugf("[zfs] %s", strings.Join(cmd, " ")) + log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("[zfs] %s", strings.Join(cmd, " ")) } // Init returns a new ZFS driver. @@ -48,7 +49,7 @@ func (*Logger) Log(cmd []string) { func Init(base string, opt []string, idMap idtools.IdentityMapping) (graphdriver.Driver, error) { var err error - logger := logrus.WithField("storage-driver", "zfs") + logger := log.G(context.TODO()).WithField("storage-driver", "zfs") if _, err := exec.LookPath("zfs"); err != nil { logger.Debugf("zfs command is not available: %v", err) @@ -155,7 +156,7 @@ func lookupZfsDataset(rootdir string) (string, error) { } for _, m := range mounts { if err := unix.Stat(m.Mountpoint, &stat); err != nil { - logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) continue // may fail on fuse file systems } @@ -372,10 +373,10 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { if retErr != nil { if c := d.ctr.Decrement(mountpoint); c <= 0 { if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil { - logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) + log.G(context.TODO()).WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) } if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) + log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) } } } @@ -383,7 
+384,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { filesystem := d.zfsPath(id) options := label.FormatMountLabel("", mountLabel) - logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, options) root := d.idMap.RootPair() // Create the target directories if they don't exist @@ -413,7 +414,7 @@ func (d *Driver) Put(id string) error { return nil } - logger := logrus.WithField("storage-driver", "zfs") + logger := log.G(context.TODO()).WithField("storage-driver", "zfs") logger.Debugf(`unmount("%s")`, mountpoint) diff --git a/daemon/graphdriver/zfs/zfs_freebsd.go b/daemon/graphdriver/zfs/zfs_freebsd.go index 5d86dc3a97..6f2269bed9 100644 --- a/daemon/graphdriver/zfs/zfs_freebsd.go +++ b/daemon/graphdriver/zfs/zfs_freebsd.go @@ -4,7 +4,8 @@ import ( + "context" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/graphdriver" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -16,7 +16,7 @@ func checkRootdirFs(rootdir string) error { // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { - logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) + log.G(context.TODO()).WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) return graphdriver.ErrPrerequisites } diff --git a/daemon/graphdriver/zfs/zfs_linux.go b/daemon/graphdriver/zfs/zfs_linux.go index 589ecbd179..faf1d65e8b 100644 --- a/daemon/graphdriver/zfs/zfs_linux.go +++ b/daemon/graphdriver/zfs/zfs_linux.go @@ -1,8 +1,10 @@ package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" import ( + "context" + + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/graphdriver" - "github.com/sirupsen/logrus" ) func checkRootdirFs(rootDir string) error { @@ -16,7 +18,7 @@ func checkRootdirFs(rootDir string) error { } if fsMagic != graphdriver.FsMagicZfs { - logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") + log.G(context.TODO()).WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") return graphdriver.ErrPrerequisites } diff --git a/daemon/health.go b/daemon/health.go index e9efebed63..914118fefc 100644 --- a/daemon/health.go +++ b/daemon/health.go @@ -9,11 +9,11 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/container" - "github.com/sirupsen/logrus" ) const ( @@ -130,7 +130,7 @@ func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container select { case <-tm.C: cancelProbe() - logrus.WithContext(ctx).Debugf("Health check for container %s taking too long", cntr.ID) + 
log.G(ctx).Debugf("Health check for container %s taking too long", cntr.ID) // Wait for probe to exit (it might take some time to call containerd to kill // the process and we don't want dying probes to pile up). <-execErr @@ -235,7 +235,7 @@ func handleProbeResult(d *Daemon, c *container.Container, result *types.Healthch if err := c.CheckpointTo(d.containersReplica); err != nil { // queries will be inconsistent until the next probe runs or other state mutations // checkpoint the container - logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err) + log.G(context.TODO()).Errorf("Error replicating health state for container %s: %v", c.ID, err) current := h.Status() @@ -257,10 +257,10 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) select { case <-stop: - logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) + log.G(context.TODO()).Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) return case <-intervalTimer.C: - logrus.Debugf("Running health check for container %s ...", c.ID) + log.G(context.TODO()).Debugf("Running health check for container %s ...", c.ID) startTime := time.Now() ctx, cancelProbe := context.WithCancel(context.Background()) results := make(chan *types.HealthcheckResult, 1) @@ -269,7 +269,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) result, err := probe.run(ctx, d, c) if err != nil { healthChecksFailedCounter.Inc() - logrus.Warnf("Health check for container %s error: %v", c.ID, err) + log.G(ctx).Warnf("Health check for container %s error: %v", c.ID, err) results <- &types.HealthcheckResult{ ExitCode: -1, Output: err.Error(), @@ -278,14 +278,14 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) } } else { result.Start = startTime - logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) + 
log.G(ctx).Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) results <- result } close(results) }() select { case <-stop: - logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) + log.G(ctx).Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) cancelProbe() // Wait for probe to exit (it might take a while to respond to the TERM // signal and we don't want dying probes to pile up). @@ -314,7 +314,7 @@ func getProbe(c *container.Container) probe { case "NONE": return nil default: - logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) + log.G(context.TODO()).Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) return nil } } diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 42b8dbd0f9..29e96af1e2 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -3,11 +3,11 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "github.com/containerd/containerd/log" imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/builder" "github.com/docker/docker/image/cache" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // MakeImageCache creates a stateful image cache. 
@@ -24,7 +24,7 @@ func (i *ImageService) MakeImageCache(ctx context.Context, sourceRefs []string) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return nil, err } - logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + log.G(ctx).Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) continue } cache.Populate(img) diff --git a/daemon/images/image.go b/daemon/images/image.go index a773bd0cd2..bb975831a1 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -10,6 +10,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" imagetypes "github.com/docker/docker/api/types/image" @@ -19,7 +20,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ErrImageDoesNotExist is error returned when no image can be found for a reference. 
@@ -52,7 +52,7 @@ func (i *ImageService) PrepareSnapshot(ctx context.Context, id string, image str } func (i *ImageService) manifestMatchesPlatform(ctx context.Context, img *image.Image, platform ocispec.Platform) (bool, error) { - logger := logrus.WithField("image", img.ID).WithField("desiredPlatform", platforms.Format(platform)) + logger := log.G(ctx).WithField("image", img.ID).WithField("desiredPlatform", platforms.Format(platform)) ls, leaseErr := i.leases.ListResources(ctx, leases.Lease{ID: imageKey(img.ID().String())}) if leaseErr != nil { diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index 9569651ef9..92a7b3c6e7 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -5,6 +5,7 @@ import ( "io" "runtime" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/backend" @@ -22,7 +23,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type roLayer struct { @@ -189,7 +189,7 @@ This is most likely caused by a bug in the build system that created the fetched Please notify the image author to correct the configuration.`, platforms.Format(p), platforms.Format(imgPlat), name, ) - logrus.WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to an image whose configuration does not match the platform in the manifest.") + log.G(ctx).WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to an image whose configuration does not match the platform in the manifest.") err = nil } } diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 88877b01c6..9bb16f7119 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ 
-4,9 +4,9 @@ import ( "context" "io" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" "github.com/docker/docker/image/tarexport" - "github.com/sirupsen/logrus" ) // ExportImage exports a list of images to the given output stream. The @@ -28,7 +28,7 @@ func (i *ImageService) PerformWithBaseFS(ctx context.Context, c *container.Conta if err != nil { err2 := i.ReleaseLayer(rwlayer) if err2 != nil { - logrus.WithError(err2).WithField("container", c.ID).Warn("Failed to release layer") + log.G(ctx).WithError(err2).WithField("container", c.ID).Warn("Failed to release layer") } } }() diff --git a/daemon/images/image_prune.go b/daemon/images/image_prune.go index 174ff3f70e..f751691ad6 100644 --- a/daemon/images/image_prune.go +++ b/daemon/images/image_prune.go @@ -7,6 +7,7 @@ import ( "sync/atomic" "time" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" @@ -17,7 +18,6 @@ import ( "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var imagesAcceptedFilters = map[string]bool{ @@ -145,7 +145,7 @@ deleteImagesLoop: } if canceled { - logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep) + log.G(ctx).Debugf("ImagesPrune operation cancelled: %#v", *rep) } i.eventsService.Log("prune", events.ImageEventType, events.Actor{ Attributes: map[string]string{ @@ -162,7 +162,7 @@ func imageDeleteFailed(ref string, err error) bool { case errdefs.IsConflict(err), errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): return true default: - logrus.Warnf("failed to prune image %s: %v", ref, err) + log.G(context.TODO()).Warnf("failed to prune image %s: %v", ref, err) return true } } diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index b84168544f..83d322c3fb 100644 --- a/daemon/images/image_pull.go +++ 
b/daemon/images/image_pull.go @@ -7,6 +7,7 @@ import ( "time" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/docker/distribution/reference" imagetypes "github.com/docker/docker/api/types/image" @@ -19,7 +20,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // PullImage initiates a pull operation. image is the repository name to pull, and @@ -70,7 +70,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor if errdefs.IsNotFound(err) && img != nil { po := streamformatter.NewJSONProgressOutput(outStream, false) progress.Messagef(po, "", `WARNING: %s`, err.Error()) - logrus.WithError(err).WithField("image", image).Warn("ignoring platform mismatch on single-arch image") + log.G(ctx).WithError(err).WithField("image", image).Warn("ignoring platform mismatch on single-arch image") } else if err != nil { return err } diff --git a/daemon/images/image_unix.go b/daemon/images/image_unix.go index 8796eff6ed..eb8eed3af0 100644 --- a/daemon/images/image_unix.go +++ b/daemon/images/image_unix.go @@ -5,9 +5,9 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + "github.com/containerd/containerd/log" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/sirupsen/logrus" ) // GetLayerFolders returns the layer folders from an image RootFS @@ -27,14 +27,14 @@ func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID st // container operating systems. 
rwlayer, err := i.layerStore.GetRWLayer(containerID) if err != nil { - logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) + log.G(ctx).Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) return sizeRw, sizeRootfs, nil } defer i.layerStore.ReleaseRWLayer(rwlayer) sizeRw, err = rwlayer.Size() if err != nil { - logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + log.G(ctx).Errorf("Driver %s couldn't return diff size of container %s: %s", i.layerStore.DriverName(), containerID, err) // FIXME: GetSize should return an error. Not changing it now in case // there is a side-effect. diff --git a/daemon/images/mount.go b/daemon/images/mount.go index 5585e12513..827d7ab028 100644 --- a/daemon/images/mount.go +++ b/daemon/images/mount.go @@ -5,9 +5,9 @@ import ( "fmt" "runtime" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // Mount sets container.BaseFS @@ -20,7 +20,7 @@ func (i *ImageService) Mount(ctx context.Context, container *container.Container if err != nil { return err } - logrus.WithField("container", container.ID).Debugf("container mounted via layerStore: %v", dir) + log.G(ctx).WithField("container", container.ID).Debugf("container mounted via layerStore: %v", dir) if container.BaseFS != "" && container.BaseFS != dir { // The mount path reported by the graph driver should always be trusted on Windows, since the @@ -42,7 +42,7 @@ func (i *ImageService) Unmount(ctx context.Context, container *container.Contain return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") } if err := container.RWLayer.Unmount(); err != nil { - logrus.WithField("container", container.ID).WithError(err).Error("error unmounting container") + log.G(ctx).WithField("container", container.ID).WithError(err).Error("error unmounting container") return err } diff --git a/daemon/info.go b/daemon/info.go 
index d1eeb699a5..f236ea2122 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -1,12 +1,14 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "os" "runtime" "strings" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/cli/debug" @@ -22,7 +24,6 @@ import ( "github.com/docker/docker/registry" metrics "github.com/docker/go-metrics" "github.com/opencontainers/selinux/go-selinux" - "github.com/sirupsen/logrus" ) // SystemInfo returns information about the host server the daemon is running on. @@ -237,7 +238,7 @@ func (daemon *Daemon) fillDefaultAddressPools(v *types.Info, cfg *config.Config) func hostName() string { hostname := "" if hn, err := os.Hostname(); err != nil { - logrus.Warnf("Could not get hostname: %v", err) + log.G(context.TODO()).Warnf("Could not get hostname: %v", err) } else { hostname = hn } @@ -247,7 +248,7 @@ func hostName() string { func kernelVersion() string { var kernelVersion string if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) + log.G(context.TODO()).Warnf("Could not get kernel version: %v", err) } else { kernelVersion = kv.String() } @@ -257,7 +258,7 @@ func kernelVersion() string { func memInfo() *meminfo.Memory { memInfo, err := meminfo.Read() if err != nil { - logrus.Errorf("Could not read system memory info: %v", err) + log.G(context.TODO()).Errorf("Could not read system memory info: %v", err) memInfo = &meminfo.Memory{} } return memInfo @@ -267,12 +268,12 @@ func operatingSystem() (operatingSystem string) { defer metrics.StartTimer(hostInfoFunctions.WithValues("operating_system"))() if s, err := operatingsystem.GetOperatingSystem(); err != nil { - logrus.Warnf("Could not get operating system name: %v", err) + log.G(context.TODO()).Warnf("Could not get operating system name: %v", err) } else { operatingSystem = s } if inContainer, err := 
operatingsystem.IsContainerized(); err != nil { - logrus.Errorf("Could not determine if daemon is containerized: %v", err) + log.G(context.TODO()).Errorf("Could not determine if daemon is containerized: %v", err) operatingSystem += " (error determining if containerized)" } else if inContainer { operatingSystem += " (containerized)" @@ -286,7 +287,7 @@ func osVersion() (version string) { version, err := operatingsystem.GetOperatingSystemVersion() if err != nil { - logrus.Warnf("Could not get operating system version: %v", err) + log.G(context.TODO()).Warnf("Could not get operating system version: %v", err) } return version diff --git a/daemon/info_unix.go b/daemon/info_unix.go index d2f7ec91aa..6c3a58818a 100644 --- a/daemon/info_unix.go +++ b/daemon/info_unix.go @@ -10,6 +10,7 @@ import ( "path/filepath" "strings" + "github.com/containerd/containerd/log" v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -18,7 +19,6 @@ import ( "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" rkclient "github.com/rootless-containers/rootlesskit/pkg/api/client" - "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. 
@@ -57,7 +57,7 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo, v.InitCommit.ID = "N/A" if _, _, commit, err := parseDefaultRuntimeVersion(&cfg.Runtimes); err != nil { - logrus.Warnf(err.Error()) + log.G(context.TODO()).Warnf(err.Error()) } else { v.RuncCommit.ID = commit } @@ -65,20 +65,20 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo, if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { - logrus.Warnf("failed to retrieve containerd version: %v", err) + log.G(context.TODO()).Warnf("failed to retrieve containerd version: %v", err) } v.InitBinary = cfg.GetInitPath() if initBinary, err := cfg.LookupInitPath(); err != nil { - logrus.Warnf("failed to find docker-init: %s", err) + log.G(context.TODO()).Warnf("failed to find docker-init: %s", err) } else if rv, err := exec.Command(initBinary, "--version").Output(); err == nil { if _, commit, err := parseInitVersion(string(rv)); err != nil { - logrus.Warnf("failed to parse %s version: %s", initBinary, err) + log.G(context.TODO()).Warnf("failed to parse %s version: %s", initBinary, err) } else { v.InitCommit.ID = commit } } else { - logrus.Warnf("failed to retrieve %s version: %s", initBinary, err) + log.G(context.TODO()).Warnf("failed to retrieve %s version: %s", initBinary, err) } // Set expected and actual commits to the same value to prevent the client @@ -184,7 +184,7 @@ func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *configStore) { } if _, ver, commit, err := parseDefaultRuntimeVersion(&cfg.Runtimes); err != nil { - logrus.Warnf(err.Error()) + log.G(context.TODO()).Warnf(err.Error()) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: cfg.Runtimes.Default, @@ -196,10 +196,10 @@ func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *configStore) { } if initBinary, err := cfg.LookupInitPath(); err != nil { - logrus.Warnf("failed to 
find docker-init: %s", err) + log.G(context.TODO()).Warnf("failed to find docker-init: %s", err) } else if rv, err := exec.Command(initBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { - logrus.Warnf("failed to parse %s version: %s", initBinary, err) + log.G(context.TODO()).Warnf("failed to parse %s version: %s", initBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(initBinary), @@ -210,7 +210,7 @@ func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *configStore) { }) } } else { - logrus.Warnf("failed to retrieve %s version: %s", initBinary, err) + log.G(context.TODO()).Warnf("failed to retrieve %s version: %s", initBinary, err) } daemon.fillRootlessVersion(v) @@ -222,12 +222,12 @@ func (daemon *Daemon) fillRootlessVersion(v *types.Version) { } rlc, err := getRootlessKitClient() if err != nil { - logrus.Warnf("failed to create RootlessKit client: %v", err) + log.G(context.TODO()).Warnf("failed to create RootlessKit client: %v", err) return } rlInfo, err := rlc.Info(context.TODO()) if err != nil { - logrus.Warnf("failed to retrieve RootlessKit version: %v", err) + log.G(context.TODO()).Warnf("failed to retrieve RootlessKit version: %v", err) return } v.Components = append(v.Components, types.ComponentVersion{ @@ -245,7 +245,7 @@ func (daemon *Daemon) fillRootlessVersion(v *types.Version) { case "slirp4netns": if rv, err := exec.Command("slirp4netns", "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { - logrus.Warnf("failed to parse slirp4netns version: %v", err) + log.G(context.TODO()).Warnf("failed to parse slirp4netns version: %v", err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: "slirp4netns", @@ -256,7 +256,7 @@ func (daemon *Daemon) fillRootlessVersion(v *types.Version) { }) } } else { - logrus.Warnf("failed to retrieve slirp4netns version: %v", err) + 
log.G(context.TODO()).Warnf("failed to retrieve slirp4netns version: %v", err) } case "vpnkit": if rv, err := exec.Command("vpnkit", "--version").Output(); err == nil { @@ -265,7 +265,7 @@ func (daemon *Daemon) fillRootlessVersion(v *types.Version) { Version: strings.TrimSpace(string(rv)), }) } else { - logrus.Warnf("failed to retrieve vpnkit version: %v", err) + log.G(context.TODO()).Warnf("failed to retrieve vpnkit version: %v", err) } } } diff --git a/daemon/kill.go b/daemon/kill.go index 420ea40b8d..e91a0ce8d6 100644 --- a/daemon/kill.go +++ b/daemon/kill.go @@ -8,11 +8,11 @@ import ( "syscall" "time" + "github.com/containerd/containerd/log" containerpkg "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/moby/sys/signal" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type errNoSuchProcess struct { @@ -61,7 +61,7 @@ func (daemon *Daemon) ContainerKill(name, stopSignal string) error { // or not running, or if there is a problem returned from the // underlying kill command. 
func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSignal syscall.Signal) error { - logrus.Debugf("Sending kill signal %d to container %s", stopSignal, container.ID) + log.G(context.TODO()).Debugf("Sending kill signal %d to container %s", stopSignal, container.ID) container.Lock() defer container.Unlock() @@ -100,7 +100,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign if err := task.Kill(context.Background(), stopSignal); err != nil { if errdefs.IsNotFound(err) { unpause = false - logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'") + log.G(context.TODO()).WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'") go func() { // We need to clean up this container but it is possible there is a case where we hit here before the exit event is processed // but after it was fired off. 
@@ -122,7 +122,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign if unpause { // above kill signal will be sent once resume is finished if err := task.Resume(context.Background()); err != nil { - logrus.Warnf("Cannot unpause container %s: %s", container.ID, err) + log.G(context.TODO()).Warnf("Cannot unpause container %s: %s", container.ID, err) } } @@ -159,7 +159,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error { return nil } - logrus.WithError(status.Err()).WithField("container", container.ID).Errorf("Container failed to exit within %v of kill - trying direct SIGKILL", waitTimeout) + log.G(ctx).WithError(status.Err()).WithField("container", container.ID).Errorf("Container failed to exit within %v of kill - trying direct SIGKILL", waitTimeout) if err := killProcessDirectly(container); err != nil { if errors.As(err, &errNoSuchProcess{}) { @@ -183,7 +183,7 @@ func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, err := daemon.killWithSignal(container, sig) if errdefs.IsNotFound(err) { err = errNoSuchProcess{container.GetPID(), sig} - logrus.Debug(err) + log.G(context.TODO()).Debug(err) return err } return err diff --git a/daemon/list.go b/daemon/list.go index 676de6f222..6c86b72905 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" imagetypes "github.com/docker/docker/api/types/image" @@ -16,7 +17,6 @@ import ( "github.com/docker/docker/image" "github.com/docker/go-connections/nat" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var acceptedPsFilterTags = map[string]bool{ @@ -320,7 +320,7 @@ func (daemon *Daemon) foldFilter(ctx context.Context, view *container.View, conf err := psFilters.WalkValues("ancestor", func(ancestor string) error { img, err := daemon.imageService.GetImage(ctx, ancestor, 
imagetypes.GetImageOpts{}) if err != nil { - logrus.Warnf("Error while looking up for image %v", ancestor) + log.G(ctx).Warnf("Error while looking up for image %v", ancestor) return nil } if imagesFilter[img.ID()] { diff --git a/daemon/listeners/listeners_linux.go b/daemon/listeners/listeners_linux.go index 515fa548b9..e0fd2bfe12 100644 --- a/daemon/listeners/listeners_linux.go +++ b/daemon/listeners/listeners_linux.go @@ -1,16 +1,17 @@ package listeners // import "github.com/docker/docker/daemon/listeners" import ( + "context" "crypto/tls" "net" "os" "strconv" + "github.com/containerd/containerd/log" "github.com/coreos/go-systemd/v22/activation" "github.com/docker/docker/pkg/homedir" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // Init creates new listeners for the server. @@ -38,7 +39,7 @@ func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listene if socketGroup != defaultSocketGroup { return nil, err } - logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) + log.G(context.TODO()).Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) } gid = os.Getgid() } @@ -48,7 +49,7 @@ func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listene } if _, err := homedir.StickRuntimeDirContents([]string{addr}); err != nil { // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset - logrus.WithError(err).Warnf("cannot set sticky bit on socket %s under XDG_RUNTIME_DIR", addr) + log.G(context.TODO()).WithError(err).Warnf("cannot set sticky bit on socket %s under XDG_RUNTIME_DIR", addr) } ls = append(ls, l) default: diff --git a/daemon/logger/adapter.go b/daemon/logger/adapter.go index 97d59be5e0..c76ba33681 100644 --- a/daemon/logger/adapter.go +++ b/daemon/logger/adapter.go @@ -1,16 +1,17 @@ package logger // import "github.com/docker/docker/daemon/logger" import ( + "context" "io" "os" "path/filepath" "sync" 
"time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/plugins/logdriver" "github.com/docker/docker/pkg/plugingetter" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // pluginAdapter takes a plugin and implements the Logger interface for logger @@ -69,10 +70,10 @@ func (a *pluginAdapter) Close() error { } if err := a.stream.Close(); err != nil { - logrus.WithError(err).Error("error closing plugin fifo") + log.G(context.TODO()).WithError(err).Error("error closing plugin fifo") } if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { - logrus.WithError(err).Error("error cleaning up plugin fifo") + log.G(context.TODO()).WithError(err).Error("error cleaning up plugin fifo") } // may be nil, especially for unit tests diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go index 0caae0a8b8..0a517f18a7 100644 --- a/daemon/logger/awslogs/cloudwatchlogs.go +++ b/daemon/logger/awslogs/cloudwatchlogs.go @@ -22,6 +22,7 @@ import ( "github.com/aws/smithy-go" smithymiddleware "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" @@ -183,7 +184,7 @@ func New(info logger.Info) (logger.Logger, error) { if backoff < maxBackoff { backoff *= 2 } - logrus. + log.G(context.TODO()). WithError(err). WithField("container-id", info.ContainerID). WithField("container-name", info.ContainerName). @@ -335,6 +336,7 @@ var newSDKEndpoint = credentialsEndpoint // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(info logger.Info, configOpts ...func(*config.LoadOptions) error) (*cloudwatchlogs.Client, error) { + ctx := context.TODO() var region, endpoint *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) @@ -346,16 +348,16 @@ func newAWSLogsClient(info logger.Info, configOpts ...func(*config.LoadOptions) endpoint = aws.String(info.Config[endpointKey]) } if region == nil || *region == "" { - logrus.Info("Trying to get region from IMDS") + log.G(ctx).Info("Trying to get region from IMDS") regFinder, err := newRegionFinder(context.TODO()) if err != nil { - logrus.WithError(err).Error("could not create regionFinder") + log.G(ctx).WithError(err).Error("could not create regionFinder") return nil, errors.Wrap(err, "could not create regionFinder") } r, err := regFinder.GetRegion(context.TODO(), &imds.GetRegionInput{}) if err != nil { - logrus.WithError(err).Error("Could not get region from IMDS, environment, or log option") + log.G(ctx).WithError(err).Error("Could not get region from IMDS, environment, or log option") return nil, errors.Wrap(err, "cannot determine region for awslogs driver") } region = &r.Region @@ -364,7 +366,7 @@ func newAWSLogsClient(info logger.Info, configOpts ...func(*config.LoadOptions) configOpts = append(configOpts, config.WithRegion(*region)) if uri, ok := info.Config[credentialsEndpointKey]; ok { - logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") + log.G(ctx).Debugf("Trying to get credentials from awslogs-credentials-endpoint") endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) configOpts = append(configOpts, config.WithCredentialsProvider(endpointcreds.New(endpoint))) @@ -372,11 +374,11 @@ func newAWSLogsClient(info logger.Info, configOpts ...func(*config.LoadOptions) cfg, err := config.LoadDefaultConfig(context.TODO(), configOpts...) 
if err != nil { - logrus.WithError(err).Error("Could not initialize AWS SDK config") + log.G(ctx).WithError(err).Error("Could not initialize AWS SDK config") return nil, errors.Wrap(err, "could not initialize AWS SDK config") } - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") @@ -483,10 +485,10 @@ func (l *logStream) createLogGroup() error { } if _, ok := apiErr.(*types.ResourceAlreadyExistsException); ok { // Allow creation to succeed - logrus.WithFields(fields).Info("Log group already exists") + log.G(context.TODO()).WithFields(fields).Info("Log group already exists") return nil } - logrus.WithFields(fields).Error("Failed to create log group") + log.G(context.TODO()).WithFields(fields).Error("Failed to create log group") } return err } @@ -497,7 +499,7 @@ func (l *logStream) createLogGroup() error { func (l *logStream) createLogStream() error { // Directly return if we do not want to create log stream. if !l.logCreateStream { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, "logCreateStream": l.logCreateStream, @@ -522,10 +524,10 @@ func (l *logStream) createLogStream() error { } if _, ok := apiErr.(*types.ResourceAlreadyExistsException); ok { // Allow creation to succeed - logrus.WithFields(fields).Info("Log stream already exists") + log.G(context.TODO()).WithFields(fields).Info("Log stream already exists") return nil } - logrus.WithFields(fields).Error("Failed to create log stream") + log.G(context.TODO()).WithFields(fields).Error("Failed to create log stream") } } return err @@ -694,7 +696,7 @@ func (l *logStream) publishBatch(batch *eventBatch) { if apiErr := (*types.DataAlreadyAcceptedException)(nil); errors.As(err, &apiErr) { // already submitted, just grab the correct sequence token nextSequenceToken = apiErr.ExpectedSequenceToken - logrus.WithFields(logrus.Fields{ + 
log.G(context.TODO()).WithFields(logrus.Fields{ "errorCode": apiErr.ErrorCode(), "message": apiErr.ErrorMessage(), "logGroupName": l.logGroupName, @@ -706,7 +708,7 @@ func (l *logStream) publishBatch(batch *eventBatch) { } } if err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) } else { l.sequenceToken = nextSequenceToken } @@ -724,7 +726,7 @@ func (l *logStream) putLogEvents(events []types.InputLogEvent, sequenceToken *st if err != nil { var apiErr smithy.APIError if errors.As(err, &apiErr) { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "errorCode": apiErr.ErrorCode(), "message": apiErr.ErrorMessage(), "logGroupName": l.logGroupName, diff --git a/daemon/logger/copier.go b/daemon/logger/copier.go index 30c68ea364..35ed4b3715 100644 --- a/daemon/logger/copier.go +++ b/daemon/logger/copier.go @@ -2,13 +2,14 @@ package logger // import "github.com/docker/docker/daemon/logger" import ( "bytes" + "context" "io" "sync" "time" + "github.com/containerd/containerd/log" types "github.com/docker/docker/api/types/backend" "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" ) const ( @@ -87,7 +88,7 @@ func (c *Copier) copySrc(name string, src io.Reader) { if err != nil { if err != io.EOF { logReadsFailedCount.Inc(1) - logrus.Errorf("Error scanning log stream: %s", err) + log.G(context.TODO()).Errorf("Error scanning log stream: %s", err) return } eof = true diff --git a/daemon/logger/etwlogs/etwlogs_windows.go b/daemon/logger/etwlogs/etwlogs_windows.go index 3a089dbeb1..5e14fa78e6 100644 --- a/daemon/logger/etwlogs/etwlogs_windows.go +++ b/daemon/logger/etwlogs/etwlogs_windows.go @@ -13,14 +13,15 @@ package etwlogs // import "github.com/docker/docker/daemon/logger/etwlogs" import ( + "context" "fmt" "sync" "unsafe" "github.com/Microsoft/go-winio/pkg/etw" "github.com/Microsoft/go-winio/pkg/guid" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/logger" - 
"github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -61,7 +62,7 @@ func New(info logger.Info) (logger.Logger, error) { if err := registerETWProvider(); err != nil { return nil, err } - logrus.Debugf("logging driver etwLogs configured for container: %s.", info.ContainerID) + log.G(context.TODO()).Debugf("logging driver etwLogs configured for container: %s.", info.ContainerID) return &etwLogs{ containerName: info.Name(), @@ -134,7 +135,7 @@ func callEventRegister() (*etw.Provider, error) { providerID, _ := guid.FromString(providerGUID) p, err := etw.NewProviderWithOptions("", etw.WithID(providerID)) if err != nil { - logrus.WithError(err).Error("Failed to register ETW provider") + log.G(context.TODO()).WithError(err).Error("Failed to register ETW provider") return nil, fmt.Errorf("failed to register ETW provider: %v", err) } return p, nil @@ -149,7 +150,7 @@ func callEventWriteString(message string) error { ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(&utf16message[0]))) if ret != win32CallSuccess { - logrus.WithError(err).Error("ETWLogs provider failed to log message") + log.G(context.TODO()).WithError(err).Error("ETWLogs provider failed to log message") return fmt.Errorf("ETWLogs provider failed to log message: %v", err) } return nil diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go index 17778d8099..43ca2c054a 100644 --- a/daemon/logger/fluentd/fluentd.go +++ b/daemon/logger/fluentd/fluentd.go @@ -3,19 +3,20 @@ package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( + "context" "math" "net/url" "strconv" "strings" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/errdefs" units "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type fluentd struct { @@ 
-88,7 +89,7 @@ func New(info logger.Info) (logger.Logger, error) { return nil, errdefs.InvalidParameter(err) } - logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). + log.G(context.TODO()).WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) diff --git a/daemon/logger/gcplogs/gcplogging.go b/daemon/logger/gcplogs/gcplogging.go index 92178f189a..db3f0eedf4 100644 --- a/daemon/logger/gcplogs/gcplogging.go +++ b/daemon/logger/gcplogs/gcplogging.go @@ -11,7 +11,7 @@ import ( "cloud.google.com/go/compute/metadata" "cloud.google.com/go/logging" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" ) @@ -188,10 +188,10 @@ func New(info logger.Info) (logger.Logger, error) { c.OnError = func(err error) { if err == logging.ErrOverflow { if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { - logrus.Errorf("gcplogs driver has dropped %v logs", i) + log.G(context.TODO()).Errorf("gcplogs driver has dropped %v logs", i) } } else { - logrus.Error(err) + log.G(context.TODO()).Error(err) } } diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go index 5396345564..c5798855de 100644 --- a/daemon/logger/journald/read.go +++ b/daemon/logger/journald/read.go @@ -3,15 +3,15 @@ package journald // import "github.com/docker/docker/daemon/logger/journald" import ( + "context" "errors" "runtime" "strconv" "sync/atomic" "time" + "github.com/containerd/containerd/log" "github.com/coreos/go-systemd/v22/journal" - "github.com/sirupsen/logrus" - "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/journald/internal/sdjournal" @@ -238,7 +238,7 @@ func (r *reader) drainJournal() error { if i != 0 && i%1024 == 0 { if _, err := r.j.Process(); err != nil { // log a warning but ignore it for now 
- logrus.WithField("container", r.s.vars[fieldContainerIDFull]). + log.G(context.TODO()).WithField("container", r.s.vars[fieldContainerIDFull]). WithField("error", err). Warn("journald: error processing journal") } @@ -447,7 +447,7 @@ func waitUntilFlushedImpl(s *journald) error { return } } - logrus.WithField("container", s.vars[fieldContainerIDFull]). + log.G(context.TODO()).WithField("container", s.vars[fieldContainerIDFull]). Warn("journald: deadline exceeded waiting for logs to be committed to journal") }() return <-flushed diff --git a/daemon/logger/logentries/logentries.go b/daemon/logger/logentries/logentries.go index 15d8c75bc6..803ec8b856 100644 --- a/daemon/logger/logentries/logentries.go +++ b/daemon/logger/logentries/logentries.go @@ -3,13 +3,14 @@ package logentries // import "github.com/docker/docker/daemon/logger/logentries" import ( + "context" "fmt" "strconv" "github.com/bsphere/le_go" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type logentries struct { @@ -40,7 +41,7 @@ func init() { // the context. The supported context configuration variable is // logentries-token. func New(info logger.Info) (logger.Logger, error) { - logrus.WithField("container", info.ContainerID). + log.G(context.TODO()).WithField("container", info.ContainerID). WithField("token", info.Config[token]). WithField("line-only", info.Config[lineonly]). 
Debug("logging driver logentries configured") diff --git a/daemon/logger/logger_error.go b/daemon/logger/logger_error.go index 70f4311979..1ebdec6c2a 100644 --- a/daemon/logger/logger_error.go +++ b/daemon/logger/logger_error.go @@ -1,7 +1,9 @@ package logger import ( - "github.com/sirupsen/logrus" + "context" + + "github.com/containerd/containerd/log" "golang.org/x/time/rate" ) @@ -16,7 +18,7 @@ var logErrorLimiter = rate.NewLimiter(333, 333) func logDriverError(loggerName, msgLine string, logErr error) { logWritesFailedCount.Inc(1) if logErrorLimiter.Allow() { - logrus.WithError(logErr). + log.G(context.TODO()).WithError(logErr). WithField("driver", loggerName). WithField("message", msgLine). Errorf("Error writing log message") diff --git a/daemon/logger/loggerutils/cache/local_cache.go b/daemon/logger/loggerutils/cache/local_cache.go index c5e8fc2cac..39218234af 100644 --- a/daemon/logger/loggerutils/cache/local_cache.go +++ b/daemon/logger/loggerutils/cache/local_cache.go @@ -1,14 +1,15 @@ package cache // import "github.com/docker/docker/daemon/logger/loggerutils/cache" import ( + "context" "strconv" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/container" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/local" units "github.com/docker/go-units" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -91,7 +92,7 @@ func (l *loggerWithCache) ReadLogs(config logger.ReadConfig) *logger.LogWatcher func (l *loggerWithCache) Close() error { err := l.l.Close() if err := l.cache.Close(); err != nil { - logrus.WithError(err).Warn("error while shutting cache logger") + log.G(context.TODO()).WithError(err).Warn("error while shutting cache logger") } return err } diff --git a/daemon/logger/loggerutils/follow.go b/daemon/logger/loggerutils/follow.go index 483e032d2c..c9a58dff7c 100644 --- a/daemon/logger/loggerutils/follow.go +++ b/daemon/logger/loggerutils/follow.go @@ -1,10 +1,12 @@ package 
loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" import ( + "context" "fmt" "io" "os" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -22,7 +24,7 @@ type follow struct { // Do follows the log file as it is written, starting from f at read. func (fl *follow) Do(f *os.File, read logPos) { - fl.log = logrus.WithFields(logrus.Fields{ + fl.log = log.G(context.TODO()).WithFields(logrus.Fields{ "module": "logger", "file": f.Name(), }) diff --git a/daemon/logger/loggerutils/logfile.go b/daemon/logger/loggerutils/logfile.go index b37e93f875..6cf358684c 100644 --- a/daemon/logger/loggerutils/logfile.go +++ b/daemon/logger/loggerutils/logfile.go @@ -13,10 +13,10 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/pools" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // rotateFileMetadata is a metadata of the gzip header of the compressed log file @@ -219,7 +219,7 @@ func (w *LogFile) rotate() (retErr error) { defer w.fsopMu.Unlock() if err := rotate(fname, w.maxFiles, w.compress); err != nil { - logrus.WithError(err).Warn("Error rotating log file, log data may have been lost") + log.G(context.TODO()).WithError(err).Warn("Error rotating log file, log data may have been lost") } else { // We may have readers working their way through the // current log file so we can't truncate it. We need to @@ -228,11 +228,11 @@ func (w *LogFile) rotate() (retErr error) { // current file out of the way. 
if w.maxFiles < 2 { if err := unlink(fname); err != nil && !errors.Is(err, fs.ErrNotExist) { - logrus.WithError(err).Error("Error unlinking current log file") + log.G(context.TODO()).WithError(err).Error("Error unlinking current log file") } } else { if err := os.Rename(fname, fname+".1"); err != nil && !errors.Is(err, fs.ErrNotExist) { - logrus.WithError(err).Error("Error renaming current log file") + log.G(context.TODO()).WithError(err).Error("Error renaming current log file") } } } @@ -262,7 +262,7 @@ func (w *LogFile) rotate() (retErr error) { // point during the compression process will a reader fail to // open a complete copy of the file. if err := compressFile(fname+".1", ts); err != nil { - logrus.WithError(err).Error("Error compressing log file after rotation") + log.G(context.TODO()).WithError(err).Error("Error compressing log file after rotation") } }() @@ -289,7 +289,7 @@ func rotate(name string, maxFiles int, compress bool) error { toPath := name + "." + strconv.Itoa(i) + extension fromPath := name + "." 
+ strconv.Itoa(i-1) + extension err := os.Rename(fromPath, toPath) - logrus.WithError(err).WithField("source", fromPath).WithField("target", toPath).Trace("Rotating log file") + log.G(context.TODO()).WithError(err).WithField("source", fromPath).WithField("target", toPath).Trace("Rotating log file") if err != nil && !errors.Is(err, fs.ErrNotExist) { return err } @@ -302,7 +302,7 @@ func compressFile(fileName string, lastTimestamp time.Time) (retErr error) { file, err := open(fileName) if err != nil { if errors.Is(err, fs.ErrNotExist) { - logrus.WithField("file", fileName).WithError(err).Debug("Could not open log file to compress") + log.G(context.TODO()).WithField("file", fileName).WithError(err).Debug("Could not open log file to compress") return nil } return errors.Wrap(err, "failed to open log file") @@ -325,7 +325,7 @@ func compressFile(fileName string, lastTimestamp time.Time) (retErr error) { outFile.Close() if retErr != nil { if err := unlink(fileName + ".gz"); err != nil && !errors.Is(err, fs.ErrNotExist) { - logrus.WithError(err).Error("Error cleaning up after failed log compression") + log.G(context.TODO()).WithError(err).Error("Error cleaning up after failed log compression") } } }() @@ -339,7 +339,7 @@ func compressFile(fileName string, lastTimestamp time.Time) (retErr error) { compressWriter.Header.Extra, err = json.Marshal(&extra) if err != nil { // Here log the error only and don't return since this is just an optimization. 
- logrus.Warningf("Failed to marshal gzip header as JSON: %v", err) + log.G(context.TODO()).Warningf("Failed to marshal gzip header as JSON: %v", err) } _, err = pools.Copy(compressWriter, file) diff --git a/daemon/logger/splunk/splunk.go b/daemon/logger/splunk/splunk.go index d194334dad..007af09efd 100644 --- a/daemon/logger/splunk/splunk.go +++ b/daemon/logger/splunk/splunk.go @@ -19,11 +19,11 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/pools" "github.com/google/uuid" - "github.com/sirupsen/logrus" ) const ( @@ -446,7 +446,7 @@ func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) } if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil { - logrus.WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs") + log.G(ctx).WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs") if messagesLen-i >= l.bufferMaximum || lastChance { // If this is last chance - print them all to the daemon log if lastChance { @@ -456,9 +456,9 @@ func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) // we could not send and return buffer minus one batch size for j := i; j < upperBound; j++ { if jsonEvent, err := json.Marshal(messages[j]); err != nil { - logrus.Error(err) + log.G(ctx).Error(err) } else { - logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) + log.G(ctx).Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) } } return messages[upperBound:messagesLen] @@ -651,7 +651,7 @@ func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time. } parsedValue, err := time.ParseDuration(valueStr) if err != nil { - logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. 
%v", envName, defaultValue, err)) + log.G(context.TODO()).Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)) return defaultValue } return parsedValue @@ -664,7 +664,7 @@ func getAdvancedOptionInt(envName string, defaultValue int) int { } parsedValue, err := strconv.ParseInt(valueStr, 10, 32) if err != nil { - logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)) + log.G(context.TODO()).Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)) return defaultValue } return int(parsedValue) diff --git a/daemon/logs.go b/daemon/logs.go index 3e73177d5d..8ceefb9422 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -5,6 +5,7 @@ import ( "strconv" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" containertypes "github.com/docker/docker/api/types/container" @@ -25,7 +26,7 @@ import ( // if it returns nil, the config channel will be active and return log // messages until it runs out or the context is canceled. 
func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (messages <-chan *backend.LogMessage, isTTY bool, retErr error) { - lg := logrus.WithFields(logrus.Fields{ + lg := log.G(ctx).WithFields(logrus.Fields{ "module": "daemon", "method": "(*Daemon).ContainerLogs", "container": containerName, @@ -55,7 +56,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c defer func() { if retErr != nil { if err = cLog.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) + log.G(ctx).Errorf("Error closing logger: %v", err) } } }() @@ -108,7 +109,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c if cLogCreated { defer func() { if err = cLog.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) + log.G(ctx).Errorf("Error closing logger: %v", err) } }() } diff --git a/daemon/metrics.go b/daemon/metrics.go index 24c17f2fae..afbdbb06fe 100644 --- a/daemon/metrics.go +++ b/daemon/metrics.go @@ -1,15 +1,16 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" metrics "github.com/docker/go-metrics" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" ) const metricsPluginType = "MetricsCollector" @@ -121,11 +122,11 @@ func (daemon *Daemon) cleanupMetricsPlugins() { adapter, err := makePluginAdapter(p) if err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error creating metrics plugin adapter") + log.G(context.TODO()).WithError(err).WithField("plugin", p.Name()).Error("Error creating metrics plugin adapter") return } if err := adapter.StopMetrics(); err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error stopping plugin metrics collection") + 
log.G(context.TODO()).WithError(err).WithField("plugin", p.Name()).Error("Error stopping plugin metrics collection") } }() } diff --git a/daemon/metrics_unix.go b/daemon/metrics_unix.go index df49465dc3..2514149857 100644 --- a/daemon/metrics_unix.go +++ b/daemon/metrics_unix.go @@ -3,12 +3,14 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "net" "net/http" "path/filepath" "strings" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/config" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" @@ -16,7 +18,6 @@ import ( metrics "github.com/docker/go-metrics" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -31,13 +32,13 @@ func (daemon *Daemon) listenMetricsSock(cfg *config.Config) (string, error) { mux := http.NewServeMux() mux.Handle("/metrics", metrics.Handler()) go func() { - logrus.Debugf("metrics API listening on %s", l.Addr()) + log.G(context.TODO()).Debugf("metrics API listening on %s", l.Addr()) srv := &http.Server{ Handler: mux, ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout. 
} if err := srv.Serve(l); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Error("error serving metrics API") + log.G(context.TODO()).WithError(err).Error("error serving metrics API") } }() daemon.metricsPluginListener = l @@ -61,10 +62,10 @@ func registerMetricsPluginCallback(store *plugin.Store, sockPath string) { adapter, err := makePluginAdapter(p) if err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error creating plugin adapter") + log.G(context.TODO()).WithError(err).WithField("plugin", p.Name()).Error("Error creating plugin adapter") } if err := adapter.StartMetrics(); err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error starting metrics collector plugin") + log.G(context.TODO()).WithError(err).WithField("plugin", p.Name()).Error("Error starting metrics collector plugin") } }) } diff --git a/daemon/monitor.go b/daemon/monitor.go index 1897c533e3..ff48e55b5b 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -5,6 +5,7 @@ import ( "strconv" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" @@ -42,7 +43,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine es, err := tsk.Delete(ctx) cancel() if err != nil { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "container": c.ID, }).Warn("failed to delete container from containerd") @@ -72,7 +73,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine execDuration := time.Since(c.StartedAt) restart, wait, err := c.RestartManager().ShouldRestart(uint32(exitStatus.ExitCode), daemonShutdown || c.HasBeenManuallyStopped, execDuration) if err != nil { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "container": c.ID, "restartCount": 
c.RestartCount, @@ -92,7 +93,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine if restart { c.RestartCount++ - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ "container": c.ID, "restartCount": c.RestartCount, "exitStatus": exitStatus, @@ -122,7 +123,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine daemon.waitForStartupDone() cfg := daemon.config() // Apply the most up-to-date daemon config to the restarted container. if err = daemon.containerStart(context.Background(), cfg, c, "", "", false); err != nil { - logrus.Debugf("failed to restart container: %+v", err) + log.G(ctx).Debugf("failed to restart container: %+v", err) } } if err != nil { @@ -133,7 +134,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine c.Unlock() defer daemon.autoRemove(&cfg.Config, c) if err != restartmanager.ErrRestartCanceled { - logrus.Errorf("restartmanger wait error: %+v", err) + log.G(ctx).Errorf("restartmanager wait error: %+v", err) } }() @@ -190,7 +191,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei cancel() if err := execConfig.CloseStreams(); err != nil { - logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) + log.G(ctx).Errorf("failed to cleanup exec %s streams: %s", c.ID, err) } exitCode = ec @@ -206,7 +207,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei if execConfig.Process != nil { go func() { if _, err := execConfig.Process.Delete(context.Background()); err != nil { - logrus.WithFields(logrus.Fields{ + log.G(ctx).WithFields(logrus.Fields{ logrus.ErrorKey: err, "container": ei.ContainerID, "process": ei.ProcessID, @@ -231,7 +232,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei if errdefs.IsNotFound(err) { // The container was started by not-docker and so could have been deleted by // not-docker before we got around to
loading it from containerd. - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ logrus.ErrorKey: err, "container": c.ID, }).Debug("could not load containerd container for start event") @@ -242,7 +243,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei tsk, err := ctr.Task(context.Background()) if err != nil { if errdefs.IsNotFound(err) { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ logrus.ErrorKey: err, "container": c.ID, }).Debug("failed to load task for externally-started container") @@ -310,5 +311,5 @@ func (daemon *Daemon) autoRemove(cfg *config.Config, c *container.Container) { return } - logrus.WithFields(logrus.Fields{logrus.ErrorKey: err, "container": c.ID}).Error("error removing container") + log.G(context.TODO()).WithFields(logrus.Fields{logrus.ErrorKey: err, "container": c.ID}).Error("error removing container") } diff --git a/daemon/names.go b/daemon/names.go index ccb6467ba3..86bf9c51f2 100644 --- a/daemon/names.go +++ b/daemon/names.go @@ -1,16 +1,17 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" "github.com/docker/docker/daemon/names" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/stringid" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var ( @@ -68,7 +69,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) { if errors.Is(err, container.ErrNameReserved) { id, err := daemon.containersReplica.Snapshot().GetID(name) if err != nil { - logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + log.G(context.TODO()).Errorf("got unexpected error while looking up reserved name: %v", err) return "", err } return "", nameConflictError{id: id, name: name} diff --git a/daemon/network.go b/daemon/network.go index 
5a52fb4616..f808727644 100644 --- a/daemon/network.go +++ b/daemon/network.go @@ -9,6 +9,7 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" @@ -212,7 +213,7 @@ func (daemon *Daemon) setupIngress(cfg *config.Config, create *clustertypes.Netw // If it is any other error other than already // exists error log error and return. if _, ok := err.(libnetwork.NetworkNameError); !ok { - logrus.Errorf("Failed creating ingress network: %v", err) + log.G(context.TODO()).Errorf("Failed creating ingress network: %v", err) return } // Otherwise continue down the call to create or recreate sandbox. @@ -220,7 +221,7 @@ func (daemon *Daemon) setupIngress(cfg *config.Config, create *clustertypes.Netw _, err := daemon.GetNetworkByID(create.ID) if err != nil { - logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + log.G(context.TODO()).Errorf("Failed getting ingress network by id after creating: %v", err) } } @@ -233,12 +234,12 @@ func (daemon *Daemon) releaseIngress(id string) { n, err := controller.NetworkByID(id) if err != nil { - logrus.Errorf("failed to retrieve ingress network %s: %v", id, err) + log.G(context.TODO()).Errorf("failed to retrieve ingress network %s: %v", id, err) return } if err := n.Delete(libnetwork.NetworkDeleteOptionRemoveLB); err != nil { - logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) + log.G(context.TODO()).Errorf("Failed to delete ingress network %s: %v", n.ID(), err) return } } @@ -323,7 +324,7 @@ func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCrea if defaultOpts, ok := cfg.DefaultNetworkOpts[driver]; create.ConfigFrom == nil && ok { for k, v := range defaultOpts { if _, ok := networkOptions[k]; !ok { - logrus.WithFields(logrus.Fields{"driver": driver, "network": id, k: v}).Debug("Applying network default option") + 
log.G(context.TODO()).WithFields(logrus.Fields{"driver": driver, "network": id, k: v}).Debug("Applying network default option") networkOptions[k] = v } } @@ -411,7 +412,7 @@ func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { if daemon.PluginStore != nil { _, err := daemon.PluginStore.Get(driver, capability, mode) if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") + log.G(context.TODO()).WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") } } } @@ -785,12 +786,12 @@ func (daemon *Daemon) clearAttachableNetworks() { } containerID := sb.ContainerID() if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil { - logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", + log.G(context.TODO()).Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", containerID, n.Name(), err) } } if err := daemon.DeleteManagedNetwork(n.ID()); err != nil { - logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) + log.G(context.TODO()).Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) } } } diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go index 3ccf7cd79d..17dc0a87c6 100644 --- a/daemon/oci_linux.go +++ b/daemon/oci_linux.go @@ -11,6 +11,7 @@ import ( cdcgroups "github.com/containerd/cgroups/v3" "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/log" coci "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/apparmor" "github.com/containerd/containerd/pkg/userns" @@ -30,7 +31,6 @@ import ( "github.com/opencontainers/runc/libcontainer/user" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -651,7 
+651,7 @@ func withMounts(daemon *Daemon, daemonCfg *configStore, c *container.Container) return err } fallback = true - logrus.WithField("container", c.ID).WithField("source", m.Source).Warn("Falling back to default propagation for bind source in daemon root") + log.G(ctx).WithField("container", c.ID).WithField("source", m.Source).Warn("Falling back to default propagation for bind source in daemon root") } if !fallback { rootpg := mountPropagationMap[s.Linux.RootfsPropagation] @@ -855,7 +855,7 @@ func withCgroups(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Contain if useSystemd { cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID - logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + log.G(ctx).Debugf("createSpec: cgroupsPath: %s", cgroupsPath) } else { cgroupsPath = filepath.Join(parent, c.ID) } @@ -925,11 +925,11 @@ func WithDevices(daemon *Daemon, c *container.Container) coci.SpecOpts { for _, deviceMapping := range c.HostConfig.Devices { // issue a warning that custom cgroup permissions are ignored in privileged mode if deviceMapping.CgroupPermissions != "rwm" { - logrus.WithField("container", c.ID).Warnf("custom %s permissions for device %s are ignored in privileged mode", deviceMapping.CgroupPermissions, deviceMapping.PathOnHost) + log.G(ctx).WithField("container", c.ID).Warnf("custom %s permissions for device %s are ignored in privileged mode", deviceMapping.CgroupPermissions, deviceMapping.PathOnHost) } // issue a warning that the device path already exists via /dev mounting in privileged mode if deviceMapping.PathOnHost == deviceMapping.PathInContainer { - logrus.WithField("container", c.ID).Warnf("path in container %s already exists in privileged mode", deviceMapping.PathInContainer) + log.G(ctx).WithField("container", c.ID).Warnf("path in container %s already exists in privileged mode", deviceMapping.PathInContainer) continue } d, _, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, "rwm") diff 
--git a/daemon/oci_windows.go b/daemon/oci_windows.go index 7809a02c53..920686b569 100644 --- a/daemon/oci_windows.go +++ b/daemon/oci_windows.go @@ -8,6 +8,9 @@ import ( "path/filepath" "strings" + "github.com/sirupsen/logrus" + + "github.com/containerd/containerd/log" coci "github.com/containerd/containerd/oci" containertypes "github.com/docker/docker/api/types/container" imagetypes "github.com/docker/docker/api/types/image" @@ -19,7 +22,6 @@ import ( "github.com/docker/docker/pkg/system" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/windows/registry" ) @@ -208,7 +210,7 @@ func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *configStore, c if logrus.IsLevelEnabled(logrus.DebugLevel) { if b, err := json.Marshal(&s); err == nil { - logrus.Debugf("Generated spec: %s", string(b)) + log.G(ctx).Debugf("Generated spec: %s", string(b)) } } diff --git a/daemon/pause.go b/daemon/pause.go index 976531e527..c4204afdac 100644 --- a/daemon/pause.go +++ b/daemon/pause.go @@ -4,8 +4,8 @@ import ( "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" - "github.com/sirupsen/logrus" ) // ContainerPause pauses a container @@ -49,7 +49,7 @@ func (daemon *Daemon) containerPause(container *container.Container) error { daemon.LogContainerEvent(container, "pause") if err := container.CheckpointTo(daemon.containersReplica); err != nil { - logrus.WithError(err).Warn("could not save container to disk") + log.G(context.TODO()).WithError(err).Warn("could not save container to disk") } return nil diff --git a/daemon/prune.go b/daemon/prune.go index d2b265a678..c194df0916 100644 --- a/daemon/prune.go +++ b/daemon/prune.go @@ -7,6 +7,7 @@ import ( "sync/atomic" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" @@ -15,7 +16,6 @@ import ( 
"github.com/docker/docker/libnetwork" "github.com/docker/docker/runconfig" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var ( @@ -61,7 +61,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters. for _, c := range allContainers { select { case <-ctx.Done(): - logrus.Debugf("ContainersPrune operation cancelled: %#v", *rep) + log.G(ctx).Debugf("ContainersPrune operation cancelled: %#v", *rep) return rep, nil default: } @@ -80,7 +80,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters. // TODO: sets RmLink to true? err = daemon.containerRm(cfg, c.ID, &types.ContainerRmConfig{}) if err != nil { - logrus.Warnf("failed to prune container %s: %v", c.ID, err) + log.G(ctx).Warnf("failed to prune container %s: %v", c.ID, err) continue } if cSize > 0 { @@ -126,7 +126,7 @@ func (daemon *Daemon) localNetworksPrune(ctx context.Context, pruneFilters filte return false } if err := daemon.DeleteNetwork(nw.ID()); err != nil { - logrus.Warnf("could not remove local network %s: %v", nwName, err) + log.G(ctx).Warnf("could not remove local network %s: %v", nwName, err) return false } rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) @@ -176,7 +176,7 @@ func (daemon *Daemon) clusterNetworksPrune(ctx context.Context, pruneFilters fil // we can safely ignore the "network .. 
is in use" error match := networkIsInUse.FindStringSubmatch(err.Error()) if len(match) != 2 || match[1] != nw.ID { - logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err) + log.G(ctx).Warnf("could not remove cluster network %s: %v", nw.Name, err) } continue } @@ -213,7 +213,7 @@ func (daemon *Daemon) NetworksPrune(ctx context.Context, pruneFilters filters.Ar select { case <-ctx.Done(): - logrus.Debugf("NetworksPrune operation cancelled: %#v", *rep) + log.G(ctx).Debugf("NetworksPrune operation cancelled: %#v", *rep) return rep, nil default: } diff --git a/daemon/reload.go b/daemon/reload.go index 682c71786c..ff1f076670 100644 --- a/daemon/reload.go +++ b/daemon/reload.go @@ -1,10 +1,12 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "encoding/json" "fmt" "strconv" + "github.com/containerd/containerd/log" "github.com/hashicorp/go-multierror" "github.com/mitchellh/copystructure" "github.com/sirupsen/logrus" @@ -124,7 +126,7 @@ func (daemon *Daemon) Reload(conf *config.Config) error { NoProxy: config.MaskCredentials(newCfg.NoProxy), }, }) - logrus.Infof("Reloaded configuration: %s", jsonString) + log.G(context.TODO()).Infof("Reloaded configuration: %s", jsonString) daemon.configStore.Store(newCfg) daemon.LogDaemonEventWithAttributes("reload", attributes) return txn.Commit() @@ -179,8 +181,8 @@ func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(txn *reloadTxn, new // prepare reload event attributes with updatable configurations attributes["max-concurrent-downloads"] = strconv.Itoa(newCfg.MaxConcurrentDownloads) attributes["max-concurrent-uploads"] = strconv.Itoa(newCfg.MaxConcurrentUploads) - logrus.Debug("Reset Max Concurrent Downloads: ", attributes["max-concurrent-downloads"]) - logrus.Debug("Reset Max Concurrent Uploads: ", attributes["max-concurrent-uploads"]) + log.G(context.TODO()).Debug("Reset Max Concurrent Downloads: ", attributes["max-concurrent-downloads"]) + log.G(context.TODO()).Debug("Reset Max 
Concurrent Uploads: ", attributes["max-concurrent-uploads"]) return nil } @@ -195,7 +197,7 @@ func (daemon *Daemon) reloadMaxDownloadAttempts(txn *reloadTxn, newCfg *configSt // prepare reload event attributes with updatable configurations attributes["max-download-attempts"] = strconv.Itoa(newCfg.MaxDownloadAttempts) - logrus.Debug("Reset Max Download Attempts: ", attributes["max-download-attempts"]) + log.G(context.TODO()).Debug("Reset Max Download Attempts: ", attributes["max-download-attempts"]) return nil } @@ -205,7 +207,7 @@ func (daemon *Daemon) reloadShutdownTimeout(txn *reloadTxn, newCfg *configStore, // update corresponding configuration if conf.IsValueSet("shutdown-timeout") { newCfg.ShutdownTimeout = conf.ShutdownTimeout - logrus.Debugf("Reset Shutdown Timeout: %d", newCfg.ShutdownTimeout) + log.G(context.TODO()).Debugf("Reset Shutdown Timeout: %d", newCfg.ShutdownTimeout) } // prepare reload event attributes with updatable configurations @@ -278,7 +280,7 @@ func (daemon *Daemon) reloadNetworkDiagnosticPort(txn *reloadTxn, newCfg *config return nil } // Enable the network diagnostic if the flag is set with a valid port within the range - logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") + log.G(context.TODO()).WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort) return nil }) diff --git a/daemon/rename.go b/daemon/rename.go index 4ecfa62257..52c41d158a 100644 --- a/daemon/rename.go +++ b/daemon/rename.go @@ -1,13 +1,14 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "strings" + "github.com/containerd/containerd/log" dockercontainer "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/docker/docker/libnetwork" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // 
ContainerRename changes the name of a container, using the oldName @@ -101,7 +102,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { container.Name = oldName container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint if e := container.CheckpointTo(daemon.containersReplica); e != nil { - logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + log.G(context.TODO()).Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) } } }() diff --git a/daemon/runtime_unix.go b/daemon/runtime_unix.go index 4d4be1ca91..ca521debb9 100644 --- a/daemon/runtime_unix.go +++ b/daemon/runtime_unix.go @@ -4,6 +4,7 @@ package daemon import ( "bytes" + "context" "crypto/sha256" "encoding/base32" "encoding/json" @@ -14,6 +15,7 @@ import ( "path/filepath" "strings" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/plugin" v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/containerd/runtime/v2/shim" @@ -24,7 +26,6 @@ import ( "github.com/docker/docker/pkg/system" "github.com/opencontainers/runtime-spec/specs-go/features" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -70,11 +71,11 @@ func defaultV2ShimConfig(conf *config.Config, runtimePath string) *shimConfig { featuresCmd := exec.Command(runtimePath, "features") featuresCmd.Stderr = &featuresStderr if featuresB, err := featuresCmd.Output(); err != nil { - logrus.WithError(err).Warnf("Failed to run %v: %q", featuresCmd.Args, featuresStderr.String()) + log.G(context.TODO()).WithError(err).Warnf("Failed to run %v: %q", featuresCmd.Args, featuresStderr.String()) } else { var features features.Features if jsonErr := json.Unmarshal(featuresB, &features); jsonErr != nil { - logrus.WithError(err).Warnf("Failed to unmarshal the output of %v as a JSON", featuresCmd.Args) + log.G(context.TODO()).WithError(jsonErr).Warnf("Failed to unmarshal the output of %v as a JSON",
featuresCmd.Args) } else { shim.Features = &features } diff --git a/daemon/seccomp_linux.go b/daemon/seccomp_linux.go index 2e3c37818e..4d1ff76865 100644 --- a/daemon/seccomp_linux.go +++ b/daemon/seccomp_linux.go @@ -5,12 +5,12 @@ import ( "fmt" "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/log" coci "github.com/containerd/containerd/oci" "github.com/docker/docker/container" dconfig "github.com/docker/docker/daemon/config" "github.com/docker/docker/profiles/seccomp" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" ) const supportsSeccomp = true @@ -28,7 +28,7 @@ func WithSeccomp(daemon *Daemon, c *container.Container) coci.SpecOpts { if c.SeccompProfile != "" && c.SeccompProfile != dconfig.SeccompProfileDefault { return fmt.Errorf("seccomp is not enabled in your kernel, cannot run a custom seccomp profile") } - logrus.Warn("seccomp is not enabled in your kernel, running container without default profile") + log.G(ctx).Warn("seccomp is not enabled in your kernel, running container without default profile") c.SeccompProfile = dconfig.SeccompProfileUnconfined return nil } diff --git a/daemon/secrets.go b/daemon/secrets.go index 6d368a9fd7..c81355495c 100644 --- a/daemon/secrets.go +++ b/daemon/secrets.go @@ -1,14 +1,16 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" + + "github.com/containerd/containerd/log" swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/sirupsen/logrus" ) // SetContainerSecretReferences sets the container secret references needed func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { if !secretsSupported() && len(refs) > 0 { - logrus.Warn("secrets are not supported on this platform") + log.G(context.TODO()).Warn("secrets are not supported on this platform") return nil } diff --git a/daemon/snapshotter/mount.go b/daemon/snapshotter/mount.go index f156bb2ab2..9c716d9b98 
100644 --- a/daemon/snapshotter/mount.go +++ b/daemon/snapshotter/mount.go @@ -1,14 +1,15 @@ package snapshotter import ( + "context" "os" "path/filepath" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" "github.com/moby/locker" - "github.com/sirupsen/logrus" ) const mountsDir = "rootfs" @@ -82,10 +83,10 @@ func (m *refCountMounter) Mount(mounts []mount.Mount, containerID string) (targe if retErr != nil { if c := m.rc.Decrement(target); c <= 0 { if mntErr := unmount(target); mntErr != nil { - logrus.Errorf("error unmounting %s: %v", target, mntErr) + log.G(context.TODO()).Errorf("error unmounting %s: %v", target, mntErr) } if rmErr := os.Remove(target); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.Debugf("Failed to remove %s: %v: %v", target, rmErr, err) + log.G(context.TODO()).Debugf("Failed to remove %s: %v: %v", target, rmErr, err) } } } @@ -108,11 +109,11 @@ func (m *refCountMounter) Unmount(target string) error { defer m.locker.Unlock(target) if err := unmount(target); err != nil { - logrus.Debugf("Failed to unmount %s: %v", target, err) + log.G(context.TODO()).Debugf("Failed to unmount %s: %v", target, err) } if err := os.Remove(target); err != nil { - logrus.WithError(err).WithField("dir", target).Error("failed to remove mount temp dir") + log.G(context.TODO()).WithError(err).WithField("dir", target).Error("failed to remove mount temp dir") } return nil diff --git a/daemon/start.go b/daemon/start.go index df42a2ad4b..6d6d2fc969 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -5,13 +5,13 @@ import ( "runtime" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // 
ContainerStart starts a container. @@ -53,7 +53,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi // This is kept for backward compatibility - hostconfig should be passed when // creating a container, not during start. if hostConfig != nil { - logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") + log.G(ctx).Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") oldNetworkMode := ctr.HostConfig.NetworkMode if err := daemon.setSecurityOptions(&daemonCfg.Config, ctr, hostConfig); err != nil { return errdefs.InvalidParameter(err) @@ -128,7 +128,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore container.SetExitCode(exitUnknown) } if err := container.CheckpointTo(daemon.containersReplica); err != nil { - logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err) + log.G(ctx).Errorf("%s: failed saving state on start failure: %v", container.ID, err) } container.Reset(false) @@ -137,7 +137,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore if container.HostConfig.AutoRemove { container.Unlock() if err := daemon.containerRm(&daemonCfg.Config, container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { - logrus.Errorf("can't remove container %s: %v", container.ID, err) + log.G(ctx).Errorf("can't remove container %s: %v", container.ID, err) } container.Lock() } @@ -189,7 +189,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore container.InitializeStdio) if err != nil { if err := ctr.Delete(context.Background()); err != nil { - logrus.WithError(err).WithField("container", container.ID). + log.G(ctx).WithError(err).WithField("container", container.ID). 
Error("failed to delete failed start container") } return setExitCodeFromError(container.SetExitCode, err) @@ -203,7 +203,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore daemon.initHealthMonitor(container) if err := container.CheckpointTo(daemon.containersReplica); err != nil { - logrus.WithError(err).WithField("container", container.ID). + log.G(ctx).WithError(err).WithField("container", container.ID). Errorf("failed to store container") } @@ -220,14 +220,14 @@ func (daemon *Daemon) Cleanup(container *container.Container) { // released while the container still exists. if ctr, ok := container.C8dContainer(); ok { if err := ctr.Delete(context.Background()); err != nil { - logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err) + log.G(context.TODO()).Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err) } } daemon.releaseNetwork(container) if err := container.UnmountIpcMount(); err != nil { - logrus.Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err) + log.G(context.TODO()).Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err) } if err := daemon.conditionalUnmountOnCleanup(container); err != nil { @@ -239,11 +239,11 @@ func (daemon *Daemon) Cleanup(container *container.Container) { } if err := container.UnmountSecrets(); err != nil { - logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) + log.G(context.TODO()).Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) } if err := recursiveUnmount(container.Root); err != nil { - logrus.WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.") + log.G(context.TODO()).WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.") } for _, eConfig := range container.ExecCommands.Commands() { @@ -252,7 +252,7 @@ func (daemon *Daemon) Cleanup(container 
*container.Container) { if container.BaseFS != "" { if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { - logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + log.G(context.TODO()).Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) } } diff --git a/daemon/stats/collector.go b/daemon/stats/collector.go index 935bd8056e..aa033e48ed 100644 --- a/daemon/stats/collector.go +++ b/daemon/stats/collector.go @@ -2,14 +2,15 @@ package stats // import "github.com/docker/docker/daemon/stats" import ( "bufio" + "context" "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/moby/pubsub" - "github.com/sirupsen/logrus" ) // Collector manages and provides container resource stats @@ -109,7 +110,7 @@ func (s *Collector) Run() { onlineCPUs, err := s.getNumberOnlineCPUs() if err != nil { - logrus.Errorf("collecting system online cpu count: %v", err) + log.G(context.TODO()).Errorf("collecting system online cpu count: %v", err) continue } @@ -122,7 +123,7 @@ func (s *Collector) Run() { // noise in metric calculations. 
systemUsage, err := s.getSystemCPUUsage() if err != nil { - logrus.WithError(err).WithField("container_id", pair.container.ID).Errorf("collecting system cpu usage") + log.G(context.TODO()).WithError(err).WithField("container_id", pair.container.ID).Errorf("collecting system cpu usage") continue } @@ -140,7 +141,7 @@ func (s *Collector) Run() { }) default: - logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + log.G(context.TODO()).Errorf("collecting stats for %s: %v", pair.container.ID, err) pair.publisher.Publish(types.StatsJSON{ Name: pair.container.Name, ID: pair.container.ID, diff --git a/daemon/stop.go b/daemon/stop.go index 0d29e5c996..b5b1506d73 100644 --- a/daemon/stop.go +++ b/daemon/stop.go @@ -4,12 +4,12 @@ import ( "context" "time" + "github.com/containerd/containerd/log" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/moby/sys/signal" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ContainerStop looks for the given container and stops it. 
@@ -94,7 +94,7 @@ func (daemon *Daemon) containerStop(_ context.Context, ctr *container.Container, if err != nil { // the container has still not exited, and the kill function errored, so log the error here: - logrus.WithError(err).WithField("container", ctr.ID).Errorf("Error sending stop (signal %d) to container", stopSignal) + log.G(ctx).WithError(err).WithField("container", ctr.ID).Errorf("Error sending stop (signal %d) to container", stopSignal) } if stopTimeout < 0 { // if the client requested that we never kill / wait forever, but container.Wait was still @@ -102,7 +102,7 @@ func (daemon *Daemon) containerStop(_ context.Context, ctr *container.Container, return err } - logrus.WithField("container", ctr.ID).Infof("Container failed to exit within %s of signal %d - using the force", wait, stopSignal) + log.G(ctx).WithField("container", ctr.ID).Infof("Container failed to exit within %s of signal %d - using the force", wait, stopSignal) // Stop either failed or container didn't exit, so fallback to kill. 
if err := daemon.Kill(ctr); err != nil { @@ -111,7 +111,7 @@ func (daemon *Daemon) containerStop(_ context.Context, ctr *container.Container, defer cancel() status := <-ctr.Wait(subCtx, container.WaitConditionNotRunning) if status.Err() != nil { - logrus.WithError(err).WithField("container", ctr.ID).Errorf("error killing container: %v", status.Err()) + log.G(ctx).WithError(err).WithField("container", ctr.ID).Errorf("error killing container: %v", status.Err()) return err } // container did exit, so ignore previous errors and continue diff --git a/daemon/unpause.go b/daemon/unpause.go index eb52256771..c3568311da 100644 --- a/daemon/unpause.go +++ b/daemon/unpause.go @@ -4,8 +4,8 @@ import ( "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/container" - "github.com/sirupsen/logrus" ) // ContainerUnpause unpauses a container @@ -41,7 +41,7 @@ func (daemon *Daemon) containerUnpause(ctr *container.Container) error { daemon.LogContainerEvent(ctr, "unpause") if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { - logrus.WithError(err).Warn("could not save container to disk") + log.G(context.TODO()).WithError(err).Warn("could not save container to disk") } return nil diff --git a/daemon/volumes.go b/daemon/volumes.go index 6e17e221c6..9d1ebee19c 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/containerd/containerd/log" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" mounttypes "github.com/docker/docker/api/types/mount" @@ -18,7 +19,6 @@ import ( "github.com/docker/docker/volume/service" volumeopts "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type mounts []container.Mount @@ -72,7 +72,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo dereferenceIfExists := func(destination string) { if v, ok := 
mountPoints[destination]; ok { - logrus.Debugf("Duplicate mount point '%s'", destination) + log.G(ctx).Debugf("Duplicate mount point '%s'", destination) if v.Volume != nil { daemon.volumes.Release(ctx, v.Volume.Name(), container.ID) } diff --git a/distribution/errors.go b/distribution/errors.go index d0de8d9e19..f91b3327e0 100644 --- a/distribution/errors.go +++ b/distribution/errors.go @@ -1,11 +1,13 @@ package distribution // import "github.com/docker/docker/distribution" import ( + "context" "fmt" "net/url" "strings" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" @@ -15,7 +17,6 @@ import ( "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/errdefs" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // fallbackError wraps an error that can possibly allow fallback to a different @@ -85,7 +86,7 @@ func translatePullError(err error, ref reference.Named) error { case errcode.Errors: if len(v) != 0 { for _, extra := range v[1:] { - logrus.WithError(extra).Infof("Ignoring extra error returned from registry") + log.G(context.TODO()).WithError(extra).Infof("Ignoring extra error returned from registry") } return translatePullError(v[0], ref) } diff --git a/distribution/manifest.go b/distribution/manifest.go index 621bd1cea1..6fb9dd4404 100644 --- a/distribution/manifest.go +++ b/distribution/manifest.go @@ -20,7 +20,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // labelDistributionSource describes the source blob comes from. 
@@ -117,7 +116,7 @@ func (m *manifestStore) getLocal(ctx context.Context, desc ocispec.Descriptor, r // If we haven't, we need to check the remote repository to see if it has the content, otherwise we can end up returning // a manifest that has never even existed in the remote before. if !hasDistributionSource(info.Labels[distKey], distRepo) { - logrus.WithField("ref", ref).Debug("found manifest but no mataching source repo is listed, checking with remote") + log.G(ctx).WithField("ref", ref).Debug("found manifest but no matching source repo is listed, checking with remote") exists, err := m.remote.Exists(ctx, desc.Digest) if err != nil { return nil, errors.Wrap(err, "error checking if remote exists") @@ -136,7 +135,7 @@ func (m *manifestStore) getLocal(ctx context.Context, desc ocispec.Descriptor, r } info.Labels[distKey] = appendDistributionSourceLabel(info.Labels[distKey], distRepo) if _, err := m.local.Update(ctx, info, "labels."+distKey); err != nil { - logrus.WithError(err).WithField("ref", ref).Warn("Could not update content distribution source") + log.G(ctx).WithError(err).WithField("ref", ref).Warn("Could not update content distribution source") } r := io.NewSectionReader(ra, 0, ra.Size()) diff --git a/distribution/pull.go b/distribution/pull.go index 7780ea4c43..0b89821b07 100644 --- a/distribution/pull.go +++ b/distribution/pull.go @@ -4,12 +4,12 @@ import ( "context", "fmt", + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api" refstore "github.com/docker/docker/reference" "github.com/opencontainers/go-digest" "github.com/pkg/errors", - "github.com/sirupsen/logrus" ) // Pull initiates a pull operation. 
image is the repository name to pull, and @@ -42,12 +42,12 @@ func Pull(ctx context.Context, ref reference.Named, config *ImagePullConfig, loc for _, endpoint := range endpoints { if endpoint.URL.Scheme != "https" { if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + log.G(ctx).Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) continue } } - logrus.Debugf("Trying to pull %s from %s", reference.FamiliarName(repoInfo.Name), endpoint.URL) + log.G(ctx).Debugf("Trying to pull %s from %s", reference.FamiliarName(repoInfo.Name), endpoint.URL) if err := newPuller(endpoint, repoInfo, config, local).pull(ctx, ref); err != nil { // Was this pull cancelled? If so, don't try to fall @@ -66,10 +66,10 @@ func Pull(ctx context.Context, ref reference.Named, config *ImagePullConfig, loc } if fallback { lastErr = err - logrus.Infof("Attempting next endpoint for pull after error: %v", err) + log.G(ctx).Infof("Attempting next endpoint for pull after error: %v", err) continue } - logrus.Errorf("Not continuing with pull after error: %v", err) + log.G(ctx).Errorf("Not continuing with pull after error: %v", err) return translatePullError(err, ref) } @@ -101,7 +101,7 @@ func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.D if oldTagID, err := store.Get(dgstRef); err == nil { if oldTagID != id { // Updating digests not supported by reference store - logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + log.G(context.TODO()).Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) } return nil } else if err != refstore.ErrDoesNotExist { diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go index 06e10a9b8d..82d686db2c 100644 --- a/distribution/pull_v2.go +++ b/distribution/pull_v2.go @@ -79,7 
+79,7 @@ func (p *puller) pull(ctx context.Context, ref reference.Named) (err error) { // TODO(tiborvass): was ReceiveTimeout p.repo, err = newRepository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { - logrus.Warnf("Error getting v2 registry: %v", err) + log.G(ctx).Warnf("Error getting v2 registry: %v", err) return err } @@ -180,7 +180,7 @@ func (ld *layerDescriptor) DiffID() (layer.DiffID, error) { } func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - logrus.Debugf("pulling blob %q", ld.digest) + log.G(ctx).Debugf("pulling blob %q", ld.digest) var ( err error @@ -195,19 +195,19 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress } else { offset, err = ld.tmpFile.Seek(0, io.SeekEnd) if err != nil { - logrus.Debugf("error seeking to end of download file: %v", err) + log.G(ctx).Debugf("error seeking to end of download file: %v", err) offset = 0 ld.tmpFile.Close() if err := os.Remove(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + log.G(ctx).Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } ld.tmpFile, err = createDownloadFile() if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } } else if offset != 0 { - logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + log.G(ctx).Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) } } @@ -215,7 +215,7 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress layerDownload, err := ld.open(ctx) if err != nil { - logrus.Errorf("Error initiating layer download: %v", err) + log.G(ctx).Errorf("Error initiating layer download: %v", err) return nil, 0, retryOnError(err) } @@ -236,7 +236,7 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress size = 0 } else { if size != 0 && offset > size { - 
logrus.Debug("Partial download is larger than full blob. Starting over") + log.G(ctx).Debug("Partial download is larger than full blob. Starting over") offset = 0 if err := ld.truncateDownloadFile(); err != nil { return nil, 0, xfer.DoNotRetry{Err: err} @@ -274,7 +274,7 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress if !ld.verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) - logrus.Error(err) + log.G(ctx).Error(err) // Allow a retry if this digest verification error happened // after a resumed download. @@ -290,13 +290,13 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress progress.Update(progressOutput, ld.ID(), "Download complete") - logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + log.G(ctx).Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) _, err = tmpFile.Seek(0, io.SeekStart) if err != nil { tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + log.G(ctx).Errorf("Failed to remove temp file: %s", tmpFile.Name()) } ld.tmpFile = nil ld.verifier = nil @@ -311,7 +311,7 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress tmpFile.Close() err := os.RemoveAll(tmpFile.Name()) if err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + log.G(ctx).Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return err }), size, nil @@ -321,7 +321,7 @@ func (ld *layerDescriptor) Close() { if ld.tmpFile != nil { ld.tmpFile.Close() if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + log.G(context.TODO()).Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) } } } @@ -331,12 +331,12 @@ func (ld *layerDescriptor) truncateDownloadFile() error { ld.verifier = nil if _, err := ld.tmpFile.Seek(0, io.SeekStart); 
err != nil { - logrus.Errorf("error seeking to beginning of download file: %v", err) + log.G(context.TODO()).Errorf("error seeking to beginning of download file: %v", err) return err } if err := ld.tmpFile.Truncate(0); err != nil { - logrus.Errorf("error truncating download file: %v", err) + log.G(context.TODO()).Errorf("error truncating download file: %v", err) return err } @@ -375,7 +375,7 @@ func (p *puller) pullTag(ctx context.Context, ref reference.Named, platform *oci return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) } - ctx = log.WithLogger(ctx, logrus.WithFields( + ctx = log.WithLogger(ctx, log.G(ctx).WithFields( logrus.Fields{ "digest": dgst, "remote": ref, @@ -390,7 +390,7 @@ func (p *puller) pullTag(ctx context.Context, ref reference.Named, platform *oci manifest, err := p.manifestStore.Get(ctx, desc, ref) if err != nil { if isTagged && isNotFound(errors.Cause(err)) { - logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag") + log.G(ctx).WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag") msg := `%s Failed to pull manifest by the resolved digest. This registry does not appear to conform to the distribution registry specification; falling back to @@ -428,7 +428,7 @@ func (p *puller) pullTag(ctx context.Context, ref reference.Named, platform *oci } } - logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) + log.G(ctx).Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) var ( @@ -442,7 +442,7 @@ func (p *puller) pullTag(ctx context.Context, ref reference.Named, platform *oci // TODO: condition to be removed if reference.Domain(ref) == "docker.io" { msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. 
Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) - logrus.Warn(msg) + log.G(ctx).Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) } @@ -847,7 +847,7 @@ func (p *puller) pullManifestList(ctx context.Context, ref reference.Named, mfst if pp != nil { platform = *pp } - logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s match", ref, len(mfstList.Manifests), platforms.Format(platform)) + log.G(ctx).Debugf("%s resolved to a manifestList object with %d entries; looking for a %s match", ref, len(mfstList.Manifests), platforms.Format(platform)) manifestMatches := filterManifests(mfstList.Manifests, platform) @@ -874,7 +874,7 @@ func (p *puller) pullManifestList(ctx context.Context, ref reference.Named, mfst switch v := manifest.(type) { case *schema1.SignedManifest: msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. 
More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) - logrus.Warn(msg) + log.G(ctx).Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) platform := toOCIPlatform(match.Platform) @@ -934,7 +934,7 @@ func (p *puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (con } if !verifier.Verified() { err := fmt.Errorf("image config verification failed for digest %s", dgst) - logrus.Error(err) + log.G(ctx).Error(err) return nil, err } @@ -967,7 +967,7 @@ func retry(ctx context.Context, maxAttempts int, sleep time.Duration, f func(ctx timer.Stop() return ctx.Err() case <-timer.C: - logrus.WithError(err).WithField("attempts", attempt+1).Debug("retrying after error") + log.G(ctx).WithError(err).WithField("attempts", attempt+1).Debug("retrying after error") sleep *= 2 } } @@ -991,7 +991,7 @@ func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (dig } if !verifier.Verified() { err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) - logrus.Error(err) + log.G(context.TODO()).Error(err) return "", err } return digested.Digest(), nil @@ -1011,7 +1011,7 @@ func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference } if !verifier.Verified() { err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) - logrus.Error(err) + log.G(context.TODO()).Error(err) return nil, err } } diff --git a/distribution/pull_v2_unix.go b/distribution/pull_v2_unix.go index b71719244f..824b84885e 100644 --- a/distribution/pull_v2_unix.go +++ b/distribution/pull_v2_unix.go @@ -6,11 +6,11 @@ import ( "context" "sort" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/docker/distribution" "github.com/docker/distribution/manifest/manifestlist" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) func (ld *layerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, 
error) { @@ -27,7 +27,7 @@ func filterManifests(manifests []manifestlist.ManifestDescriptor, p ocispec.Plat if descP == nil || m.Match(*descP) { matches = append(matches, desc) if descP != nil { - logrus.Debugf("found match for %s with media type %s, digest %s", platforms.Format(p), desc.MediaType, desc.Digest.String()) + log.G(context.TODO()).Debugf("found match for %s with media type %s, digest %s", platforms.Format(p), desc.MediaType, desc.Digest.String()) } } } diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go index 9e7dc25cf3..f9698a29c0 100644 --- a/distribution/pull_v2_windows.go +++ b/distribution/pull_v2_windows.go @@ -19,7 +19,7 @@ import ( + "github.com/containerd/containerd/log" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/pkg/system" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) var _ distribution.Describable = &layerDescriptor{} @@ -50,7 +50,7 @@ func (ld *layerDescriptor) open(ctx context.Context) (distribution.ReadSeekClose // Find the first URL that results in a 200 result code. for _, url := range ld.src.URLs { - logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + log.G(ctx).Debugf("Pulling %v from foreign URL %v", ld.digest, url) rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) // Seek does an HTTP GET. If it succeeds, the blob really is accessible. 
@@ -58,7 +58,7 @@ func (ld *layerDescriptor) open(ctx context.Context) (distribution.ReadSeekClose if err == nil { break } - logrus.Debugf("Download for %v failed: %v", ld.digest, err) + log.G(ctx).Debugf("Download for %v failed: %v", ld.digest, err) rsc.Close() rsc = nil } @@ -68,7 +68,7 @@ func (ld *layerDescriptor) open(ctx context.Context) (distribution.ReadSeekClose func filterManifests(manifests []manifestlist.ManifestDescriptor, p ocispec.Platform) []manifestlist.ManifestDescriptor { version := osversion.Get() osVersion := fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build) - logrus.Debugf("will prefer Windows entries with version %s", osVersion) + log.G(context.TODO()).Debugf("will prefer Windows entries with version %s", osVersion) var matches []manifestlist.ManifestDescriptor foundWindowsMatch := false @@ -83,9 +83,9 @@ func filterManifests(manifests []manifestlist.ManifestDescriptor, p ocispec.Plat foundWindowsMatch = true } matches = append(matches, manifestDescriptor) - logrus.Debugf("found match %s/%s %s with media type %s, digest %s", manifestDescriptor.Platform.OS, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + log.G(context.TODO()).Debugf("found match %s/%s %s with media type %s, digest %s", manifestDescriptor.Platform.OS, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) } else { - logrus.Debugf("ignoring %s/%s %s with media type %s, digest %s", manifestDescriptor.Platform.OS, manifestDescriptor.Platform.Architecture, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + log.G(context.TODO()).Debugf("ignoring %s/%s %s with media type %s, digest %s", manifestDescriptor.Platform.OS, manifestDescriptor.Platform.Architecture, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, 
manifestDescriptor.Digest.String()) } } if foundWindowsMatch { @@ -130,7 +130,7 @@ func checkImageCompatibility(imageOS, imageOSVersion string) error { if imageOSBuild, err := strconv.Atoi(splitImageOSVersion[2]); err == nil { if imageOSBuild > int(hostOSV.Build) { errMsg := fmt.Sprintf("a Windows version %s.%s.%s-based image is incompatible with a %s host", splitImageOSVersion[0], splitImageOSVersion[1], splitImageOSVersion[2], hostOSV.ToString()) - logrus.Debugf(errMsg) + log.G(context.TODO()).Debug(errMsg) return errors.New(errMsg) } } diff --git a/distribution/push.go b/distribution/push.go index 808c5ec316..fb416e2c48 100644 --- a/distribution/push.go +++ b/distribution/push.go @@ -7,9 +7,9 @@ import ( "fmt" "io" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/pkg/progress" - "github.com/sirupsen/logrus" ) const compressionBufSize = 32768 @@ -49,12 +49,12 @@ func Push(ctx context.Context, ref reference.Named, config *ImagePushConfig) err for _, endpoint := range endpoints { if endpoint.URL.Scheme != "https" { if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + log.G(ctx).Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) continue } } - logrus.Debugf("Trying to push %s to %s", repoInfo.Name.Name(), endpoint.URL) + log.G(ctx).Debugf("Trying to push %s to %s", repoInfo.Name.Name(), endpoint.URL) if err := newPusher(ref, endpoint, repoInfo, config).push(ctx); err != nil { // Was this push cancelled? 
If so, don't try to fall @@ -68,12 +68,12 @@ func Push(ctx context.Context, ref reference.Named, config *ImagePushConfig) err } err = fallbackErr.err lastErr = err - logrus.Infof("Attempting next endpoint for push after error: %v", err) + log.G(ctx).Infof("Attempting next endpoint for push after error: %v", err) continue } } - logrus.Errorf("Not continuing with push after error: %v", err) + log.G(ctx).Errorf("Not continuing with push after error: %v", err) return err } diff --git a/distribution/push_v2.go b/distribution/push_v2.go index 938c5cac35..c10efcebd4 100644 --- a/distribution/push_v2.go +++ b/distribution/push_v2.go @@ -10,6 +10,7 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" @@ -27,7 +28,6 @@ import ( "github.com/docker/libtrust" "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -77,7 +77,7 @@ func (p *pusher) push(ctx context.Context) (err error) { p.repo, err = newRepository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "") if err != nil { - logrus.Debugf("Error getting v2 registry: %v", err) + log.G(ctx).Debugf("Error getting v2 registry: %v", err) return err } @@ -125,7 +125,7 @@ func (p *pusher) pushRepository(ctx context.Context) (err error) { } func (p *pusher) pushTag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { - logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref)) + log.G(ctx).Debugf("Pushing repository: %s", reference.FamiliarString(ref)) imgConfig, err := p.config.ImageStore.Get(ctx, id) if err != nil { @@ -189,7 +189,7 @@ func (p *pusher) pushTag(ctx context.Context, ref reference.NamedTagged, id dige 
putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { if runtime.GOOS == "windows" { - logrus.Warnf("failed to upload schema2 manifest: %v", err) + log.G(ctx).Warnf("failed to upload schema2 manifest: %v", err) return err } @@ -199,13 +199,13 @@ func (p *pusher) pushTag(ctx context.Context, ref reference.NamedTagged, id dige if os.Getenv("DOCKER_ALLOW_SCHEMA1_PUSH_DONOTUSE") == "" { if err.Error() == "tag invalid" { msg := "[DEPRECATED] support for pushing manifest v2 schema1 images has been removed. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/" - logrus.WithError(err).Error(msg) + log.G(ctx).WithError(err).Error(msg) return errors.Wrap(err, msg) } return err } - logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + log.G(ctx).Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) // Note: this fallback is deprecated, see log messages below manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) @@ -228,7 +228,7 @@ func (p *pusher) pushTag(ctx context.Context, ref reference.NamedTagged, id dige // schema2 failed but schema1 succeeded msg := fmt.Sprintf("[DEPRECATION NOTICE] support for pushing manifest v2 schema1 images will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption. 
More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", reference.Domain(ref)) - logrus.Warn(msg) + log.G(ctx).Warn(msg) progress.Message(p.config.ProgressOutput, "", msg) } @@ -342,13 +342,13 @@ func (pd *pushDescriptor) Upload(ctx context.Context, progressOutput progress.Ou isUnauthorizedError := false for _, mc := range candidates { mountCandidate := mc - logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) + log.G(ctx).Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) createOpts := []distribution.BlobCreateOption{} if len(mountCandidate.SourceRepository) > 0 { namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository) if err != nil { - logrus.WithError(err).Errorf("failed to parse source repository reference %v", reference.FamiliarString(namedRef)) + log.G(ctx).WithError(err).Errorf("failed to parse source repository reference %v", reference.FamiliarString(namedRef)) _ = pd.metadataService.Remove(mountCandidate) continue } @@ -357,13 +357,13 @@ func (pd *pushDescriptor) Upload(ctx context.Context, progressOutput progress.Ou // with only path to set mount from with remoteRef, err := reference.WithName(reference.Path(namedRef)) if err != nil { - logrus.WithError(err).Errorf("failed to make remote reference out of %q", reference.Path(namedRef)) + log.G(ctx).WithError(err).Errorf("failed to make remote reference out of %q", reference.Path(namedRef)) continue } canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest) if err != nil { - logrus.WithError(err).Error("failed to make canonical reference") + log.G(ctx).WithError(err).Error("failed to make canonical reference") continue } @@ -398,14 +398,14 @@ func (pd *pushDescriptor) Upload(ctx context.Context, progressOutput progress.Ou case errcode.Error: if e.Code == errcode.ErrorCodeUnauthorized { 
// when unauthorized error that indicate user don't has right to push layer to register - logrus.Debugln("failed to push layer to registry because unauthorized error") + log.G(ctx).Debugln("failed to push layer to registry because unauthorized error") isUnauthorizedError = true } default: } } default: - logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) + log.G(ctx).Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) } // when error is unauthorizedError and user don't hasAuthInfo that's the case user don't has right to push layer to register @@ -418,7 +418,7 @@ func (pd *pushDescriptor) Upload(ctx context.Context, progressOutput progress.Ou if err != nil { cause = fmt.Sprintf("an error: %v", err.Error()) } - logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) + log.G(ctx).Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) _ = pd.metadataService.Remove(mountCandidate) } @@ -437,7 +437,7 @@ func (pd *pushDescriptor) Upload(ctx context.Context, progressOutput progress.Ou } } - logrus.Debugf("Pushing layer: %s", diffID) + log.G(ctx).Debugf("Pushing layer: %s", diffID) if layerUpload == nil { layerUpload, err = bs.Create(ctx) if err != nil { @@ -500,7 +500,7 @@ func (pd *pushDescriptor) uploadUsingSession( return distribution.Descriptor{}, retryOnError(err) } - logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + log.G(ctx).Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) progress.Update(progressOutput, pd.ID(), "Pushed") // Cache mapping from this layer's DiffID to the blobsum @@ -570,7 +570,7 @@ func (pd *pushDescriptor) layerAlreadyExists( attempts: for _, dgst := range layerDigests { meta := digestToMetadata[dgst] - 
logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + log.G(ctx).Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) pd.checkedDigests[meta.Digest] = struct{}{} switch err { @@ -593,7 +593,7 @@ attempts: pd.metadataService.Remove(*meta) } default: - logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + log.G(ctx).WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) } } @@ -724,10 +724,10 @@ func getPathComponents(path string) []string { func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { if layerUpload != nil { - logrus.Debugf("cancelling upload of blob %s", dgst) + log.G(ctx).Debugf("cancelling upload of blob %s", dgst) err := layerUpload.Cancel(ctx) if err != nil { - logrus.Warnf("failed to cancel upload: %v", err) + log.G(ctx).Warnf("failed to cancel upload: %v", err) } } } diff --git a/distribution/registry_unit_test.go b/distribution/registry_unit_test.go index 035f062740..f48418baf2 100644 --- a/distribution/registry_unit_test.go +++ b/distribution/registry_unit_test.go @@ -8,10 +8,10 @@ import ( "strings" "testing" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/registry" registrypkg "github.com/docker/docker/registry" - "github.com/sirupsen/logrus" ) const secretRegistryToken = "mysecrettoken" @@ -25,7 +25,7 @@ type tokenPassThruHandler struct { func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.reached = true if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { - logrus.Debug("Detected registry token in auth header") + log.G(context.TODO()).Debug("Detected registry token in auth header") h.gotToken = true } if h.shouldSend401 == nil 
|| h.shouldSend401(r.RequestURI) { @@ -74,7 +74,7 @@ func testTokenPassThru(t *testing.T, ts *httptest.Server) { t.Fatal(err) } - logrus.Debug("About to pull") + log.G(ctx).Debug("About to pull") // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above tag, _ := reference.WithTag(n, "tag_goes_here") _ = p.pullRepository(ctx, tag) diff --git a/distribution/utils/progress.go b/distribution/utils/progress.go index 73ee2be61e..43451bec68 100644 --- a/distribution/utils/progress.go +++ b/distribution/utils/progress.go @@ -1,14 +1,15 @@ package utils // import "github.com/docker/docker/distribution/utils" import ( + "context" "io" "net" "os" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" - "github.com/sirupsen/logrus" ) // WriteDistributionProgress is a helper for writing progress from chan to JSON @@ -21,9 +22,9 @@ func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressC if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { // don't log broken pipe errors as this is the normal case when a client aborts if isBrokenPipe(err) { - logrus.Info("Pull session cancelled") + log.G(context.TODO()).Info("Pull session cancelled") } else { - logrus.Errorf("error writing progress to client: %v", err) + log.G(context.TODO()).Errorf("error writing progress to client: %v", err) } cancelFunc() operationCancelled = true diff --git a/distribution/xfer/download.go b/distribution/xfer/download.go index ad283b724d..4568f1beb5 100644 --- a/distribution/xfer/download.go +++ b/distribution/xfer/download.go @@ -7,13 +7,13 @@ import ( "io" "time" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" - 
"github.com/sirupsen/logrus" ) const maxDownloadAttempts = 5 @@ -135,7 +135,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima l, err := ldm.layerStore.Get(getRootFS.ChainID()) if err == nil { // Layer already exists. - logrus.Debugf("Layer already exists: %s", descriptor.ID()) + log.G(ctx).Debugf("Layer already exists: %s", descriptor.ID()) progress.Update(progressOutput, descriptor.ID(), "Already exists") if topLayer != nil { layer.ReleaseAndLog(ldm.layerStore, topLayer) @@ -288,12 +288,12 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, } if _, isDNR := err.(DoNotRetry); isDNR || attempt >= ldm.maxDownloadAttempts { - logrus.Errorf("Download failed after %d attempts: %v", attempt, err) + log.G(context.TODO()).Errorf("Download failed after %d attempts: %v", attempt, err) d.err = err return } - logrus.Infof("Download failed, retrying (%d/%d): %v", attempt, ldm.maxDownloadAttempts, err) + log.G(context.TODO()).Infof("Download failed, retrying (%d/%d): %v", attempt, ldm.maxDownloadAttempts, err) delay := attempt * 5 ticker := time.NewTicker(ldm.waitDuration) attempt++ diff --git a/distribution/xfer/upload.go b/distribution/xfer/upload.go index 40705bad6c..247c6eb0de 100644 --- a/distribution/xfer/upload.go +++ b/distribution/xfer/upload.go @@ -5,10 +5,10 @@ import ( "errors" "time" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" - "github.com/sirupsen/logrus" ) const maxUploadAttempts = 5 @@ -141,12 +141,12 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) doFun retries++ if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { - logrus.Errorf("Upload failed: %v", err) + log.G(context.TODO()).Errorf("Upload failed: %v", err) u.err = err return } - logrus.Errorf("Upload failed, retrying: %v", err) + log.G(context.TODO()).Errorf("Upload failed, retrying: %v", 
err) delay := retries * 5 ticker := time.NewTicker(lum.waitDuration) diff --git a/image/fs.go b/image/fs.go index 57109efeee..803176475f 100644 --- a/image/fs.go +++ b/image/fs.go @@ -1,15 +1,16 @@ package image // import "github.com/docker/docker/image" import ( + "context" "fmt" "os" "path/filepath" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/ioutils" "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // DigestWalkFunc is function called by StoreBackend.Walk @@ -75,7 +76,7 @@ func (s *fs) Walk(f DigestWalkFunc) error { for _, v := range dir { dgst := digest.NewDigestFromEncoded(digest.Canonical, v.Name()) if err := dgst.Validate(); err != nil { - logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + log.G(context.TODO()).Debugf("skipping invalid digest %s: %s", dgst, err) continue } if err := f(dgst); err != nil { diff --git a/image/rootfs.go b/image/rootfs.go index f73a0660fa..f971261f45 100644 --- a/image/rootfs.go +++ b/image/rootfs.go @@ -1,10 +1,11 @@ package image // import "github.com/docker/docker/image" import ( + "context" "runtime" + "github.com/containerd/containerd/log" "github.com/docker/docker/layer" - "github.com/sirupsen/logrus" ) // TypeLayers is used for RootFS.Type for filesystems organized into layers. @@ -46,7 +47,7 @@ func (r *RootFS) Clone() *RootFS { // ChainID returns the ChainID for the top layer in RootFS. func (r *RootFS) ChainID() layer.ChainID { if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { - logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs) + log.G(context.TODO()).Warnf("Layer type is unsupported on this platform. 
DiffIDs: '%v'", r.DiffIDs) return "" } return layer.CreateChainID(r.DiffIDs) diff --git a/image/store.go b/image/store.go index c457bc3a82..55d43c354a 100644 --- a/image/store.go +++ b/image/store.go @@ -1,10 +1,12 @@ package image // import "github.com/docker/docker/image" import ( + "context" "fmt" "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/system" @@ -68,7 +70,7 @@ func NewImageStore(fs StoreBackend, lss LayerGetReleaser) (Store, error) { func (is *store) restore() error { // As the code below is run when restoring all images (which can be "many"), - // constructing the "logrus.WithFields" is deliberately not "DRY", as the + // constructing the "log.G(ctx).WithFields" is deliberately not "DRY", as the // logger is only used for error-cases, and we don't want to do allocations // if we don't need it. The "f" type alias is here is just for convenience, // and to make the code _slightly_ more DRY. 
See the discussion on GitHub; @@ -77,19 +79,19 @@ func (is *store) restore() error { err := is.fs.Walk(func(dgst digest.Digest) error { img, err := is.Get(ID(dgst)) if err != nil { - logrus.WithFields(f{"digest": dgst, "err": err}).Error("invalid image") + log.G(context.TODO()).WithFields(f{"digest": dgst, "err": err}).Error("invalid image") return nil } var l layer.Layer if chainID := img.RootFS.ChainID(); chainID != "" { if !system.IsOSSupported(img.OperatingSystem()) { - logrus.WithFields(f{"chainID": chainID, "os": img.OperatingSystem()}).Error("not restoring image with unsupported operating system") + log.G(context.TODO()).WithFields(f{"chainID": chainID, "os": img.OperatingSystem()}).Error("not restoring image with unsupported operating system") return nil } l, err = is.lss.Get(chainID) if err != nil { if errors.Is(err, layer.ErrLayerDoesNotExist) { - logrus.WithFields(f{"chainID": chainID, "os": img.OperatingSystem(), "err": err}).Error("not restoring image") + log.G(context.TODO()).WithFields(f{"chainID": chainID, "os": img.OperatingSystem(), "err": err}).Error("not restoring image") return nil } return err @@ -246,7 +248,7 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) { } if err := is.digestSet.Remove(id.Digest()); err != nil { - logrus.Errorf("error removing %s from digest set: %q", id, err) + log.G(context.TODO()).Errorf("error removing %s from digest set: %q", id, err) } delete(is.images, id) is.fs.Delete(id.Digest()) @@ -332,7 +334,7 @@ func (is *store) imagesMap(all bool) map[ID]*Image { } img, err := is.Get(id) if err != nil { - logrus.Errorf("invalid image access: %q, error: %q", id, err) + log.G(context.TODO()).Errorf("invalid image access: %q, error: %q", id, err) continue } images[id] = img diff --git a/image/tarexport/load.go b/image/tarexport/load.go index 4cc1c2263b..7f675d98e8 100644 --- a/image/tarexport/load.go +++ b/image/tarexport/load.go @@ -1,6 +1,7 @@ package tarexport // import "github.com/docker/docker/image/tarexport" 
import ( + "context" "encoding/json" "errors" "fmt" @@ -10,6 +11,7 @@ import ( "reflect" "runtime" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/distribution/reference" "github.com/docker/docker/image" @@ -24,7 +26,6 @@ import ( "github.com/moby/sys/sequential" "github.com/moby/sys/symlink" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { @@ -174,7 +175,7 @@ func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, // On Linux, this equates to a regular os.Open. rawTar, err := sequential.Open(filename) if err != nil { - logrus.Debugf("Error reading embedded tar: %v", err) + log.G(context.TODO()).Debugf("Error reading embedded tar: %v", err) return nil, err } defer rawTar.Close() @@ -183,7 +184,7 @@ func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, if progressOutput != nil { fileInfo, err := rawTar.Stat() if err != nil { - logrus.Debugf("Error statting file: %v", err) + log.G(context.TODO()).Debugf("Error statting file: %v", err) return nil, err } @@ -280,7 +281,7 @@ func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[str } imageJSON, err := os.ReadFile(configPath) if err != nil { - logrus.Debugf("Error reading json: %v", err) + log.G(context.TODO()).Debugf("Error reading json: %v", err) return err } diff --git a/image/v1/imagev1.go b/image/v1/imagev1.go index 650897c5fa..8837ae41dc 100644 --- a/image/v1/imagev1.go +++ b/image/v1/imagev1.go @@ -1,15 +1,16 @@ package v1 // import "github.com/docker/docker/image/v1" import ( + "context" "encoding/json" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) // 
noFallbackMinVersion is the minimum version for which v1compatibility @@ -58,7 +59,7 @@ func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest if err != nil { return "", err } - logrus.Debugf("CreateV1ID %s", configJSON) + log.G(context.TODO()).Debugf("CreateV1ID %s", configJSON) return digest.FromBytes(configJSON), nil } diff --git a/integration-cli/events_utils_test.go b/integration-cli/events_utils_test.go index 6a094ba160..dce4c216c5 100644 --- a/integration-cli/events_utils_test.go +++ b/integration-cli/events_utils_test.go @@ -3,6 +3,7 @@ package main import ( "bufio" "bytes" + "context" "io" "os/exec" "regexp" @@ -10,8 +11,8 @@ import ( "strings" "testing" + "github.com/containerd/containerd/log" eventstestutils "github.com/docker/docker/daemon/events/testutils" - "github.com/sirupsen/logrus" "gotest.tools/v3/assert" ) @@ -89,7 +90,7 @@ func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) { err = io.EOF } - logrus.Debugf("EventObserver scanner loop finished: %v", err) + log.G(context.TODO()).Debugf("EventObserver scanner loop finished: %v", err) e.disconnectionError = err } diff --git a/layer/filestore.go b/layer/filestore.go index f2465b3b04..4cf4e2fe6a 100644 --- a/layer/filestore.go +++ b/layer/filestore.go @@ -2,6 +2,7 @@ package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" + "context" "encoding/json" "io" "os" @@ -10,11 +11,11 @@ import ( "strconv" "strings" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/pkg/ioutils" "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var ( @@ -322,7 +323,7 @@ func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) { nameSplit := strings.Split(fi.Name(), "-") dgst := digest.NewDigestFromEncoded(algorithm, nameSplit[0]) if err := dgst.Validate(); err != nil { - logrus.WithError(err).WithField("digest", 
string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest") + log.G(context.TODO()).WithError(err).WithField("digest", string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest") continue } @@ -330,13 +331,13 @@ func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) { contentBytes, err := os.ReadFile(chainFile) if err != nil { if !os.IsNotExist(err) { - logrus.WithError(err).WithField("digest", dgst).Error("failed to read cache ID") + log.G(context.TODO()).WithError(err).WithField("digest", dgst).Error("failed to read cache ID") } continue } cacheID := strings.TrimSpace(string(contentBytes)) if cacheID == "" { - logrus.Error("invalid cache ID") + log.G(context.TODO()).Error("invalid cache ID") continue } @@ -366,7 +367,7 @@ func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { if fi.IsDir() && fi.Name() != "mounts" { dgst := digest.NewDigestFromEncoded(algorithm, fi.Name()) if err := dgst.Validate(); err != nil { - logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + log.G(context.TODO()).Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) } else { ids = append(ids, ChainID(dgst)) } @@ -410,17 +411,17 @@ func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error { chainFile := filepath.Join(dir, "cache-id") contentBytes, err := os.ReadFile(chainFile) if err != nil { - logrus.WithError(err).WithField("file", chainFile).Error("cannot get cache ID") + log.G(context.TODO()).WithError(err).WithField("file", chainFile).Error("cannot get cache ID") continue } cacheID := strings.TrimSpace(string(contentBytes)) if cacheID != cache { continue } - logrus.Debugf("Removing folder: %s", dir) + log.G(context.TODO()).Debugf("Removing folder: %s", dir) err = os.RemoveAll(dir) if err != nil && !os.IsNotExist(err) { - logrus.WithError(err).WithField("name", f.Name()).Error("cannot remove layer") + log.G(context.TODO()).WithError(err).WithField("name", f.Name()).Error("cannot remove layer") continue } } 
diff --git a/layer/layer.go b/layer/layer.go index 330f359632..e04ff6c49a 100644 --- a/layer/layer.go +++ b/layer/layer.go @@ -10,13 +10,14 @@ package layer // import "github.com/docker/docker/layer" import ( + "context" "errors" "io" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/pkg/archive" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) var ( @@ -212,7 +213,7 @@ func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { func ReleaseAndLog(ls Store, l Layer) { metadata, err := ls.Release(l) if err != nil { - logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + log.G(context.TODO()).Errorf("Error releasing layer %s: %v", l.ChainID(), err) } LogReleaseMetadata(metadata) } @@ -221,6 +222,6 @@ func ReleaseAndLog(ls Store, l Layer) { // ensure consistent logging for release metadata func LogReleaseMetadata(metadatas []Metadata) { for _, metadata := range metadatas { - logrus.Infof("Layer %s cleaned up", metadata.ChainID) + log.G(context.TODO()).Infof("Layer %s cleaned up", metadata.ChainID) } } diff --git a/layer/layer_store.go b/layer/layer_store.go index 221c1df7ad..396d528199 100644 --- a/layer/layer_store.go +++ b/layer/layer_store.go @@ -1,6 +1,7 @@ package layer // import "github.com/docker/docker/layer" import ( + "context" "errors" "fmt" "io" @@ -8,6 +9,7 @@ import ( "path/filepath" "sync" + "github.com/containerd/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" @@ -15,7 +17,6 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/moby/locker" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -67,7 +68,7 @@ func NewStoreFromOptions(options StoreOptions) (Store, error) { } return nil, fmt.Errorf("error initializing graphdriver: %v", err) } - 
logrus.Debugf("Initialized graph driver %s", driver) + log.G(context.TODO()).Debugf("Initialized graph driver %s", driver) root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) @@ -105,7 +106,7 @@ func newStoreFromGraphDriver(root string, driver graphdriver.Driver) (Store, err for _, id := range ids { l, err := ls.loadLayer(id) if err != nil { - logrus.Debugf("Failed to load layer %s: %s", id, err) + log.G(context.TODO()).Debugf("Failed to load layer %s: %s", id, err) continue } if l.parent != nil { @@ -115,7 +116,7 @@ func newStoreFromGraphDriver(root string, driver graphdriver.Driver) (Store, err for _, mount := range mounts { if err := ls.loadMount(mount); err != nil { - logrus.Debugf("Failed to load mount %s: %s", mount, err) + log.G(context.TODO()).Debugf("Failed to load mount %s: %s", mount, err) } } @@ -258,7 +259,7 @@ func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent layer.size = applySize layer.diffID = DiffID(digester.Digest()) - logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + log.G(context.TODO()).Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) return nil } @@ -318,12 +319,12 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descr defer func() { if cErr != nil { - logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, cErr) + log.G(context.TODO()).Debugf("Cleaning up layer %s: %v", layer.cacheID, cErr) if err := ls.driver.Remove(layer.cacheID); err != nil { - logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + log.G(context.TODO()).Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) } if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + log.G(context.TODO()).Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) } } }() @@ -576,7 +577,7 @@ func (ls *layerStore) GetMountID(id string) (string, 
error) { if mount == nil { return "", ErrMountDoesNotExist } - logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + log.G(context.TODO()).Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) return mount.mountID, nil } @@ -602,21 +603,21 @@ func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { } if err := ls.driver.Remove(m.mountID); err != nil { - logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + log.G(context.TODO()).Errorf("Error removing mounted layer %s: %s", m.name, err) m.retakeReference(l) return nil, err } if m.initID != "" { if err := ls.driver.Remove(m.initID); err != nil { - logrus.Errorf("Error removing init layer %s: %s", m.name, err) + log.G(context.TODO()).Errorf("Error removing init layer %s: %s", m.name, err) m.retakeReference(l) return nil, err } } if err := ls.store.RemoveMount(m.name); err != nil { - logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + log.G(context.TODO()).Errorf("Error removing mount metadata: %s: %s", m.name, err) m.retakeReference(l) return nil, err } @@ -735,28 +736,28 @@ func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size metaUnpacker := storage.NewJSONUnpacker(metadata) upackerCounter := &unpackSizeCounter{metaUnpacker, size} - logrus.Debugf("Assembling tar data for %s", graphID) + log.G(context.TODO()).Debugf("Assembling tar data for %s", graphID) return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) } func (ls *layerStore) Cleanup() error { orphanLayers, err := ls.store.getOrphan() if err != nil { - logrus.WithError(err).Error("cannot get orphan layers") + log.G(context.TODO()).WithError(err).Error("cannot get orphan layers") } if len(orphanLayers) > 0 { - logrus.Debugf("found %v orphan layers", len(orphanLayers)) + log.G(context.TODO()).Debugf("found %v orphan layers", len(orphanLayers)) } for _, orphan := range orphanLayers { - logrus.WithField("cache-id", orphan.cacheID).Debugf("removing orphan 
layer, chain ID: %v", orphan.chainID) + log.G(context.TODO()).WithField("cache-id", orphan.cacheID).Debugf("removing orphan layer, chain ID: %v", orphan.chainID) err = ls.driver.Remove(orphan.cacheID) if err != nil && !os.IsNotExist(err) { - logrus.WithError(err).WithField("cache-id", orphan.cacheID).Error("cannot remove orphan layer") + log.G(context.TODO()).WithError(err).WithField("cache-id", orphan.cacheID).Error("cannot remove orphan layer") continue } err = ls.store.Remove(orphan.chainID, orphan.cacheID) if err != nil { - logrus.WithError(err).WithField("chain-id", orphan.chainID).Error("cannot remove orphan layer metadata") + log.G(context.TODO()).WithError(err).WithField("chain-id", orphan.chainID).Error("cannot remove orphan layer metadata") } } return ls.driver.Cleanup() diff --git a/layer/migration.go b/layer/migration.go index 0d97c6eca2..b90c0b4c27 100644 --- a/layer/migration.go +++ b/layer/migration.go @@ -2,12 +2,13 @@ package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" + "context" "errors" "io" "os" + "github.com/containerd/containerd/log" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -132,9 +133,9 @@ func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID D defer func() { if err != nil { - logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + log.G(context.TODO()).Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + log.G(context.TODO()).Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) } } }() diff --git a/libcontainerd/local/local_windows.go b/libcontainerd/local/local_windows.go index e9c9f7f326..d1dba4d5af 100644 --- a/libcontainerd/local/local_windows.go +++ 
b/libcontainerd/local/local_windows.go @@ -15,10 +15,13 @@ import ( "syscall" "time" + "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim" "github.com/containerd/containerd" "github.com/containerd/containerd/cio" cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/queue" libcontainerdtypes "github.com/docker/docker/libcontainerd/types" @@ -26,7 +29,6 @@ import ( "github.com/docker/docker/pkg/system" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -88,7 +90,7 @@ func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, c := &client{ stateDir: stateDir, backend: b, - logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns), + logger: log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns), } return c, nil diff --git a/libcontainerd/remote/client.go b/libcontainerd/remote/client.go index 6ea98b0c9c..8a6c01d8fa 100644 --- a/libcontainerd/remote/client.go +++ b/libcontainerd/remote/client.go @@ -22,6 +22,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/typeurl/v2" "github.com/docker/docker/errdefs" @@ -75,7 +76,7 @@ func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, c := &client{ client: cli, stateDir: stateDir, - logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns), + logger: log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns), ns: ns, backend: b, } @@ -597,7 +598,7 @@ func (c *client) waitServe(ctx context.Context) bool { if errors.Is(err, context.DeadlineExceeded) || 
errors.Is(err, context.Canceled) { return false } - logrus.WithError(err).Warn("Error while testing if containerd API is ready") + log.G(ctx).WithError(err).Warn("Error while testing if containerd API is ready") } if serving { diff --git a/libcontainerd/remote/client_linux.go b/libcontainerd/remote/client_linux.go index 3b7ee1ab6e..8db8933376 100644 --- a/libcontainerd/remote/client_linux.go +++ b/libcontainerd/remote/client_linux.go @@ -10,6 +10,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/log" libcontainerdtypes "github.com/docker/docker/libcontainerd/types" "github.com/docker/docker/pkg/idtools" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -107,7 +108,7 @@ func newFIFOSet(bundleDir, processID string, withStdin, withTerminal bool) *cio. closer := func() error { for _, path := range paths { if err := os.RemoveAll(path); err != nil { - logrus.Warnf("libcontainerd: failed to remove fifo %v: %v", path, err) + log.G(context.TODO()).Warnf("libcontainerd: failed to remove fifo %v: %v", path, err) } } return nil diff --git a/libcontainerd/replace.go b/libcontainerd/replace.go index 6ef6141e98..fd4f25d52b 100644 --- a/libcontainerd/replace.go +++ b/libcontainerd/replace.go @@ -4,9 +4,9 @@ import ( "context" "github.com/containerd/containerd" + "github.com/containerd/containerd/log" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd/types" @@ -23,7 +23,7 @@ func ReplaceContainer(ctx context.Context, client types.Client, id string, spec return ctr, err } - log := logrus.WithContext(ctx).WithField("container", id) + log := log.G(ctx).WithContext(ctx).WithField("container", id) log.Debug("A container already exists with the same ID. 
Attempting to clean up the old container.") ctr, err = client.LoadContainer(ctx, id) if err != nil { diff --git a/libcontainerd/supervisor/remote_daemon.go b/libcontainerd/supervisor/remote_daemon.go index b0eb6cceae..761e66885c 100644 --- a/libcontainerd/supervisor/remote_daemon.go +++ b/libcontainerd/supervisor/remote_daemon.go @@ -10,6 +10,7 @@ import ( "time" "github.com/containerd/containerd" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/services/server/config" "github.com/containerd/containerd/sys" "github.com/docker/docker/pkg/pidfile" @@ -76,7 +77,7 @@ func Start(ctx context.Context, rootDir, stateDir string, opts ...DaemonOpt) (Da configFile: filepath.Join(stateDir, configFile), daemonPid: -1, pidFile: filepath.Join(stateDir, pidFile), - logger: logrus.WithField("module", "libcontainerd"), + logger: log.G(ctx).WithField("module", "libcontainerd"), daemonStartCh: make(chan error, 1), daemonStopCh: make(chan struct{}), } diff --git a/libnetwork/agent.go b/libnetwork/agent.go index e655297339..518e6bec84 100644 --- a/libnetwork/agent.go +++ b/libnetwork/agent.go @@ -3,12 +3,14 @@ package libnetwork //go:generate protoc -I=. -I=../vendor/ --gogofaster_out=import_path=github.com/docker/docker/libnetwork:. 
agent.proto import ( + "context" "encoding/json" "fmt" "net" "sort" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/cluster" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" @@ -17,7 +19,6 @@ import ( "github.com/docker/docker/libnetwork/types" "github.com/docker/go-events" "github.com/gogo/protobuf/proto" - "github.com/sirupsen/logrus" ) const ( @@ -102,7 +103,7 @@ func (c *Controller) handleKeyChange(keys []*types.EncryptionKey) error { a := c.getAgent() if a == nil { - logrus.Debug("Skipping key change as agent is nil") + log.G(context.TODO()).Debug("Skipping key change as agent is nil") return nil } @@ -183,16 +184,16 @@ func (c *Controller) handleKeyChange(keys []*types.EncryptionKey) error { c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { err := driver.DiscoverNew(discoverapi.EncryptionKeysUpdate, drvEnc) if err != nil { - logrus.Warnf("Failed to update datapath keys in driver %s: %v", name, err) + log.G(context.TODO()).Warnf("Failed to update datapath keys in driver %s: %v", name, err) // Attempt to reconfigure keys in case of a update failure // which can arise due to a mismatch of keys // if worker nodes get temporarily disconnected - logrus.Warnf("Reconfiguring datapath keys for %s", name) + log.G(context.TODO()).Warnf("Reconfiguring datapath keys for %s", name) drvCfgEnc := discoverapi.DriverEncryptionConfig{} drvCfgEnc.Keys, drvCfgEnc.Tags = c.getKeys(subsysIPSec) err = driver.DiscoverNew(discoverapi.EncryptionKeysConfig, drvCfgEnc) if err != nil { - logrus.Warnf("Failed to reset datapath keys in driver %s: %v", name, err) + log.G(context.TODO()).Warnf("Failed to reset datapath keys in driver %s: %v", name, err) } } return false @@ -222,11 +223,11 @@ func (c *Controller) agentSetup(clusterProvider cluster.Provider) error { listen := clusterProvider.GetListenAddress() listenAddr, _, _ := 
net.SplitHostPort(listen) - logrus.Infof("Initializing Libnetwork Agent Listen-Addr=%s Local-addr=%s Adv-addr=%s Data-addr=%s Remote-addr-list=%v MTU=%d", + log.G(context.TODO()).Infof("Initializing Libnetwork Agent Listen-Addr=%s Local-addr=%s Adv-addr=%s Data-addr=%s Remote-addr-list=%v MTU=%d", listenAddr, bindAddr, advAddr, dataAddr, remoteAddrList, c.Config().NetworkControlPlaneMTU) if advAddr != "" && agent == nil { if err := c.agentInit(listenAddr, bindAddr, advAddr, dataAddr); err != nil { - logrus.Errorf("error in agentInit: %v", err) + log.G(context.TODO()).Errorf("error in agentInit: %v", err) return err } c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { @@ -239,7 +240,7 @@ func (c *Controller) agentSetup(clusterProvider cluster.Provider) error { if len(remoteAddrList) > 0 { if err := c.agentJoin(remoteAddrList); err != nil { - logrus.Errorf("Error in joining gossip cluster : %v(join will be retried in background)", err) + log.G(context.TODO()).Errorf("Error in joining gossip cluster : %v(join will be retried in background)", err) } } @@ -299,7 +300,7 @@ func (c *Controller) agentInit(listenAddr, bindAddrOrInterface, advertiseAddr, d // Consider the MTU remove the IP hdr (IPv4 or IPv6) and the TCP/UDP hdr. 
// To be on the safe side let's cut 100 bytes netDBConf.PacketBufferSize = (c.Config().NetworkControlPlaneMTU - 100) - logrus.Debugf("Control plane MTU: %d will initialize NetworkDB with: %d", + log.G(context.TODO()).Debugf("Control plane MTU: %d will initialize NetworkDB with: %d", c.Config().NetworkControlPlaneMTU, netDBConf.PacketBufferSize) } nDB, err := networkdb.New(netDBConf) @@ -338,7 +339,7 @@ func (c *Controller) agentInit(listenAddr, bindAddrOrInterface, advertiseAddr, d c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { err := driver.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc) if err != nil { - logrus.Warnf("Failed to set datapath keys in driver %s: %v", name, err) + log.G(context.TODO()).Warnf("Failed to set datapath keys in driver %s: %v", name, err) } return false }) @@ -367,7 +368,7 @@ func (c *Controller) agentDriverNotify(d driverapi.Driver) { BindAddress: agent.bindAddr, Self: true, }); err != nil { - logrus.Warnf("Failed the node discovery in driver: %v", err) + log.G(context.TODO()).Warnf("Failed the node discovery in driver: %v", err) } drvEnc := discoverapi.DriverEncryptionConfig{} @@ -376,7 +377,7 @@ func (c *Controller) agentDriverNotify(d driverapi.Driver) { drvEnc.Tags = tags if err := d.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc); err != nil { - logrus.Warnf("Failed to set datapath keys in driver: %v", err) + log.G(context.TODO()).Warnf("Failed to set datapath keys in driver: %v", err) } } @@ -452,7 +453,7 @@ func (n *network) Services() map[string]ServiceInfo { var epRec EndpointRecord nid := n.ID() if err := proto.Unmarshal(value.Value, &epRec); err != nil { - logrus.Errorf("Unmarshal of libnetworkEPTable failed for endpoint %s in network %s, %v", eid, nid, err) + log.G(context.TODO()).Errorf("Unmarshal of libnetworkEPTable failed for endpoint %s in network %s, %v", eid, nid, err) continue } i := n.getController().getLBIndex(epRec.ServiceID, nid, 
epRec.IngressPorts) @@ -467,7 +468,7 @@ func (n *network) Services() map[string]ServiceInfo { // relevant info about the endpoint. d, err := n.driver(true) if err != nil { - logrus.Errorf("Could not resolve driver for network %s/%s while fetching services: %v", n.networkType, n.ID(), err) + log.G(context.TODO()).Errorf("Could not resolve driver for network %s/%s while fetching services: %v", n.networkType, n.ID(), err) return nil } for _, table := range n.driverTables { @@ -478,7 +479,7 @@ func (n *network) Services() map[string]ServiceInfo { for key, value := range entries { epID, info := d.DecodeTableEntry(table.name, key, value.Value) if ep, ok := eps[epID]; !ok { - logrus.Errorf("Inconsistent driver and libnetwork state for endpoint %s", epID) + log.G(context.TODO()).Errorf("Inconsistent driver and libnetwork state for endpoint %s", epID) } else { ep.info = info eps[epID] = ep @@ -607,7 +608,7 @@ func (ep *Endpoint) addServiceInfoToCluster(sb *Sandbox) error { sb.service.Lock() defer sb.service.Unlock() - logrus.Debugf("addServiceInfoToCluster START for %s %s", ep.svcName, ep.ID()) + log.G(context.TODO()).Debugf("addServiceInfoToCluster START for %s %s", ep.svcName, ep.ID()) // Check that the endpoint is still present on the sandbox before adding it to the service discovery. // This is to handle a race between the EnableService and the sbLeave @@ -621,7 +622,7 @@ func (ep *Endpoint) addServiceInfoToCluster(sb *Sandbox) error { // removed from the list, in this situation the delete will bail out not finding any data to cleanup // and the add will bail out not finding the endpoint on the sandbox. 
if e := sb.getEndpoint(ep.ID()); e == nil { - logrus.Warnf("addServiceInfoToCluster suppressing service resolution ep is not anymore in the sandbox %s", ep.ID()) + log.G(context.TODO()).Warnf("addServiceInfoToCluster suppressing service resolution ep is not anymore in the sandbox %s", ep.ID()) return nil } @@ -667,12 +668,12 @@ func (ep *Endpoint) addServiceInfoToCluster(sb *Sandbox) error { if agent != nil { if err := agent.networkDB.CreateEntry(libnetworkEPTable, n.ID(), ep.ID(), buf); err != nil { - logrus.Warnf("addServiceInfoToCluster NetworkDB CreateEntry failed for %s %s err:%s", ep.id, n.id, err) + log.G(context.TODO()).Warnf("addServiceInfoToCluster NetworkDB CreateEntry failed for %s %s err:%s", ep.id, n.id, err) return err } } - logrus.Debugf("addServiceInfoToCluster END for %s %s", ep.svcName, ep.ID()) + log.G(context.TODO()).Debugf("addServiceInfoToCluster END for %s %s", ep.svcName, ep.ID()) return nil } @@ -689,14 +690,14 @@ func (ep *Endpoint) deleteServiceInfoFromCluster(sb *Sandbox, fullRemove bool, m sb.service.Lock() defer sb.service.Unlock() - logrus.Debugf("deleteServiceInfoFromCluster from %s START for %s %s", method, ep.svcName, ep.ID()) + log.G(context.TODO()).Debugf("deleteServiceInfoFromCluster from %s START for %s %s", method, ep.svcName, ep.ID()) // Avoid a race w/ with a container that aborts preemptively. This would // get caught in disableServceInNetworkDB, but we check here to make the // nature of the condition more clear. 
// See comment in addServiceInfoToCluster() if e := sb.getEndpoint(ep.ID()); e == nil { - logrus.Warnf("deleteServiceInfoFromCluster suppressing service resolution ep is not anymore in the sandbox %s", ep.ID()) + log.G(context.TODO()).Warnf("deleteServiceInfoFromCluster suppressing service resolution ep is not anymore in the sandbox %s", ep.ID()) return nil } @@ -712,7 +713,7 @@ func (ep *Endpoint) deleteServiceInfoFromCluster(sb *Sandbox, fullRemove bool, m // First update the networkDB then locally if fullRemove { if err := agent.networkDB.DeleteEntry(libnetworkEPTable, n.ID(), ep.ID()); err != nil { - logrus.Warnf("deleteServiceInfoFromCluster NetworkDB DeleteEntry failed for %s %s err:%s", ep.id, n.id, err) + log.G(context.TODO()).Warnf("deleteServiceInfoFromCluster NetworkDB DeleteEntry failed for %s %s err:%s", ep.id, n.id, err) } } else { disableServiceInNetworkDB(agent, n, ep) @@ -737,7 +738,7 @@ func (ep *Endpoint) deleteServiceInfoFromCluster(sb *Sandbox, fullRemove bool, m } } - logrus.Debugf("deleteServiceInfoFromCluster from %s END for %s %s", method, ep.svcName, ep.ID()) + log.G(context.TODO()).Debugf("deleteServiceInfoFromCluster from %s END for %s %s", method, ep.svcName, ep.ID()) return nil } @@ -745,29 +746,29 @@ func (ep *Endpoint) deleteServiceInfoFromCluster(sb *Sandbox, fullRemove bool, m func disableServiceInNetworkDB(a *agent, n *network, ep *Endpoint) { var epRec EndpointRecord - logrus.Debugf("disableServiceInNetworkDB for %s %s", ep.svcName, ep.ID()) + log.G(context.TODO()).Debugf("disableServiceInNetworkDB for %s %s", ep.svcName, ep.ID()) // Update existing record to indicate that the service is disabled inBuf, err := a.networkDB.GetEntry(libnetworkEPTable, n.ID(), ep.ID()) if err != nil { - logrus.Warnf("disableServiceInNetworkDB GetEntry failed for %s %s err:%s", ep.id, n.id, err) + log.G(context.TODO()).Warnf("disableServiceInNetworkDB GetEntry failed for %s %s err:%s", ep.id, n.id, err) return } // Should never fail if err := 
proto.Unmarshal(inBuf, &epRec); err != nil { - logrus.Errorf("disableServiceInNetworkDB unmarshal failed for %s %s err:%s", ep.id, n.id, err) + log.G(context.TODO()).Errorf("disableServiceInNetworkDB unmarshal failed for %s %s err:%s", ep.id, n.id, err) return } epRec.ServiceDisabled = true // Should never fail outBuf, err := proto.Marshal(&epRec) if err != nil { - logrus.Errorf("disableServiceInNetworkDB marshalling failed for %s %s err:%s", ep.id, n.id, err) + log.G(context.TODO()).Errorf("disableServiceInNetworkDB marshalling failed for %s %s err:%s", ep.id, n.id, err) return } // Send update to the whole cluster if err := a.networkDB.UpdateEntry(libnetworkEPTable, n.ID(), ep.ID(), outBuf); err != nil { - logrus.Warnf("disableServiceInNetworkDB UpdateEntry failed for %s %s err:%s", ep.id, n.id, err) + log.G(context.TODO()).Warnf("disableServiceInNetworkDB UpdateEntry failed for %s %s err:%s", ep.id, n.id, err) } } @@ -789,7 +790,7 @@ func (n *network) addDriverWatches() { go c.handleTableEvents(ch, n.handleDriverTableEvent) d, err := n.driver(false) if err != nil { - logrus.Errorf("Could not resolve driver %s while walking driver tabl: %v", n.networkType, err) + log.G(context.TODO()).Errorf("Could not resolve driver %s while walking driver tabl: %v", n.networkType, err) return } @@ -802,7 +803,7 @@ func (n *network) addDriverWatches() { return false }) if err != nil { - logrus.WithError(err).Warn("Error while walking networkdb") + log.G(context.TODO()).WithError(err).Warn("Error while walking networkdb") } } } @@ -841,7 +842,7 @@ func (c *Controller) handleTableEvents(ch *events.Channel, fn func(events.Event) func (n *network) handleDriverTableEvent(ev events.Event) { d, err := n.driver(false) if err != nil { - logrus.Errorf("Could not resolve driver %s while handling driver table event: %v", n.networkType, err) + log.G(context.TODO()).Errorf("Could not resolve driver %s while handling driver table event: %v", n.networkType, err) return } @@ -886,12 +887,12 @@ 
func (c *Controller) handleNodeTableEvent(ev events.Event) { case networkdb.DeleteEvent: value = event.Value case networkdb.UpdateEvent: - logrus.Errorf("Unexpected update node table event = %#v", event) + log.G(context.TODO()).Errorf("Unexpected update node table event = %#v", event) } err := json.Unmarshal(value, &nodeAddr) if err != nil { - logrus.Errorf("Error unmarshalling node table event %v", err) + log.G(context.TODO()).Errorf("Error unmarshalling node table event %v", err) return } c.processNodeDiscovery([]net.IP{nodeAddr.Addr}, isAdd) @@ -919,13 +920,13 @@ func (c *Controller) handleEpTableEvent(ev events.Event) { eid = event.Key value = event.Value default: - logrus.Errorf("Unexpected update service table event = %#v", event) + log.G(context.TODO()).Errorf("Unexpected update service table event = %#v", event) return } err := proto.Unmarshal(value, &epRec) if err != nil { - logrus.Errorf("Failed to unmarshal service table value: %v", err) + log.G(context.TODO()).Errorf("Failed to unmarshal service table value: %v", err) return } @@ -939,51 +940,51 @@ func (c *Controller) handleEpTableEvent(ev events.Event) { taskAliases := epRec.TaskAliases if containerName == "" || ip == nil { - logrus.Errorf("Invalid endpoint name/ip received while handling service table event %s", value) + log.G(context.TODO()).Errorf("Invalid endpoint name/ip received while handling service table event %s", value) return } switch ev.(type) { case networkdb.CreateEvent: - logrus.Debugf("handleEpTableEvent ADD %s R:%v", eid, epRec) + log.G(context.TODO()).Debugf("handleEpTableEvent ADD %s R:%v", eid, epRec) if svcID != "" { // This is a remote task part of a service if err := c.addServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent"); err != nil { - logrus.Errorf("failed adding service binding for %s epRec:%v err:%v", eid, epRec, err) + log.G(context.TODO()).Errorf("failed adding service binding for %s epRec:%v 
err:%v", eid, epRec, err) return } } else { // This is a remote container simply attached to an attachable network if err := c.addContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil { - logrus.Errorf("failed adding container name resolution for %s epRec:%v err:%v", eid, epRec, err) + log.G(context.TODO()).Errorf("failed adding container name resolution for %s epRec:%v err:%v", eid, epRec, err) } } case networkdb.DeleteEvent: - logrus.Debugf("handleEpTableEvent DEL %s R:%v", eid, epRec) + log.G(context.TODO()).Debugf("handleEpTableEvent DEL %s R:%v", eid, epRec) if svcID != "" { // This is a remote task part of a service if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true, true); err != nil { - logrus.Errorf("failed removing service binding for %s epRec:%v err:%v", eid, epRec, err) + log.G(context.TODO()).Errorf("failed removing service binding for %s epRec:%v err:%v", eid, epRec, err) return } } else { // This is a remote container simply attached to an attachable network if err := c.delContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil { - logrus.Errorf("failed removing container name resolution for %s epRec:%v err:%v", eid, epRec, err) + log.G(context.TODO()).Errorf("failed removing container name resolution for %s epRec:%v err:%v", eid, epRec, err) } } case networkdb.UpdateEvent: - logrus.Debugf("handleEpTableEvent UPD %s R:%v", eid, epRec) + log.G(context.TODO()).Debugf("handleEpTableEvent UPD %s R:%v", eid, epRec) // We currently should only get these to inform us that an endpoint // is disabled. Report if otherwise. 
if svcID == "" || !epRec.ServiceDisabled { - logrus.Errorf("Unexpected update table event for %s epRec:%v", eid, epRec) + log.G(context.TODO()).Errorf("Unexpected update table event for %s epRec:%v", eid, epRec) return } // This is a remote task that is part of a service that is now disabled if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true, false); err != nil { - logrus.Errorf("failed disabling service binding for %s epRec:%v err:%v", eid, epRec, err) + log.G(context.TODO()).Errorf("failed disabling service binding for %s epRec:%v err:%v", eid, epRec, err) return } } diff --git a/libnetwork/cmd/diagnostic/main.go b/libnetwork/cmd/diagnostic/main.go index 906f1a1698..6ad668fefd 100644 --- a/libnetwork/cmd/diagnostic/main.go +++ b/libnetwork/cmd/diagnostic/main.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "context" "encoding/base64" "encoding/json" "flag" @@ -11,10 +12,12 @@ import ( "os" "strings" + "github.com/sirupsen/logrus" + + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork" "github.com/docker/docker/libnetwork/diagnostic" "github.com/docker/docker/libnetwork/drivers/overlay" - "github.com/sirupsen/logrus" ) const ( @@ -30,10 +33,10 @@ const ( func httpIsOk(body io.ReadCloser) { b, err := io.ReadAll(body) if err != nil { - logrus.Fatalf("Failed the body parse %s", err) + log.G(context.TODO()).Fatalf("Failed the body parse %s", err) } if !strings.Contains(string(b), "OK") { - logrus.Fatalf("Server not ready %s", b) + log.G(context.TODO()).Fatalf("Server not ready %s", b) } body.Close() } @@ -54,14 +57,14 @@ func main() { } if _, ok := os.LookupEnv("DIND_CLIENT"); !ok && *joinPtr { - logrus.Fatal("you are not using the client in docker in docker mode, the use of the -a flag can be disruptive, " + + log.G(context.TODO()).Fatal("you are not using the client in docker in docker mode, the use of the -a flag can be disruptive, " + 
"please remove it (doc:https://github.com/docker/docker/libnetwork/blob/master/cmd/diagnostic/README.md)") } - logrus.Infof("Connecting to %s:%d checking ready", *ipPtr, *portPtr) + log.G(context.TODO()).Infof("Connecting to %s:%d checking ready", *ipPtr, *portPtr) resp, err := http.Get(fmt.Sprintf(readyPath, *ipPtr, *portPtr)) if err != nil { - logrus.WithError(err).Fatalf("The connection failed") + log.G(context.TODO()).WithError(err).Fatalf("The connection failed") } httpIsOk(resp.Body) @@ -70,10 +73,10 @@ func main() { var joinedNetwork bool if *networkPtr != "" { if *joinPtr { - logrus.Infof("Joining the network:%q", *networkPtr) + log.G(context.TODO()).Infof("Joining the network:%q", *networkPtr) resp, err = http.Get(fmt.Sprintf(joinNetwork, *ipPtr, *portPtr, *networkPtr)) if err != nil { - logrus.WithError(err).Fatalf("Failed joining the network") + log.G(context.TODO()).WithError(err).Fatalf("Failed joining the network") } httpIsOk(resp.Body) joinedNetwork = true @@ -81,7 +84,7 @@ func main() { networkPeers = fetchNodePeers(*ipPtr, *portPtr, *networkPtr) if len(networkPeers) == 0 { - logrus.Warnf("There is no peer on network %q, check the network ID, and verify that is the non truncated version", *networkPtr) + log.G(context.TODO()).Warnf("There is no peer on network %q, check the network ID, and verify that is the non truncated version", *networkPtr) } } @@ -93,10 +96,10 @@ func main() { } if joinedNetwork { - logrus.Infof("Leaving the network:%q", *networkPtr) + log.G(context.TODO()).Infof("Leaving the network:%q", *networkPtr) resp, err = http.Get(fmt.Sprintf(leaveNetwork, *ipPtr, *portPtr, *networkPtr)) if err != nil { - logrus.WithError(err).Fatalf("Failed leaving the network") + log.G(context.TODO()).WithError(err).Fatalf("Failed leaving the network") } httpIsOk(resp.Body) } @@ -104,9 +107,9 @@ func main() { func fetchNodePeers(ip string, port int, network string) map[string]string { if network == "" { - logrus.Infof("Fetch cluster peers") + 
log.G(context.TODO()).Infof("Fetch cluster peers") } else { - logrus.Infof("Fetch peers network:%q", network) + log.G(context.TODO()).Infof("Fetch peers network:%q", network) } var path string @@ -118,77 +121,77 @@ func fetchNodePeers(ip string, port int, network string) map[string]string { resp, err := http.Get(path) //nolint:gosec // G107: Potential HTTP request made with variable url if err != nil { - logrus.WithError(err).Fatalf("Failed fetching path") + log.G(context.TODO()).WithError(err).Fatalf("Failed fetching path") } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - logrus.WithError(err).Fatalf("Failed the body parse") + log.G(context.TODO()).WithError(err).Fatalf("Failed the body parse") } output := diagnostic.HTTPResult{Details: &diagnostic.TablePeersResult{}} err = json.Unmarshal(body, &output) if err != nil { - logrus.WithError(err).Fatalf("Failed the json unmarshalling") + log.G(context.TODO()).WithError(err).Fatalf("Failed the json unmarshalling") } - logrus.Debugf("Parsing JSON response") + log.G(context.TODO()).Debugf("Parsing JSON response") result := make(map[string]string, output.Details.(*diagnostic.TablePeersResult).Length) for _, v := range output.Details.(*diagnostic.TablePeersResult).Elements { - logrus.Debugf("name:%s ip:%s", v.Name, v.IP) + log.G(context.TODO()).Debugf("name:%s ip:%s", v.Name, v.IP) result[v.Name] = v.IP } return result } func fetchTable(ip string, port int, network, tableName string, clusterPeers, networkPeers map[string]string, remediate bool) { - logrus.Infof("Fetch %s table and check owners", tableName) + log.G(context.TODO()).Infof("Fetch %s table and check owners", tableName) resp, err := http.Get(fmt.Sprintf(dumpTable, ip, port, network, tableName)) if err != nil { - logrus.WithError(err).Fatalf("Failed fetching endpoint table") + log.G(context.TODO()).WithError(err).Fatalf("Failed fetching endpoint table") } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - 
logrus.WithError(err).Fatalf("Failed the body parse") + log.G(context.TODO()).WithError(err).Fatalf("Failed the body parse") } output := diagnostic.HTTPResult{Details: &diagnostic.TableEndpointsResult{}} err = json.Unmarshal(body, &output) if err != nil { - logrus.WithError(err).Fatalf("Failed the json unmarshalling") + log.G(context.TODO()).WithError(err).Fatalf("Failed the json unmarshalling") } - logrus.Debug("Parsing data structures") + log.G(context.TODO()).Debug("Parsing data structures") var orphanKeys []string for _, v := range output.Details.(*diagnostic.TableEndpointsResult).Elements { decoded, err := base64.StdEncoding.DecodeString(v.Value) if err != nil { - logrus.WithError(err).Errorf("Failed decoding entry") + log.G(context.TODO()).WithError(err).Errorf("Failed decoding entry") continue } switch tableName { case "endpoint_table": var elem libnetwork.EndpointRecord elem.Unmarshal(decoded) - logrus.Debugf("key:%s value:%+v owner:%s", v.Key, elem, v.Owner) + log.G(context.TODO()).Debugf("key:%s value:%+v owner:%s", v.Key, elem, v.Owner) case "overlay_peer_table": var elem overlay.PeerRecord elem.Unmarshal(decoded) - logrus.Debugf("key:%s value:%+v owner:%s", v.Key, elem, v.Owner) + log.G(context.TODO()).Debugf("key:%s value:%+v owner:%s", v.Key, elem, v.Owner) } if _, ok := networkPeers[v.Owner]; !ok { - logrus.Warnf("The element with key:%s does not belong to any node on this network", v.Key) + log.G(context.TODO()).Warnf("The element with key:%s does not belong to any node on this network", v.Key) orphanKeys = append(orphanKeys, v.Key) } if _, ok := clusterPeers[v.Owner]; !ok { - logrus.Warnf("The element with key:%s does not belong to any node on this cluster", v.Key) + log.G(context.TODO()).Warnf("The element with key:%s does not belong to any node on this cluster", v.Key) } } if len(orphanKeys) > 0 && remediate { - logrus.Warnf("The following keys:%v results as orphan, do you want to proceed with the deletion (this operation is irreversible)? 
[Yes/No]", orphanKeys) + log.G(context.TODO()).Warnf("The following keys:%v results as orphan, do you want to proceed with the deletion (this operation is irreversible)? [Yes/No]", orphanKeys) reader := bufio.NewReader(os.Stdin) text, _ := reader.ReadString('\n') text = strings.ReplaceAll(text, "\n", "") @@ -196,13 +199,13 @@ func fetchTable(ip string, port int, network, tableName string, clusterPeers, ne for _, k := range orphanKeys { resp, err := http.Get(fmt.Sprintf(deleteEntry, ip, port, network, tableName, k)) if err != nil { - logrus.WithError(err).Errorf("Failed deleting entry k:%s", k) + log.G(context.TODO()).WithError(err).Errorf("Failed deleting entry k:%s", k) break } resp.Body.Close() } } else { - logrus.Infof("Deletion skipped") + log.G(context.TODO()).Infof("Deletion skipped") } } } diff --git a/libnetwork/cmd/networkdb-test/dbclient/ndbClient.go b/libnetwork/cmd/networkdb-test/dbclient/ndbClient.go index 56f9dfea2c..8bfbdf767d 100644 --- a/libnetwork/cmd/networkdb-test/dbclient/ndbClient.go +++ b/libnetwork/cmd/networkdb-test/dbclient/ndbClient.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "log" "net" "net/http" "os" @@ -13,7 +12,7 @@ import ( "strings" "time" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) var servicePort string @@ -28,14 +27,14 @@ type resultTuple struct { func httpGetFatalError(ip, port, path string) { body, err := httpGet(ip, port, path) if err != nil || !strings.Contains(string(body), "OK") { - log.Fatalf("[%s] error %s %s", path, err, body) + log.G(context.TODO()).Fatalf("[%s] error %s %s", path, err, body) } } func httpGet(ip, port, path string) ([]byte, error) { resp, err := http.Get("http://" + ip + ":" + port + path) if err != nil { - logrus.Errorf("httpGet error:%s", err) + log.G(context.TODO()).Errorf("httpGet error:%s", err) return nil, err } defer resp.Body.Close() @@ -81,7 +80,7 @@ func clusterPeersNumber(ip, port string, doneCh chan resultTuple) { body, err := httpGet(ip, port, 
"/clusterpeers") if err != nil { - logrus.Errorf("clusterPeers %s there was an error: %s", ip, err) + log.G(context.TODO()).Errorf("clusterPeers %s there was an error: %s", ip, err) doneCh <- resultTuple{id: ip, result: -1} return } @@ -95,7 +94,7 @@ func networkPeersNumber(ip, port, networkName string, doneCh chan resultTuple) { body, err := httpGet(ip, port, "/networkpeers?nid="+networkName) if err != nil { - logrus.Errorf("networkPeersNumber %s there was an error: %s", ip, err) + log.G(context.TODO()).Errorf("networkPeersNumber %s there was an error: %s", ip, err) doneCh <- resultTuple{id: ip, result: -1} return } @@ -109,7 +108,7 @@ func dbTableEntriesNumber(ip, port, networkName, tableName string, doneCh chan r body, err := httpGet(ip, port, "/gettable?nid="+networkName+"&tname="+tableName) if err != nil { - logrus.Errorf("tableEntriesNumber %s there was an error: %s", ip, err) + log.G(context.TODO()).Errorf("tableEntriesNumber %s there was an error: %s", ip, err) doneCh <- resultTuple{id: ip, result: -1} return } @@ -122,7 +121,7 @@ func dbQueueLength(ip, port, networkName string, doneCh chan resultTuple) { body, err := httpGet(ip, port, "/networkstats?nid="+networkName) if err != nil { - logrus.Errorf("queueLength %s there was an error: %s", ip, err) + log.G(context.TODO()).Errorf("queueLength %s there was an error: %s", ip, err) doneCh <- resultTuple{id: ip, result: -1} return } @@ -142,7 +141,7 @@ func clientTableEntriesNumber(ip, port, networkName, tableName string, doneCh ch body, err := httpGet(ip, port, "/watchedtableentries?nid="+networkName+"&tname="+tableName) if err != nil { - logrus.Errorf("clientTableEntriesNumber %s there was an error: %s", ip, err) + log.G(context.TODO()).Errorf("clientTableEntriesNumber %s there was an error: %s", ip, err) doneCh <- resultTuple{id: ip, result: -1} return } @@ -253,12 +252,12 @@ func checkTable(ctx context.Context, ips []string, port, networkName, tableName // Validate test success, if the time is set means 
that all the tables are empty if successTime != 0 { opTime = time.Duration(successTime-startTime) / time.Millisecond - logrus.Infof("Check table passed, the cluster converged in %d msec", opTime) + log.G(ctx).Infof("Check table passed, the cluster converged in %d msec", opTime) return } - log.Fatal("Test failed, there is still entries in the tables of the nodes") + log.G(ctx).Fatal("Test failed, there is still entries in the tables of the nodes") default: - logrus.Infof("Checking table %s expected %d", tableName, expectedEntries) + log.G(ctx).Infof("Checking table %s expected %d", tableName, expectedEntries) doneCh := make(chan resultTuple, len(ips)) for _, ip := range ips { go fn(ip, servicePort, networkName, tableName, doneCh) @@ -267,7 +266,7 @@ func checkTable(ctx context.Context, ips []string, port, networkName, tableName nodesWithCorrectEntriesNum := 0 for i := len(ips); i > 0; i-- { tableEntries := <-doneCh - logrus.Infof("Node %s has %d entries", tableEntries.id, tableEntries.result) + log.G(ctx).Infof("Node %s has %d entries", tableEntries.id, tableEntries.result) if tableEntries.result == expectedEntries { nodesWithCorrectEntriesNum++ } @@ -276,7 +275,7 @@ func checkTable(ctx context.Context, ips []string, port, networkName, tableName if nodesWithCorrectEntriesNum == len(ips) { if successTime == 0 { successTime = time.Now().UnixNano() - logrus.Infof("Success after %d msec", time.Duration(successTime-startTime)/time.Millisecond) + log.G(ctx).Infof("Success after %d msec", time.Duration(successTime-startTime)/time.Millisecond) } } else { successTime = 0 @@ -290,18 +289,18 @@ func waitWriters(parallelWriters int, mustWrite bool, doneCh chan resultTuple) m var totalKeys int resultTable := make(map[string]int) for i := 0; i < parallelWriters; i++ { - logrus.Infof("Waiting for %d workers", parallelWriters-i) + log.G(context.TODO()).Infof("Waiting for %d workers", parallelWriters-i) workerReturn := <-doneCh totalKeys += workerReturn.result if mustWrite && 
workerReturn.result == 0 { - log.Fatalf("The worker %s did not write any key %d == 0", workerReturn.id, workerReturn.result) + log.G(context.TODO()).Fatalf("The worker %s did not write any key %d == 0", workerReturn.id, workerReturn.result) } if !mustWrite && workerReturn.result != 0 { - log.Fatalf("The worker %s was supposed to return 0 instead %d != 0", workerReturn.id, workerReturn.result) + log.G(context.TODO()).Fatalf("The worker %s was supposed to return 0 instead %d != 0", workerReturn.id, workerReturn.result) } if mustWrite { resultTable[workerReturn.id] = workerReturn.result - logrus.Infof("The worker %s wrote %d keys", workerReturn.id, workerReturn.result) + log.G(context.TODO()).Infof("The worker %s wrote %d keys", workerReturn.id, workerReturn.result) } } resultTable[totalWrittenKeys] = totalKeys @@ -355,9 +354,9 @@ func doClusterPeers(ips []string, args []string) { if node.result != expectedPeers { failed = true if retry == maxRetry-1 { - log.Fatalf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) + log.G(context.TODO()).Fatalf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) } else { - logrus.Warnf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) + log.G(context.TODO()).Warnf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) } time.Sleep(1 * time.Second) } @@ -416,9 +415,9 @@ func doNetworkPeers(ips []string, args []string) { if node.result != expectedPeers { failed = true if retry == maxRetry-1 { - log.Fatalf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) + log.G(context.TODO()).Fatalf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) } else { - logrus.Warnf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) + log.G(context.TODO()).Warnf("Expected peers from %s mismatch %d != %d", node.id, expectedPeers, node.result) } time.Sleep(1 
* time.Second) } @@ -450,14 +449,14 @@ func doNetworkStatsQueue(ips []string, args []string) { switch comparison { case "lt": if node.result > size { - log.Fatalf("Expected queue size from %s to be %d < %d", node.id, node.result, size) + log.G(context.TODO()).Fatalf("Expected queue size from %s to be %d < %d", node.id, node.result, size) } case "gt": if node.result < size { - log.Fatalf("Expected queue size from %s to be %d > %d", node.id, node.result, size) + log.G(context.TODO()).Fatalf("Expected queue size from %s to be %d > %d", node.id, node.result, size) } default: - log.Fatal("unknown comparison operator") + log.G(context.TODO()).Fatal("unknown comparison operator") } avgQueueSize += node.result } @@ -484,13 +483,13 @@ func doWriteKeys(ips []string, args []string) { defer close(doneCh) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(context.TODO()).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go writeKeysNumber(ips[i], servicePort, networkName, tableName, key, numberOfKeys, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) - logrus.Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) + log.G(context.TODO()).Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) // check table entries for 2 minutes ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) @@ -517,13 +516,13 @@ func doDeleteKeys(ips []string, args []string) { defer close(doneCh) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(context.TODO()).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go deleteKeysNumber(ips[i], servicePort, networkName, tableName, key, numberOfKeys, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) - logrus.Infof("Written a total of %d keys on the 
cluster", keyMap[totalWrittenKeys]) + log.G(context.TODO()).Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) // check table entries for 2 minutes ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) @@ -550,14 +549,14 @@ func doWriteDeleteUniqueKeys(ips []string, args []string) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(writeTimeSec)*time.Second) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go writeDeleteUniqueKeys(ctx, ips[i], servicePort, networkName, tableName, key, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) cancel() - logrus.Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) + log.G(ctx).Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) // check table entries for 2 minutes ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute) @@ -588,14 +587,14 @@ func doWriteUniqueKeys(ips []string, args []string) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(writeTimeSec)*time.Second) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go writeUniqueKeys(ctx, ips[i], servicePort, networkName, tableName, key, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) cancel() - logrus.Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) + log.G(ctx).Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) // check table entries for 2 minutes ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute) @@ -617,14 +616,14 @@ func doWriteDeleteLeaveJoin(ips []string, args []string) { ctx, cancel 
:= context.WithTimeout(context.Background(), time.Duration(writeTimeSec)*time.Second) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go writeDeleteLeaveJoin(ctx, ips[i], servicePort, networkName, tableName, key, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) cancel() - logrus.Infof("Written a total of %d keys on the cluster", keyMap["totalKeys"]) + log.G(ctx).Infof("Written a total of %d keys on the cluster", keyMap["totalKeys"]) // check table entries for 2 minutes ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute) @@ -646,18 +645,18 @@ func doWriteDeleteWaitLeaveJoin(ips []string, args []string) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(writeTimeSec)*time.Second) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go writeDeleteUniqueKeys(ctx, ips[i], servicePort, networkName, tableName, key, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) cancel() - logrus.Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) + log.G(ctx).Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) // The writers will leave the network for i := 0; i < parallelWriters; i++ { - logrus.Infof("worker leaveNetwork: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("worker leaveNetwork: %d on IP:%s", i, ips[i]) go leaveNetwork(ips[i], servicePort, networkName, doneCh) } waitWriters(parallelWriters, false, doneCh) @@ -667,7 +666,7 @@ func doWriteDeleteWaitLeaveJoin(ips []string, args []string) { // The writers will join the network for i := 0; i < parallelWriters; i++ { - logrus.Infof("worker joinNetwork: %d on IP:%s", i, ips[i]) + 
log.G(ctx).Infof("worker joinNetwork: %d on IP:%s", i, ips[i]) go joinNetwork(ips[i], servicePort, networkName, doneCh) } waitWriters(parallelWriters, false, doneCh) @@ -692,18 +691,18 @@ func doWriteWaitLeave(ips []string, args []string) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(writeTimeSec)*time.Second) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go writeUniqueKeys(ctx, ips[i], servicePort, networkName, tableName, key, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) cancel() - logrus.Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) + log.G(ctx).Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) // The writers will leave the network for i := 0; i < parallelWriters; i++ { - logrus.Infof("worker leaveNetwork: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("worker leaveNetwork: %d on IP:%s", i, ips[i]) go leaveNetwork(ips[i], servicePort, networkName, doneCh) } waitWriters(parallelWriters, false, doneCh) @@ -729,19 +728,19 @@ func doWriteWaitLeaveJoin(ips []string, args []string) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(writeTimeSec)*time.Second) for i := 0; i < parallelWriters; i++ { key := "key-" + strconv.Itoa(i) + "-" - logrus.Infof("Spawn worker: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("Spawn worker: %d on IP:%s", i, ips[i]) go writeUniqueKeys(ctx, ips[i], servicePort, networkName, tableName, key, doneCh) } // Sync with all the writers keyMap := waitWriters(parallelWriters, true, doneCh) cancel() - logrus.Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) + log.G(ctx).Infof("Written a total of %d keys on the cluster", keyMap[totalWrittenKeys]) keysExpected := keyMap[totalWrittenKeys] // The Leavers will leave the network for i := 0; 
i < parallelLeaver; i++ { - logrus.Infof("worker leaveNetwork: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("worker leaveNetwork: %d on IP:%s", i, ips[i]) go leaveNetwork(ips[i], servicePort, networkName, doneCh) // Once a node leave all the keys written previously will be deleted, so the expected keys will consider that as removed keysExpected -= keyMap[ips[i]] @@ -753,7 +752,7 @@ func doWriteWaitLeaveJoin(ips []string, args []string) { // The writers will join the network for i := 0; i < parallelLeaver; i++ { - logrus.Infof("worker joinNetwork: %d on IP:%s", i, ips[i]) + log.G(ctx).Infof("worker joinNetwork: %d on IP:%s", i, ips[i]) go joinNetwork(ips[i], servicePort, networkName, doneCh) } waitWriters(parallelLeaver, false, doneCh) @@ -780,11 +779,11 @@ var cmdArgChec = map[string]int{ // Client is a client func Client(args []string) { - logrus.Infof("[CLIENT] Starting with arguments %v", args) + log.G(context.TODO()).Infof("[CLIENT] Starting with arguments %v", args) command := args[0] if len(args) < cmdArgChec[command] { - log.Fatalf("Command %s requires %d arguments, passed %d, aborting...", command, cmdArgChec[command], len(args)) + log.G(context.TODO()).Fatalf("Command %s requires %d arguments, passed %d, aborting...", command, cmdArgChec[command], len(args)) } switch command { @@ -792,18 +791,18 @@ func Client(args []string) { time.Sleep(1 * time.Hour) os.Exit(0) case "fail": - log.Fatalf("Test error condition with message: error error error") + log.G(context.TODO()).Fatalf("Test error condition with message: error error error") } serviceName := args[1] ips, _ := net.LookupHost("tasks." 
+ serviceName) - logrus.Infof("got the ips %v", ips) + log.G(context.TODO()).Infof("got the ips %v", ips) if len(ips) == 0 { - log.Fatalf("Cannot resolve any IP for the service tasks.%s", serviceName) + log.G(context.TODO()).Fatalf("Cannot resolve any IP for the service tasks.%s", serviceName) } servicePort = args[2] commandArgs := args[3:] - logrus.Infof("Executing %s with args:%v", command, commandArgs) + log.G(context.TODO()).Infof("Executing %s with args:%v", command, commandArgs) switch command { case "ready": doReady(ips) @@ -856,6 +855,6 @@ func Client(args []string) { // write-wait-leave networkName tableName numParallelWriters writeTimeSec doWriteWaitLeaveJoin(ips, commandArgs) default: - log.Fatalf("Command %s not recognized", command) + log.G(context.TODO()).Fatalf("Command %s not recognized", command) } } diff --git a/libnetwork/cmd/networkdb-test/dbserver/ndbServer.go b/libnetwork/cmd/networkdb-test/dbserver/ndbServer.go index fc836bd22e..f51651aa9c 100644 --- a/libnetwork/cmd/networkdb-test/dbserver/ndbServer.go +++ b/libnetwork/cmd/networkdb-test/dbserver/ndbServer.go @@ -1,18 +1,18 @@ package dbserver import ( + "context" "errors" "fmt" - "log" "net" "net/http" "os" "strconv" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/cmd/networkdb-test/dummyclient" "github.com/docker/docker/libnetwork/diagnostic" "github.com/docker/docker/libnetwork/networkdb" - "github.com/sirupsen/logrus" ) var nDB *networkdb.NetworkDB @@ -29,25 +29,25 @@ func ipaddress(ctx interface{}, w http.ResponseWriter, r *http.Request) { // Server starts the server func Server(args []string) { - logrus.Infof("[SERVER] Starting with arguments %v", args) + log.G(context.TODO()).Infof("[SERVER] Starting with arguments %v", args) if len(args) < 1 { - log.Fatal("Port number is a mandatory argument, aborting...") + log.G(context.TODO()).Fatal("Port number is a mandatory argument, aborting...") } port, _ := strconv.Atoi(args[0]) var localNodeName string var ok 
bool if localNodeName, ok = os.LookupEnv("TASK_ID"); !ok { - log.Fatal("TASK_ID environment variable not set, aborting...") + log.G(context.TODO()).Fatal("TASK_ID environment variable not set, aborting...") } - logrus.Infof("[SERVER] Starting node %s on port %d", localNodeName, port) + log.G(context.TODO()).Infof("[SERVER] Starting node %s on port %d", localNodeName, port) ip, err := getIPInterface("eth0") if err != nil { - logrus.Errorf("%s There was a problem with the IP %s\n", localNodeName, err) + log.G(context.TODO()).Errorf("%s There was a problem with the IP %s\n", localNodeName, err) return } ipAddr = ip - logrus.Infof("%s uses IP %s\n", localNodeName, ipAddr) + log.G(context.TODO()).Infof("%s uses IP %s\n", localNodeName, ipAddr) server = diagnostic.New() server.Init() @@ -57,7 +57,7 @@ func Server(args []string) { conf.BindAddr = ipAddr nDB, err = networkdb.New(conf) if err != nil { - logrus.Infof("%s error in the DB init %s\n", localNodeName, err) + log.G(context.TODO()).Infof("%s error in the DB init %s\n", localNodeName, err) return } diff --git a/libnetwork/cmd/networkdb-test/dummyclient/dummyClient.go b/libnetwork/cmd/networkdb-test/dummyclient/dummyClient.go index 92498c7b25..2bf56fae70 100644 --- a/libnetwork/cmd/networkdb-test/dummyclient/dummyClient.go +++ b/libnetwork/cmd/networkdb-test/dummyclient/dummyClient.go @@ -1,14 +1,14 @@ package dummyclient import ( + "context" "fmt" - "log" "net/http" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/diagnostic" "github.com/docker/docker/libnetwork/networkdb" events "github.com/docker/go-events" - "github.com/sirupsen/logrus" ) // DummyClientPaths2Func exported paths for the client @@ -85,15 +85,15 @@ func handleTableEvents(tableName string, ch *events.Channel) { isAdd bool ) - logrus.Infof("Started watching table:%s", tableName) + log.G(context.TODO()).Infof("Started watching table:%s", tableName) for { select { case <-ch.Done(): - logrus.Infof("End watching %s", 
tableName) + log.G(context.TODO()).Infof("End watching %s", tableName) return case evt := <-ch.C: - logrus.Infof("Recevied new event on:%s", tableName) + log.G(context.TODO()).Infof("Recevied new event on:%s", tableName) switch event := evt.(type) { case networkdb.CreateEvent: // nid = event.NetworkID @@ -106,13 +106,13 @@ func handleTableEvents(tableName string, ch *events.Channel) { value = event.Value isAdd = false default: - log.Fatalf("Unexpected table event = %#v", event) + log.G(context.TODO()).Fatalf("Unexpected table event = %#v", event) } if isAdd { - // logrus.Infof("Add %s %s", tableName, eid) + // log.G(ctx).Infof("Add %s %s", tableName, eid) clientWatchTable[tableName].entries[eid] = string(value) } else { - // logrus.Infof("Del %s %s", tableName, eid) + // log.G(ctx).Infof("Del %s %s", tableName, eid) delete(clientWatchTable[tableName].entries, eid) } } diff --git a/libnetwork/cmd/networkdb-test/testMain.go b/libnetwork/cmd/networkdb-test/testMain.go index 8731e3440d..6ddfa1336d 100644 --- a/libnetwork/cmd/networkdb-test/testMain.go +++ b/libnetwork/cmd/networkdb-test/testMain.go @@ -1,12 +1,14 @@ package main import ( - "log" + "context" "os" + "github.com/sirupsen/logrus" + + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/cmd/networkdb-test/dbclient" "github.com/docker/docker/libnetwork/cmd/networkdb-test/dbserver" - "github.com/sirupsen/logrus" ) func main() { @@ -14,9 +16,9 @@ func main() { FullTimestamp: true, } logrus.SetFormatter(formatter) - logrus.Infof("Starting the image with these args: %v", os.Args) + log.G(context.TODO()).Infof("Starting the image with these args: %v", os.Args) if len(os.Args) < 1 { - log.Fatal("You need at least 1 argument [client/server]") + log.G(context.TODO()).Fatal("You need at least 1 argument [client/server]") } switch os.Args[1] { diff --git a/libnetwork/config/config.go b/libnetwork/config/config.go index 0833c7d72d..e449eb55b6 100644 --- a/libnetwork/config/config.go +++ 
b/libnetwork/config/config.go @@ -1,8 +1,10 @@ package config import ( + "context" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/cluster" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/ipamutils" @@ -10,7 +12,6 @@ import ( "github.com/docker/docker/libnetwork/osl" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/libkv/store" - "github.com/sirupsen/logrus" ) const ( @@ -60,7 +61,7 @@ type Option func(c *Config) // OptionDefaultNetwork function returns an option setter for a default network func OptionDefaultNetwork(dn string) Option { return func(c *Config) { - logrus.Debugf("Option DefaultNetwork: %s", dn) + log.G(context.TODO()).Debugf("Option DefaultNetwork: %s", dn) c.DefaultNetwork = strings.TrimSpace(dn) } } @@ -68,7 +69,7 @@ func OptionDefaultNetwork(dn string) Option { // OptionDefaultDriver function returns an option setter for default driver func OptionDefaultDriver(dd string) Option { return func(c *Config) { - logrus.Debugf("Option DefaultDriver: %s", dd) + log.G(context.TODO()).Debugf("Option DefaultDriver: %s", dd) c.DefaultDriver = strings.TrimSpace(dd) } } @@ -123,9 +124,9 @@ func OptionPluginGetter(pg plugingetter.PluginGetter) Option { // OptionNetworkControlPlaneMTU function returns an option setter for control plane MTU func OptionNetworkControlPlaneMTU(exp int) Option { return func(c *Config) { - logrus.Debugf("Network Control Plane MTU: %d", exp) + log.G(context.TODO()).Debugf("Network Control Plane MTU: %d", exp) if exp < warningThNetworkControlPlaneMTU { - logrus.Warnf("Received a MTU of %d, this value is very low, the network control plane can misbehave,"+ + log.G(context.TODO()).Warnf("Received a MTU of %d, this value is very low, the network control plane can misbehave,"+ " defaulting to minimum value (%d)", exp, minimumNetworkControlPlaneMTU) if exp < minimumNetworkControlPlaneMTU { exp = minimumNetworkControlPlaneMTU @@ -143,7 +144,7 @@ func 
IsValidName(name string) bool { // OptionLocalKVProvider function returns an option setter for kvstore provider func OptionLocalKVProvider(provider string) Option { return func(c *Config) { - logrus.Debugf("Option OptionLocalKVProvider: %s", provider) + log.G(context.TODO()).Debugf("Option OptionLocalKVProvider: %s", provider) c.Scope.Client.Provider = strings.TrimSpace(provider) } } @@ -151,7 +152,7 @@ func OptionLocalKVProvider(provider string) Option { // OptionLocalKVProviderURL function returns an option setter for kvstore url func OptionLocalKVProviderURL(url string) Option { return func(c *Config) { - logrus.Debugf("Option OptionLocalKVProviderURL: %s", url) + log.G(context.TODO()).Debugf("Option OptionLocalKVProviderURL: %s", url) c.Scope.Client.Address = strings.TrimSpace(url) } } @@ -159,7 +160,7 @@ func OptionLocalKVProviderURL(url string) Option { // OptionLocalKVProviderConfig function returns an option setter for kvstore config func OptionLocalKVProviderConfig(config *store.Config) Option { return func(c *Config) { - logrus.Debugf("Option OptionLocalKVProviderConfig: %v", config) + log.G(context.TODO()).Debugf("Option OptionLocalKVProviderConfig: %v", config) c.Scope.Client.Config = config } } diff --git a/libnetwork/controller.go b/libnetwork/controller.go index cd5ad761c9..e32f54bacf 100644 --- a/libnetwork/controller.go +++ b/libnetwork/controller.go @@ -44,6 +44,7 @@ create network namespaces and allocate interfaces for containers to use. 
package libnetwork import ( + "context" "fmt" "net" "path/filepath" @@ -52,6 +53,7 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/cluster" "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" @@ -70,7 +72,6 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/moby/locker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // NetworkWalker is a client provided function which will be used to walk the Networks. @@ -418,7 +419,7 @@ func (c *Controller) pushNodeDiscovery(d driverapi.Driver, cap driverapi.Capabil err = d.DiscoverDelete(discoverapi.NodeDiscovery, nodeData) } if err != nil { - logrus.Debugf("discovery notification error: %v", err) + log.G(context.TODO()).Debugf("discovery notification error: %v", err) } } } @@ -570,7 +571,7 @@ func (c *Controller) NewNetwork(networkType, name string, id string, options ... defer func() { if err == nil && !skipCfgEpCount { if err := t.getEpCnt().IncEndpointCnt(); err != nil { - logrus.Warnf("Failed to update reference count for configuration network %q on creation of network %q: %v", + log.G(context.TODO()).Warnf("Failed to update reference count for configuration network %q on creation of network %q: %v", t.Name(), nw.Name(), err) } } @@ -600,7 +601,7 @@ func (c *Controller) NewNetwork(networkType, name string, id string, options ... 
defer func() { if err != nil { if e := nw.deleteNetwork(); e != nil { - logrus.Warnf("couldn't roll back driver network on network %s creation failure: %v", nw.name, err) + log.G(context.TODO()).Warnf("couldn't roll back driver network on network %s creation failure: %v", nw.name, err) } } }() @@ -631,7 +632,7 @@ addToStore: defer func() { if err != nil { if e := c.deleteFromStore(epCnt); e != nil { - logrus.Warnf("could not rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e) + log.G(context.TODO()).Warnf("could not rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e) } } }() @@ -643,7 +644,7 @@ addToStore: defer func() { if err != nil { if e := c.deleteFromStore(nw); e != nil { - logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", nw, err, e) + log.G(context.TODO()).Warnf("could not rollback from store, network %v on failure (%v): %v", nw, err, e) } } }() @@ -657,7 +658,7 @@ addToStore: if err != nil { nw.cancelDriverWatches() if e := nw.leaveCluster(); e != nil { - logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", nw.name, err, e) + log.G(context.TODO()).Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", nw.name, err, e) } } }() @@ -684,7 +685,7 @@ var joinCluster NetworkWalker = func(nw Network) bool { return false } if err := n.joinCluster(); err != nil { - logrus.Errorf("Failed to join network %s (%s) into agent cluster: %v", n.Name(), n.ID(), err) + log.G(context.TODO()).Errorf("Failed to join network %s (%s) into agent cluster: %v", n.Name(), n.ID(), err) } n.addDriverWatches() return false @@ -693,7 +694,7 @@ var joinCluster NetworkWalker = func(nw Network) bool { func (c *Controller) reservePools() { networks, err := c.getNetworks() if err != nil { - logrus.Warnf("Could not retrieve networks from local store during ipam allocation for existing networks: %v", err) + log.G(context.TODO()).Warnf("Could not retrieve networks from local store during ipam 
allocation for existing networks: %v", err) return } @@ -728,26 +729,26 @@ func (c *Controller) reservePools() { } // Reserve pools if err := n.ipamAllocate(); err != nil { - logrus.Warnf("Failed to allocate ipam pool(s) for network %q (%s): %v", n.Name(), n.ID(), err) + log.G(context.TODO()).Warnf("Failed to allocate ipam pool(s) for network %q (%s): %v", n.Name(), n.ID(), err) } // Reserve existing endpoints' addresses ipam, _, err := n.getController().getIPAMDriver(n.ipamType) if err != nil { - logrus.Warnf("Failed to retrieve ipam driver for network %q (%s) during address reservation", n.Name(), n.ID()) + log.G(context.TODO()).Warnf("Failed to retrieve ipam driver for network %q (%s) during address reservation", n.Name(), n.ID()) continue } epl, err := n.getEndpointsFromStore() if err != nil { - logrus.Warnf("Failed to retrieve list of current endpoints on network %q (%s)", n.Name(), n.ID()) + log.G(context.TODO()).Warnf("Failed to retrieve list of current endpoints on network %q (%s)", n.Name(), n.ID()) continue } for _, ep := range epl { if ep.Iface() == nil { - logrus.Warnf("endpoint interface is empty for %q (%s)", ep.Name(), ep.ID()) + log.G(context.TODO()).Warnf("endpoint interface is empty for %q (%s)", ep.Name(), ep.ID()) continue } if err := ep.assignAddress(ipam, true, ep.Iface().AddressIPv6() != nil); err != nil { - logrus.Warnf("Failed to reserve current address for endpoint %q (%s) on network %q (%s)", + log.G(context.TODO()).Warnf("Failed to reserve current address for endpoint %q (%s) on network %q (%s)", ep.Name(), ep.ID(), n.Name(), n.ID()) } } @@ -757,7 +758,7 @@ func (c *Controller) reservePools() { func doReplayPoolReserve(n *network) bool { _, caps, err := n.getController().getIPAMDriver(n.ipamType) if err != nil { - logrus.Warnf("Failed to retrieve ipam driver for network %q (%s): %v", n.Name(), n.ID(), err) + log.G(context.TODO()).Warnf("Failed to retrieve ipam driver for network %q (%s): %v", n.Name(), n.ID(), err) return false } return 
caps.RequiresRequestReplay @@ -948,7 +949,7 @@ func (c *Controller) NewSandbox(containerID string, options ...SandboxOption) (* }) if err != nil { - logrus.Errorf("Failed to apply performance tuning sysctls to the sandbox: %v", err) + log.G(context.TODO()).Errorf("Failed to apply performance tuning sysctls to the sandbox: %v", err) } // Keep this just so performance is not changed sb.osSbox.ApplyOSTweaks(sb.oslTypes) diff --git a/libnetwork/default_gateway.go b/libnetwork/default_gateway.go index 15fd06b0aa..5c65848b98 100644 --- a/libnetwork/default_gateway.go +++ b/libnetwork/default_gateway.go @@ -1,12 +1,13 @@ package libnetwork import ( + "context" "fmt" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -78,7 +79,7 @@ func (sb *Sandbox) setupDefaultGW() error { defer func() { if err != nil { if err2 := newEp.Delete(true); err2 != nil { - logrus.Warnf("Failed to remove gw endpoint for container %s after failing to join the gateway network: %v", + log.G(context.TODO()).Warnf("Failed to remove gw endpoint for container %s after failing to join the gateway network: %v", sb.containerID, err2) } } diff --git a/libnetwork/diagnostic/server.go b/libnetwork/diagnostic/server.go index 4af9bcc679..efeb15c17f 100644 --- a/libnetwork/diagnostic/server.go +++ b/libnetwork/diagnostic/server.go @@ -11,6 +11,7 @@ import ( "sync/atomic" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/internal/caller" "github.com/docker/docker/pkg/stack" "github.com/sirupsen/logrus" @@ -89,11 +90,11 @@ func (s *Server) EnableDiagnostic(ip string, port int) { s.port = port if s.enable == 1 { - logrus.Info("The server is already up and running") + log.G(context.TODO()).Info("The server is already up and running") return } - logrus.Infof("Starting the diagnostic server listening on %d for commands", port) + 
log.G(context.TODO()).Infof("Starting the diagnostic server listening on %d for commands", port) srv := &http.Server{ Addr: net.JoinHostPort(ip, strconv.Itoa(port)), Handler: s, @@ -104,7 +105,7 @@ func (s *Server) EnableDiagnostic(ip string, port int) { go func(n *Server) { // Ignore ErrServerClosed that is returned on the Shutdown call if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - logrus.Errorf("ListenAndServe error: %s", err) + log.G(context.TODO()).Errorf("ListenAndServe error: %s", err) atomic.SwapInt32(&n.enable, 0) } }(s) @@ -118,7 +119,7 @@ func (s *Server) DisableDiagnostic() { s.srv.Shutdown(context.Background()) //nolint:errcheck s.srv = nil s.enable = 0 - logrus.Info("Disabling the diagnostic server") + log.G(context.TODO()).Info("Disabling the diagnostic server") } // IsDiagnosticEnabled returns true when the debug is enabled @@ -134,7 +135,7 @@ func notImplemented(ctx interface{}, w http.ResponseWriter, r *http.Request) { rsp := WrongCommand("not implemented", fmt.Sprintf("URL path: %s no method implemented check /help\n", r.URL.Path)) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("command not implemented done") HTTPReply(w, rsp, json) //nolint:errcheck @@ -145,7 +146,7 @@ func help(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("help done") n, ok := ctx.(*Server) @@ -163,7 
+164,7 @@ func ready(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("ready done") HTTPReply(w, CommandSucceed(&StringCmd{Info: "OK"}), json) //nolint:errcheck } @@ -173,7 +174,7 @@ func stackTrace(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("stack trace") path, err := stack.DumpToFile("/tmp/") @@ -189,7 +190,7 @@ func stackTrace(ctx interface{}, w http.ResponseWriter, r *http.Request) { // DebugHTTPForm helper to print the form url parameters func DebugHTTPForm(r *http.Request) { for k, v := range r.Form { - logrus.Debugf("Form[%q] = %q\n", k, v) + log.G(context.TODO()).Debugf("Form[%q] = %q\n", k, v) } } diff --git a/libnetwork/drivers/bridge/bridge.go b/libnetwork/drivers/bridge/bridge.go index a71f097df9..64f6ca7e39 100644 --- a/libnetwork/drivers/bridge/bridge.go +++ b/libnetwork/drivers/bridge/bridge.go @@ -3,6 +3,7 @@ package bridge import ( + "context" "errors" "fmt" "net" @@ -11,6 +12,7 @@ import ( "strconv" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/driverapi" @@ -22,7 +24,6 @@ import ( "github.com/docker/docker/libnetwork/portallocator" 
"github.com/docker/docker/libnetwork/portmapper" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -380,7 +381,7 @@ func (d *driver) configure(option map[string]interface{}) error { if config.EnableIPTables || config.EnableIP6Tables { if _, err := os.Stat("/proc/sys/net/bridge"); err != nil { if out, err := exec.Command("modprobe", "-va", "bridge", "br_netfilter").CombinedOutput(); err != nil { - logrus.Warnf("Running modprobe bridge br_netfilter failed with message: %s, error: %v", out, err) + log.G(context.TODO()).Warnf("Running modprobe bridge br_netfilter failed with message: %s, error: %v", out, err) } } } @@ -395,9 +396,9 @@ func (d *driver) configure(option map[string]interface{}) error { // Make sure on firewall reload, first thing being re-played is chains creation iptables.OnReloaded(func() { - logrus.Debugf("Recreating iptables chains on firewall reload") + log.G(context.TODO()).Debugf("Recreating iptables chains on firewall reload") if _, _, _, _, err := setupIPChains(config, iptables.IPv4); err != nil { - logrus.WithError(err).Error("Error reloading iptables chains") + log.G(context.TODO()).WithError(err).Error("Error reloading iptables chains") } }) } @@ -412,9 +413,9 @@ func (d *driver) configure(option map[string]interface{}) error { // Make sure on firewall reload, first thing being re-played is chains creation iptables.OnReloaded(func() { - logrus.Debugf("Recreating ip6tables chains on firewall reload") + log.G(context.TODO()).Debugf("Recreating ip6tables chains on firewall reload") if _, _, _, _, err := setupIPChains(config, iptables.IPv6); err != nil { - logrus.WithError(err).Error("Error reloading ip6tables chains") + log.G(context.TODO()).WithError(err).Error("Error reloading ip6tables chains") } }) } @@ -422,7 +423,7 @@ func (d *driver) configure(option map[string]interface{}) error { if config.EnableIPForwarding { err = setupIPForwarding(config.EnableIPTables, 
config.EnableIP6Tables) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) return err } } @@ -631,9 +632,9 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d return err } // Got a conflict with a stale default network, clean that up and continue - logrus.Warn(nerr) + log.G(context.TODO()).Warn(nerr) if err := d.deleteNetwork(nerr.ID); err != nil { - logrus.WithError(err).Debug("Error while cleaning up network on conflict") + log.G(context.TODO()).WithError(err).Debug("Error while cleaning up network on conflict") } } @@ -657,7 +658,7 @@ func (d *driver) checkConflict(config *networkConfiguration) error { // We must delete it as libnetwork is the source of truth // The default network being created must be the only one // This can happen only from docker 1.12 on ward - logrus.Infof("Found stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName) + log.G(context.TODO()).Infof("Found stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName) return defaultBridgeNetworkConflict{nwConfig.ID} } @@ -710,7 +711,7 @@ func (d *driver) createNetwork(config *networkConfiguration) (err error) { setupNetworkIsolationRules := func(config *networkConfiguration, i *bridgeInterface) error { if err := network.isolateNetwork(true); err != nil { if err = network.isolateNetwork(false); err != nil { - logrus.Warnf("Failed on removing the inter-network iptables rules on cleanup: %v", err) + log.G(context.TODO()).Warnf("Failed on removing the inter-network iptables rules on cleanup: %v", err) } return err } @@ -823,16 +824,16 @@ func (d *driver) deleteNetwork(nid string) error { // delele endpoints belong to this network for _, ep := range n.endpoints { if err := n.releasePorts(ep); err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } if link, err := d.nlh.LinkByName(ep.srcName); err == nil { if err := d.nlh.LinkDel(link); err != nil { - logrus.WithError(err).Errorf("Failed to delete interface 
(%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + log.G(context.TODO()).WithError(err).Errorf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) } } if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) } } @@ -858,7 +859,7 @@ func (d *driver) deleteNetwork(nid string) error { // it is not the default one (to keep the backward compatible behavior.) if !config.DefaultBridge { if err := d.nlh.LinkDel(n.bridge.Link); err != nil { - logrus.Warnf("Failed to remove bridge interface %s on network %s delete: %v", config.BridgeName, nid, err) + log.G(context.TODO()).Warnf("Failed to remove bridge interface %s on network %s delete: %v", config.BridgeName, nid, err) } } case ifaceCreatedByUser: @@ -868,7 +869,7 @@ func (d *driver) deleteNetwork(nid string) error { // clean all relevant iptables rules for _, cleanFunc := range n.iptCleanFuncs { if errClean := cleanFunc(); errClean != nil { - logrus.Warnf("Failed to clean iptables rules for bridge network: %v", errClean) + log.G(context.TODO()).Warnf("Failed to clean iptables rules for bridge network: %v", errClean) } } return d.storeDelete(config) @@ -880,7 +881,7 @@ func addToBridge(nlh *netlink.Handle, ifaceName, bridgeName string) error { return fmt.Errorf("could not find interface %s: %v", ifaceName, err) } if err := nlh.LinkSetMaster(lnk, &netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: bridgeName}}); err != nil { - logrus.WithError(err).Errorf("Failed to add %s to bridge via netlink", ifaceName) + log.G(context.TODO()).WithError(err).Errorf("Failed to add %s to bridge via netlink", ifaceName) return err } return nil @@ -981,7 +982,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, defer func() { if err != nil { if err := d.nlh.LinkDel(host); err != nil { - 
logrus.WithError(err).Warnf("Failed to delete host side interface (%s)'s link", hostIfName) + log.G(context.TODO()).WithError(err).Warnf("Failed to delete host side interface (%s)'s link", hostIfName) } } }() @@ -994,7 +995,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, defer func() { if err != nil { if err := d.nlh.LinkDel(sbox); err != nil { - logrus.WithError(err).Warnf("Failed to delete sandbox side interface (%s)'s link", containerIfName) + log.G(context.TODO()).WithError(err).Warnf("Failed to delete sandbox side interface (%s)'s link", containerIfName) } } }() @@ -1131,12 +1132,12 @@ func (d *driver) DeleteEndpoint(nid, eid string) error { // Also make sure defer does not see this error either. if link, err := d.nlh.LinkByName(ep.srcName); err == nil { if err := d.nlh.LinkDel(link); err != nil { - logrus.WithError(err).Errorf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + log.G(context.TODO()).WithError(err).Errorf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) } } if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) } return nil @@ -1296,7 +1297,7 @@ func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string defer func() { if err != nil { if e := network.releasePorts(endpoint); e != nil { - logrus.Errorf("Failed to release ports allocated for the bridge endpoint %s on failure %v because of %v", + log.G(context.TODO()).Errorf("Failed to release ports allocated for the bridge endpoint %s on failure %v because of %v", eid, err, e) } endpoint.portMapping = nil @@ -1335,7 +1336,7 @@ func (d *driver) RevokeExternalConnectivity(nid, eid string) error { err = network.releasePorts(endpoint) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } 
endpoint.portMapping = nil diff --git a/libnetwork/drivers/bridge/bridge_store.go b/libnetwork/drivers/bridge/bridge_store.go index 42a53f7a09..4735a360cb 100644 --- a/libnetwork/drivers/bridge/bridge_store.go +++ b/libnetwork/drivers/bridge/bridge_store.go @@ -3,15 +3,16 @@ package bridge import ( + "context" "encoding/json" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -62,9 +63,9 @@ func (d *driver) populateNetworks() error { for _, kvo := range kvol { ncfg := kvo.(*networkConfiguration) if err = d.createNetwork(ncfg); err != nil { - logrus.Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state: %v", ncfg.ID, ncfg.BridgeName, err) + log.G(context.TODO()).Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state: %v", ncfg.ID, ncfg.BridgeName, err) } - logrus.Debugf("Network (%.7s) restored", ncfg.ID) + log.G(context.TODO()).Debugf("Network (%.7s) restored", ncfg.ID) } return nil @@ -84,16 +85,16 @@ func (d *driver) populateEndpoints() error { ep := kvo.(*bridgeEndpoint) n, ok := d.networks[ep.nid] if !ok { - logrus.Debugf("Network (%.7s) not found for restored bridge endpoint (%.7s)", ep.nid, ep.id) - logrus.Debugf("Deleting stale bridge endpoint (%.7s) from store", ep.id) + log.G(context.TODO()).Debugf("Network (%.7s) not found for restored bridge endpoint (%.7s)", ep.nid, ep.id) + log.G(context.TODO()).Debugf("Deleting stale bridge endpoint (%.7s) from store", ep.id) if err := d.storeDelete(ep); err != nil { - logrus.Debugf("Failed to delete stale bridge endpoint (%.7s) from store", ep.id) + log.G(context.TODO()).Debugf("Failed to delete stale bridge endpoint (%.7s) from store", ep.id) } continue } n.endpoints[ep.id] = ep 
n.restorePortAllocations(ep) - logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) + log.G(context.TODO()).Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) } return nil @@ -101,7 +102,7 @@ func (d *driver) populateEndpoints() error { func (d *driver) storeUpdate(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Warnf("bridge store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Warnf("bridge store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) return nil } @@ -114,7 +115,7 @@ func (d *driver) storeUpdate(kvObject datastore.KVObject) error { func (d *driver) storeDelete(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Debugf("bridge store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Debugf("bridge store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) return nil } @@ -315,19 +316,19 @@ func (ep *bridgeEndpoint) UnmarshalJSON(b []byte) error { ep.srcName = epMap["SrcName"].(string) d, _ := json.Marshal(epMap["Config"]) if err := json.Unmarshal(d, &ep.config); err != nil { - logrus.Warnf("Failed to decode endpoint config %v", err) + log.G(context.TODO()).Warnf("Failed to decode endpoint config %v", err) } d, _ = json.Marshal(epMap["ContainerConfig"]) if err := json.Unmarshal(d, &ep.containerConfig); err != nil { - logrus.Warnf("Failed to decode endpoint container config %v", err) + log.G(context.TODO()).Warnf("Failed to decode endpoint container config %v", err) } d, _ = json.Marshal(epMap["ExternalConnConfig"]) if err := json.Unmarshal(d, &ep.extConnConfig); err != nil { - logrus.Warnf("Failed to decode endpoint external connectivity configuration %v", err) + log.G(context.TODO()).Warnf("Failed to decode endpoint external connectivity configuration %v", err) } d, _ = 
json.Marshal(epMap["PortMapping"]) if err := json.Unmarshal(d, &ep.portMapping); err != nil { - logrus.Warnf("Failed to decode endpoint port mapping %v", err) + log.G(context.TODO()).Warnf("Failed to decode endpoint port mapping %v", err) } return nil @@ -394,7 +395,7 @@ func (n *bridgeNetwork) restorePortAllocations(ep *bridgeEndpoint) { ep.extConnConfig.PortBindings = ep.portMapping _, err := n.allocatePorts(ep, n.config.DefaultBindingIP, n.driver.config.EnableUserlandProxy) if err != nil { - logrus.Warnf("Failed to reserve existing port mapping for endpoint %.7s:%v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to reserve existing port mapping for endpoint %.7s:%v", ep.id, err) } ep.extConnConfig.PortBindings = tmp } diff --git a/libnetwork/drivers/bridge/interface.go b/libnetwork/drivers/bridge/interface.go index abf6290826..4f78d81381 100644 --- a/libnetwork/drivers/bridge/interface.go +++ b/libnetwork/drivers/bridge/interface.go @@ -3,10 +3,11 @@ package bridge import ( + "context" "fmt" "net" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "github.com/vishvananda/netlink" ) @@ -42,7 +43,7 @@ func newInterface(nlh *netlink.Handle, config *networkConfiguration) (*bridgeInt // Attempt to find an existing bridge named with the specified name. 
i.Link, err = nlh.LinkByName(config.BridgeName) if err != nil { - logrus.Debugf("Did not find any interface with name %s: %v", config.BridgeName, err) + log.G(context.TODO()).Debugf("Did not find any interface with name %s: %v", config.BridgeName, err) } else if _, ok := i.Link.(*netlink.Bridge); !ok { return nil, fmt.Errorf("existing interface %s is not a bridge", i.Link.Attrs().Name) } diff --git a/libnetwork/drivers/bridge/link.go b/libnetwork/drivers/bridge/link.go index 527b561b93..b841da3e4e 100644 --- a/libnetwork/drivers/bridge/link.go +++ b/libnetwork/drivers/bridge/link.go @@ -3,12 +3,13 @@ package bridge import ( + "context" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/iptables" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) type link struct { @@ -45,7 +46,7 @@ func (l *link) Disable() { // -D == iptables delete flag err := linkContainers("-D", l.parentIP, l.childIP, l.ports, l.bridge, true) if err != nil { - logrus.Errorf("Error removing IPTables rules for a link %s due to %s", l.String(), err.Error()) + log.G(context.TODO()).Errorf("Error removing IPTables rules for a link %s due to %s", l.String(), err.Error()) } // Return proper error once we move to use a proper iptables package // that returns typed errors diff --git a/libnetwork/drivers/bridge/port_mapping.go b/libnetwork/drivers/bridge/port_mapping.go index b4c405e171..d1ef05ab67 100644 --- a/libnetwork/drivers/bridge/port_mapping.go +++ b/libnetwork/drivers/bridge/port_mapping.go @@ -4,14 +4,15 @@ package bridge import ( "bytes" + "context" "errors" "fmt" "net" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" "github.com/ishidawataru/sctp" - "github.com/sirupsen/logrus" ) func (n *bridgeNetwork) allocatePorts(ep *bridgeEndpoint, reqDefBindIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) { @@ -46,7 +47,7 @@ func (n *bridgeNetwork) allocatePortsInternal(bindings 
[]types.PortBinding, cont if err := n.allocatePort(&bIPv4, ulPxyEnabled); err != nil { // On allocation failure, release previously allocated ports. On cleanup error, just log a warning message if cuErr := n.releasePortsInternal(bs); cuErr != nil { - logrus.Warnf("allocation failure for %v, failed to clear previously allocated ipv4 port bindings: %v", bIPv4, cuErr) + log.G(context.TODO()).Warnf("allocation failure for %v, failed to clear previously allocated ipv4 port bindings: %v", bIPv4, cuErr) } return nil, err } @@ -72,7 +73,7 @@ func (n *bridgeNetwork) allocatePortsInternal(bindings []types.PortBinding, cont if err := n.allocatePort(&bIPv6, ulPxyEnabled); err != nil { // On allocation failure, release previously allocated ports. On cleanup error, just log a warning message if cuErr := n.releasePortsInternal(bs); cuErr != nil { - logrus.Warnf("allocation failure for %v, failed to clear previously allocated ipv6 port bindings: %v", bIPv6, cuErr) + log.G(context.TODO()).Warnf("allocation failure for %v, failed to clear previously allocated ipv6 port bindings: %v", bIPv6, cuErr) } return nil, err } @@ -159,10 +160,10 @@ func (n *bridgeNetwork) allocatePort(bnd *types.PortBinding, ulPxyEnabled bool) } // There is no point in immediately retrying to map an explicitly chosen port. 
if bnd.HostPort != 0 { - logrus.Warnf("Failed to allocate and map port %d-%d: %s", bnd.HostPort, bnd.HostPortEnd, err) + log.G(context.TODO()).Warnf("Failed to allocate and map port %d-%d: %s", bnd.HostPort, bnd.HostPortEnd, err) break } - logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1) + log.G(context.TODO()).Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1) } if err != nil { return err @@ -235,7 +236,7 @@ func IsV6Listenable() bool { // When the kernel was booted with `ipv6.disable=1`, // we get err "listen tcp6 [::1]:0: socket: address family not supported by protocol" // https://github.com/moby/moby/issues/42288 - logrus.Debugf("port_mapping: v6Listenable=false (%v)", err) + log.G(context.TODO()).Debugf("port_mapping: v6Listenable=false (%v)", err) } else { v6ListenableCached = true ln.Close() diff --git a/libnetwork/drivers/bridge/setup_bridgenetfiltering.go b/libnetwork/drivers/bridge/setup_bridgenetfiltering.go index 58bbb52b30..31ebc22cb2 100644 --- a/libnetwork/drivers/bridge/setup_bridgenetfiltering.go +++ b/libnetwork/drivers/bridge/setup_bridgenetfiltering.go @@ -3,12 +3,13 @@ package bridge import ( + "context" "errors" "fmt" "os" "syscall" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) // Enumeration type saying which versions of IP protocol to process. 
@@ -36,7 +37,7 @@ func setupBridgeNetFiltering(config *networkConfiguration, i *bridgeInterface) e if ptherr, ok := err.(*os.PathError); ok { if errno, ok := ptherr.Err.(syscall.Errno); ok && errno == syscall.ENOENT { if isRunningInContainer() { - logrus.Warnf("running inside docker container, ignoring missing kernel params: %v", err) + log.G(context.TODO()).Warnf("running inside docker container, ignoring missing kernel params: %v", err) err = nil } else { err = errors.New("please ensure that br_netfilter kernel module is loaded") @@ -63,7 +64,7 @@ func checkBridgeNetFiltering(config *networkConfiguration, i *bridgeInterface) e } enabled, err := isPacketForwardingEnabled(ipVer, iface) if err != nil { - logrus.Warnf("failed to check %s forwarding: %v", ipVerName, err) + log.G(context.TODO()).Warnf("failed to check %s forwarding: %v", ipVerName, err) } else if enabled { enabled, err := getKernelBoolParam(getBridgeNFKernelParam(ipVer)) if err != nil || enabled { diff --git a/libnetwork/drivers/bridge/setup_device.go b/libnetwork/drivers/bridge/setup_device.go index 12f211b836..00c4ebd875 100644 --- a/libnetwork/drivers/bridge/setup_device.go +++ b/libnetwork/drivers/bridge/setup_device.go @@ -3,12 +3,13 @@ package bridge import ( + "context" "fmt" "os" "path/filepath" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/netutils" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -36,10 +37,10 @@ func setupDevice(config *networkConfiguration, i *bridgeInterface) error { // Set the bridge's MAC address. Requires kernel version 3.3 or up. 
hwAddr := netutils.GenerateRandomMAC() i.Link.Attrs().HardwareAddr = hwAddr - logrus.Debugf("Setting bridge mac address to %s", hwAddr) + log.G(context.TODO()).Debugf("Setting bridge mac address to %s", hwAddr) if err := i.nlh.LinkAdd(i.Link); err != nil { - logrus.WithError(err).Errorf("Failed to create bridge %s via netlink", config.BridgeName) + log.G(context.TODO()).WithError(err).Errorf("Failed to create bridge %s via netlink", config.BridgeName) return err } @@ -50,14 +51,14 @@ func setupDefaultSysctl(config *networkConfiguration, i *bridgeInterface) error // Disable IPv6 router advertisements originating on the bridge sysPath := filepath.Join("/proc/sys/net/ipv6/conf/", config.BridgeName, "accept_ra") if _, err := os.Stat(sysPath); err != nil { - logrus. + log.G(context.TODO()). WithField("bridge", config.BridgeName). WithField("syspath", sysPath). Info("failed to read ipv6 net.ipv6.conf..accept_ra") return nil } if err := os.WriteFile(sysPath, []byte{'0', '\n'}, 0644); err != nil { - logrus.WithError(err).Warn("unable to disable IPv6 router advertisement") + log.G(context.TODO()).WithError(err).Warn("unable to disable IPv6 router advertisement") } return nil } @@ -74,7 +75,7 @@ func setupDeviceUp(config *networkConfiguration, i *bridgeInterface) error { if lnk, err := i.nlh.LinkByName(config.BridgeName); err == nil { i.Link = lnk } else { - logrus.Warnf("Failed to retrieve link for interface (%s): %v", config.BridgeName, err) + log.G(context.TODO()).Warnf("Failed to retrieve link for interface (%s): %v", config.BridgeName, err) } return nil } diff --git a/libnetwork/drivers/bridge/setup_ip_forwarding.go b/libnetwork/drivers/bridge/setup_ip_forwarding.go index f2d6a7df95..096f06ee49 100644 --- a/libnetwork/drivers/bridge/setup_ip_forwarding.go +++ b/libnetwork/drivers/bridge/setup_ip_forwarding.go @@ -3,11 +3,12 @@ package bridge import ( + "context" "fmt" "os" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/iptables" - 
"github.com/sirupsen/logrus" ) const ( @@ -42,14 +43,14 @@ func setupIPForwarding(enableIPTables bool, enableIP6Tables bool) error { iptable := iptables.GetIptable(iptables.IPv4) if err := iptable.SetDefaultPolicy(iptables.Filter, "FORWARD", iptables.Drop); err != nil { if err := configureIPForwarding(false); err != nil { - logrus.Errorf("Disabling IP forwarding failed, %v", err) + log.G(context.TODO()).Errorf("Disabling IP forwarding failed, %v", err) } return err } iptables.OnReloaded(func() { - logrus.Debug("Setting the default DROP policy on firewall reload") + log.G(context.TODO()).Debug("Setting the default DROP policy on firewall reload") if err := iptable.SetDefaultPolicy(iptables.Filter, "FORWARD", iptables.Drop); err != nil { - logrus.Warnf("Setting the default DROP policy on firewall reload failed, %v", err) + log.G(context.TODO()).Warnf("Setting the default DROP policy on firewall reload failed, %v", err) } }) } @@ -59,12 +60,12 @@ func setupIPForwarding(enableIPTables bool, enableIP6Tables bool) error { if enableIP6Tables { iptable := iptables.GetIptable(iptables.IPv6) if err := iptable.SetDefaultPolicy(iptables.Filter, "FORWARD", iptables.Drop); err != nil { - logrus.Warnf("Setting the default DROP policy on firewall reload failed, %v", err) + log.G(context.TODO()).Warnf("Setting the default DROP policy on firewall reload failed, %v", err) } iptables.OnReloaded(func() { - logrus.Debug("Setting the default DROP policy on firewall reload") + log.G(context.TODO()).Debug("Setting the default DROP policy on firewall reload") if err := iptable.SetDefaultPolicy(iptables.Filter, "FORWARD", iptables.Drop); err != nil { - logrus.Warnf("Setting the default DROP policy on firewall reload failed, %v", err) + log.G(context.TODO()).Warnf("Setting the default DROP policy on firewall reload failed, %v", err) } }) } diff --git a/libnetwork/drivers/bridge/setup_ip_tables.go b/libnetwork/drivers/bridge/setup_ip_tables.go index f6e631d612..8c799cd01c 100644 --- 
a/libnetwork/drivers/bridge/setup_ip_tables.go +++ b/libnetwork/drivers/bridge/setup_ip_tables.go @@ -3,13 +3,14 @@ package bridge import ( + "context" "errors" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/iptables" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -46,7 +47,7 @@ func setupIPChains(config configuration, version iptables.IPVersion) (*iptables. defer func() { if err != nil { if err := iptable.RemoveExistingChain(DockerChain, iptables.Nat); err != nil { - logrus.Warnf("failed on removing iptables NAT chain %s on cleanup: %v", DockerChain, err) + log.G(context.TODO()).Warnf("failed on removing iptables NAT chain %s on cleanup: %v", DockerChain, err) } } }() @@ -58,7 +59,7 @@ func setupIPChains(config configuration, version iptables.IPVersion) (*iptables. defer func() { if err != nil { if err := iptable.RemoveExistingChain(DockerChain, iptables.Filter); err != nil { - logrus.Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", DockerChain, err) + log.G(context.TODO()).Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", DockerChain, err) } } }() @@ -70,7 +71,7 @@ func setupIPChains(config configuration, version iptables.IPVersion) (*iptables. defer func() { if err != nil { if err := iptable.RemoveExistingChain(IsolationChain1, iptables.Filter); err != nil { - logrus.Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", IsolationChain1, err) + log.G(context.TODO()).Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", IsolationChain1, err) } } }() @@ -82,7 +83,7 @@ func setupIPChains(config configuration, version iptables.IPVersion) (*iptables. 
defer func() { if err != nil { if err := iptable.RemoveExistingChain(IsolationChain2, iptables.Filter); err != nil { - logrus.Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", IsolationChain2, err) + log.G(context.TODO()).Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", IsolationChain2, err) } } }() @@ -360,12 +361,12 @@ func setINC(version iptables.IPVersion, iface string, enable bool) error { if i == 1 { // Rollback the rule installed on first chain if err2 := iptable.ProgramRule(iptables.Filter, chains[0], iptables.Delete, rules[0]); err2 != nil { - logrus.Warnf("Failed to rollback iptables rule after failure (%v): %v", err, err2) + log.G(context.TODO()).Warnf("Failed to rollback iptables rule after failure (%v): %v", err, err2) } } return fmt.Errorf(msg) } - logrus.Warn(msg) + log.G(context.TODO()).Warn(msg) } } @@ -390,7 +391,7 @@ func removeIPChains(version iptables.IPVersion) { {Name: oldIsolationChain, Table: iptables.Filter, IPTable: ipt}, } { if err := chainInfo.Remove(); err != nil { - logrus.Warnf("Failed to remove existing iptables entries in table %s chain %s : %v", chainInfo.Table, chainInfo.Name, err) + log.G(context.TODO()).Warnf("Failed to remove existing iptables entries in table %s chain %s : %v", chainInfo.Table, chainInfo.Name, err) } } } diff --git a/libnetwork/drivers/bridge/setup_ipv4.go b/libnetwork/drivers/bridge/setup_ipv4.go index b8787c08b9..856b1f7182 100644 --- a/libnetwork/drivers/bridge/setup_ipv4.go +++ b/libnetwork/drivers/bridge/setup_ipv4.go @@ -3,14 +3,15 @@ package bridge import ( + "context" "errors" "fmt" "net" "os" "path/filepath" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -43,7 +44,7 @@ func setupBridgeIPv4(config *networkConfiguration, i *bridgeInterface) error { return fmt.Errorf("failed to remove current ip address from bridge: %v", err) } } - logrus.Debugf("Assigning 
address to bridge interface %s: %s", config.BridgeName, config.AddressIPv4) + log.G(context.TODO()).Debugf("Assigning address to bridge interface %s: %s", config.BridgeName, config.AddressIPv4) if err := i.nlh.AddrAdd(i.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil { return &IPv4AddrAddError{IP: config.AddressIPv4, Err: err} } diff --git a/libnetwork/drivers/bridge/setup_ipv6.go b/libnetwork/drivers/bridge/setup_ipv6.go index cf3fbe3b21..e466b83338 100644 --- a/libnetwork/drivers/bridge/setup_ipv6.go +++ b/libnetwork/drivers/bridge/setup_ipv6.go @@ -3,11 +3,12 @@ package bridge import ( + "context" "fmt" "net" "os" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "github.com/vishvananda/netlink" ) @@ -54,14 +55,14 @@ func setupBridgeIPv6(config *networkConfiguration, i *bridgeInterface) error { } // Setting route to global IPv6 subnet - logrus.Debugf("Adding route to IPv6 network %s via device %s", config.AddressIPv6.String(), config.BridgeName) + log.G(context.TODO()).Debugf("Adding route to IPv6 network %s via device %s", config.AddressIPv6.String(), config.BridgeName) err = i.nlh.RouteAdd(&netlink.Route{ Scope: netlink.SCOPE_UNIVERSE, LinkIndex: i.Link.Attrs().Index, Dst: config.AddressIPv6, }) if err != nil && !os.IsExist(err) { - logrus.Errorf("Could not add route to IPv6 network %s via device %s: %s", config.AddressIPv6.String(), config.BridgeName, err) + log.G(context.TODO()).Errorf("Could not add route to IPv6 network %s via device %s: %s", config.AddressIPv6.String(), config.BridgeName, err) } return nil @@ -90,7 +91,7 @@ func setupIPv6Forwarding(config *networkConfiguration, i *bridgeInterface) error // Enable IPv6 default forwarding only if it is not already enabled if ipv6ForwardDataDefault[0] != '1' { if err := os.WriteFile(ipv6ForwardConfDefault, []byte{'1', '\n'}, ipv6ForwardConfPerm); err != nil { - logrus.Warnf("Unable to enable IPv6 default forwarding: %v", err) + log.G(context.TODO()).Warnf("Unable to enable 
IPv6 default forwarding: %v", err) } } @@ -102,7 +103,7 @@ func setupIPv6Forwarding(config *networkConfiguration, i *bridgeInterface) error // Enable IPv6 all forwarding only if it is not already enabled if ipv6ForwardDataAll[0] != '1' { if err := os.WriteFile(ipv6ForwardConfAll, []byte{'1', '\n'}, ipv6ForwardConfPerm); err != nil { - logrus.Warnf("Unable to enable IPv6 all forwarding: %v", err) + log.G(context.TODO()).Warnf("Unable to enable IPv6 all forwarding: %v", err) } } diff --git a/libnetwork/drivers/bridge/setup_verify.go b/libnetwork/drivers/bridge/setup_verify.go index 5e5aacf6be..ad40cf6567 100644 --- a/libnetwork/drivers/bridge/setup_verify.go +++ b/libnetwork/drivers/bridge/setup_verify.go @@ -3,12 +3,13 @@ package bridge import ( + "context" "fmt" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -42,7 +43,7 @@ func setupVerifyAndReconcile(config *networkConfiguration, i *bridgeInterface) e addrv6 := addrv6 if addrv6.IP.IsGlobalUnicast() && !types.CompareIPNet(addrv6.IPNet, i.bridgeIPv6) { if err := i.nlh.AddrDel(i.Link, &addrv6); err != nil { - logrus.Warnf("Failed to remove residual IPv6 address %s from bridge: %v", addrv6.IPNet, err) + log.G(context.TODO()).Warnf("Failed to remove residual IPv6 address %s from bridge: %v", addrv6.IPNet, err) } } } diff --git a/libnetwork/drivers/ipvlan/ipvlan_endpoint.go b/libnetwork/drivers/ipvlan/ipvlan_endpoint.go index b3fd395a1b..f91e74fc64 100644 --- a/libnetwork/drivers/ipvlan/ipvlan_endpoint.go +++ b/libnetwork/drivers/ipvlan/ipvlan_endpoint.go @@ -4,13 +4,14 @@ package ipvlan import ( + "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) 
// CreateEndpoint assigns the mac, ip and endpoint id for the new container @@ -40,7 +41,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, if opt, ok := epOptions[netlabel.PortMap]; ok { if _, ok := opt.([]types.PortBinding); ok { if len(opt.([]types.PortBinding)) > 0 { - logrus.Warnf("ipvlan driver does not support port mappings") + log.G(context.TODO()).Warnf("ipvlan driver does not support port mappings") } } } @@ -48,7 +49,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, if opt, ok := epOptions[netlabel.ExposedPorts]; ok { if _, ok := opt.([]types.TransportPort); ok { if len(opt.([]types.TransportPort)) > 0 { - logrus.Warnf("ipvlan driver does not support port exposures") + log.G(context.TODO()).Warnf("ipvlan driver does not support port exposures") } } } @@ -77,12 +78,12 @@ func (d *driver) DeleteEndpoint(nid, eid string) error { } if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { if err := ns.NlHandle().LinkDel(link); err != nil { - logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + log.G(context.TODO()).WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) } } if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove ipvlan endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove ipvlan endpoint %.7s from store: %v", ep.id, err) } n.deleteEndpoint(ep.id) return nil diff --git a/libnetwork/drivers/ipvlan/ipvlan_joinleave.go b/libnetwork/drivers/ipvlan/ipvlan_joinleave.go index 3753045d54..635fc277c5 100644 --- a/libnetwork/drivers/ipvlan/ipvlan_joinleave.go +++ b/libnetwork/drivers/ipvlan/ipvlan_joinleave.go @@ -4,14 +4,15 @@ package ipvlan import ( + "context" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netutils" 
"github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) type staticRoute struct { @@ -63,7 +64,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, if err := jinfo.AddStaticRoute(defaultRoute.Destination, defaultRoute.RouteType, defaultRoute.NextHop); err != nil { return fmt.Errorf("failed to set an ipvlan l3/l3s mode ipv4 default gateway: %v", err) } - logrus.Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, Ipvlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, Ipvlan_Mode: %s, Parent: %s", ep.addr.IP.String(), n.config.IpvlanMode, n.config.Parent) // If the endpoint has a v6 address, set a v6 default route if ep.addrv6 != nil { @@ -74,7 +75,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, if err = jinfo.AddStaticRoute(default6Route.Destination, default6Route.RouteType, default6Route.NextHop); err != nil { return fmt.Errorf("failed to set an ipvlan l3/l3s mode ipv6 default gateway: %v", err) } - logrus.Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s, Ipvlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s, Ipvlan_Mode: %s, Parent: %s", ep.addrv6.IP.String(), n.config.IpvlanMode, n.config.Parent) } case modeL2: @@ -92,7 +93,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, if err != nil { return err } - logrus.Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, Gateway: %s, Ipvlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, Gateway: %s, Ipvlan_Mode: %s, Parent: %s", ep.addr.IP.String(), v4gw.String(), n.config.IpvlanMode, n.config.Parent) } // parse and correlate the endpoint v6 address with the available v6 subnets @@ -109,17 +110,17 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, if err != nil { return err } - 
logrus.Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s, Gateway: %s, Ipvlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s, Gateway: %s, Ipvlan_Mode: %s, Parent: %s", ep.addrv6.IP.String(), v6gw.String(), n.config.IpvlanMode, n.config.Parent) } } } else { if len(n.config.Ipv4Subnets) > 0 { - logrus.Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, IpVlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, IpVlan_Mode: %s, Parent: %s", ep.addr.IP.String(), n.config.IpvlanMode, n.config.Parent) } if len(n.config.Ipv6Subnets) > 0 { - logrus.Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s IpVlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s IpVlan_Mode: %s, Parent: %s", ep.addrv6.IP.String(), n.config.IpvlanMode, n.config.Parent) } } diff --git a/libnetwork/drivers/ipvlan/ipvlan_network.go b/libnetwork/drivers/ipvlan/ipvlan_network.go index 4eb60b1b3c..cb8e9ff5c7 100644 --- a/libnetwork/drivers/ipvlan/ipvlan_network.go +++ b/libnetwork/drivers/ipvlan/ipvlan_network.go @@ -4,8 +4,10 @@ package ipvlan import ( + "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/ns" @@ -13,7 +15,6 @@ import ( "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" ) // CreateNetwork the network for the specified driver type @@ -55,7 +56,7 @@ func (d *driver) CreateNetwork(nid string, option map[string]interface{}, nInfo err = d.storeUpdate(config) if err != nil { d.deleteNetwork(config.ID) - logrus.Debugf("encountered an error rolling back a network create for %s : %v", config.ID, err) + log.G(context.TODO()).Debugf("encountered an error rolling back a network create for %s : %v", config.ID, err) 
return err } @@ -72,7 +73,7 @@ func (d *driver) createNetwork(config *configuration) (bool, error) { return false, fmt.Errorf("network %s is already using parent interface %s", getDummyName(stringid.TruncateID(nw.config.ID)), config.Parent) } - logrus.Debugf("Create Network for the same ID %s\n", config.ID) + log.G(context.TODO()).Debugf("Create Network for the same ID %s\n", config.ID) foundExisting = true break } @@ -87,7 +88,7 @@ func (d *driver) createNetwork(config *configuration) (bool, error) { config.CreatedSlaveLink = true // notify the user in logs that they have limited communications - logrus.Debugf("Empty -o parent= flags limit communications to other containers inside of network: %s", + log.G(context.TODO()).Debugf("Empty -o parent= flags limit communications to other containers inside of network: %s", config.Parent) } else { // if the subinterface parent_iface.vlan_id checks do not pass, return err. @@ -128,14 +129,14 @@ func (d *driver) DeleteNetwork(nid string) error { if n.config.Parent == getDummyName(stringid.TruncateID(nid)) { err := delDummyLink(n.config.Parent) if err != nil { - logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + log.G(context.TODO()).Debugf("link %s was not deleted, continuing the delete network operation: %v", n.config.Parent, err) } } else { // only delete the link if it matches iface.vlan naming err := delVlanLink(n.config.Parent) if err != nil { - logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + log.G(context.TODO()).Debugf("link %s was not deleted, continuing the delete network operation: %v", n.config.Parent, err) } } @@ -144,12 +145,12 @@ func (d *driver) DeleteNetwork(nid string) error { for _, ep := range n.endpoints { if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { if err := ns.NlHandle().LinkDel(link); err != nil { - logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, 
ep.id) + log.G(context.TODO()).WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) } } if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove ipvlan endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove ipvlan endpoint %.7s from store: %v", ep.id, err) } } // delete the *network diff --git a/libnetwork/drivers/ipvlan/ipvlan_setup.go b/libnetwork/drivers/ipvlan/ipvlan_setup.go index 7e8e793485..97142472a3 100644 --- a/libnetwork/drivers/ipvlan/ipvlan_setup.go +++ b/libnetwork/drivers/ipvlan/ipvlan_setup.go @@ -4,12 +4,13 @@ package ipvlan import ( + "context" "fmt" "strconv" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/ns" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -122,7 +123,7 @@ func createVlanLink(parentName string) error { if err := ns.NlHandle().LinkSetUp(vlanLink); err != nil { return fmt.Errorf("failed to enable %s the ipvlan parent link %v", vlanLink.Name, err) } - logrus.Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt) + log.G(context.TODO()).Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt) return nil } @@ -149,7 +150,7 @@ func delVlanLink(linkName string) error { if err := ns.NlHandle().LinkDel(vlanLink); err != nil { return fmt.Errorf("failed to delete %s link: %v", linkName, err) } - logrus.Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName) + log.G(context.TODO()).Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName) } // if the subinterface doesn't parse to iface.vlan_id leave the interface in // place since it could be a user specified name not created by the driver. 
@@ -215,7 +216,7 @@ func delDummyLink(linkName string) error { if err := ns.NlHandle().LinkDel(dummyLink); err != nil { return fmt.Errorf("failed to delete the dummy %s link: %v", linkName, err) } - logrus.Debugf("Deleted a dummy parent link: %s", linkName) + log.G(context.TODO()).Debugf("Deleted a dummy parent link: %s", linkName) return nil } diff --git a/libnetwork/drivers/ipvlan/ipvlan_state.go b/libnetwork/drivers/ipvlan/ipvlan_state.go index fc26f1613e..2971542d66 100644 --- a/libnetwork/drivers/ipvlan/ipvlan_state.go +++ b/libnetwork/drivers/ipvlan/ipvlan_state.go @@ -4,10 +4,11 @@ package ipvlan import ( + "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) func (d *driver) network(nid string) *network { @@ -15,7 +16,7 @@ func (d *driver) network(nid string) *network { n, ok := d.networks[nid] d.Unlock() if !ok { - logrus.Errorf("network id %s not found", nid) + log.G(context.TODO()).Errorf("network id %s not found", nid) } return n diff --git a/libnetwork/drivers/ipvlan/ipvlan_store.go b/libnetwork/drivers/ipvlan/ipvlan_store.go index 00421aa7f4..14b57ea4be 100644 --- a/libnetwork/drivers/ipvlan/ipvlan_store.go +++ b/libnetwork/drivers/ipvlan/ipvlan_store.go @@ -4,15 +4,16 @@ package ipvlan import ( + "context" "encoding/json" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -80,7 +81,7 @@ func (d *driver) populateNetworks() error { for _, kvo := range kvol { config := kvo.(*configuration) if _, err = d.createNetwork(config); err != nil { - logrus.Warnf("could not create ipvlan network for id %s from persistent state", config.ID) + log.G(context.TODO()).Warnf("could not create ipvlan network for id %s from persistent state", config.ID) } } @@ 
-101,15 +102,15 @@ func (d *driver) populateEndpoints() error { ep := kvo.(*endpoint) n, ok := d.networks[ep.nid] if !ok { - logrus.Debugf("Network (%.7s) not found for restored ipvlan endpoint (%.7s)", ep.nid, ep.id) - logrus.Debugf("Deleting stale ipvlan endpoint (%.7s) from store", ep.id) + log.G(context.TODO()).Debugf("Network (%.7s) not found for restored ipvlan endpoint (%.7s)", ep.nid, ep.id) + log.G(context.TODO()).Debugf("Deleting stale ipvlan endpoint (%.7s) from store", ep.id) if err := d.storeDelete(ep); err != nil { - logrus.Debugf("Failed to delete stale ipvlan endpoint (%.7s) from store", ep.id) + log.G(context.TODO()).Debugf("Failed to delete stale ipvlan endpoint (%.7s) from store", ep.id) } continue } n.endpoints[ep.id] = ep - logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) + log.G(context.TODO()).Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) } return nil @@ -118,7 +119,7 @@ func (d *driver) populateEndpoints() error { // storeUpdate used to update persistent ipvlan network records as they are created func (d *driver) storeUpdate(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Warnf("ipvlan store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Warnf("ipvlan store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) return nil } if err := d.store.PutObjectAtomic(kvObject); err != nil { @@ -131,7 +132,7 @@ func (d *driver) storeUpdate(kvObject datastore.KVObject) error { // storeDelete used to delete ipvlan network records from persistent cache as they are deleted func (d *driver) storeDelete(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Debugf("ipvlan store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Debugf("ipvlan store not initialized. 
kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) return nil } retry: diff --git a/libnetwork/drivers/macvlan/macvlan_endpoint.go b/libnetwork/drivers/macvlan/macvlan_endpoint.go index e564db1686..3e70252b2f 100644 --- a/libnetwork/drivers/macvlan/macvlan_endpoint.go +++ b/libnetwork/drivers/macvlan/macvlan_endpoint.go @@ -4,14 +4,15 @@ package macvlan import ( + "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) // CreateEndpoint assigns the mac, ip and endpoint id for the new container @@ -45,7 +46,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, if opt, ok := epOptions[netlabel.PortMap]; ok { if _, ok := opt.([]types.PortBinding); ok { if len(opt.([]types.PortBinding)) > 0 { - logrus.Warnf("macvlan driver does not support port mappings") + log.G(context.TODO()).Warnf("macvlan driver does not support port mappings") } } } @@ -53,7 +54,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, if opt, ok := epOptions[netlabel.ExposedPorts]; ok { if _, ok := opt.([]types.TransportPort); ok { if len(opt.([]types.TransportPort)) > 0 { - logrus.Warnf("macvlan driver does not support port exposures") + log.G(context.TODO()).Warnf("macvlan driver does not support port exposures") } } } @@ -82,12 +83,12 @@ func (d *driver) DeleteEndpoint(nid, eid string) error { } if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { if err := ns.NlHandle().LinkDel(link); err != nil { - logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + log.G(context.TODO()).WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) } 
} if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove macvlan endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove macvlan endpoint %.7s from store: %v", ep.id, err) } n.deleteEndpoint(ep.id) diff --git a/libnetwork/drivers/macvlan/macvlan_joinleave.go b/libnetwork/drivers/macvlan/macvlan_joinleave.go index 2756a2a79a..e1b2ac5265 100644 --- a/libnetwork/drivers/macvlan/macvlan_joinleave.go +++ b/libnetwork/drivers/macvlan/macvlan_joinleave.go @@ -4,13 +4,14 @@ package macvlan import ( + "context" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/ns" - "github.com/sirupsen/logrus" ) // Join method is invoked when a Sandbox is attached to an endpoint. @@ -54,7 +55,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, if err != nil { return err } - logrus.Debugf("Macvlan Endpoint Joined with IPv4_Addr: %s, Gateway: %s, MacVlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Macvlan Endpoint Joined with IPv4_Addr: %s, Gateway: %s, MacVlan_Mode: %s, Parent: %s", ep.addr.IP.String(), v4gw.String(), n.config.MacvlanMode, n.config.Parent) } // parse and match the endpoint address with the available v6 subnets @@ -71,16 +72,16 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, if err != nil { return err } - logrus.Debugf("Macvlan Endpoint Joined with IPv6_Addr: %s Gateway: %s MacVlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Macvlan Endpoint Joined with IPv6_Addr: %s Gateway: %s MacVlan_Mode: %s, Parent: %s", ep.addrv6.IP.String(), v6gw.String(), n.config.MacvlanMode, n.config.Parent) } } else { if len(n.config.Ipv4Subnets) > 0 { - logrus.Debugf("Macvlan Endpoint Joined with IPv4_Addr: %s, MacVlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Macvlan Endpoint Joined with IPv4_Addr: %s, 
MacVlan_Mode: %s, Parent: %s", ep.addr.IP.String(), n.config.MacvlanMode, n.config.Parent) } if len(n.config.Ipv6Subnets) > 0 { - logrus.Debugf("Macvlan Endpoint Joined with IPv6_Addr: %s MacVlan_Mode: %s, Parent: %s", + log.G(context.TODO()).Debugf("Macvlan Endpoint Joined with IPv6_Addr: %s MacVlan_Mode: %s, Parent: %s", ep.addrv6.IP.String(), n.config.MacvlanMode, n.config.Parent) } } diff --git a/libnetwork/drivers/macvlan/macvlan_network.go b/libnetwork/drivers/macvlan/macvlan_network.go index f1529c763e..6418135310 100644 --- a/libnetwork/drivers/macvlan/macvlan_network.go +++ b/libnetwork/drivers/macvlan/macvlan_network.go @@ -4,15 +4,16 @@ package macvlan import ( + "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" ) // CreateNetwork the network for the specified driver type @@ -45,7 +46,7 @@ func (d *driver) CreateNetwork(nid string, option map[string]interface{}, nInfo err = d.storeUpdate(config) if err != nil { d.deleteNetwork(config.ID) - logrus.Debugf("encountered an error rolling back a network create for %s : %v", config.ID, err) + log.G(context.TODO()).Debugf("encountered an error rolling back a network create for %s : %v", config.ID, err) return err } @@ -62,7 +63,7 @@ func (d *driver) createNetwork(config *configuration) (bool, error) { return false, fmt.Errorf("network %s is already using parent interface %s", getDummyName(stringid.TruncateID(nw.config.ID)), config.Parent) } - logrus.Debugf("Create Network for the same ID %s\n", config.ID) + log.G(context.TODO()).Debugf("Create Network for the same ID %s\n", config.ID) foundExisting = true break } @@ -77,7 +78,7 @@ func (d *driver) createNetwork(config *configuration) (bool, error) { 
config.CreatedSlaveLink = true // notify the user in logs that they have limited communications - logrus.Debugf("Empty -o parent= flags limit communications to other containers inside of network: %s", + log.G(context.TODO()).Debugf("Empty -o parent= flags limit communications to other containers inside of network: %s", config.Parent) } else { // if the subinterface parent_iface.vlan_id checks do not pass, return err. @@ -118,14 +119,14 @@ func (d *driver) DeleteNetwork(nid string) error { if n.config.Parent == getDummyName(stringid.TruncateID(nid)) { err := delDummyLink(n.config.Parent) if err != nil { - logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + log.G(context.TODO()).Debugf("link %s was not deleted, continuing the delete network operation: %v", n.config.Parent, err) } } else { // only delete the link if it matches iface.vlan naming err := delVlanLink(n.config.Parent) if err != nil { - logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + log.G(context.TODO()).Debugf("link %s was not deleted, continuing the delete network operation: %v", n.config.Parent, err) } } @@ -134,12 +135,12 @@ func (d *driver) DeleteNetwork(nid string) error { for _, ep := range n.endpoints { if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { if err := ns.NlHandle().LinkDel(link); err != nil { - logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + log.G(context.TODO()).WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) } } if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove macvlan endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove macvlan endpoint %.7s from store: %v", ep.id, err) } } // delete the *network diff --git a/libnetwork/drivers/macvlan/macvlan_setup.go b/libnetwork/drivers/macvlan/macvlan_setup.go index 
ec40207d41..ee267553a8 100644 --- a/libnetwork/drivers/macvlan/macvlan_setup.go +++ b/libnetwork/drivers/macvlan/macvlan_setup.go @@ -4,12 +4,13 @@ package macvlan import ( + "context" "fmt" "strconv" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/ns" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -102,7 +103,7 @@ func createVlanLink(parentName string) error { if err := ns.NlHandle().LinkSetUp(vlanLink); err != nil { return fmt.Errorf("failed to enable %s the macvlan parent link %v", vlanLink.Name, err) } - logrus.Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt) + log.G(context.TODO()).Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt) return nil } @@ -129,7 +130,7 @@ func delVlanLink(linkName string) error { if err := ns.NlHandle().LinkDel(vlanLink); err != nil { return fmt.Errorf("failed to delete %s link: %v", linkName, err) } - logrus.Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName) + log.G(context.TODO()).Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName) } // if the subinterface doesn't parse to iface.vlan_id leave the interface in // place since it could be a user specified name not created by the driver. 
@@ -195,7 +196,7 @@ func delDummyLink(linkName string) error { if err := ns.NlHandle().LinkDel(dummyLink); err != nil { return fmt.Errorf("failed to delete the dummy %s link: %v", linkName, err) } - logrus.Debugf("Deleted a dummy parent link: %s", linkName) + log.G(context.TODO()).Debugf("Deleted a dummy parent link: %s", linkName) return nil } diff --git a/libnetwork/drivers/macvlan/macvlan_state.go b/libnetwork/drivers/macvlan/macvlan_state.go index bb3e326867..45fa0921ca 100644 --- a/libnetwork/drivers/macvlan/macvlan_state.go +++ b/libnetwork/drivers/macvlan/macvlan_state.go @@ -4,10 +4,11 @@ package macvlan import ( + "context" "fmt" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) func (d *driver) network(nid string) *network { @@ -15,7 +16,7 @@ func (d *driver) network(nid string) *network { n, ok := d.networks[nid] d.Unlock() if !ok { - logrus.Errorf("network id %s not found", nid) + log.G(context.TODO()).Errorf("network id %s not found", nid) } return n diff --git a/libnetwork/drivers/macvlan/macvlan_store.go b/libnetwork/drivers/macvlan/macvlan_store.go index 89cad764b4..bd4aecf356 100644 --- a/libnetwork/drivers/macvlan/macvlan_store.go +++ b/libnetwork/drivers/macvlan/macvlan_store.go @@ -4,15 +4,16 @@ package macvlan import ( + "context" "encoding/json" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -79,7 +80,7 @@ func (d *driver) populateNetworks() error { for _, kvo := range kvol { config := kvo.(*configuration) if _, err = d.createNetwork(config); err != nil { - logrus.Warnf("Could not create macvlan network for id %s from persistent state", config.ID) + log.G(context.TODO()).Warnf("Could not create macvlan network for id %s from persistent 
state", config.ID) } } @@ -100,15 +101,15 @@ func (d *driver) populateEndpoints() error { ep := kvo.(*endpoint) n, ok := d.networks[ep.nid] if !ok { - logrus.Debugf("Network (%.7s) not found for restored macvlan endpoint (%.7s)", ep.nid, ep.id) - logrus.Debugf("Deleting stale macvlan endpoint (%.7s) from store", ep.id) + log.G(context.TODO()).Debugf("Network (%.7s) not found for restored macvlan endpoint (%.7s)", ep.nid, ep.id) + log.G(context.TODO()).Debugf("Deleting stale macvlan endpoint (%.7s) from store", ep.id) if err := d.storeDelete(ep); err != nil { - logrus.Debugf("Failed to delete stale macvlan endpoint (%.7s) from store", ep.id) + log.G(context.TODO()).Debugf("Failed to delete stale macvlan endpoint (%.7s) from store", ep.id) } continue } n.endpoints[ep.id] = ep - logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) + log.G(context.TODO()).Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) } return nil @@ -117,7 +118,7 @@ func (d *driver) populateEndpoints() error { // storeUpdate used to update persistent macvlan network records as they are created func (d *driver) storeUpdate(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Warnf("macvlan store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Warnf("macvlan store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) return nil } if err := d.store.PutObjectAtomic(kvObject); err != nil { @@ -130,7 +131,7 @@ func (d *driver) storeUpdate(kvObject datastore.KVObject) error { // storeDelete used to delete macvlan records from persistent cache as they are deleted func (d *driver) storeDelete(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Debugf("macvlan store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Debugf("macvlan store not initialized. 
kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) return nil } retry: diff --git a/libnetwork/drivers/overlay/encryption.go b/libnetwork/drivers/overlay/encryption.go index e459b83c30..68f9b20c2a 100644 --- a/libnetwork/drivers/overlay/encryption.go +++ b/libnetwork/drivers/overlay/encryption.go @@ -4,6 +4,7 @@ package overlay import ( "bytes" + "context" "encoding/binary" "encoding/hex" "fmt" @@ -13,11 +14,11 @@ import ( "sync" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/drivers/overlay/overlayutils" "github.com/docker/docker/libnetwork/iptables" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -112,7 +113,7 @@ func (e *encrMap) String() string { } func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) error { - logrus.Debugf("checkEncryption(%.7s, %v, %t)", nid, rIP, isLocal) + log.G(context.TODO()).Debugf("checkEncryption(%.7s, %v, %t)", nid, rIP, isLocal) n := d.network(nid) if n == nil || !n.secure { @@ -135,7 +136,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) erro } return false }); err != nil { - logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %.5s: %v", nid, err) + log.G(context.TODO()).Warnf("Failed to retrieve list of participating nodes in overlay network %.5s: %v", nid, err) } default: if len(d.network(nid).endpoints) > 0 { @@ -143,18 +144,18 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) erro } } - logrus.Debugf("List of nodes: %s", nodes) + log.G(context.TODO()).Debugf("List of nodes: %s", nodes) if add { for _, rIP := range nodes { if err := setupEncryption(lIP, aIP, rIP, d.secMap, d.keys); err != nil { - logrus.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err) + log.G(context.TODO()).Warnf("Failed to program network encryption 
between %s and %s: %v", lIP, rIP, err) } } } else { if len(nodes) == 0 { if err := removeEncryption(lIP, rIP, d.secMap); err != nil { - logrus.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err) + log.G(context.TODO()).Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err) } } } @@ -165,7 +166,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) erro // setupEncryption programs the encryption parameters for secure communication // between the local node and a remote node. func setupEncryption(localIP, advIP, remoteIP net.IP, em *encrMap, keys []*key) error { - logrus.Debugf("Programming encryption between %s and %s", localIP, remoteIP) + log.G(context.TODO()).Debugf("Programming encryption between %s and %s", localIP, remoteIP) rIPs := remoteIP.String() indices := make([]*spi, 0, len(keys)) @@ -178,7 +179,7 @@ func setupEncryption(localIP, advIP, remoteIP net.IP, em *encrMap, keys []*key) } fSA, rSA, err := programSA(localIP, remoteIP, spis, k, dir, true) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } indices = append(indices, spis) if i != 0 { @@ -186,7 +187,7 @@ func setupEncryption(localIP, advIP, remoteIP net.IP, em *encrMap, keys []*key) } err = programSP(fSA, rSA, true) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } } @@ -211,14 +212,14 @@ func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error { } fSA, rSA, err := programSA(localIP, remoteIP, idxs, nil, dir, false) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } if i != 0 { continue } err = programSP(fSA, rSA, false) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } } return nil @@ -315,9 +316,9 @@ func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (f } if add != exists { - logrus.Debugf("%s: rSA{%s}", action, rSA) + log.G(context.TODO()).Debugf("%s: rSA{%s}", action, rSA) if err := 
xfrmProgram(rSA); err != nil { - logrus.Warnf("Failed %s rSA{%s}: %v", action, rSA, err) + log.G(context.TODO()).Warnf("Failed %s rSA{%s}: %v", action, rSA, err) } } } @@ -341,9 +342,9 @@ func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (f } if add != exists { - logrus.Debugf("%s fSA{%s}", action, fSA) + log.G(context.TODO()).Debugf("%s fSA{%s}", action, fSA) if err := xfrmProgram(fSA); err != nil { - logrus.Warnf("Failed %s fSA{%s}: %v.", action, fSA, err) + log.G(context.TODO()).Warnf("Failed %s fSA{%s}: %v.", action, fSA, err) } } } @@ -389,9 +390,9 @@ func programSP(fSA *netlink.XfrmState, rSA *netlink.XfrmState, add bool) error { } if add != exists { - logrus.Debugf("%s fSP{%s}", action, fPol) + log.G(context.TODO()).Debugf("%s fSP{%s}", action, fPol) if err := xfrmProgram(fPol); err != nil { - logrus.Warnf("%s fSP{%s}: %v", action, fPol, err) + log.G(context.TODO()).Warnf("%s fSP{%s}: %v", action, fPol, err) } } @@ -407,7 +408,7 @@ func saExists(sa *netlink.XfrmState) (bool, error) { return false, nil default: err = fmt.Errorf("Error while checking for SA existence: %v", err) - logrus.Warn(err) + log.G(context.TODO()).Warn(err) return false, err } } @@ -421,7 +422,7 @@ func spExists(sp *netlink.XfrmPolicy) (bool, error) { return false, nil default: err = fmt.Errorf("Error while checking for SP existence: %v", err) - logrus.Warn(err) + log.G(context.TODO()).Warn(err) return false, err } } @@ -469,16 +470,16 @@ func (d *driver) setKeys(keys []*key) error { d.keys = keys d.secMap = &encrMap{nodes: map[string][]*spi{}} d.Unlock() - logrus.Debugf("Initial encryption keys: %v", keys) + log.G(context.TODO()).Debugf("Initial encryption keys: %v", keys) return nil } // updateKeys allows to add a new key and/or change the primary key and/or prune an existing key // The primary key is the key used in transmission and will go in first position in the list. 
func (d *driver) updateKeys(newKey, primary, pruneKey *key) error { - logrus.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey) + log.G(context.TODO()).Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey) - logrus.Debugf("Current: %v", d.keys) + log.G(context.TODO()).Debugf("Current: %v", d.keys) var ( newIdx = -1 @@ -533,7 +534,7 @@ func (d *driver) updateKeys(newKey, primary, pruneKey *key) error { d.keys = append(d.keys[:delIdx], d.keys[delIdx+1:]...) } - logrus.Debugf("Updated: %v", d.keys) + log.G(context.TODO()).Debugf("Updated: %v", d.keys) return nil } @@ -546,10 +547,10 @@ func (d *driver) updateKeys(newKey, primary, pruneKey *key) error { // Spis and keys are sorted in such away the one in position 0 is the primary func updateNodeKey(lIP, aIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi { - logrus.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx) + log.G(context.TODO()).Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx) spis := idxs - logrus.Debugf("Current: %v", spis) + log.G(context.TODO()).Debugf("Current: %v", spis) // add new if newIdx != -1 { @@ -596,9 +597,9 @@ func updateNodeKey(lIP, aIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, pr }, }, } - logrus.Debugf("Updating fSP{%s}", fSP1) + log.G(context.TODO()).Debugf("Updating fSP{%s}", fSP1) if err := ns.NlHandle().XfrmPolicyUpdate(fSP1); err != nil { - logrus.Warnf("Failed to update fSP{%s}: %v", fSP1, err) + log.G(context.TODO()).Warnf("Failed to update fSP{%s}: %v", fSP1, err) } // -fSA1 @@ -619,7 +620,7 @@ func updateNodeKey(lIP, aIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, pr spis = append(spis[:delIdx], spis[delIdx+1:]...) 
} - logrus.Debugf("Updated: %v", spis) + log.G(context.TODO()).Debugf("Updated: %v", spis) return spis } @@ -643,30 +644,30 @@ func clearEncryptionStates() { nlh := ns.NlHandle() spList, err := nlh.XfrmPolicyList(netlink.FAMILY_ALL) if err != nil { - logrus.Warnf("Failed to retrieve SP list for cleanup: %v", err) + log.G(context.TODO()).Warnf("Failed to retrieve SP list for cleanup: %v", err) } saList, err := nlh.XfrmStateList(netlink.FAMILY_ALL) if err != nil { - logrus.Warnf("Failed to retrieve SA list for cleanup: %v", err) + log.G(context.TODO()).Warnf("Failed to retrieve SA list for cleanup: %v", err) } for _, sp := range spList { sp := sp if sp.Mark != nil && sp.Mark.Value == spMark.Value { if err := nlh.XfrmPolicyDel(&sp); err != nil { - logrus.Warnf("Failed to delete stale SP %s: %v", sp, err) + log.G(context.TODO()).Warnf("Failed to delete stale SP %s: %v", sp, err) continue } - logrus.Debugf("Removed stale SP: %s", sp) + log.G(context.TODO()).Debugf("Removed stale SP: %s", sp) } } for _, sa := range saList { sa := sa if sa.Reqid == mark { if err := nlh.XfrmStateDel(&sa); err != nil { - logrus.Warnf("Failed to delete stale SA %s: %v", sa, err) + log.G(context.TODO()).Warnf("Failed to delete stale SA %s: %v", sa, err) continue } - logrus.Debugf("Removed stale SA: %s", sa) + log.G(context.TODO()).Debugf("Removed stale SA: %s", sa) } } } diff --git a/libnetwork/drivers/overlay/joinleave.go b/libnetwork/drivers/overlay/joinleave.go index 1d99e7d1a4..b8b8a7b6d4 100644 --- a/libnetwork/drivers/overlay/joinleave.go +++ b/libnetwork/drivers/overlay/joinleave.go @@ -3,15 +3,16 @@ package overlay import ( + "context" "fmt" "net" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/types" "github.com/gogo/protobuf/proto" - "github.com/sirupsen/logrus" ) // Join method is invoked when a Sandbox is attached to an endpoint. 
@@ -95,7 +96,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, continue } if err = jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil { - logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id) + log.G(context.TODO()).Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id) } } @@ -109,7 +110,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, d.peerAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), false, false, true) if err = d.checkEncryption(nid, nil, true, true); err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } buf, err := proto.Marshal(&PeerRecord{ @@ -122,7 +123,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, } if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil { - logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err) + log.G(context.TODO()).Errorf("overlay: Failed adding table entry to joininfo: %v", err) } return nil @@ -130,13 +131,13 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { if tablename != ovPeerTable { - logrus.Errorf("DecodeTableEntry: unexpected table name %s", tablename) + log.G(context.TODO()).Errorf("DecodeTableEntry: unexpected table name %s", tablename) return "", nil } var peer PeerRecord if err := proto.Unmarshal(value, &peer); err != nil { - logrus.Errorf("DecodeTableEntry: failed to unmarshal peer record for key %s: %v", key, err) + log.G(context.TODO()).Errorf("DecodeTableEntry: failed to unmarshal peer record for key %s: %v", key, err) return "", nil } @@ -147,7 +148,7 @@ func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (s func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key 
string, value []byte) { if tableName != ovPeerTable { - logrus.Errorf("Unexpected table notification for table %s received", tableName) + log.G(context.TODO()).Errorf("Unexpected table notification for table %s received", tableName) return } @@ -155,7 +156,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri var peer PeerRecord if err := proto.Unmarshal(value, &peer); err != nil { - logrus.Errorf("Failed to unmarshal peer record: %v", err) + log.G(context.TODO()).Errorf("Failed to unmarshal peer record: %v", err) return } @@ -167,19 +168,19 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri addr, err := types.ParseCIDR(peer.EndpointIP) if err != nil { - logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP) + log.G(context.TODO()).Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP) return } mac, err := net.ParseMAC(peer.EndpointMAC) if err != nil { - logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC) + log.G(context.TODO()).Errorf("Invalid mac %s received in event notify", peer.EndpointMAC) return } vtep := net.ParseIP(peer.TunnelEndpointIP) if vtep == nil { - logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP) + log.G(context.TODO()).Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP) return } diff --git a/libnetwork/drivers/overlay/ov_endpoint.go b/libnetwork/drivers/overlay/ov_endpoint.go index 4b51865c4c..1174d6b0eb 100644 --- a/libnetwork/drivers/overlay/ov_endpoint.go +++ b/libnetwork/drivers/overlay/ov_endpoint.go @@ -3,13 +3,14 @@ package overlay import ( + "context" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/ns" - "github.com/sirupsen/logrus" ) type endpointTable map[string]*endpoint @@ -112,11 +113,11 @@ func (d *driver) 
DeleteEndpoint(nid, eid string) error { link, err := nlh.LinkByName(ep.ifName) if err != nil { - logrus.Debugf("Failed to retrieve interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) + log.G(context.TODO()).Debugf("Failed to retrieve interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) return nil } if err := nlh.LinkDel(link); err != nil { - logrus.Debugf("Failed to delete interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) + log.G(context.TODO()).Debugf("Failed to delete interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) } return nil diff --git a/libnetwork/drivers/overlay/ov_network.go b/libnetwork/drivers/overlay/ov_network.go index b83b31345a..4de485ddc9 100644 --- a/libnetwork/drivers/overlay/ov_network.go +++ b/libnetwork/drivers/overlay/ov_network.go @@ -3,6 +3,7 @@ package overlay import ( + "context" "errors" "fmt" "net" @@ -13,6 +14,7 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/ns" @@ -108,7 +110,7 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d if !ok { return errors.New("no VNI provided") } - logrus.Debugf("overlay: Received vxlan IDs: %s", vnisOpt) + log.G(context.TODO()).Debugf("overlay: Received vxlan IDs: %s", vnisOpt) vniStrings := strings.Split(vnisOpt, ",") for _, vniStr := range vniStrings { vni, err := strconv.Atoi(vniStr) @@ -206,7 +208,7 @@ func (d *driver) DeleteNetwork(nid string) error { if ep.ifName != "" { if link, err := ns.NlHandle().LinkByName(ep.ifName); err == nil { if err := ns.NlHandle().LinkDel(link); err != nil { - logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.ifName, ep.id) + log.G(context.TODO()).WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.ifName, 
ep.id) } } } @@ -218,14 +220,14 @@ func (d *driver) DeleteNetwork(nid string) error { if n.secure { for _, s := range n.subnets { if err := programMangle(s.vni, false); err != nil { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ logrus.ErrorKey: err, "network_id": n.id, "subnet": s.subnetIP, }).Warn("Failed to clean up iptables rules during overlay network deletion") } if err := programInput(s.vni, false); err != nil { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ logrus.ErrorKey: err, "network_id": n.id, "subnet": s.subnetIP, @@ -317,7 +319,7 @@ func (n *network) destroySandbox() { if n.sbox != nil { for _, iface := range n.sbox.Info().Interfaces() { if err := iface.Remove(); err != nil { - logrus.Debugf("Remove interface %s failed: %v", iface.SrcName(), err) + log.G(context.TODO()).Debugf("Remove interface %s failed: %v", iface.SrcName(), err) } } @@ -325,7 +327,7 @@ func (n *network) destroySandbox() { if s.vxlanName != "" { err := deleteInterface(s.vxlanName) if err != nil { - logrus.Warnf("could not cleanup sandbox properly: %v", err) + log.G(context.TODO()).Warnf("could not cleanup sandbox properly: %v", err) } } } @@ -348,26 +350,26 @@ func populateVNITbl() { n, err := netns.GetFromPath(path) if err != nil { - logrus.Errorf("Could not open namespace path %s during vni population: %v", path, err) + log.G(context.TODO()).Errorf("Could not open namespace path %s during vni population: %v", path, err) return nil } defer n.Close() nlh, err := netlink.NewHandleAt(n, unix.NETLINK_ROUTE) if err != nil { - logrus.Errorf("Could not open netlink handle during vni population for ns %s: %v", path, err) + log.G(context.TODO()).Errorf("Could not open netlink handle during vni population for ns %s: %v", path, err) return nil } defer nlh.Close() err = nlh.SetSocketTimeout(soTimeout) if err != nil { - logrus.Warnf("Failed to set the timeout on the netlink handle sockets for vni table population: %v", 
err) + log.G(context.TODO()).Warnf("Failed to set the timeout on the netlink handle sockets for vni table population: %v", err) } links, err := nlh.LinkList() if err != nil { - logrus.Errorf("Failed to list interfaces during vni population for ns %s: %v", path, err) + log.G(context.TODO()).Errorf("Failed to list interfaces during vni population for ns %s: %v", path, err) return nil } @@ -416,7 +418,7 @@ func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error if ok { deleteVxlanByVNI(path, s.vni) if err := unix.Unmount(path, unix.MNT_FORCE); err != nil { - logrus.Errorf("unmount of %s failed: %v", path, err) + log.G(context.TODO()).Errorf("unmount of %s failed: %v", path, err) } os.Remove(path) @@ -446,7 +448,7 @@ func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error for _, iface := range sbox.Info().Interfaces() { if iface.SrcName() == brName { if ierr := iface.Remove(); ierr != nil { - logrus.Errorf("removing bridge failed from ov ns %v failed, %v", n.sbox.Key(), ierr) + log.G(context.TODO()).Errorf("removing bridge failed from ov ns %v failed, %v", n.sbox.Key(), ierr) } } } @@ -456,14 +458,14 @@ func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error // failure of vxlan device creation if the vni is assigned to some other // network. 
if deleteErr := deleteInterface(vxlanName); deleteErr != nil { - logrus.Warnf("could not delete vxlan interface, %s, error %v, after config error, %v", vxlanName, deleteErr, err) + log.G(context.TODO()).Warnf("could not delete vxlan interface, %s, error %v, after config error, %v", vxlanName, deleteErr, err) } return fmt.Errorf("vxlan interface creation failed for subnet %q: %v", s.subnetIP.String(), err) } if err := setDefaultVLAN(sbox); err != nil { // not a fatal error - logrus.WithError(err).Error("set bridge default vlan failed") + log.G(context.TODO()).WithError(err).Error("set bridge default vlan failed") } return nil } diff --git a/libnetwork/drivers/overlay/ov_utils.go b/libnetwork/drivers/overlay/ov_utils.go index 21467cfc67..80986bbc8a 100644 --- a/libnetwork/drivers/overlay/ov_utils.go +++ b/libnetwork/drivers/overlay/ov_utils.go @@ -3,13 +3,14 @@ package overlay import ( + "context" "fmt" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/drivers/overlay/overlayutils" "github.com/docker/docker/libnetwork/netutils" "github.com/docker/docker/libnetwork/ns" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/vishvananda/netns" ) @@ -101,7 +102,7 @@ func deleteVxlanByVNI(path string, vni uint32) error { defer nlh.Close() err = nlh.SetSocketTimeout(soTimeout) if err != nil { - logrus.Warnf("Failed to set the timeout on the netlink handle sockets for vxlan deletion: %v", err) + log.G(context.TODO()).Warnf("Failed to set the timeout on the netlink handle sockets for vxlan deletion: %v", err) } } diff --git a/libnetwork/drivers/overlay/overlay.go b/libnetwork/drivers/overlay/overlay.go index 4422cef8c7..89fc26199c 100644 --- a/libnetwork/drivers/overlay/overlay.go +++ b/libnetwork/drivers/overlay/overlay.go @@ -5,13 +5,14 @@ package overlay //go:generate protoc -I=. -I=../../../vendor/ --gogofaster_out=import_path=github.com/docker/docker/libnetwork/drivers/overlay:. 
overlay.proto import ( + "context" "fmt" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/driverapi" - "github.com/sirupsen/logrus" ) const ( @@ -107,7 +108,7 @@ func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) keys = append(keys, k) } if err := d.setKeys(keys); err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } case discoverapi.EncryptionKeysUpdate: var newKey, delKey, priKey *key diff --git a/libnetwork/drivers/overlay/ovmanager/ovmanager.go b/libnetwork/drivers/overlay/ovmanager/ovmanager.go index dc5c621847..ccd075a00f 100644 --- a/libnetwork/drivers/overlay/ovmanager/ovmanager.go +++ b/libnetwork/drivers/overlay/ovmanager/ovmanager.go @@ -1,19 +1,20 @@ package ovmanager import ( + "context" "fmt" "net" "strconv" "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/idm" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -91,7 +92,7 @@ func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, vxlanIDList := make([]uint32, 0, len(ipV4Data)) for key, val := range option { if key == netlabel.OverlayVxlanIDList { - logrus.Debugf("overlay network option: %s", val) + log.G(context.TODO()).Debugf("overlay network option: %s", val) valStrList := strings.Split(val, ",") for _, idStr := range valStrList { vni, err := strconv.Atoi(idStr) diff --git a/libnetwork/drivers/overlay/peerdb.go b/libnetwork/drivers/overlay/peerdb.go index b233680a76..f508593b48 100644 --- a/libnetwork/drivers/overlay/peerdb.go +++ b/libnetwork/drivers/overlay/peerdb.go @@ -3,14 +3,15 @@ package overlay import ( + "context" "fmt" 
"net" "sync" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/internal/setmatrix" "github.com/docker/docker/libnetwork/osl" - "github.com/sirupsen/logrus" ) const ovPeerTable = "overlay_peer_table" @@ -131,7 +132,7 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool var pKey peerKey pEntry := pEntry if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil { - logrus.Warnf("Peer key scan on network %s failed: %v", nid, err) + log.G(context.TODO()).Warnf("Peer key scan on network %s failed: %v", nid, err) } if f(&pKey, &pEntry) { return nil @@ -192,7 +193,7 @@ func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask if i != 1 { // Transient case, there is more than one endpoint that is using the same IP,MAC pair s, _ := pMap.mp.String(pKey.String()) - logrus.Warnf("peerDbAdd transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s) + log.G(context.TODO()).Warnf("peerDbAdd transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s) } return b, i } @@ -224,7 +225,7 @@ func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPM if i != 0 { // Transient case, there is more than one endpoint that is using the same IP,MAC pair s, _ := pMap.mp.String(pKey.String()) - logrus.Warnf("peerDbDelete transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s) + log.G(context.TODO()).Warnf("peerDbDelete transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s) } return b, i } @@ -244,7 +245,7 @@ func (d *driver) initSandboxPeerDB(nid string) { d.peerOpMu.Lock() defer d.peerOpMu.Unlock() if err := d.peerInitOp(nid); err != nil { - logrus.WithError(err).Warn("Peer init operation failed") + log.G(context.TODO()).WithError(err).Warn("Peer init operation failed") } } @@ -267,7 +268,7 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, defer d.peerOpMu.Unlock() err := 
d.peerAddOp(nid, eid, peerIP, peerIPMask, peerMac, vtep, l2Miss, l3Miss, true, localPeer) if err != nil { - logrus.WithError(err).Warn("Peer add operation failed") + log.G(context.TODO()).WithError(err).Warn("Peer add operation failed") } } @@ -281,7 +282,7 @@ func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask if updateDB { inserted, dbEntries = d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer) if !inserted { - logrus.Warnf("Entry already present in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v", + log.G(context.TODO()).Warnf("Entry already present in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v", nid, eid, peerIP, peerMac, localPeer, vtep) } } @@ -319,7 +320,7 @@ func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask } if err := d.checkEncryption(nid, vtep, false, true); err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } // Add neighbor entry for the peer IP @@ -348,7 +349,7 @@ func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMas defer d.peerOpMu.Unlock() err := d.peerDeleteOp(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer) if err != nil { - logrus.WithError(err).Warn("Peer delete operation failed") + log.G(context.TODO()).WithError(err).Warn("Peer delete operation failed") } } @@ -359,7 +360,7 @@ func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPM deleted, dbEntries := d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer) if !deleted { - logrus.Warnf("Entry was not in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v", + log.G(context.TODO()).Warnf("Entry was not in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v", nid, eid, peerIP, peerMac, localPeer, vtep) } @@ -374,7 +375,7 @@ func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPM } if err := d.checkEncryption(nid, vtep, localPeer, false); err != nil { - logrus.Warn(err) + 
log.G(context.TODO()).Warn(err) } // Local peers do not have any local configuration to delete @@ -404,7 +405,7 @@ func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPM // Restore one configuration for the directly from the database, note that is guaranteed that there is one peerKey, peerEntry, err := d.peerDbSearch(nid, peerIP) if err != nil { - logrus.Errorf("peerDeleteOp unable to restore a configuration for nid:%s ip:%v mac:%v err:%s", nid, peerIP, peerMac, err) + log.G(context.TODO()).Errorf("peerDeleteOp unable to restore a configuration for nid:%s ip:%v mac:%v err:%s", nid, peerIP, peerMac, err) return err } return d.peerAddOp(nid, peerEntry.eid, peerIP, peerEntry.peerIPMask, peerKey.peerMac, peerEntry.vtep, false, false, false, peerEntry.isLocal) @@ -414,7 +415,7 @@ func (d *driver) peerFlush(nid string) { d.peerOpMu.Lock() defer d.peerOpMu.Unlock() if err := d.peerFlushOp(nid); err != nil { - logrus.WithError(err).Warn("Peer flush operation failed") + log.G(context.TODO()).WithError(err).Warn("Peer flush operation failed") } } diff --git a/libnetwork/drivers/remote/driver.go b/libnetwork/drivers/remote/driver.go index 99cdb0cf67..3a848e45d3 100644 --- a/libnetwork/drivers/remote/driver.go +++ b/libnetwork/drivers/remote/driver.go @@ -1,9 +1,11 @@ package remote import ( + "context" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/driverapi" @@ -12,7 +14,6 @@ import ( "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type driver struct { @@ -44,11 +45,11 @@ func Register(r driverapi.Registerer, pg plugingetter.PluginGetter) error { d := newDriver(name, client) c, err := d.(*driver).getCapabilities() if err != nil { - logrus.Errorf("error getting capability for %s due to %v", name, err) + 
log.G(context.TODO()).Errorf("error getting capability for %s due to %v", name, err) return } if err = r.RegisterDriver(name, d, *c); err != nil { - logrus.Errorf("error registering driver for %s due to %v", name, err) + log.G(context.TODO()).Errorf("error registering driver for %s due to %v", name, err) } } diff --git a/libnetwork/drivers/windows/overlay/joinleave_windows.go b/libnetwork/drivers/windows/overlay/joinleave_windows.go index 44b132cc90..1eddd23e60 100644 --- a/libnetwork/drivers/windows/overlay/joinleave_windows.go +++ b/libnetwork/drivers/windows/overlay/joinleave_windows.go @@ -1,13 +1,14 @@ package overlay import ( + "context" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/types" "github.com/gogo/protobuf/proto" - "github.com/sirupsen/logrus" ) // Join method is invoked when a Sandbox is attached to an endpoint. @@ -37,7 +38,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, } if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil { - logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err) + log.G(context.TODO()).Errorf("overlay: Failed adding table entry to joininfo: %v", err) } if ep.disablegateway { @@ -49,7 +50,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { if tableName != ovPeerTable { - logrus.Errorf("Unexpected table notification for table %s received", tableName) + log.G(context.TODO()).Errorf("Unexpected table notification for table %s received", tableName) return } @@ -57,7 +58,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri var peer PeerRecord if err := proto.Unmarshal(value, &peer); err != nil { - logrus.Errorf("Failed to unmarshal peer record: %v", err) + log.G(context.TODO()).Errorf("Failed to unmarshal peer 
record: %v", err) return } @@ -74,19 +75,19 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri addr, err := types.ParseCIDR(peer.EndpointIP) if err != nil { - logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP) + log.G(context.TODO()).Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP) return } mac, err := net.ParseMAC(peer.EndpointMAC) if err != nil { - logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC) + log.G(context.TODO()).Errorf("Invalid mac %s received in event notify", peer.EndpointMAC) return } vtep := net.ParseIP(peer.TunnelEndpointIP) if vtep == nil { - logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP) + log.G(context.TODO()).Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP) return } @@ -97,7 +98,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri err = d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true) if err != nil { - logrus.Errorf("peerAdd failed (%v) for ip %s with mac %s", err, addr.IP.String(), mac.String()) + log.G(context.TODO()).Errorf("peerAdd failed (%v) for ip %s with mac %s", err, addr.IP.String(), mac.String()) } } diff --git a/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go b/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go index 6453c74156..c1c5be533e 100644 --- a/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go +++ b/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go @@ -1,6 +1,7 @@ package overlay import ( + "context" "encoding/json" "fmt" "net" @@ -8,11 +9,11 @@ import ( "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) 
type endpointTable map[string]*endpoint @@ -84,10 +85,10 @@ func (n *network) removeEndpointWithAddress(addr *net.IPNet) { n.Unlock() if networkEndpoint != nil { - logrus.Debugf("Removing stale endpoint from HNS") + log.G(context.TODO()).Debugf("Removing stale endpoint from HNS") _, err := endpointRequest("DELETE", networkEndpoint.profileID, "") if err != nil { - logrus.Debugf("Failed to delete stale overlay endpoint (%.7s) from hns", networkEndpoint.id) + log.G(context.TODO()).Debugf("Failed to delete stale overlay endpoint (%.7s) from hns", networkEndpoint.id) } } } @@ -106,7 +107,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, ep := n.endpoint(eid) if ep != nil { - logrus.Debugf("Deleting stale endpoint %s", eid) + log.G(context.TODO()).Debugf("Deleting stale endpoint %s", eid) n.deleteEndpoint(eid) _, err := endpointRequest("DELETE", ep.profileID, "") if err != nil { diff --git a/libnetwork/drivers/windows/overlay/ov_network_windows.go b/libnetwork/drivers/windows/overlay/ov_network_windows.go index 4dec6b56a3..fbea159115 100644 --- a/libnetwork/drivers/windows/overlay/ov_network_windows.go +++ b/libnetwork/drivers/windows/overlay/ov_network_windows.go @@ -1,6 +1,7 @@ package overlay import ( + "context" "encoding/json" "fmt" "net" @@ -9,11 +10,11 @@ import ( "sync" "github.com/Microsoft/hcsshim" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/portmapper" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) var ( @@ -83,10 +84,10 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d existingNetwork := d.network(id) if existingNetwork != nil { - logrus.Debugf("Network preexists. Deleting %s", id) + log.G(context.TODO()).Debugf("Network preexists. 
Deleting %s", id) err := d.DeleteNetwork(id) if err != nil { - logrus.Errorf("Error deleting stale network %s", err.Error()) + log.G(context.TODO()).Errorf("Error deleting stale network %s", err.Error()) } } @@ -237,7 +238,7 @@ func (d *driver) network(nid string) *network { } // func (n *network) restoreNetworkEndpoints() error { -// logrus.Infof("Restoring endpoints for overlay network: %s", n.id) +// log.G(ctx).Infof("Restoring endpoints for overlay network: %s", n.id) // hnsresponse, err := hcsshim.HNSListEndpointRequest("GET", "", "") // if err != nil { @@ -252,7 +253,7 @@ func (d *driver) network(nid string) *network { // ep := n.convertToOverlayEndpoint(&endpoint) // if ep != nil { -// logrus.Debugf("Restored endpoint:%s Remote:%t", ep.id, ep.remote) +// log.G(ctx).Debugf("Restored endpoint:%s Remote:%t", ep.id, ep.remote) // n.addEndpoint(ep) // } // } @@ -323,7 +324,7 @@ func (d *driver) createHnsNetwork(n *network) error { } configuration := string(configurationb) - logrus.Infof("HNSNetwork Request =%v", configuration) + log.G(context.TODO()).Infof("HNSNetwork Request =%v", configuration) hnsresponse, err := hcsshim.HNSNetworkRequest("POST", "", configuration) if err != nil { diff --git a/libnetwork/drivers/windows/overlay/overlay_windows.go b/libnetwork/drivers/windows/overlay/overlay_windows.go index e44d2ea9ff..39418a6341 100644 --- a/libnetwork/drivers/windows/overlay/overlay_windows.go +++ b/libnetwork/drivers/windows/overlay/overlay_windows.go @@ -3,16 +3,17 @@ package overlay //go:generate protoc -I=. -I=../../../../vendor/ --gogo_out=import_path=github.com/docker/docker/libnetwork/drivers/overlay:. 
overlay.proto import ( + "context" "encoding/json" "net" "sync" "github.com/Microsoft/hcsshim" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -43,7 +44,7 @@ func Register(r driverapi.Registerer, config map[string]interface{}) error { } func (d *driver) restoreHNSNetworks() error { - logrus.Infof("Restoring existing overlay networks from HNS into docker") + log.G(context.TODO()).Infof("Restoring existing overlay networks from HNS into docker") hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { @@ -55,7 +56,7 @@ func (d *driver) restoreHNSNetworks() error { continue } - logrus.Infof("Restoring overlay network: %s", v.Name) + log.G(context.TODO()).Infof("Restoring overlay network: %s", v.Name) n := d.convertToOverlayNetwork(&v) d.addNetwork(n) @@ -96,7 +97,7 @@ func (d *driver) convertToOverlayNetwork(v *hcsshim.HNSNetwork) *network { _, subnetIP, err := net.ParseCIDR(hnsSubnet.AddressPrefix) if err != nil { - logrus.Errorf("Error parsing subnet address %s ", hnsSubnet.AddressPrefix) + log.G(context.TODO()).Errorf("Error parsing subnet address %s ", hnsSubnet.AddressPrefix) continue } diff --git a/libnetwork/drivers/windows/overlay/peerdb_windows.go b/libnetwork/drivers/windows/overlay/peerdb_windows.go index 983acf172f..52621bf2eb 100644 --- a/libnetwork/drivers/windows/overlay/peerdb_windows.go +++ b/libnetwork/drivers/windows/overlay/peerdb_windows.go @@ -1,13 +1,14 @@ package overlay import ( + "context" "fmt" "net" "encoding/json" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim" ) @@ -17,7 +18,7 @@ const ovPeerTable = "overlay_peer_table" func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask 
net.IPMask, peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error { - logrus.Debugf("WINOVERLAY: Enter peerAdd for ca ip %s with ca mac %s", peerIP.String(), peerMac.String()) + log.G(context.TODO()).Debugf("WINOVERLAY: Enter peerAdd for ca ip %s with ca mac %s", peerIP.String(), peerMac.String()) if err := validateID(nid, eid); err != nil { return err @@ -29,7 +30,7 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, } if updateDb { - logrus.Info("WINOVERLAY: peerAdd: notifying HNS of the REMOTE endpoint") + log.G(context.TODO()).Info("WINOVERLAY: peerAdd: notifying HNS of the REMOTE endpoint") hnsEndpoint := &hcsshim.HNSEndpoint{ Name: eid, @@ -90,7 +91,7 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error { - logrus.Infof("WINOVERLAY: Enter peerDelete for endpoint %s and peer ip %s", eid, peerIP.String()) + log.G(context.TODO()).Infof("WINOVERLAY: Enter peerDelete for endpoint %s and peer ip %s", eid, peerIP.String()) if err := validateID(nid, eid); err != nil { return err diff --git a/libnetwork/drivers/windows/port_mapping.go b/libnetwork/drivers/windows/port_mapping.go index e9037c88f6..e327f2c473 100644 --- a/libnetwork/drivers/windows/port_mapping.go +++ b/libnetwork/drivers/windows/port_mapping.go @@ -4,14 +4,15 @@ package windows import ( "bytes" + "context" "errors" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/portmapper" "github.com/docker/docker/libnetwork/types" "github.com/ishidawataru/sctp" - "github.com/sirupsen/logrus" ) const ( @@ -33,7 +34,7 @@ func AllocatePorts(portMapper *portmapper.PortMapper, bindings []types.PortBindi if err := allocatePort(portMapper, &b, containerIP); err != nil { // On allocation failure, release previously allocated ports. 
On cleanup error, just log a warning message if cuErr := ReleasePorts(portMapper, bs); cuErr != nil { - logrus.Warnf("Upon allocation failure for %v, failed to clear previously allocated port bindings: %v", b, cuErr) + log.G(context.TODO()).Warnf("Upon allocation failure for %v, failed to clear previously allocated port bindings: %v", b, cuErr) } return nil, err } @@ -75,10 +76,10 @@ func allocatePort(portMapper *portmapper.PortMapper, bnd *types.PortBinding, con } // There is no point in immediately retrying to map an explicitly chosen port. if bnd.HostPort != 0 { - logrus.Warnf("Failed to allocate and map port %d-%d: %s", bnd.HostPort, bnd.HostPortEnd, err) + log.G(context.TODO()).Warnf("Failed to allocate and map port %d-%d: %s", bnd.HostPort, bnd.HostPortEnd, err) break } - logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1) + log.G(context.TODO()).Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1) } if err != nil { return err diff --git a/libnetwork/drivers/windows/windows.go b/libnetwork/drivers/windows/windows.go index 707e788404..319f74c8c1 100644 --- a/libnetwork/drivers/windows/windows.go +++ b/libnetwork/drivers/windows/windows.go @@ -12,6 +12,7 @@ package windows import ( + "context" "encoding/json" "fmt" "net" @@ -20,13 +21,13 @@ import ( "sync" "github.com/Microsoft/hcsshim" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/driverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/portmapper" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) // networkConfiguration for network specific configuration @@ -354,7 +355,7 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d } configuration := string(configurationb) - logrus.Debugf("HNSNetwork Request =%v Address Space=%v", configuration, subnets) + 
log.G(context.TODO()).Debugf("HNSNetwork Request =%v Address Space=%v", configuration, subnets) hnsresponse, err := hcsshim.HNSNetworkRequest("POST", "", configuration) if err != nil { @@ -399,15 +400,15 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d if endpoints, err := hcsshim.HNSListEndpointRequest(); err == nil { for _, ep := range endpoints { if ep.VirtualNetwork == config.HnsID { - logrus.Infof("Removing stale HNS endpoint %s", ep.Id) + log.G(context.TODO()).Infof("Removing stale HNS endpoint %s", ep.Id) _, err = hcsshim.HNSEndpointRequest("DELETE", ep.Id, "") if err != nil { - logrus.Warnf("Error removing HNS endpoint %s", ep.Id) + log.G(context.TODO()).Warnf("Error removing HNS endpoint %s", ep.Id) } } } } else { - logrus.Warnf("Error listing HNS endpoints for network %s", config.HnsID) + log.G(context.TODO()).Warnf("Error listing HNS endpoints for network %s", config.HnsID) } n.created = true @@ -440,7 +441,7 @@ func (d *driver) DeleteNetwork(nid string) error { // delele endpoints belong to this network for _, ep := range n.endpoints { if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) } } @@ -675,13 +676,13 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, // overwrite the ep DisableDNS option if DisableGatewayDNS was set to true during the network creation option if n.config.DisableGatewayDNS { - logrus.Debugf("n.config.DisableGatewayDNS[%v] overwrites epOption.DisableDNS[%v]", n.config.DisableGatewayDNS, epOption.DisableDNS) + log.G(context.TODO()).Debugf("n.config.DisableGatewayDNS[%v] overwrites epOption.DisableDNS[%v]", n.config.DisableGatewayDNS, epOption.DisableDNS) epOption.DisableDNS = n.config.DisableGatewayDNS } if n.driver.name == "nat" && !epOption.DisableDNS { endpointStruct.EnableInternalDNS = true - 
logrus.Debugf("endpointStruct.EnableInternalDNS =[%v]", endpointStruct.EnableInternalDNS) + log.G(context.TODO()).Debugf("endpointStruct.EnableInternalDNS =[%v]", endpointStruct.EnableInternalDNS) } endpointStruct.DisableICC = epOption.DisableICC @@ -750,7 +751,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, } if err = d.storeUpdate(endpoint); err != nil { - logrus.Errorf("Failed to save endpoint %.7s to store: %v", endpoint.id, err) + log.G(context.TODO()).Errorf("Failed to save endpoint %.7s to store: %v", endpoint.id, err) } return nil @@ -781,7 +782,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error { } if err := d.storeDelete(ep); err != nil { - logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) + log.G(context.TODO()).Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err) } return nil } diff --git a/libnetwork/drivers/windows/windows_store.go b/libnetwork/drivers/windows/windows_store.go index 988bf409fe..fb453b9005 100644 --- a/libnetwork/drivers/windows/windows_store.go +++ b/libnetwork/drivers/windows/windows_store.go @@ -3,15 +3,16 @@ package windows import ( + "context" "encoding/json" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -62,7 +63,7 @@ func (d *driver) populateNetworks() error { continue } d.createNetwork(ncfg) - logrus.Debugf("Network %v (%.7s) restored", d.name, ncfg.ID) + log.G(context.TODO()).Debugf("Network %v (%.7s) restored", d.name, ncfg.ID) } return nil @@ -85,15 +86,15 @@ func (d *driver) populateEndpoints() error { } n, ok := d.networks[ep.nid] if !ok { - logrus.Debugf("Network (%.7s) not found for restored endpoint (%.7s)", ep.nid, ep.id) - logrus.Debugf("Deleting stale endpoint (%.7s) from store", 
ep.id) + log.G(context.TODO()).Debugf("Network (%.7s) not found for restored endpoint (%.7s)", ep.nid, ep.id) + log.G(context.TODO()).Debugf("Deleting stale endpoint (%.7s) from store", ep.id) if err := d.storeDelete(ep); err != nil { - logrus.Debugf("Failed to delete stale endpoint (%.7s) from store", ep.id) + log.G(context.TODO()).Debugf("Failed to delete stale endpoint (%.7s) from store", ep.id) } continue } n.endpoints[ep.id] = ep - logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) + log.G(context.TODO()).Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) } return nil @@ -101,7 +102,7 @@ func (d *driver) populateEndpoints() error { func (d *driver) storeUpdate(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Warnf("store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Warnf("store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) return nil } @@ -114,7 +115,7 @@ func (d *driver) storeUpdate(kvObject datastore.KVObject) error { func (d *driver) storeDelete(kvObject datastore.KVObject) error { if d.store == nil { - logrus.Debugf("store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + log.G(context.TODO()).Debugf("store not initialized. 
kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) return nil } @@ -259,7 +260,7 @@ func (ep *hnsEndpoint) UnmarshalJSON(b []byte) error { } if v, ok := epMap["Addr"]; ok { if ep.addr, err = types.ParseCIDR(v.(string)); err != nil { - logrus.Warnf("failed to decode endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err) + log.G(context.TODO()).Warnf("failed to decode endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err) } } if v, ok := epMap["gateway"]; ok { @@ -271,15 +272,15 @@ func (ep *hnsEndpoint) UnmarshalJSON(b []byte) error { ep.profileID = epMap["profileID"].(string) d, _ := json.Marshal(epMap["epOption"]) if err := json.Unmarshal(d, &ep.epOption); err != nil { - logrus.Warnf("Failed to decode endpoint container config %v", err) + log.G(context.TODO()).Warnf("Failed to decode endpoint container config %v", err) } d, _ = json.Marshal(epMap["epConnectivity"]) if err := json.Unmarshal(d, &ep.epConnectivity); err != nil { - logrus.Warnf("Failed to decode endpoint external connectivity configuration %v", err) + log.G(context.TODO()).Warnf("Failed to decode endpoint external connectivity configuration %v", err) } d, _ = json.Marshal(epMap["PortMapping"]) if err := json.Unmarshal(d, &ep.portMapping); err != nil { - logrus.Warnf("Failed to decode endpoint port mapping %v", err) + log.G(context.TODO()).Warnf("Failed to decode endpoint port mapping %v", err) } return nil diff --git a/libnetwork/endpoint.go b/libnetwork/endpoint.go index e0cab348e3..ee70f8657a 100644 --- a/libnetwork/endpoint.go +++ b/libnetwork/endpoint.go @@ -1,17 +1,18 @@ package libnetwork import ( + "context" "encoding/json" "fmt" "net" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/ipamapi" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" 
) // EndpointOption is an option setter function type used to pass various options to Network @@ -118,12 +119,12 @@ func (ep *Endpoint) UnmarshalJSON(b []byte) (err error) { bytes, err := json.Marshal(tmp) if err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) break } err = json.Unmarshal(bytes, &pb) if err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) break } pblist = append(pblist, pb) @@ -140,12 +141,12 @@ func (ep *Endpoint) UnmarshalJSON(b []byte) (err error) { bytes, err := json.Marshal(tmp) if err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) break } err = json.Unmarshal(bytes, &tp) if err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) break } tplist = append(tplist, tp) @@ -450,7 +451,7 @@ func (ep *Endpoint) sbJoin(sb *Sandbox, options ...EndpointOption) (err error) { defer func() { if err != nil { if e := d.Leave(nid, epid); e != nil { - logrus.Warnf("driver leave failed while rolling back join: %v", e) + log.G(context.TODO()).Warnf("driver leave failed while rolling back join: %v", e) } } }() @@ -502,7 +503,7 @@ func (ep *Endpoint) sbJoin(sb *Sandbox, options ...EndpointOption) (err error) { defer func() { if err != nil { if e := ep.deleteDriverInfoFromCluster(); e != nil { - logrus.Errorf("Could not delete endpoint state for endpoint %s from cluster on join failure: %v", ep.Name(), e) + log.G(context.TODO()).Errorf("Could not delete endpoint state for endpoint %s from cluster on join failure: %v", ep.Name(), e) } } }() @@ -521,7 +522,7 @@ func (ep *Endpoint) sbJoin(sb *Sandbox, options ...EndpointOption) (err error) { if moveExtConn { if extEp != nil { - logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID()) + log.G(context.TODO()).Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID()) extN, err := extEp.getNetworkFromStore() if err != nil { return fmt.Errorf("failed to get network from store for revoking external 
connectivity during join: %v", err) @@ -538,14 +539,14 @@ func (ep *Endpoint) sbJoin(sb *Sandbox, options ...EndpointOption) (err error) { defer func() { if err != nil { if e := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); e != nil { - logrus.Warnf("Failed to roll-back external connectivity on endpoint %s (%s): %v", + log.G(context.TODO()).Warnf("Failed to roll-back external connectivity on endpoint %s (%s): %v", extEp.Name(), extEp.ID(), e) } } }() } if !n.internal { - logrus.Debugf("Programming external connectivity on endpoint %s (%s)", ep.Name(), ep.ID()) + log.G(context.TODO()).Debugf("Programming external connectivity on endpoint %s (%s)", ep.Name(), ep.ID()) if err = d.ProgramExternalConnectivity(n.ID(), ep.ID(), sb.Labels()); err != nil { return types.InternalErrorf( "driver failed programming external connectivity on endpoint %s (%s): %v", @@ -556,7 +557,7 @@ func (ep *Endpoint) sbJoin(sb *Sandbox, options ...EndpointOption) (err error) { if !sb.needDefaultGW() { if e := sb.clearDefaultGW(); e != nil { - logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v", + log.G(context.TODO()).Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v", sb.ID(), sb.ContainerID(), e) } } @@ -580,7 +581,7 @@ func (ep *Endpoint) rename(name string) error { sb, ok := ep.getSandbox() if !ok { - logrus.Warnf("rename for %s aborted, sandbox %s is not anymore present", ep.ID(), ep.sandboxID) + log.G(context.TODO()).Warnf("rename for %s aborted, sandbox %s is not anymore present", ep.ID(), ep.sandboxID) return nil } @@ -610,12 +611,12 @@ func (ep *Endpoint) rename(name string) error { defer func() { if err != nil { if err2 := ep.deleteServiceInfoFromCluster(sb, true, "rename"); err2 != nil { - logrus.WithField("main error", err).WithError(err2).Debug("Error during cleanup due deleting service info from cluster while cleaning up due to other error") + log.G(context.TODO()).WithField("main error", 
err).WithError(err2).Debug("Error during cleanup due deleting service info from cluster while cleaning up due to other error") } ep.name = oldName ep.anonymous = oldAnonymous if err2 := ep.addServiceInfoToCluster(sb); err2 != nil { - logrus.WithField("main error", err).WithError(err2).Debug("Error during cleanup due adding service to from cluster while cleaning up due to other error") + log.G(context.TODO()).WithField("main error", err).WithError(err2).Debug("Error during cleanup due adding service to from cluster while cleaning up due to other error") } } }() @@ -706,26 +707,26 @@ func (ep *Endpoint) sbLeave(sb *Sandbox, force bool, options ...EndpointOption) if d != nil { if moveExtConn { - logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", ep.Name(), ep.ID()) + log.G(context.TODO()).Debugf("Revoking external connectivity on endpoint %s (%s)", ep.Name(), ep.ID()) if err := d.RevokeExternalConnectivity(n.id, ep.id); err != nil { - logrus.Warnf("driver failed revoking external connectivity on endpoint %s (%s): %v", + log.G(context.TODO()).Warnf("driver failed revoking external connectivity on endpoint %s (%s): %v", ep.Name(), ep.ID(), err) } } if err := d.Leave(n.id, ep.id); err != nil { if _, ok := err.(types.MaskableError); !ok { - logrus.Warnf("driver error disconnecting container %s : %v", ep.name, err) + log.G(context.TODO()).Warnf("driver error disconnecting container %s : %v", ep.name, err) } } } if err := ep.deleteServiceInfoFromCluster(sb, true, "sbLeave"); err != nil { - logrus.Warnf("Failed to clean up service info on container %s disconnect: %v", ep.name, err) + log.G(context.TODO()).Warnf("Failed to clean up service info on container %s disconnect: %v", ep.name, err) } if err := sb.clearNetworkResources(ep); err != nil { - logrus.Warnf("Failed to clean up network resources on container %s disconnect: %v", ep.name, err) + log.G(context.TODO()).Warnf("Failed to clean up network resources on container %s disconnect: %v", ep.name, err) } 
// Update the store about the sandbox detach only after we @@ -738,7 +739,7 @@ func (ep *Endpoint) sbLeave(sb *Sandbox, force bool, options ...EndpointOption) } if e := ep.deleteDriverInfoFromCluster(); e != nil { - logrus.Errorf("Failed to delete endpoint state for endpoint %s from cluster: %v", ep.Name(), e) + log.G(context.TODO()).Errorf("Failed to delete endpoint state for endpoint %s from cluster: %v", ep.Name(), e) } sb.deleteHostsEntries(n.getSvcRecords(ep)) @@ -749,7 +750,7 @@ func (ep *Endpoint) sbLeave(sb *Sandbox, force bool, options ...EndpointOption) // New endpoint providing external connectivity for the sandbox extEp = sb.getGatewayEndpoint() if moveExtConn && extEp != nil { - logrus.Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID()) + log.G(context.TODO()).Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID()) extN, err := extEp.getNetworkFromStore() if err != nil { return fmt.Errorf("failed to get network from store for programming external connectivity during leave: %v", err) @@ -759,14 +760,14 @@ func (ep *Endpoint) sbLeave(sb *Sandbox, force bool, options ...EndpointOption) return fmt.Errorf("failed to get driver for programming external connectivity during leave: %v", err) } if err := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); err != nil { - logrus.Warnf("driver failed programming external connectivity on endpoint %s: (%s) %v", + log.G(context.TODO()).Warnf("driver failed programming external connectivity on endpoint %s: (%s) %v", extEp.Name(), extEp.ID(), err) } } if !sb.needDefaultGW() { if err := sb.clearDefaultGW(); err != nil { - logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v", + log.G(context.TODO()).Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v", sb.ID(), sb.ContainerID(), err) } } @@ -800,7 +801,7 @@ func (ep *Endpoint) Delete(force bool) error { if sb != nil { if 
e := ep.sbLeave(sb, force); e != nil { - logrus.Warnf("failed to leave sandbox for endpoint %s : %v", name, e) + log.G(context.TODO()).Warnf("failed to leave sandbox for endpoint %s : %v", name, e) } } @@ -812,7 +813,7 @@ func (ep *Endpoint) Delete(force bool) error { if err != nil && !force { ep.dbExists = false if e := n.getController().updateToStore(ep); e != nil { - logrus.Warnf("failed to recreate endpoint in store %s : %v", name, e) + log.G(context.TODO()).Warnf("failed to recreate endpoint in store %s : %v", name, e) } } }() @@ -827,7 +828,7 @@ func (ep *Endpoint) Delete(force bool) error { ep.releaseAddress() if err := n.getEpCnt().DecEndpointCnt(); err != nil { - logrus.Warnf("failed to decrement endpoint count for ep %s: %v", ep.ID(), err) + log.G(context.TODO()).Warnf("failed to decrement endpoint count for ep %s: %v", ep.ID(), err) } return nil @@ -855,7 +856,7 @@ func (ep *Endpoint) deleteEndpoint(force bool) error { } if _, ok := err.(types.MaskableError); !ok { - logrus.Warnf("driver error deleting endpoint %s : %v", name, err) + log.G(context.TODO()).Warnf("driver error deleting endpoint %s : %v", name, err) } } @@ -1023,7 +1024,7 @@ func JoinOptionPriority(prio int) EndpointOption { sb, ok := c.sandboxes[ep.sandboxID] c.mu.Unlock() if !ok { - logrus.Errorf("Could not set endpoint priority value during Join to endpoint %s: No sandbox id present in endpoint", ep.id) + log.G(context.TODO()).Errorf("Could not set endpoint priority value during Join to endpoint %s: No sandbox id present in endpoint", ep.id) return } sb.epPriority[ep.id] = prio @@ -1042,7 +1043,7 @@ func (ep *Endpoint) assignAddress(ipam ipamapi.Ipam, assignIPv4, assignIPv6 bool return nil } - logrus.Debugf("Assigning addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) + log.G(context.TODO()).Debugf("Assigning addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) if assignIPv4 { if err = ep.assignAddressVersion(4, ipam); err != nil { @@ 
-1122,23 +1123,23 @@ func (ep *Endpoint) releaseAddress() { return } - logrus.Debugf("Releasing addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) + log.G(context.TODO()).Debugf("Releasing addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) ipam, _, err := n.getController().getIPAMDriver(n.ipamType) if err != nil { - logrus.Warnf("Failed to retrieve ipam driver to release interface address on delete of endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + log.G(context.TODO()).Warnf("Failed to retrieve ipam driver to release interface address on delete of endpoint %s (%s): %v", ep.Name(), ep.ID(), err) return } if ep.iface.addr != nil { if err := ipam.ReleaseAddress(ep.iface.v4PoolID, ep.iface.addr.IP); err != nil { - logrus.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addr.IP, ep.Name(), ep.ID(), err) + log.G(context.TODO()).Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addr.IP, ep.Name(), ep.ID(), err) } } if ep.iface.addrv6 != nil && ep.iface.addrv6.IP.IsGlobalUnicast() { if err := ipam.ReleaseAddress(ep.iface.v6PoolID, ep.iface.addrv6.IP); err != nil { - logrus.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addrv6.IP, ep.Name(), ep.ID(), err) + log.G(context.TODO()).Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addrv6.IP, ep.Name(), ep.ID(), err) } } } @@ -1153,7 +1154,7 @@ func (c *Controller) cleanupLocalEndpoints() { } nl, err := c.getNetworks() if err != nil { - logrus.Warnf("Could not get list of networks during endpoint cleanup: %v", err) + log.G(context.TODO()).Warnf("Could not get list of networks during endpoint cleanup: %v", err) return } @@ -1163,7 +1164,7 @@ func (c *Controller) cleanupLocalEndpoints() { } epl, err := n.getEndpointsFromStore() if err != nil { - logrus.Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, 
err) + log.G(context.TODO()).Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, err) continue } @@ -1171,23 +1172,23 @@ func (c *Controller) cleanupLocalEndpoints() { if _, ok := eps[ep.id]; ok { continue } - logrus.Infof("Removing stale endpoint %s (%s)", ep.name, ep.id) + log.G(context.TODO()).Infof("Removing stale endpoint %s (%s)", ep.name, ep.id) if err := ep.Delete(true); err != nil { - logrus.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err) + log.G(context.TODO()).Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err) } } epl, err = n.getEndpointsFromStore() if err != nil { - logrus.Warnf("Could not get list of endpoints in network %s for count update: %v", n.name, err) + log.G(context.TODO()).Warnf("Could not get list of endpoints in network %s for count update: %v", n.name, err) continue } epCnt := n.getEpCnt().EndpointCnt() if epCnt != uint64(len(epl)) { - logrus.Infof("Fixing inconsistent endpoint_cnt for network %s. Expected=%d, Actual=%d", n.name, len(epl), epCnt) + log.G(context.TODO()).Infof("Fixing inconsistent endpoint_cnt for network %s. 
Expected=%d, Actual=%d", n.name, len(epl), epCnt) if err := n.getEpCnt().setCnt(uint64(len(epl))); err != nil { - logrus.WithField("network", n.name).WithError(err).Warn("Error while fixing inconsistent endpoint_cnt for network") + log.G(context.TODO()).WithField("network", n.name).WithError(err).Warn("Error while fixing inconsistent endpoint_cnt for network") } } } diff --git a/libnetwork/firewall_linux.go b/libnetwork/firewall_linux.go index 4eef752d19..29f6134ba6 100644 --- a/libnetwork/firewall_linux.go +++ b/libnetwork/firewall_linux.go @@ -1,8 +1,10 @@ package libnetwork import ( + "context" + + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/iptables" - "github.com/sirupsen/logrus" ) const userChain = "DOCKER-USER" @@ -43,18 +45,18 @@ func arrangeUserFilterRule() { iptable := iptables.GetIptable(ipVer) _, err := iptable.NewChain(userChain, iptables.Filter, false) if err != nil { - logrus.WithError(err).Warnf("Failed to create %s %v chain", userChain, ipVer) + log.G(context.TODO()).WithError(err).Warnf("Failed to create %s %v chain", userChain, ipVer) return } if err = iptable.AddReturnRule(userChain); err != nil { - logrus.WithError(err).Warnf("Failed to add the RETURN rule for %s %v", userChain, ipVer) + log.G(context.TODO()).WithError(err).Warnf("Failed to add the RETURN rule for %s %v", userChain, ipVer) return } err = iptable.EnsureJumpRule("FORWARD", userChain) if err != nil { - logrus.WithError(err).Warnf("Failed to ensure the jump rule for %s %v", userChain, ipVer) + log.G(context.TODO()).WithError(err).Warnf("Failed to ensure the jump rule for %s %v", userChain, ipVer) } } } diff --git a/libnetwork/ipam/allocator.go b/libnetwork/ipam/allocator.go index 46e5c52225..8c2b1a5468 100644 --- a/libnetwork/ipam/allocator.go +++ b/libnetwork/ipam/allocator.go @@ -1,16 +1,17 @@ package ipam import ( + "context" "fmt" "net" "net/netip" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/bitmap" 
"github.com/docker/docker/libnetwork/ipamapi" "github.com/docker/docker/libnetwork/ipbits" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -67,7 +68,7 @@ func (a *Allocator) GetDefaultAddressSpaces() (string, string, error) { // If subPool is not empty, it must be a valid IP address and length in CIDR notation which is a sub-range of pool. // subPool must be empty if pool is empty. func (a *Allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { - logrus.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6) + log.G(context.TODO()).Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6) parseErr := func(err error) (string, *net.IPNet, map[string]string, error) { return "", nil, nil, types.InternalErrorf("failed to parse pool request for address space %q pool %q subpool %q: %v", addressSpace, pool, subPool, err) @@ -116,7 +117,7 @@ func (a *Allocator) RequestPool(addressSpace, pool, subPool string, options map[ // ReleasePool releases the address pool identified by the passed id func (a *Allocator) ReleasePool(poolID string) error { - logrus.Debugf("ReleasePool(%s)", poolID) + log.G(context.TODO()).Debugf("ReleasePool(%s)", poolID) k := PoolID{} if err := k.FromString(poolID); err != nil { return types.BadRequestErrorf("invalid pool id: %s", poolID) @@ -225,7 +226,7 @@ func (aSpace *addrSpace) allocatePredefinedPool(ipV6 bool) (netip.Prefix, error) // RequestAddress returns an address from the specified pool ID func (a *Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) { - logrus.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts) + log.G(context.TODO()).Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts) k := PoolID{} if err := k.FromString(poolID); err != nil { return nil, nil, 
types.BadRequestErrorf("invalid pool id: %s", poolID) @@ -285,7 +286,7 @@ func (aSpace *addrSpace) requestAddress(nw, sub netip.Prefix, prefAddress netip. // ReleaseAddress releases the address from the specified pool ID func (a *Allocator) ReleaseAddress(poolID string, address net.IP) error { - logrus.Debugf("ReleaseAddress(%s, %v)", poolID, address) + log.G(context.TODO()).Debugf("ReleaseAddress(%s, %v)", poolID, address) k := PoolID{} if err := k.FromString(poolID); err != nil { return types.BadRequestErrorf("invalid pool id: %s", poolID) @@ -326,7 +327,7 @@ func (aSpace *addrSpace) releaseAddress(nw, sub netip.Prefix, address netip.Addr return ipamapi.ErrIPOutOfRange } - defer logrus.Debugf("Released address Address:%v Sequence:%s", address, p.addrs) + defer log.G(context.TODO()).Debugf("Released address Address:%v Sequence:%s", address, p.addrs) return p.addrs.Unset(hostID(address, uint(nw.Bits()))) } @@ -337,7 +338,7 @@ func getAddress(base netip.Prefix, bitmask *bitmap.Bitmap, prefAddress netip.Add err error ) - logrus.Debugf("Request address PoolID:%v %s Serial:%v PrefAddress:%v ", base, bitmask, serial, prefAddress) + log.G(context.TODO()).Debugf("Request address PoolID:%v %s Serial:%v PrefAddress:%v ", base, bitmask, serial, prefAddress) if bitmask.Unselected() == 0 { return netip.Addr{}, ipamapi.ErrNoAvailableIPs diff --git a/libnetwork/ipams/remote/remote.go b/libnetwork/ipams/remote/remote.go index 9ce06c55c3..c3219ab746 100644 --- a/libnetwork/ipams/remote/remote.go +++ b/libnetwork/ipams/remote/remote.go @@ -1,9 +1,11 @@ package remote import ( + "context" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/discoverapi" "github.com/docker/docker/libnetwork/ipamapi" "github.com/docker/docker/libnetwork/ipams/remote/api" @@ -11,7 +13,6 @@ import ( "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type allocator struct { @@ 
-43,13 +44,13 @@ func Register(cb ipamapi.Registerer, pg plugingetter.PluginGetter) error { a := newAllocator(name, client) if cps, err := a.(*allocator).getCapabilities(); err == nil { if err := cb.RegisterIpamDriverWithCapabilities(name, a, cps); err != nil { - logrus.Errorf("error registering remote ipam driver %s due to %v", name, err) + log.G(context.TODO()).Errorf("error registering remote ipam driver %s due to %v", name, err) } } else { - logrus.Infof("remote ipam driver %s does not support capabilities", name) - logrus.Debug(err) + log.G(context.TODO()).Infof("remote ipam driver %s does not support capabilities", name) + log.G(context.TODO()).Debug(err) if err := cb.RegisterIpamDriver(name, a); err != nil { - logrus.Errorf("error registering remote ipam driver %s due to %v", name, err) + log.G(context.TODO()).Errorf("error registering remote ipam driver %s due to %v", name, err) } } } diff --git a/libnetwork/ipams/windowsipam/windowsipam.go b/libnetwork/ipams/windowsipam/windowsipam.go index 9eaeb63521..323649bf65 100644 --- a/libnetwork/ipams/windowsipam/windowsipam.go +++ b/libnetwork/ipams/windowsipam/windowsipam.go @@ -1,11 +1,12 @@ package windowsipam import ( + "context" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/ipamapi" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -35,7 +36,7 @@ func (a *allocator) GetDefaultAddressSpaces() (string, string, error) { // RequestPool returns an address pool along with its unique id. This is a null ipam driver. It allocates the // subnet user asked and does not validate anything. 
Doesn't support subpool allocation func (a *allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { - logrus.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6) + log.G(context.TODO()).Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6) if subPool != "" || v6 { return "", nil, nil, types.InternalErrorf("This request is not supported by null ipam driver") } @@ -57,14 +58,14 @@ func (a *allocator) RequestPool(addressSpace, pool, subPool string, options map[ // ReleasePool releases the address pool - always succeeds func (a *allocator) ReleasePool(poolID string) error { - logrus.Debugf("ReleasePool(%s)", poolID) + log.G(context.TODO()).Debugf("ReleasePool(%s)", poolID) return nil } // RequestAddress returns an address from the specified pool ID. // Always allocate the 0.0.0.0/32 ip if no preferred address was specified func (a *allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) { - logrus.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts) + log.G(context.TODO()).Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts) _, ipNet, err := net.ParseCIDR(poolID) if err != nil { @@ -80,7 +81,7 @@ func (a *allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[s // ReleaseAddress releases the address - always succeeds func (a *allocator) ReleaseAddress(poolID string, address net.IP) error { - logrus.Debugf("ReleaseAddress(%s, %v)", poolID, address) + log.G(context.TODO()).Debugf("ReleaseAddress(%s, %v)", poolID, address) return nil } diff --git a/libnetwork/iptables/conntrack.go b/libnetwork/iptables/conntrack.go index 211b0859c7..8737afcfa2 100644 --- a/libnetwork/iptables/conntrack.go +++ b/libnetwork/iptables/conntrack.go @@ -4,12 +4,13 @@ package iptables import ( + "context" "errors" "net" "syscall" + 
"github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -34,7 +35,7 @@ func DeleteConntrackEntries(nlh *netlink.Handle, ipv4List []net.IP, ipv6List []n for _, ipAddress := range ipv4List { flowPurged, err := purgeConntrackState(nlh, syscall.AF_INET, ipAddress) if err != nil { - logrus.Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err) + log.G(context.TODO()).Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err) continue } totalIPv4FlowPurged += flowPurged @@ -44,13 +45,13 @@ func DeleteConntrackEntries(nlh *netlink.Handle, ipv4List []net.IP, ipv6List []n for _, ipAddress := range ipv6List { flowPurged, err := purgeConntrackState(nlh, syscall.AF_INET6, ipAddress) if err != nil { - logrus.Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err) + log.G(context.TODO()).Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err) continue } totalIPv6FlowPurged += flowPurged } - logrus.Debugf("DeleteConntrackEntries purged ipv4:%d, ipv6:%d", totalIPv4FlowPurged, totalIPv6FlowPurged) + log.G(context.TODO()).Debugf("DeleteConntrackEntries purged ipv4:%d, ipv6:%d", totalIPv4FlowPurged, totalIPv6FlowPurged) return totalIPv4FlowPurged, totalIPv6FlowPurged, nil } @@ -65,28 +66,28 @@ func DeleteConntrackEntriesByPort(nlh *netlink.Handle, proto types.Protocol, por for _, port := range ports { filter := &netlink.ConntrackFilter{} if err := filter.AddProtocol(uint8(proto)); err != nil { - logrus.Warnf("Failed to delete conntrack state for %s port %d: %v", proto.String(), port, err) + log.G(context.TODO()).Warnf("Failed to delete conntrack state for %s port %d: %v", proto.String(), port, err) continue } if err := filter.AddPort(netlink.ConntrackOrigDstPort, port); err != nil { - logrus.Warnf("Failed to delete conntrack state for %s port %d: %v", proto.String(), port, err) + log.G(context.TODO()).Warnf("Failed to delete conntrack state 
for %s port %d: %v", proto.String(), port, err) continue } v4FlowPurged, err := nlh.ConntrackDeleteFilter(netlink.ConntrackTable, syscall.AF_INET, filter) if err != nil { - logrus.Warnf("Failed to delete conntrack state for IPv4 %s port %d: %v", proto.String(), port, err) + log.G(context.TODO()).Warnf("Failed to delete conntrack state for IPv4 %s port %d: %v", proto.String(), port, err) } totalIPv4FlowPurged += v4FlowPurged v6FlowPurged, err := nlh.ConntrackDeleteFilter(netlink.ConntrackTable, syscall.AF_INET6, filter) if err != nil { - logrus.Warnf("Failed to delete conntrack state for IPv6 %s port %d: %v", proto.String(), port, err) + log.G(context.TODO()).Warnf("Failed to delete conntrack state for IPv6 %s port %d: %v", proto.String(), port, err) } totalIPv6FlowPurged += v6FlowPurged } - logrus.Debugf("DeleteConntrackEntriesByPort for %s ports purged ipv4:%d, ipv6:%d", proto.String(), totalIPv4FlowPurged, totalIPv6FlowPurged) + log.G(context.TODO()).Debugf("DeleteConntrackEntriesByPort for %s ports purged ipv4:%d, ipv6:%d", proto.String(), totalIPv4FlowPurged, totalIPv6FlowPurged) return nil } diff --git a/libnetwork/iptables/firewalld.go b/libnetwork/iptables/firewalld.go index 8674d78b30..2a7ddd947c 100644 --- a/libnetwork/iptables/firewalld.go +++ b/libnetwork/iptables/firewalld.go @@ -4,11 +4,12 @@ package iptables import ( + "context" "fmt" "strings" + "github.com/containerd/containerd/log" dbus "github.com/godbus/dbus/v5" - "github.com/sirupsen/logrus" ) // IPV defines the table string @@ -191,7 +192,7 @@ func checkRunning() bool { // Passthrough method simply passes args through to iptables/ip6tables func Passthrough(ipv IPV, args ...string) ([]byte, error) { var output string - logrus.Debugf("Firewalld passthrough: %s, %s", ipv, args) + log.G(context.TODO()).Debugf("Firewalld passthrough: %s, %s", ipv, args) if err := connection.sysObj.Call(dbusInterface+".direct.passthrough", 0, ipv, args).Store(&output); err != nil { return nil, err } @@ -235,10 
+236,10 @@ func setupDockerZone() error { return err } if contains(zones, dockerZone) { - logrus.Infof("Firewalld: %s zone already exists, returning", dockerZone) + log.G(context.TODO()).Infof("Firewalld: %s zone already exists, returning", dockerZone) return nil } - logrus.Debugf("Firewalld: creating %s zone", dockerZone) + log.G(context.TODO()).Debugf("Firewalld: creating %s zone", dockerZone) settings := getDockerZoneSettings() // Permanent @@ -262,11 +263,11 @@ func AddInterfaceFirewalld(intf string) error { } // Return if interface is already part of the zone if contains(intfs, intf) { - logrus.Infof("Firewalld: interface %s already part of %s zone, returning", intf, dockerZone) + log.G(context.TODO()).Infof("Firewalld: interface %s already part of %s zone, returning", intf, dockerZone) return nil } - logrus.Debugf("Firewalld: adding %s interface to %s zone", intf, dockerZone) + log.G(context.TODO()).Debugf("Firewalld: adding %s interface to %s zone", intf, dockerZone) // Runtime if err := connection.sysObj.Call(dbusInterface+".zone.addInterface", 0, dockerZone, intf).Err; err != nil { return err @@ -286,7 +287,7 @@ func DelInterfaceFirewalld(intf string) error { return fmt.Errorf("Firewalld: unable to find interface %s in %s zone", intf, dockerZone) } - logrus.Debugf("Firewalld: removing %s interface from %s zone", intf, dockerZone) + log.G(context.TODO()).Debugf("Firewalld: removing %s interface from %s zone", intf, dockerZone) // Runtime if err := connection.sysObj.Call(dbusInterface+".zone.removeInterface", 0, dockerZone, intf).Err; err != nil { return err diff --git a/libnetwork/iptables/iptables.go b/libnetwork/iptables/iptables.go index 86e0c364b2..baca163278 100644 --- a/libnetwork/iptables/iptables.go +++ b/libnetwork/iptables/iptables.go @@ -4,6 +4,7 @@ package iptables import ( + "context" "errors" "fmt" "net" @@ -13,8 +14,8 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/rootless" - 
"github.com/sirupsen/logrus" ) // Action signifies the iptable action. @@ -90,7 +91,7 @@ func (e ChainError) Error() string { func detectIptables() { path, err := exec.LookPath("iptables") if err != nil { - logrus.WithError(err).Warnf("failed to find iptables") + log.G(context.TODO()).WithError(err).Warnf("failed to find iptables") return } iptablesPath = path @@ -98,14 +99,14 @@ func detectIptables() { // The --wait flag was added in iptables v1.6.0. // TODO remove this check once we drop support for CentOS/RHEL 7, which uses an older version of iptables if out, err := exec.Command(path, "--wait", "-L", "-n").CombinedOutput(); err != nil { - logrus.WithError(err).Infof("unable to detect if iptables supports xlock: 'iptables --wait -L -n': `%s`", strings.TrimSpace(string(out))) + log.G(context.TODO()).WithError(err).Infof("unable to detect if iptables supports xlock: 'iptables --wait -L -n': `%s`", strings.TrimSpace(string(out))) } else { supportsXlock = true } path, err = exec.LookPath("ip6tables") if err != nil { - logrus.WithError(err).Warnf("unable to find ip6tables") + log.G(context.TODO()).WithError(err).Warnf("unable to find ip6tables") } else { ip6tablesPath = path } @@ -115,11 +116,11 @@ func initFirewalld() { // When running with RootlessKit, firewalld is running as the root outside our network namespace // https://github.com/moby/moby/issues/43781 if rootless.RunningWithRootlessKit() { - logrus.Info("skipping firewalld management for rootless mode") + log.G(context.TODO()).Info("skipping firewalld management for rootless mode") return } if err := FirewalldInit(); err != nil { - logrus.WithError(err).Debugf("unable to initialize firewalld; using raw iptables instead") + log.G(context.TODO()).WithError(err).Debugf("unable to initialize firewalld; using raw iptables instead") } } @@ -474,7 +475,7 @@ func filterOutput(start time.Time, output []byte, args ...string) []byte { // Flag operations that have taken a long time to complete opTime := 
time.Since(start) if opTime > opWarnTime { - logrus.Warnf("xtables contention detected while running [%s]: Waited for %.2f seconds and received %q", strings.Join(args, " "), float64(opTime)/float64(time.Second), string(output)) + log.G(context.TODO()).Warnf("xtables contention detected while running [%s]: Waited for %.2f seconds and received %q", strings.Join(args, " "), float64(opTime)/float64(time.Second), string(output)) } // ignore iptables' message about xtables lock: // it is a warning, not an error. @@ -524,7 +525,7 @@ func (iptable IPTable) raw(args ...string) ([]byte, error) { commandName = "ip6tables" } - logrus.Debugf("%s, %v", path, args) + log.G(context.TODO()).Debugf("%s, %v", path, args) startTime := time.Now() output, err := exec.Command(path, args...).CombinedOutput() diff --git a/libnetwork/libnetwork_linux_test.go b/libnetwork/libnetwork_linux_test.go index 6cc332466a..ab5c0498fb 100644 --- a/libnetwork/libnetwork_linux_test.go +++ b/libnetwork/libnetwork_linux_test.go @@ -2,6 +2,7 @@ package libnetwork_test import ( "bytes" + "context" "encoding/json" "fmt" "net" @@ -11,6 +12,7 @@ import ( "sync" "testing" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork" "github.com/docker/docker/libnetwork/ipamapi" "github.com/docker/docker/libnetwork/netlabel" @@ -20,7 +22,6 @@ import ( "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/pkg/reexec" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/vishvananda/netns" "golang.org/x/sync/errgroup" @@ -491,7 +492,7 @@ func externalKeyTest(t *testing.T, reexec bool) { } else { defer func() { if err := extOsBox.Destroy(); err != nil { - logrus.Warnf("Failed to remove os sandbox: %v", err) + log.G(context.TODO()).Warnf("Failed to remove os sandbox: %v", err) } }() } @@ -1051,7 +1052,7 @@ func isV6Listenable() bool { // When the kernel was booted with `ipv6.disable=1`, // we get err "listen tcp6 [::1]:0: socket: address 
family not supported by protocol" // https://github.com/moby/moby/issues/42288 - logrus.Debugf("port_mapping: v6Listenable=false (%v)", err) + log.G(context.TODO()).Debugf("port_mapping: v6Listenable=false (%v)", err) } else { v6ListenableCached = true ln.Close() diff --git a/libnetwork/network.go b/libnetwork/network.go index b32984419e..596143c672 100644 --- a/libnetwork/network.go +++ b/libnetwork/network.go @@ -1,6 +1,7 @@ package libnetwork import ( + "context" "encoding/json" "fmt" "net" @@ -9,6 +10,7 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/driverapi" @@ -21,7 +23,6 @@ import ( "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" ) // A Network represents a logical connectivity zone that containers may @@ -622,7 +623,7 @@ func (n *network) UnmarshalJSON(b []byte) (err error) { if v, ok := netMap["created"]; ok { // n.created is time.Time but marshalled as string if err = n.created.UnmarshalText([]byte(v.(string))); err != nil { - logrus.Warnf("failed to unmarshal creation time %v: %v", v, err) + log.G(context.TODO()).Warnf("failed to unmarshal creation time %v: %v", v, err) n.created = time.Time{} } } @@ -1025,7 +1026,7 @@ func (n *network) delete(force bool, rmLBEndpoint bool) error { return err } // continue deletion when force is true even on error - logrus.Warnf("Error deleting load balancer sandbox: %v", err) + log.G(context.TODO()).Warnf("Error deleting load balancer sandbox: %v", err) } // Reload the network from the store to update the epcnt. 
n, err = c.getNetworkFromStore(id) @@ -1048,11 +1049,11 @@ func (n *network) delete(force bool, rmLBEndpoint bool) error { if n.ConfigFrom() != "" { if t, err := c.getConfigNetwork(n.ConfigFrom()); err == nil { if err := t.getEpCnt().DecEndpointCnt(); err != nil { - logrus.Warnf("Failed to update reference count for configuration network %q on removal of network %q: %v", + log.G(context.TODO()).Warnf("Failed to update reference count for configuration network %q on removal of network %q: %v", t.Name(), n.Name(), err) } } else { - logrus.Warnf("Could not find configuration network %q during removal of network %q", n.configFrom, n.Name()) + log.G(context.TODO()).Warnf("Could not find configuration network %q during removal of network %q", n.configFrom, n.Name()) } } @@ -1070,7 +1071,7 @@ func (n *network) delete(force bool, rmLBEndpoint bool) error { // bindings cleanup requires the network in the store. n.cancelDriverWatches() if err = n.leaveCluster(); err != nil { - logrus.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err) + log.G(context.TODO()).Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err) } // Cleanup the service discovery for this network @@ -1088,7 +1089,7 @@ func (n *network) delete(force bool, rmLBEndpoint bool) error { if !force { return err } - logrus.Debugf("driver failed to delete stale network %s (%s): %v", n.Name(), n.ID(), err) + log.G(context.TODO()).Debugf("driver failed to delete stale network %s (%s): %v", n.Name(), n.ID(), err) } removeFromStore: @@ -1099,7 +1100,7 @@ removeFromStore: if !force { return fmt.Errorf("error deleting network endpoint count from store: %v", err) } - logrus.Debugf("Error deleting endpoint count from store for stale network %s (%s) for deletion: %v", n.Name(), n.ID(), err) + log.G(context.TODO()).Debugf("Error deleting endpoint count from store for stale network %s (%s) for deletion: %v", n.Name(), n.ID(), err) } if err = c.deleteFromStore(n); err != nil { @@ 
-1122,7 +1123,7 @@ func (n *network) deleteNetwork() error { } if _, ok := err.(types.MaskableError); !ok { - logrus.Warnf("driver error deleting network %s : %v", n.name, err) + log.G(context.TODO()).Warnf("driver error deleting network %s : %v", n.name, err) } } @@ -1178,7 +1179,7 @@ func (n *network) createEndpoint(name string, options ...EndpointOption) (*Endpo ep.network = n ep.network, err = ep.getNetworkFromStore() if err != nil { - logrus.Errorf("failed to get network during CreateEndpoint: %v", err) + log.G(context.TODO()).Errorf("failed to get network during CreateEndpoint: %v", err) return nil, err } n = ep.network @@ -1227,7 +1228,7 @@ func (n *network) createEndpoint(name string, options ...EndpointOption) (*Endpo defer func() { if err != nil { if e := ep.deleteEndpoint(false); e != nil { - logrus.Warnf("cleaning up endpoint failed %s : %v", name, e) + log.G(context.TODO()).Warnf("cleaning up endpoint failed %s : %v", name, e) } } }() @@ -1240,7 +1241,7 @@ func (n *network) createEndpoint(name string, options ...EndpointOption) (*Endpo defer func() { if err != nil { if e := n.getController().deleteFromStore(ep); e != nil { - logrus.Warnf("error rolling back endpoint %s from store: %v", name, e) + log.G(context.TODO()).Warnf("error rolling back endpoint %s from store: %v", name, e) } } }() @@ -1268,7 +1269,7 @@ func (n *network) createEndpoint(name string, options ...EndpointOption) (*Endpo func (n *network) Endpoints() []*Endpoint { endpoints, err := n.getEndpointsFromStore() if err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) } return endpoints } @@ -1400,7 +1401,7 @@ func (n *network) addSvcRecords(eID, name, serviceID string, epIP, epIPv6 net.IP return } networkID := n.ID() - logrus.Debugf("%s (%.7s).addSvcRecords(%s, %s, %s, %t) %s sid:%s", eID, networkID, name, epIP, epIPv6, ipMapUpdate, method, serviceID) + log.G(context.TODO()).Debugf("%s (%.7s).addSvcRecords(%s, %s, %s, %t) %s sid:%s", eID, networkID, name, epIP, epIPv6, 
ipMapUpdate, method, serviceID) c := n.getController() c.mu.Lock() @@ -1432,7 +1433,7 @@ func (n *network) deleteSvcRecords(eID, name, serviceID string, epIP net.IP, epI return } networkID := n.ID() - logrus.Debugf("%s (%.7s).deleteSvcRecords(%s, %s, %s, %t) %s sid:%s ", eID, networkID, name, epIP, epIPv6, ipMapUpdate, method, serviceID) + log.G(context.TODO()).Debugf("%s (%.7s).deleteSvcRecords(%s, %s, %s, %t) %s sid:%s ", eID, networkID, name, epIP, epIPv6, ipMapUpdate, method, serviceID) c := n.getController() c.mu.Lock() @@ -1490,7 +1491,7 @@ func (n *network) getSvcRecords(ep *Endpoint) []etchosts.Record { continue } if len(mapEntryList) == 0 { - logrus.Warnf("Found empty list of IP addresses for service %s on network %s (%s)", k, n.name, n.id) + log.G(context.TODO()).Warnf("Found empty list of IP addresses for service %s on network %s (%s)", k, n.name, n.id) continue } @@ -1571,7 +1572,7 @@ func (n *network) requestPoolHelper(ipam ipamapi.Ipam, addressSpace, preferredPo // pools. defer func() { if err := ipam.ReleasePool(poolID); err != nil { - logrus.Warnf("Failed to release overlapping pool %s while returning from pool request helper for network %s", pool, n.Name()) + log.G(context.TODO()).Warnf("Failed to release overlapping pool %s while returning from pool request helper for network %s", pool, n.Name()) } }() @@ -1609,7 +1610,7 @@ func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error { *infoList = make([]*IpamInfo, len(*cfgList)) - logrus.Debugf("Allocating IPv%d pools for network %s (%s)", ipVer, n.Name(), n.ID()) + log.G(context.TODO()).Debugf("Allocating IPv%d pools for network %s (%s)", ipVer, n.Name(), n.ID()) for i, cfg := range *cfgList { if err = cfg.Validate(); err != nil { @@ -1627,7 +1628,7 @@ func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error { defer func() { if err != nil { if err := ipam.ReleasePool(d.PoolID); err != nil { - logrus.Warnf("Failed to release address pool %s after failure to create 
network %s (%s)", d.PoolID, n.Name(), n.ID()) + log.G(context.TODO()).Warnf("Failed to release address pool %s after failure to create network %s (%s)", d.PoolID, n.Name(), n.ID()) } } }() @@ -1679,7 +1680,7 @@ func (n *network) ipamRelease() { } ipam, _, err := n.getController().getIPAMDriver(n.ipamType) if err != nil { - logrus.Warnf("Failed to retrieve ipam driver to release address pool(s) on delete of network %s (%s): %v", n.Name(), n.ID(), err) + log.G(context.TODO()).Warnf("Failed to retrieve ipam driver to release address pool(s) on delete of network %s (%s): %v", n.Name(), n.ID(), err) return } n.ipamReleaseVersion(4, ipam) @@ -1695,7 +1696,7 @@ func (n *network) ipamReleaseVersion(ipVer int, ipam ipamapi.Ipam) { case 6: infoList = &n.ipamV6Info default: - logrus.Warnf("incorrect ip version passed to ipam release: %d", ipVer) + log.G(context.TODO()).Warnf("incorrect ip version passed to ipam release: %d", ipVer) return } @@ -1703,25 +1704,25 @@ func (n *network) ipamReleaseVersion(ipVer int, ipam ipamapi.Ipam) { return } - logrus.Debugf("releasing IPv%d pools from network %s (%s)", ipVer, n.Name(), n.ID()) + log.G(context.TODO()).Debugf("releasing IPv%d pools from network %s (%s)", ipVer, n.Name(), n.ID()) for _, d := range *infoList { if d.Gateway != nil { if err := ipam.ReleaseAddress(d.PoolID, d.Gateway.IP); err != nil { - logrus.Warnf("Failed to release gateway ip address %s on delete of network %s (%s): %v", d.Gateway.IP, n.Name(), n.ID(), err) + log.G(context.TODO()).Warnf("Failed to release gateway ip address %s on delete of network %s (%s): %v", d.Gateway.IP, n.Name(), n.ID(), err) } } if d.IPAMData.AuxAddresses != nil { for k, nw := range d.IPAMData.AuxAddresses { if d.Pool.Contains(nw.IP) { if err := ipam.ReleaseAddress(d.PoolID, nw.IP); err != nil && err != ipamapi.ErrIPOutOfRange { - logrus.Warnf("Failed to release secondary ip address %s (%v) on delete of network %s (%s): %v", k, nw.IP, n.Name(), n.ID(), err) + 
log.G(context.TODO()).Warnf("Failed to release secondary ip address %s (%v) on delete of network %s (%s): %v", k, nw.IP, n.Name(), n.ID(), err) } } } } if err := ipam.ReleasePool(d.PoolID); err != nil { - logrus.Warnf("Failed to release address pool %s on delete of network %s (%s): %v", d.PoolID, n.Name(), n.ID(), err) + log.G(context.TODO()).Warnf("Failed to release address pool %s on delete of network %s (%s): %v", d.PoolID, n.Name(), n.ID(), err) } } @@ -1823,7 +1824,7 @@ func (n *network) IpamConfig() (string, map[string]string, []*IpamConf, []*IpamC for i, c := range n.ipamV4Config { cc := &IpamConf{} if err := c.CopyTo(cc); err != nil { - logrus.WithError(err).Error("Error copying ipam ipv4 config") + log.G(context.TODO()).WithError(err).Error("Error copying ipam ipv4 config") } v4L[i] = cc } @@ -1831,7 +1832,7 @@ func (n *network) IpamConfig() (string, map[string]string, []*IpamConf, []*IpamC for i, c := range n.ipamV6Config { cc := &IpamConf{} if err := c.CopyTo(cc); err != nil { - logrus.WithError(err).Debug("Error copying ipam ipv6 config") + log.G(context.TODO()).WithError(err).Debug("Error copying ipam ipv6 config") } v6L[i] = cc } @@ -1849,7 +1850,7 @@ func (n *network) IpamInfo() ([]*IpamInfo, []*IpamInfo) { for i, info := range n.ipamV4Info { ic := &IpamInfo{} if err := info.CopyTo(ic); err != nil { - logrus.WithError(err).Error("Error copying ipv4 ipam config") + log.G(context.TODO()).WithError(err).Error("Error copying ipv4 ipam config") } v4Info[i] = ic } @@ -1857,7 +1858,7 @@ func (n *network) IpamInfo() ([]*IpamInfo, []*IpamInfo) { for i, info := range n.ipamV6Info { ic := &IpamInfo{} if err := info.CopyTo(ic); err != nil { - logrus.WithError(err).Error("Error copying ipv6 ipam config") + log.G(context.TODO()).WithError(err).Error("Error copying ipv6 ipam config") } v6Info[i] = ic } @@ -2065,7 +2066,7 @@ func (n *network) ResolveService(name string) ([]*net.SRV, []net.IP) { srv := []*net.SRV{} ip := []net.IP{} - logrus.Debugf("Service name To 
resolve: %v", name) + log.G(context.TODO()).Debugf("Service name To resolve: %v", name) // There are DNS implementations that allow SRV queries for names not in // the format defined by RFC 2782. Hence specific validations checks are @@ -2169,7 +2170,7 @@ func (n *network) createLoadBalancerSandbox() (retErr error) { defer func() { if retErr != nil { if e := n.ctrlr.SandboxDestroy(sandboxName); e != nil { - logrus.Warnf("could not delete sandbox %s on failure on failure (%v): %v", sandboxName, retErr, e) + log.G(context.TODO()).Warnf("could not delete sandbox %s on failure on failure (%v): %v", sandboxName, retErr, e) } } }() @@ -2190,7 +2191,7 @@ func (n *network) createLoadBalancerSandbox() (retErr error) { defer func() { if retErr != nil { if e := ep.Delete(true); e != nil { - logrus.Warnf("could not delete endpoint %s on failure on failure (%v): %v", endpointName, retErr, e) + log.G(context.TODO()).Warnf("could not delete endpoint %s on failure on failure (%v): %v", endpointName, retErr, e) } } }() @@ -2213,21 +2214,21 @@ func (n *network) deleteLoadBalancerSandbox() error { endpoint, err := n.EndpointByName(endpointName) if err != nil { - logrus.Warnf("Failed to find load balancer endpoint %s on network %s: %v", endpointName, name, err) + log.G(context.TODO()).Warnf("Failed to find load balancer endpoint %s on network %s: %v", endpointName, name, err) } else { info := endpoint.Info() if info != nil { sb := info.Sandbox() if sb != nil { if err := sb.DisableService(); err != nil { - logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err) + log.G(context.TODO()).Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err) // Ignore error and attempt to delete the load balancer endpoint } } } if err := endpoint.Delete(true); err != nil { - logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoint.Name(), endpoint.ID(), sandboxName, err) + log.G(context.TODO()).Warnf("Failed to delete endpoint %s (%s) in %s: %v", 
endpoint.Name(), endpoint.ID(), sandboxName, err) // Ignore error and attempt to delete the sandbox. } } diff --git a/libnetwork/network_windows.go b/libnetwork/network_windows.go index 09aaa7ce3c..7cba9be2d1 100644 --- a/libnetwork/network_windows.go +++ b/libnetwork/network_windows.go @@ -3,21 +3,22 @@ package libnetwork import ( + "context" "runtime" "time" "github.com/Microsoft/hcsshim" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/ipamapi" "github.com/docker/docker/libnetwork/ipams/windowsipam" - "github.com/sirupsen/logrus" ) func executeInCompartment(compartmentID uint32, x func()) { runtime.LockOSThread() if err := hcsshim.SetCurrentThreadCompartmentId(compartmentID); err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) } defer func() { hcsshim.SetCurrentThreadCompartmentId(0) @@ -32,7 +33,7 @@ func (n *network) startResolver() { return } n.resolverOnce.Do(func() { - logrus.Debugf("Launching DNS server for network %q", n.Name()) + log.G(context.TODO()).Debugf("Launching DNS server for network %q", n.Name()) options := n.Info().DriverOptions() hnsid := options[windows.HNSID] @@ -42,7 +43,7 @@ func (n *network) startResolver() { hnsresponse, err := hcsshim.HNSNetworkRequest("GET", hnsid, "") if err != nil { - logrus.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err) + log.G(context.TODO()).Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err) return } @@ -50,14 +51,14 @@ func (n *network) startResolver() { if subnet.GatewayAddress != "" { for i := 0; i < 3; i++ { resolver := NewResolver(subnet.GatewayAddress, false, n) - logrus.Debugf("Binding a resolver on network %s gateway %s", n.Name(), subnet.GatewayAddress) + log.G(context.TODO()).Debugf("Binding a resolver on network %s gateway %s", n.Name(), subnet.GatewayAddress) executeInCompartment(hnsresponse.DNSServerCompartment, resolver.SetupFunc(53)) if err = 
resolver.Start(); err != nil { - logrus.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err) + log.G(context.TODO()).Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err) time.Sleep(1 * time.Second) } else { - logrus.Debugf("Resolver bound successfully for network %s", n.Name()) + log.G(context.TODO()).Debugf("Resolver bound successfully for network %s", n.Name()) n.resolver = append(n.resolver, resolver) break } diff --git a/libnetwork/networkdb/cluster.go b/libnetwork/networkdb/cluster.go index bd53f9fe6a..7860dc6195 100644 --- a/libnetwork/networkdb/cluster.go +++ b/libnetwork/networkdb/cluster.go @@ -6,15 +6,15 @@ import ( "crypto/rand" "encoding/hex" "fmt" - "log" + golog "log" "math/big" rnd "math/rand" "net" "strings" "time" + "github.com/containerd/containerd/log" "github.com/hashicorp/memberlist" - "github.com/sirupsen/logrus" ) const ( @@ -36,16 +36,16 @@ func (l *logWriter) Write(p []byte) (int, error) { switch { case strings.HasPrefix(str, "[WARN] "): str = strings.TrimPrefix(str, "[WARN] ") - logrus.Warn(str) + log.G(context.TODO()).Warn(str) case strings.HasPrefix(str, "[DEBUG] "): str = strings.TrimPrefix(str, "[DEBUG] ") - logrus.Debug(str) + log.G(context.TODO()).Debug(str) case strings.HasPrefix(str, "[INFO] "): str = strings.TrimPrefix(str, "[INFO] ") - logrus.Info(str) + log.G(context.TODO()).Info(str) case strings.HasPrefix(str, "[ERR] "): str = strings.TrimPrefix(str, "[ERR] ") - logrus.Warn(str) + log.G(context.TODO()).Warn(str) } return len(p), nil @@ -53,7 +53,7 @@ func (l *logWriter) Write(p []byte) (int, error) { // SetKey adds a new key to the key ring func (nDB *NetworkDB) SetKey(key []byte) { - logrus.Debugf("Adding key %.5s", hex.EncodeToString(key)) + log.G(context.TODO()).Debugf("Adding key %.5s", hex.EncodeToString(key)) nDB.Lock() defer nDB.Unlock() for _, dbKey := range nDB.config.Keys { @@ -70,7 +70,7 @@ func (nDB *NetworkDB) SetKey(key []byte) { // SetPrimaryKey sets the given key as the 
primary key. This should have // been added apriori through SetKey func (nDB *NetworkDB) SetPrimaryKey(key []byte) { - logrus.Debugf("Primary Key %.5s", hex.EncodeToString(key)) + log.G(context.TODO()).Debugf("Primary Key %.5s", hex.EncodeToString(key)) nDB.RLock() defer nDB.RUnlock() for _, dbKey := range nDB.config.Keys { @@ -86,7 +86,7 @@ func (nDB *NetworkDB) SetPrimaryKey(key []byte) { // RemoveKey removes a key from the key ring. The key being removed // can't be the primary key func (nDB *NetworkDB) RemoveKey(key []byte) { - logrus.Debugf("Remove Key %.5s", hex.EncodeToString(key)) + log.G(context.TODO()).Debugf("Remove Key %.5s", hex.EncodeToString(key)) nDB.Lock() defer nDB.Unlock() for i, dbKey := range nDB.config.Keys { @@ -119,12 +119,12 @@ func (nDB *NetworkDB) clusterInit() error { config.Events = &eventDelegate{nDB: nDB} // custom logger that does not add time or date, so they are not // duplicated by logrus - config.Logger = log.New(&logWriter{}, "", 0) + config.Logger = golog.New(&logWriter{}, "", 0) var err error if len(nDB.config.Keys) > 0 { for i, key := range nDB.config.Keys { - logrus.Debugf("Encryption key %d: %.5s", i+1, hex.EncodeToString(key)) + log.G(context.TODO()).Debugf("Encryption key %d: %.5s", i+1, hex.EncodeToString(key)) } nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0]) if err != nil { @@ -188,11 +188,11 @@ func (nDB *NetworkDB) retryJoin(ctx context.Context, members []string) { select { case <-t.C: if _, err := nDB.memberlist.Join(members); err != nil { - logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err) + log.G(ctx).Errorf("Failed to join memberlist %s on retry: %v", members, err) continue } if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil { - logrus.Errorf("failed to send node join on retry: %v", err) + log.G(ctx).Errorf("failed to send node join on retry: %v", err) continue } return @@ -223,7 +223,7 @@ func (nDB *NetworkDB) clusterLeave() error { mlist := 
nDB.memberlist if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil { - logrus.Errorf("failed to send node leave: %v", err) + log.G(context.TODO()).Errorf("failed to send node leave: %v", err) } if err := mlist.Leave(time.Second); err != nil { @@ -270,7 +270,7 @@ func (nDB *NetworkDB) reapDeadNode() { n.reapTime -= nodeReapPeriod continue } - logrus.Debugf("Garbage collect node %v", n.Name) + log.G(context.TODO()).Debugf("Garbage collect node %v", n.Name) delete(nodeMap, id) } } @@ -289,7 +289,7 @@ func (nDB *NetworkDB) rejoinClusterBootStrap() { myself, ok := nDB.nodes[nDB.config.NodeID] if !ok { nDB.RUnlock() - logrus.Warnf("rejoinClusterBootstrap unable to find local node info using ID:%v", nDB.config.NodeID) + log.G(context.TODO()).Warnf("rejoinClusterBootstrap unable to find local node info using ID:%v", nDB.config.NodeID) return } bootStrapIPs := make([]string, 0, len(nDB.bootStrapIP)) @@ -317,11 +317,11 @@ func (nDB *NetworkDB) rejoinClusterBootStrap() { nDB.RUnlock() if len(bootStrapIPs) == 0 { // this will also avoid to call the Join with an empty list erasing the current bootstrap ip list - logrus.Debug("rejoinClusterBootStrap did not find any valid IP") + log.G(context.TODO()).Debug("rejoinClusterBootStrap did not find any valid IP") return } // None of the bootStrap nodes are in the cluster, call memberlist join - logrus.Debugf("rejoinClusterBootStrap, calling cluster join with bootStrap %v", bootStrapIPs) + log.G(context.TODO()).Debugf("rejoinClusterBootStrap, calling cluster join with bootStrap %v", bootStrapIPs) ctx, cancel := context.WithTimeout(nDB.ctx, nDB.config.rejoinClusterDuration) defer cancel() nDB.retryJoin(ctx, bootStrapIPs) @@ -351,7 +351,7 @@ func (nDB *NetworkDB) reconnectNode() { return } - logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name) + log.G(context.TODO()).Debugf("Initiating bulk sync with node %s after reconnect", node.Name) nDB.bulkSync([]string{node.Name}, true) } @@ -418,10 +418,10 @@ func 
(nDB *NetworkDB) reapTableEntries() { okTable, okNetwork := nDB.deleteEntry(nid, tname, key) if !okTable { - logrus.Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid) + log.G(context.TODO()).Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid) } if !okNetwork { - logrus.Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname) + log.G(context.TODO()).Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname) } return false @@ -444,7 +444,7 @@ func (nDB *NetworkDB) gossip() { if printHealth { healthScore := nDB.memberlist.GetHealthScore() if healthScore != 0 { - logrus.Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore) + log.G(context.TODO()).Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore) } nDB.lastHealthTimestamp = time.Now() } @@ -467,7 +467,7 @@ func (nDB *NetworkDB) gossip() { broadcastQ := network.tableBroadcasts if broadcastQ == nil { - logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid) + log.G(context.TODO()).Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid) continue } @@ -476,7 +476,7 @@ func (nDB *NetworkDB) gossip() { network.qMessagesSent.Add(int64(len(msgs))) if printStats { msent := network.qMessagesSent.Swap(0) - logrus.Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d", + log.G(context.TODO()).Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d", nDB.config.Hostname, nDB.config.NodeID, nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber.Load(), broadcastQ.NumQueued(), 
msent/int64((nDB.config.StatsPrintPeriod/time.Second))) @@ -500,7 +500,7 @@ func (nDB *NetworkDB) gossip() { // Send the compound message if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil { - logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err) + log.G(context.TODO()).Errorf("Failed to send gossip to %s: %s", mnode.Addr, err) } } } @@ -540,7 +540,7 @@ func (nDB *NetworkDB) bulkSyncTables() { completed, err := nDB.bulkSync(nodes, false) if err != nil { - logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err) + log.G(context.TODO()).Errorf("periodic bulk sync failure for network %s: %v", nid, err) continue } @@ -583,12 +583,12 @@ func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) { if node == nDB.config.NodeID { continue } - logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node) + log.G(context.TODO()).Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node) networks = nDB.findCommonNetworks(node) err = nDB.bulkSyncNode(networks, node, true) if err != nil { err = fmt.Errorf("bulk sync to node %s failed: %v", node, err) - logrus.Warn(err.Error()) + log.G(context.TODO()).Warn(err.Error()) } else { // bulk sync succeeded success = true @@ -618,7 +618,7 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b unsolMsg = "unsolicited" } - logrus.Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s", + log.G(context.TODO()).Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s", nDB.config.Hostname, nDB.config.NodeID, unsolMsg, networks, node) nDB.RLock() @@ -655,7 +655,7 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b msg, err := encodeMessage(MessageTypeTableEvent, &tEvent) if err != nil { - logrus.Errorf("Encode failure during bulk sync: %#v", tEvent) + log.G(context.TODO()).Errorf("Encode failure during bulk 
sync: %#v", tEvent) return false } @@ -701,9 +701,9 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b t := time.NewTimer(30 * time.Second) select { case <-t.C: - logrus.Errorf("Bulk sync to node %s timed out", node) + log.G(context.TODO()).Errorf("Bulk sync to node %s timed out", node) case <-ch: - logrus.Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime)) + log.G(context.TODO()).Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime)) } t.Stop() } @@ -719,7 +719,7 @@ func randomOffset(n int) int { val, err := rand.Int(rand.Reader, big.NewInt(int64(n))) if err != nil { - logrus.Errorf("Failed to get a random offset: %v", err) + log.G(context.TODO()).Errorf("Failed to get a random offset: %v", err) return 0 } diff --git a/libnetwork/networkdb/delegate.go b/libnetwork/networkdb/delegate.go index 14e19bbdd7..6083031f5a 100644 --- a/libnetwork/networkdb/delegate.go +++ b/libnetwork/networkdb/delegate.go @@ -1,11 +1,12 @@ package networkdb import ( + "context" "net" "time" + "github.com/containerd/containerd/log" "github.com/gogo/protobuf/proto" - "github.com/sirupsen/logrus" ) type delegate struct { @@ -41,7 +42,7 @@ func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool { // If the node is not known from memberlist we cannot process save any state of it else if it actually // dies we won't receive any notification and we will remain stuck with it if _, ok := nDB.nodes[nEvent.NodeName]; !ok { - logrus.Errorf("node: %s is unknown to memberlist", nEvent.NodeName) + log.G(context.TODO()).Errorf("node: %s is unknown to memberlist", nEvent.NodeName) return false } @@ -49,21 +50,21 @@ func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool { case NodeEventTypeJoin: moved, err := nDB.changeNodeState(n.Name, nodeActiveState) if err != nil { - logrus.WithError(err).Error("unable to find the node to move") + 
log.G(context.TODO()).WithError(err).Error("unable to find the node to move") return false } if moved { - logrus.Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr) + log.G(context.TODO()).Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr) } return moved case NodeEventTypeLeave: moved, err := nDB.changeNodeState(n.Name, nodeLeftState) if err != nil { - logrus.WithError(err).Error("unable to find the node to move") + log.G(context.TODO()).WithError(err).Error("unable to find the node to move") return false } if moved { - logrus.Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr) + log.G(context.TODO()).Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr) } return moved } @@ -197,7 +198,7 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent, isBulkSync bool) bool // This case can happen if the cluster is running different versions of the engine where the old version does not have the // field. 
If that is not the case, this can be a BUG if e.deleting && e.reapTime == 0 { - logrus.Warnf("%v(%v) handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?", + log.G(context.TODO()).Warnf("%v(%v) handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?", nDB.config.Hostname, nDB.config.NodeID, tEvent) e.reapTime = nDB.config.reapEntryInterval } @@ -214,7 +215,7 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent, isBulkSync bool) bool // most likely the cluster is already aware of it // This also reduce the possibility that deletion of entries close to their garbage collection ends up circuling around // forever - //logrus.Infof("exiting on delete not knowing the obj with rebroadcast:%t", network.inSync) + //log.G(context.TODO()).Infof("exiting on delete not knowing the obj with rebroadcast:%t", network.inSync) return network.inSync && e.reapTime > nDB.config.reapEntryInterval/6 }
gossip message for network event rebroadcast: %v", err) return } @@ -292,7 +293,7 @@ func (nDB *NetworkDB) handleTableMessage(buf []byte, isBulkSync bool) { func (nDB *NetworkDB) handleNodeMessage(buf []byte) { var nEvent NodeEvent if err := proto.Unmarshal(buf, &nEvent); err != nil { - logrus.Errorf("Error decoding node event message: %v", err) + log.G(context.TODO()).Errorf("Error decoding node event message: %v", err) return } @@ -300,7 +301,7 @@ func (nDB *NetworkDB) handleNodeMessage(buf []byte) { var err error buf, err = encodeRawMessage(MessageTypeNodeEvent, buf) if err != nil { - logrus.Errorf("Error marshalling gossip message for node event rebroadcast: %v", err) + log.G(context.TODO()).Errorf("Error marshalling gossip message for node event rebroadcast: %v", err) return } @@ -313,7 +314,7 @@ func (nDB *NetworkDB) handleNodeMessage(buf []byte) { func (nDB *NetworkDB) handleNetworkMessage(buf []byte) { var nEvent NetworkEvent if err := proto.Unmarshal(buf, &nEvent); err != nil { - logrus.Errorf("Error decoding network event message: %v", err) + log.G(context.TODO()).Errorf("Error decoding network event message: %v", err) return } @@ -321,7 +322,7 @@ func (nDB *NetworkDB) handleNetworkMessage(buf []byte) { var err error buf, err = encodeRawMessage(MessageTypeNetworkEvent, buf) if err != nil { - logrus.Errorf("Error marshalling gossip message for network event rebroadcast: %v", err) + log.G(context.TODO()).Errorf("Error marshalling gossip message for network event rebroadcast: %v", err) return } @@ -336,7 +337,7 @@ func (nDB *NetworkDB) handleNetworkMessage(buf []byte) { func (nDB *NetworkDB) handleBulkSync(buf []byte) { var bsm BulkSyncMessage if err := proto.Unmarshal(buf, &bsm); err != nil { - logrus.Errorf("Error decoding bulk sync message: %v", err) + log.G(context.TODO()).Errorf("Error decoding bulk sync message: %v", err) return } @@ -367,14 +368,14 @@ func (nDB *NetworkDB) handleBulkSync(buf []byte) { nDB.RUnlock() if err := 
nDB.bulkSyncNode(bsm.Networks, bsm.NodeName, false); err != nil { - logrus.Errorf("Error in responding to bulk sync from node %s: %v", nodeAddr, err) + log.G(context.TODO()).Errorf("Error in responding to bulk sync from node %s: %v", nodeAddr, err) } } func (nDB *NetworkDB) handleMessage(buf []byte, isBulkSync bool) { mType, data, err := decodeMessage(buf) if err != nil { - logrus.Errorf("Error decoding gossip message to get message type: %v", err) + log.G(context.TODO()).Errorf("Error decoding gossip message to get message type: %v", err) return } @@ -390,7 +391,7 @@ func (nDB *NetworkDB) handleMessage(buf []byte, isBulkSync bool) { case MessageTypeCompound: nDB.handleCompound(data, isBulkSync) default: - logrus.Errorf("%v(%v): unknown message type %d", nDB.config.Hostname, nDB.config.NodeID, mType) + log.G(context.TODO()).Errorf("%v(%v): unknown message type %d", nDB.config.Hostname, nDB.config.NodeID, mType) } } @@ -439,7 +440,7 @@ func (d *delegate) LocalState(join bool) []byte { buf, err := encodeMessage(MessageTypePushPull, &pp) if err != nil { - logrus.Errorf("Failed to encode local network state: %v", err) + log.G(context.TODO()).Errorf("Failed to encode local network state: %v", err) return nil } @@ -448,24 +449,24 @@ func (d *delegate) LocalState(join bool) []byte { func (d *delegate) MergeRemoteState(buf []byte, isJoin bool) { if len(buf) == 0 { - logrus.Error("zero byte remote network state received") + log.G(context.TODO()).Error("zero byte remote network state received") return } var gMsg GossipMessage err := proto.Unmarshal(buf, &gMsg) if err != nil { - logrus.Errorf("Error unmarshalling push pull message: %v", err) + log.G(context.TODO()).Errorf("Error unmarshalling push pull message: %v", err) return } if gMsg.Type != MessageTypePushPull { - logrus.Errorf("Invalid message type %v received from remote", buf[0]) + log.G(context.TODO()).Errorf("Invalid message type %v received from remote", buf[0]) } pp := NetworkPushPull{} if err := 
proto.Unmarshal(gMsg.Data, &pp); err != nil { - logrus.Errorf("Failed to decode remote network state: %v", err) + log.G(context.TODO()).Errorf("Failed to decode remote network state: %v", err) return } diff --git a/libnetwork/networkdb/event_delegate.go b/libnetwork/networkdb/event_delegate.go index 78ebe0fd9e..6cb52e3651 100644 --- a/libnetwork/networkdb/event_delegate.go +++ b/libnetwork/networkdb/event_delegate.go @@ -1,11 +1,12 @@ package networkdb import ( + "context" "encoding/json" "net" + "github.com/containerd/containerd/log" "github.com/hashicorp/memberlist" - "github.com/sirupsen/logrus" ) type eventDelegate struct { @@ -17,12 +18,12 @@ func (e *eventDelegate) broadcastNodeEvent(addr net.IP, op opType) { if err == nil { e.nDB.broadcaster.Write(makeEvent(op, NodeTable, "", "", value)) } else { - logrus.Errorf("Error marshalling node broadcast event %s", addr.String()) + log.G(context.TODO()).Errorf("Error marshalling node broadcast event %s", addr.String()) } } func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) { - logrus.Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr) + log.G(context.TODO()).Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr) e.broadcastNodeEvent(mn.Addr, opCreate) e.nDB.Lock() defer e.nDB.Unlock() @@ -39,11 +40,11 @@ func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) { e.nDB.purgeReincarnation(mn) e.nDB.nodes[mn.Name] = &node{Node: *mn} - logrus.Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr) + log.G(context.TODO()).Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr) } func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) { - logrus.Infof("Node %s/%s, left gossip cluster", mn.Name, mn.Addr) + log.G(context.TODO()).Infof("Node %s/%s, left gossip cluster", mn.Name, mn.Addr) e.broadcastNodeEvent(mn.Addr, opDelete) e.nDB.Lock() @@ -51,7 +52,7 @@ func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) { n, currState, _ := e.nDB.findNode(mn.Name) if n == nil { - logrus.Errorf("Node %s/%s 
not found in the node lists", mn.Name, mn.Addr) + log.G(context.TODO()).Errorf("Node %s/%s not found in the node lists", mn.Name, mn.Addr) return } // if the node was active means that did not send the leave cluster message, so it's probable that @@ -59,11 +60,11 @@ func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) { if currState == nodeActiveState { moved, err := e.nDB.changeNodeState(mn.Name, nodeFailedState) if err != nil { - logrus.WithError(err).Errorf("impossible condition, node %s/%s not present in the list", mn.Name, mn.Addr) + log.G(context.TODO()).WithError(err).Errorf("impossible condition, node %s/%s not present in the list", mn.Name, mn.Addr) return } if moved { - logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr) + log.G(context.TODO()).Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr) } } } diff --git a/libnetwork/networkdb/networkdb.go b/libnetwork/networkdb/networkdb.go index 9813c9f547..a7feafc7d6 100644 --- a/libnetwork/networkdb/networkdb.go +++ b/libnetwork/networkdb/networkdb.go @@ -11,13 +11,13 @@ import ( "sync/atomic" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" iradix "github.com/hashicorp/go-immutable-radix" "github.com/hashicorp/memberlist" "github.com/hashicorp/serf/serf" - "github.com/sirupsen/logrus" ) const ( @@ -269,7 +269,7 @@ func New(c *Config) (*NetworkDB, error) { nDB.indexes[byTable] = iradix.New() nDB.indexes[byNetwork] = iradix.New() - logrus.Infof("New memberlist node - Node:%v will use memberlist nodeID:%v with config:%+v", c.Hostname, c.NodeID, c) + log.G(context.TODO()).Infof("New memberlist node - Node:%v will use memberlist nodeID:%v with config:%+v", c.Hostname, c.NodeID, c) if err := nDB.clusterInit(); err != nil { return nil, err } @@ -282,7 +282,7 @@ func New(c *Config) (*NetworkDB, error) { func (nDB *NetworkDB) Join(members []string) error { nDB.Lock() 
nDB.bootStrapIP = append([]string(nil), members...) - logrus.Infof("The new bootstrap node list is:%v", nDB.bootStrapIP) + log.G(context.TODO()).Infof("The new bootstrap node list is:%v", nDB.bootStrapIP) nDB.Unlock() return nDB.clusterJoin(members) } @@ -291,7 +291,7 @@ func (nDB *NetworkDB) Join(members []string) error { // stopping timers, canceling goroutines etc. func (nDB *NetworkDB) Close() { if err := nDB.clusterLeave(); err != nil { - logrus.Errorf("%v(%v) Could not close DB: %v", nDB.config.Hostname, nDB.config.NodeID, err) + log.G(context.TODO()).Errorf("%v(%v) Could not close DB: %v", nDB.config.Hostname, nDB.config.NodeID, err) } //Avoid (*Broadcaster).run goroutine leak @@ -640,9 +640,9 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error { return fmt.Errorf("failed to send leave network event for %s: %v", nid, err) } - logrus.Debugf("%v(%v): joined network %s", nDB.config.Hostname, nDB.config.NodeID, nid) + log.G(context.TODO()).Debugf("%v(%v): joined network %s", nDB.config.Hostname, nDB.config.NodeID, nid) if _, err := nDB.bulkSync(networkNodes, true); err != nil { - logrus.Errorf("Error bulk syncing while joining network %s: %v", nid, err) + log.G(context.TODO()).Errorf("Error bulk syncing while joining network %s: %v", nid, err) } // Mark the network as being synced @@ -685,7 +685,7 @@ func (nDB *NetworkDB) LeaveNetwork(nid string) error { return fmt.Errorf("could not find network %s while trying to leave", nid) } - logrus.Debugf("%v(%v): leaving network %s", nDB.config.Hostname, nDB.config.NodeID, nid) + log.G(context.TODO()).Debugf("%v(%v): leaving network %s", nDB.config.Hostname, nDB.config.NodeID, nid) n.ltime = ltime n.reapTime = nDB.config.reapNetworkInterval n.leaving = true diff --git a/libnetwork/networkdb/networkdb_test.go b/libnetwork/networkdb/networkdb_test.go index 963f567505..920c47ee62 100644 --- a/libnetwork/networkdb/networkdb_test.go +++ b/libnetwork/networkdb/networkdb_test.go @@ -1,8 +1,8 @@ package networkdb import ( + 
"context" "fmt" - "log" "net" "os" "strconv" @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-events" "github.com/hashicorp/memberlist" @@ -67,7 +68,7 @@ func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Co func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) { t.Helper() - log.Print("Closing DB instances...") + log.G(context.TODO()).Print("Closing DB instances...") for _, db := range dbs { db.Close() } @@ -861,7 +862,7 @@ func TestNetworkDBIslands(t *testing.T) { // Now the 3 bootstrap nodes will cleanly leave, and will be properly removed from the other 2 nodes for i := 0; i < 3; i++ { - logrus.Infof("node %d leaving", i) + log.G(context.TODO()).Infof("node %d leaving", i) dbs[i].Close() } @@ -896,7 +897,7 @@ func TestNetworkDBIslands(t *testing.T) { // Spawn again the first 3 nodes with different names but same IP:port for i := 0; i < 3; i++ { - logrus.Infof("node %d coming back", i) + log.G(context.TODO()).Infof("node %d coming back", i) conf := *dbs[i].config conf.NodeID = stringid.TruncateID(stringid.GenerateRandomID()) dbs[i] = launchNode(t, conf) diff --git a/libnetwork/networkdb/networkdbdiagnostic.go b/libnetwork/networkdb/networkdbdiagnostic.go index 62b5a291f0..c673bbbb92 100644 --- a/libnetwork/networkdb/networkdbdiagnostic.go +++ b/libnetwork/networkdb/networkdbdiagnostic.go @@ -1,11 +1,13 @@ package networkdb import ( + "context" "encoding/base64" "fmt" "net/http" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/diagnostic" "github.com/docker/docker/libnetwork/internal/caller" "github.com/sirupsen/logrus" @@ -37,7 +39,7 @@ func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": 
r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("join cluster") if len(r.Form["members"]) < 1 { @@ -70,7 +72,7 @@ func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("network peers") if len(r.Form["nid"]) < 1 { @@ -104,7 +106,7 @@ func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("cluster peers") nDB, ok := ctx.(*NetworkDB) @@ -127,7 +129,7 @@ func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { unsafe, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("create entry") if len(r.Form["tname"]) < 1 || @@ -176,7 +178,7 @@ func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { unsafe, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": 
"diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("update entry") if len(r.Form["tname"]) < 1 || @@ -224,7 +226,7 @@ func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("delete entry") if len(r.Form["tname"]) < 1 || @@ -261,7 +263,7 @@ func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { unsafe, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("get entry") if len(r.Form["tname"]) < 1 || @@ -307,7 +309,7 @@ func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("join network") if len(r.Form["nid"]) < 1 { @@ -339,7 +341,7 @@ func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := diagnostic.ParseHTTPFormOptions(r) // 
audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("leave network") if len(r.Form["nid"]) < 1 { @@ -371,7 +373,7 @@ func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) { unsafe, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("get table") if len(r.Form["tname"]) < 1 || @@ -419,7 +421,7 @@ func dbNetworkStats(ctx interface{}, w http.ResponseWriter, r *http.Request) { _, json := diagnostic.ParseHTTPFormOptions(r) // audit logs - log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) + log := log.G(context.TODO()).WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": caller.Name(0), "url": r.URL.String()}) log.Info("network stats") if len(r.Form["nid"]) < 1 { diff --git a/libnetwork/networkdb/nodemgmt.go b/libnetwork/networkdb/nodemgmt.go index f5a7498522..018ff74310 100644 --- a/libnetwork/networkdb/nodemgmt.go +++ b/libnetwork/networkdb/nodemgmt.go @@ -1,10 +1,11 @@ package networkdb import ( + "context" "fmt" + "github.com/containerd/containerd/log" "github.com/hashicorp/memberlist" - "github.com/sirupsen/logrus" ) type nodeState int @@ -73,7 +74,7 @@ func (nDB *NetworkDB) changeNodeState(nodeName string, newState nodeState) (bool nDB.failedNodes[nodeName] = n } - logrus.Infof("Node %s change state %s --> %s", nodeName, 
nodeStateName[currState], nodeStateName[newState]) + log.G(context.TODO()).Infof("Node %s change state %s --> %s", nodeName, nodeStateName[currState], nodeStateName[newState]) if newState == nodeLeftState || newState == nodeFailedState { // set the node reap time, if not already set @@ -94,7 +95,7 @@ func (nDB *NetworkDB) changeNodeState(nodeName string, newState nodeState) (bool func (nDB *NetworkDB) purgeReincarnation(mn *memberlist.Node) bool { for name, node := range nDB.nodes { if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name { - logrus.Infof("Node %s/%s, is the new incarnation of the active node %s/%s", mn.Name, mn.Addr, name, node.Addr) + log.G(context.TODO()).Infof("Node %s/%s, is the new incarnation of the active node %s/%s", mn.Name, mn.Addr, name, node.Addr) nDB.changeNodeState(name, nodeLeftState) return true } @@ -102,7 +103,7 @@ func (nDB *NetworkDB) purgeReincarnation(mn *memberlist.Node) bool { for name, node := range nDB.failedNodes { if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name { - logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr) + log.G(context.TODO()).Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr) nDB.changeNodeState(name, nodeLeftState) return true } @@ -110,7 +111,7 @@ func (nDB *NetworkDB) purgeReincarnation(mn *memberlist.Node) bool { for name, node := range nDB.leftNodes { if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name { - logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr) + log.G(context.TODO()).Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr) nDB.changeNodeState(name, nodeLeftState) return true } diff --git a/libnetwork/ns/init_linux.go b/libnetwork/ns/init_linux.go index fe6481fbc1..ed721c6264 100644 --- a/libnetwork/ns/init_linux.go 
+++ b/libnetwork/ns/init_linux.go @@ -1,6 +1,7 @@ package ns import ( + "context" "fmt" "os/exec" "strings" @@ -8,7 +9,7 @@ import ( "syscall" "time" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "github.com/vishvananda/netlink" "github.com/vishvananda/netns" ) @@ -26,15 +27,15 @@ func Init() { var err error initNs, err = netns.Get() if err != nil { - logrus.Errorf("could not get initial namespace: %v", err) + log.G(context.TODO()).Errorf("could not get initial namespace: %v", err) } initNl, err = netlink.NewHandle(getSupportedNlFamilies()...) if err != nil { - logrus.Errorf("could not create netlink handle on initial namespace: %v", err) + log.G(context.TODO()).Errorf("could not create netlink handle on initial namespace: %v", err) } err = initNl.SetSocketTimeout(NetlinkSocketsTimeout) if err != nil { - logrus.Warnf("Failed to set the timeout on the default netlink handle sockets: %v", err) + log.G(context.TODO()).Warnf("Failed to set the timeout on the default netlink handle sockets: %v", err) } } @@ -59,14 +60,14 @@ func getSupportedNlFamilies() []int { fams := []int{syscall.NETLINK_ROUTE} // NETLINK_XFRM test if err := checkXfrmSocket(); err != nil { - logrus.Warnf("Could not load necessary modules for IPSEC rules: %v", err) + log.G(context.TODO()).Warnf("Could not load necessary modules for IPSEC rules: %v", err) } else { fams = append(fams, syscall.NETLINK_XFRM) } // NETLINK_NETFILTER test if err := loadNfConntrackModules(); err != nil { if checkNfSocket() != nil { - logrus.Warnf("Could not load necessary modules for Conntrack: %v", err) + log.G(context.TODO()).Warnf("Could not load necessary modules for Conntrack: %v", err) } else { fams = append(fams, syscall.NETLINK_NETFILTER) } diff --git a/libnetwork/osl/interface_linux.go b/libnetwork/osl/interface_linux.go index ee1b4ab842..22c905dac7 100644 --- a/libnetwork/osl/interface_linux.go +++ b/libnetwork/osl/interface_linux.go @@ -1,15 +1,16 @@ package osl import ( + "context" "fmt" 
"net" "sync" "syscall" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/vishvananda/netns" ) @@ -141,7 +142,7 @@ func (i *nwIface) Remove() error { err = nlh.LinkSetName(iface, i.SrcName()) if err != nil { - logrus.Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err) + log.G(context.TODO()).Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err) return err } @@ -153,7 +154,7 @@ func (i *nwIface) Remove() error { } else if !isDefault { // Move the network interface to caller namespace. if err := nlh.LinkSetNsFd(iface, ns.ParseHandlerInt()); err != nil { - logrus.Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err) + log.G(context.TODO()).Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err) return err } } @@ -290,10 +291,10 @@ func (n *networkNamespace) AddInterface(srcName, dstPrefix string, options ...If // to properly cleanup the interface. Its important especially for // interfaces with global attributes, ex: vni id for vxlan interfaces. if nerr := nlh.LinkSetName(iface, i.SrcName()); nerr != nil { - logrus.Errorf("renaming interface (%s->%s) failed, %v after config error %v", i.DstName(), i.SrcName(), nerr, err) + log.G(context.TODO()).Errorf("renaming interface (%s->%s) failed, %v after config error %v", i.DstName(), i.SrcName(), nerr, err) } if nerr := nlh.LinkSetNsFd(iface, ns.ParseHandlerInt()); nerr != nil { - logrus.Errorf("moving interface %s to host ns failed, %v, after config error %v", i.SrcName(), nerr, err) + log.G(context.TODO()).Errorf("moving interface %s to host ns failed, %v, after config error %v", i.SrcName(), nerr, err) } return err } @@ -301,7 +302,7 @@ func (n *networkNamespace) AddInterface(srcName, dstPrefix string, options ...If // Up the interface. 
cnt := 0 for err = nlh.LinkSetUp(iface); err != nil && cnt < 3; cnt++ { - logrus.Debugf("retrying link setup because of: %v", err) + log.G(context.TODO()).Debugf("retrying link setup because of: %v", err) time.Sleep(10 * time.Millisecond) err = nlh.LinkSetUp(iface) } diff --git a/libnetwork/osl/kernel/knobs_linux.go b/libnetwork/osl/kernel/knobs_linux.go index 93d644424b..1d40dbff16 100644 --- a/libnetwork/osl/kernel/knobs_linux.go +++ b/libnetwork/osl/kernel/knobs_linux.go @@ -1,11 +1,12 @@ package kernel import ( + "context" "os" "path" "strings" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) // writeSystemProperty writes the value to a path under /proc/sys as determined from the key. @@ -31,17 +32,17 @@ func ApplyOSTweaks(osConfig map[string]*OSValue) { // read the existing property from disk oldv, err := readSystemProperty(k) if err != nil { - logrus.WithError(err).Errorf("error reading the kernel parameter %s", k) + log.G(context.TODO()).WithError(err).Errorf("error reading the kernel parameter %s", k) continue } if propertyIsValid(oldv, v.Value, v.CheckFn) { // write new prop value to disk if err := writeSystemProperty(k, v.Value); err != nil { - logrus.WithError(err).Errorf("error setting the kernel parameter %s = %s, (leaving as %s)", k, v.Value, oldv) + log.G(context.TODO()).WithError(err).Errorf("error setting the kernel parameter %s = %s, (leaving as %s)", k, v.Value, oldv) continue } - logrus.Debugf("updated kernel parameter %s = %s (was %s)", k, v.Value, oldv) + log.G(context.TODO()).Debugf("updated kernel parameter %s = %s (was %s)", k, v.Value, oldv) } } } diff --git a/libnetwork/osl/kernel/knobs_linux_test.go b/libnetwork/osl/kernel/knobs_linux_test.go index b6b5d856f8..70428bfc82 100644 --- a/libnetwork/osl/kernel/knobs_linux_test.go +++ b/libnetwork/osl/kernel/knobs_linux_test.go @@ -1,9 +1,10 @@ package kernel import ( + "context" "testing" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" 
"gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" ) @@ -17,7 +18,7 @@ func TestReadWriteKnobs(t *testing.T) { // Check if the test is able to read the value v, err := readSystemProperty(k) if err != nil { - logrus.WithError(err).Warnf("Path %v not readable", k) + log.G(context.TODO()).WithError(err).Warnf("Path %v not readable", k) // the path is not there, skip this key continue } diff --git a/libnetwork/osl/namespace_linux.go b/libnetwork/osl/namespace_linux.go index d7d2fe2d63..2f115abbfd 100644 --- a/libnetwork/osl/namespace_linux.go +++ b/libnetwork/osl/namespace_linux.go @@ -1,6 +1,7 @@ package osl import ( + "context" "errors" "fmt" "net" @@ -13,11 +14,11 @@ import ( "syscall" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/internal/unshare" "github.com/docker/docker/libnetwork/ns" "github.com/docker/docker/libnetwork/osl/kernel" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/vishvananda/netns" "golang.org/x/sys/unix" @@ -220,7 +221,7 @@ func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) { err = n.nlHandle.SetSocketTimeout(ns.NetlinkSocketsTimeout) if err != nil { - logrus.Warnf("Failed to set the timeout on the sandbox netlink handle sockets: %v", err) + log.G(context.TODO()).Warnf("Failed to set the timeout on the sandbox netlink handle sockets: %v", err) } // In live-restore mode, IPV6 entries are getting cleaned up due to below code // We should retain IPV6 configurations in live-restore mode when Docker Daemon @@ -229,7 +230,7 @@ func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) { if !isRestore && !n.isDefault { err = setIPv6(n.path, "all", false) if err != nil { - logrus.Warnf("Failed to disable IPv6 on all interfaces on network namespace %q: %v", n.path, err) + log.G(context.TODO()).Warnf("Failed to disable IPv6 on all interfaces on network namespace %q: %v", n.path, err) } } @@ -277,13 +278,13 @@ func 
GetSandboxForExternalKey(basePath string, key string) (Sandbox, error) { err = n.nlHandle.SetSocketTimeout(ns.NetlinkSocketsTimeout) if err != nil { - logrus.Warnf("Failed to set the timeout on the sandbox netlink handle sockets: %v", err) + log.G(context.TODO()).Warnf("Failed to set the timeout on the sandbox netlink handle sockets: %v", err) } // As starting point, disable IPv6 on all interfaces err = setIPv6(n.path, "all", false) if err != nil { - logrus.Warnf("Failed to disable IPv6 on all interfaces on network namespace %q: %v", n.path, err) + log.G(context.TODO()).Warnf("Failed to disable IPv6 on all interfaces on network namespace %q: %v", n.path, err) } if err = n.loopbackUp(); err != nil { @@ -311,7 +312,7 @@ func createNetworkNamespace(path string, osCreate bool) error { func unmountNamespaceFile(path string) { if _, err := os.Stat(path); err == nil { if err := syscall.Unmount(path, syscall.MNT_DETACH); err != nil && !errors.Is(err, unix.EINVAL) { - logrus.WithError(err).Error("Error unmounting namespace file") + log.G(context.TODO()).WithError(err).Error("Error unmounting namespace file") } } } @@ -426,7 +427,7 @@ func (n *networkNamespace) InvokeFunc(f func()) error { defer func() { close(done) if err := netns.Set(origNS); err != nil { - logrus.WithError(err).Warn("failed to restore thread's network namespace") + log.G(context.TODO()).WithError(err).Warn("failed to restore thread's network namespace") // Recover from the error by leaving this goroutine locked to // the thread. The runtime will terminate the thread and replace // it with a clean one when this goroutine returns. 
@@ -587,7 +588,7 @@ func (n *networkNamespace) checkLoV6() { } if err := setIPv6(n.path, "lo", enable); err != nil { - logrus.Warnf("Failed to %s IPv6 on loopback interface on network namespace %q: %v", action, n.path, err) + log.G(context.TODO()).Warnf("Failed to %s IPv6 on loopback interface on network namespace %q: %v", action, n.path, err) } n.loV6Enabled = enable @@ -622,7 +623,7 @@ func setIPv6(nspath, iface string, enable bool) error { } defer func() { if err := netns.Set(origNS); err != nil { - logrus.WithError(err).Error("libnetwork: restoring thread network namespace failed") + log.G(context.TODO()).WithError(err).Error("libnetwork: restoring thread network namespace failed") // The error is only fatal for the current thread. Keep this // goroutine locked to the thread to make the runtime replace it // with a clean thread once this goroutine returns. @@ -644,7 +645,7 @@ func setIPv6(nspath, iface string, enable bool) error { if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { - logrus.WithError(err).Warn("Cannot configure IPv6 forwarding on container interface. Has IPv6 been disabled in this node's kernel?") + log.G(context.TODO()).WithError(err).Warn("Cannot configure IPv6 forwarding on container interface. Has IPv6 been disabled in this node's kernel?") return } errCh <- err diff --git a/libnetwork/osl/neigh_linux.go b/libnetwork/osl/neigh_linux.go index e46b12a89f..2786fdbe56 100644 --- a/libnetwork/osl/neigh_linux.go +++ b/libnetwork/osl/neigh_linux.go @@ -2,10 +2,11 @@ package osl import ( "bytes" + "context" "fmt" "net" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "github.com/vishvananda/netlink" ) @@ -84,7 +85,7 @@ func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr, // from the namespace cache. Otherwise if the neighbor moves back to the // same host again, kernel update can fail. 
if err := nlh.NeighDel(nlnh); err != nil { - logrus.Warnf("Deleting neighbor IP %s, mac %s failed, %v", dstIP, dstMac, err) + log.G(context.TODO()).Warnf("Deleting neighbor IP %s, mac %s failed, %v", dstIP, dstMac, err) } // Delete the dynamic entry in the bridge @@ -100,7 +101,7 @@ func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr, nlnh.LinkIndex = iface.Attrs().Index } if err := nlh.NeighDel(nlnh); err != nil { - logrus.WithError(err).Warn("error while deleting neighbor entry") + log.G(context.TODO()).WithError(err).Warn("error while deleting neighbor entry") } } } @@ -113,7 +114,7 @@ func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr, } } n.Unlock() - logrus.Debugf("Neighbor entry deleted for IP %v, mac %v osDelete:%t", dstIP, dstMac, osDelete) + log.G(context.TODO()).Debugf("Neighbor entry deleted for IP %v, mac %v osDelete:%t", dstIP, dstMac, osDelete) return nil } @@ -130,7 +131,7 @@ func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, fo nh := n.findNeighbor(dstIP, dstMac) if nh != nil { neighborAlreadyPresent = true - logrus.Warnf("Neighbor entry already present for IP %v, mac %v neighbor:%+v forceUpdate:%t", dstIP, dstMac, nh, force) + log.G(context.TODO()).Warnf("Neighbor entry already present for IP %v, mac %v neighbor:%+v forceUpdate:%t", dstIP, dstMac, nh, force) if !force { return NeighborSearchError{dstIP, dstMac, true} } @@ -187,7 +188,7 @@ func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, fo n.Lock() n.neighbors = append(n.neighbors, nh) n.Unlock() - logrus.Debugf("Neighbor entry added for IP:%v, mac:%v on ifc:%s", dstIP, dstMac, nh.linkName) + log.G(context.TODO()).Debugf("Neighbor entry added for IP:%v, mac:%v on ifc:%s", dstIP, dstMac, nh.linkName) return nil } diff --git a/libnetwork/portallocator/portallocator.go b/libnetwork/portallocator/portallocator.go index d4ea711c45..bf8723fc12 100644 --- 
a/libnetwork/portallocator/portallocator.go +++ b/libnetwork/portallocator/portallocator.go @@ -1,12 +1,13 @@ package portallocator import ( + "context" "errors" "fmt" "net" "sync" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) type ipMapping map[string]protoMap @@ -92,7 +93,7 @@ func Get() *PortAllocator { func NewInstance() *PortAllocator { start, end, err := getDynamicPortRange() if err != nil { - logrus.WithError(err).Infof("falling back to default port range %d-%d", defaultPortRangeStart, defaultPortRangeEnd) + log.G(context.TODO()).WithError(err).Infof("falling back to default port range %d-%d", defaultPortRangeStart, defaultPortRangeEnd) start, end = defaultPortRangeStart, defaultPortRangeEnd } return &PortAllocator{ diff --git a/libnetwork/portmapper/mapper.go b/libnetwork/portmapper/mapper.go index 3315158c97..1fa29e600e 100644 --- a/libnetwork/portmapper/mapper.go +++ b/libnetwork/portmapper/mapper.go @@ -1,13 +1,14 @@ package portmapper import ( + "context" "errors" "fmt" "net" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/portallocator" "github.com/ishidawataru/sctp" - "github.com/sirupsen/logrus" ) type mapping struct { @@ -198,7 +199,7 @@ func (pm *PortMapper) Unmap(host net.Addr) error { containerIP, containerPort := getIPAndPort(data.container) hostIP, hostPort := getIPAndPort(data.host) if err := pm.DeleteForwardingTableEntry(data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { - logrus.Errorf("Error on iptables delete: %s", err) + log.G(context.TODO()).Errorf("Error on iptables delete: %s", err) } switch a := host.(type) { @@ -219,12 +220,12 @@ func (pm *PortMapper) Unmap(host net.Addr) error { func (pm *PortMapper) ReMapAll() { pm.lock.Lock() defer pm.lock.Unlock() - logrus.Debugln("Re-applying all port mappings.") + log.G(context.TODO()).Debugln("Re-applying all port mappings.") for _, data := range pm.currentMappings { containerIP, containerPort := 
getIPAndPort(data.container) hostIP, hostPort := getIPAndPort(data.host) if err := pm.AppendForwardingTableEntry(data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { - logrus.Errorf("Error on iptables add: %s", err) + log.G(context.TODO()).Errorf("Error on iptables add: %s", err) } } } @@ -237,7 +238,7 @@ func getKey(a net.Addr) string { return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") case *sctp.SCTPAddr: if len(t.IPAddrs) == 0 { - logrus.Error(ErrSCTPAddrNoIP) + log.G(context.TODO()).Error(ErrSCTPAddrNoIP) return "" } return fmt.Sprintf("%s:%d/%s", t.IPAddrs[0].IP.String(), t.Port, "sctp") @@ -253,7 +254,7 @@ func getIPAndPort(a net.Addr) (net.IP, int) { return t.IP, t.Port case *sctp.SCTPAddr: if len(t.IPAddrs) == 0 { - logrus.Error(ErrSCTPAddrNoIP) + log.G(context.TODO()).Error(ErrSCTPAddrNoIP) return nil, 0 } return t.IPAddrs[0].IP, t.Port diff --git a/libnetwork/resolvconf/resolvconf.go b/libnetwork/resolvconf/resolvconf.go index baf9c31585..9ae5a5195c 100644 --- a/libnetwork/resolvconf/resolvconf.go +++ b/libnetwork/resolvconf/resolvconf.go @@ -3,12 +3,13 @@ package resolvconf import ( "bytes" + "context" "os" "regexp" "strings" "sync" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) const ( @@ -51,7 +52,7 @@ func Path() string { ns := GetNameservers(candidateResolvConf, IP) if len(ns) == 1 && ns[0] == "127.0.0.53" { pathAfterSystemdDetection = alternatePath - logrus.Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath) + log.G(context.TODO()).Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath) } }) return pathAfterSystemdDetection @@ -120,10 +121,10 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { // if the resulting resolvConf has no more nameservers defined, add appropriate // default DNS servers for IPv4 and (optionally) IPv6 if 
len(GetNameservers(cleanedResolvConf, IP)) == 0 { - logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) + log.G(context.TODO()).Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) dns := defaultIPv4Dns if ipv6Enabled { - logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) + log.G(context.TODO()).Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) dns = append(dns, defaultIPv6Dns...) } cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) diff --git a/libnetwork/resolver.go b/libnetwork/resolver.go index ab19b7b08f..3c492b1558 100644 --- a/libnetwork/resolver.go +++ b/libnetwork/resolver.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" "github.com/miekg/dns" "github.com/sirupsen/logrus" @@ -92,7 +93,7 @@ func NewResolver(address string, proxyDNS bool, backend DNSBackend) *Resolver { func (r *Resolver) log() *logrus.Logger { if r.logger == nil { - return logrus.StandardLogger() + return log.G(context.TODO()).Logger } return r.logger } diff --git a/libnetwork/resolver_test.go b/libnetwork/resolver_test.go index 733e1992de..e4de1c40ea 100644 --- a/libnetwork/resolver_test.go +++ b/libnetwork/resolver_test.go @@ -1,6 +1,7 @@ package libnetwork import ( + "context" "encoding/hex" "errors" "net" @@ -9,6 +10,7 @@ import ( "testing" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/testutils" "github.com/miekg/dns" "github.com/sirupsen/logrus" @@ -199,7 +201,7 @@ func newDNSHandlerServFailOnce(requests *int) func(w dns.ResponseWriter, r *dns. 
} *requests = *requests + 1 if err := w.WriteMsg(m); err != nil { - logrus.WithError(err).Error("Error writing dns response") + log.G(context.TODO()).WithError(err).Error("Error writing dns response") } } } diff --git a/libnetwork/sandbox.go b/libnetwork/sandbox.go index aceb7b9be5..930ce7a102 100644 --- a/libnetwork/sandbox.go +++ b/libnetwork/sandbox.go @@ -1,6 +1,7 @@ package libnetwork import ( + "context" "encoding/json" "fmt" "net" @@ -9,11 +10,11 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/etchosts" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/osl" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) // SandboxOption is an option setter function type used to pass various options to @@ -188,18 +189,18 @@ func (sb *Sandbox) delete(force bool) error { if c.isDistributedControl() { retain = true } - logrus.Warnf("Failed getting network for ep %s during sandbox %s delete: %v", ep.ID(), sb.ID(), err) + log.G(context.TODO()).Warnf("Failed getting network for ep %s during sandbox %s delete: %v", ep.ID(), sb.ID(), err) continue } if !force { if err := ep.Leave(sb); err != nil { - logrus.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) + log.G(context.TODO()).Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) } } if err := ep.Delete(force); err != nil { - logrus.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err) + log.G(context.TODO()).Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err) } } @@ -219,12 +220,12 @@ func (sb *Sandbox) delete(force bool) error { if sb.osSbox != nil && !sb.config.useDefaultSandBox { if err := sb.osSbox.Destroy(); err != nil { - logrus.WithError(err).Warn("error destroying network sandbox") + log.G(context.TODO()).WithError(err).Warn("error destroying network sandbox") } } if err := sb.storeDelete(); err != nil { - logrus.Warnf("Failed to delete 
sandbox %s from store: %v", sb.ID(), err) + log.G(context.TODO()).Warnf("Failed to delete sandbox %s from store: %v", sb.ID(), err) } c.mu.Lock() @@ -255,7 +256,7 @@ func (sb *Sandbox) Rename(name string) error { defer func() { if err != nil { if err2 := lEp.rename(oldName); err2 != nil { - logrus.WithField("old", oldName).WithField("origError", err).WithError(err2).Error("error renaming sandbox") + log.G(context.TODO()).WithField("old", oldName).WithField("origError", err).WithError(err2).Error("error renaming sandbox") } } }() @@ -273,7 +274,7 @@ func (sb *Sandbox) Refresh(options ...SandboxOption) error { // Detach from all endpoints for _, ep := range epList { if err := ep.Leave(sb); err != nil { - logrus.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) + log.G(context.TODO()).Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) } } @@ -289,7 +290,7 @@ func (sb *Sandbox) Refresh(options ...SandboxOption) error { // Re-connect to all endpoints for _, ep := range epList { if err := ep.Join(sb); err != nil { - logrus.Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err) + log.G(context.TODO()).Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err) } } @@ -408,7 +409,7 @@ func (sb *Sandbox) HandleQueryResp(name string, ip net.IP) { func (sb *Sandbox) ResolveIP(ip string) string { var svc string - logrus.Debugf("IP To resolve %v", ip) + log.G(context.TODO()).Debugf("IP To resolve %v", ip) for _, ep := range sb.Endpoints() { n := ep.getNetwork() @@ -437,7 +438,7 @@ func (sb *Sandbox) ResolveService(name string) ([]*net.SRV, []net.IP) { srv := []*net.SRV{} ip := []net.IP{} - logrus.Debugf("Service name To resolve: %v", name) + log.G(context.TODO()).Debugf("Service name To resolve: %v", name) // There are DNS implementations that allow SRV queries for names not in // the format defined by RFC 2782. 
Hence specific validations checks are @@ -500,7 +501,7 @@ func (sb *Sandbox) ResolveName(name string, ipType int) ([]net.IP, bool) { // {a.b in network c.d}, // {a in network b.c.d}, - logrus.Debugf("Name To resolve: %v", name) + log.G(context.TODO()).Debugf("Name To resolve: %v", name) name = strings.TrimSuffix(name, ".") reqName := []string{name} networkName := []string{""} @@ -608,7 +609,7 @@ func (sb *Sandbox) resolveName(req string, networkName string, epList []*Endpoin func (sb *Sandbox) SetKey(basePath string) error { start := time.Now() defer func() { - logrus.Debugf("sandbox set key processing took %s for container %s", time.Since(start), sb.ContainerID()) + log.G(context.TODO()).Debugf("sandbox set key processing took %s for container %s", time.Since(start), sb.ContainerID()) }() if basePath == "" { @@ -646,10 +647,10 @@ func (sb *Sandbox) SetKey(basePath string) error { if err := sb.osSbox.InvokeFunc(sb.resolver.SetupFunc(0)); err == nil { if err := sb.resolver.Start(); err != nil { - logrus.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err) } } else { - logrus.Errorf("Resolver Setup Function failed for container %s, %q", sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Resolver Setup Function failed for container %s, %q", sb.ContainerID(), err) } } @@ -664,11 +665,11 @@ func (sb *Sandbox) SetKey(basePath string) error { // EnableService makes a managed container's service available by adding the // endpoint to the service load balancer and service discovery. 
func (sb *Sandbox) EnableService() (err error) { - logrus.Debugf("EnableService %s START", sb.containerID) + log.G(context.TODO()).Debugf("EnableService %s START", sb.containerID) defer func() { if err != nil { if err2 := sb.DisableService(); err2 != nil { - logrus.WithError(err2).WithField("origError", err).Error("Error while disabling service after original error") + log.G(context.TODO()).WithError(err2).WithField("origError", err).Error("Error while disabling service after original error") } } }() @@ -680,14 +681,14 @@ func (sb *Sandbox) EnableService() (err error) { ep.enableService() } } - logrus.Debugf("EnableService %s DONE", sb.containerID) + log.G(context.TODO()).Debugf("EnableService %s DONE", sb.containerID) return nil } // DisableService removes a managed container's endpoints from the load balancer // and service discovery. func (sb *Sandbox) DisableService() (err error) { - logrus.Debugf("DisableService %s START", sb.containerID) + log.G(context.TODO()).Debugf("DisableService %s START", sb.containerID) failedEps := []string{} defer func() { if len(failedEps) > 0 { @@ -698,12 +699,12 @@ func (sb *Sandbox) DisableService() (err error) { if ep.isServiceEnabled() { if err := ep.deleteServiceInfoFromCluster(sb, false, "DisableService"); err != nil { failedEps = append(failedEps, ep.Name()) - logrus.Warnf("failed update state for endpoint %s into cluster: %v", ep.Name(), err) + log.G(context.TODO()).Warnf("failed update state for endpoint %s into cluster: %v", ep.Name(), err) } ep.disableService() } } - logrus.Debugf("DisableService %s DONE", sb.containerID) + log.G(context.TODO()).Debugf("DisableService %s DONE", sb.containerID) return nil } @@ -712,7 +713,7 @@ func releaseOSSboxResources(osSbox osl.Sandbox, ep *Endpoint) { // Only remove the interfaces owned by this endpoint from the sandbox. 
if ep.hasInterface(i.SrcName()) { if err := i.Remove(); err != nil { - logrus.Debugf("Remove interface %s failed: %v", i.SrcName(), err) + log.G(context.TODO()).Debugf("Remove interface %s failed: %v", i.SrcName(), err) } } } @@ -726,7 +727,7 @@ func releaseOSSboxResources(osSbox osl.Sandbox, ep *Endpoint) { if len(vip) > 0 && lbModeIsDSR { ipNet := &net.IPNet{IP: vip, Mask: net.CIDRMask(32, 32)} if err := osSbox.RemoveAliasIP(osSbox.GetLoopbackIfaceName(), ipNet); err != nil { - logrus.WithError(err).Debugf("failed to remove virtual ip %v to loopback", ipNet) + log.G(context.TODO()).WithError(err).Debugf("failed to remove virtual ip %v to loopback", ipNet) } } @@ -737,7 +738,7 @@ func releaseOSSboxResources(osSbox osl.Sandbox, ep *Endpoint) { // Remove non-interface routes. for _, r := range joinInfo.StaticRoutes { if err := osSbox.RemoveStaticRoute(r); err != nil { - logrus.Debugf("Remove route failed: %v", err) + log.G(context.TODO()).Debugf("Remove route failed: %v", err) } } } @@ -757,7 +758,7 @@ func (sb *Sandbox) releaseOSSbox() { } if err := osSbox.Destroy(); err != nil { - logrus.WithError(err).Error("Error destroying os sandbox") + log.G(context.TODO()).WithError(err).Error("Error destroying os sandbox") } } @@ -773,7 +774,7 @@ func (sb *Sandbox) restoreOslSandbox() error { ep.mu.Unlock() if i == nil { - logrus.Errorf("error restoring endpoint %s for container %s", ep.Name(), sb.ContainerID()) + log.G(context.TODO()).Errorf("error restoring endpoint %s for container %s", ep.Name(), sb.ContainerID()) continue } @@ -917,7 +918,7 @@ func (sb *Sandbox) clearNetworkResources(origEp *Endpoint) error { if len(sb.endpoints) == 0 { // sb.endpoints should never be empty and this is unexpected error condition // We log an error message to note this down for debugging purposes. 
- logrus.Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name()) + log.G(context.TODO()).Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name()) sb.mu.Unlock() return nil } @@ -939,7 +940,7 @@ func (sb *Sandbox) clearNetworkResources(origEp *Endpoint) error { } if index == -1 { - logrus.Warnf("Endpoint %s has already been deleted", ep.Name()) + log.G(context.TODO()).Warnf("Endpoint %s has already been deleted", ep.Name()) sb.mu.Unlock() return nil } diff --git a/libnetwork/sandbox_dns_unix.go b/libnetwork/sandbox_dns_unix.go index b809e8c3b8..990cad144a 100644 --- a/libnetwork/sandbox_dns_unix.go +++ b/libnetwork/sandbox_dns_unix.go @@ -4,6 +4,7 @@ package libnetwork import ( "bytes" + "context" "fmt" "net" "os" @@ -12,10 +13,10 @@ import ( "strconv" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/etchosts" "github.com/docker/docker/libnetwork/resolvconf" "github.com/docker/docker/libnetwork/types" - "github.com/sirupsen/logrus" ) const ( @@ -41,19 +42,19 @@ func (sb *Sandbox) startResolver(restore bool) { if !restore { err = sb.rebuildDNS() if err != nil { - logrus.Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err) return } } sb.resolver.SetExtServers(sb.extDNS) if err = sb.osSbox.InvokeFunc(sb.resolver.SetupFunc(0)); err != nil { - logrus.Errorf("Resolver Setup function failed for container %s, %q", sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Resolver Setup function failed for container %s, %q", sb.ContainerID(), err) return } if err = sb.resolver.Start(); err != nil { - logrus.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err) } }) } @@ -130,13 +131,13 @@ func (sb *Sandbox) updateHostsFile(ifaceIPs []string) 
error { func (sb *Sandbox) addHostsEntries(recs []etchosts.Record) { if err := etchosts.Add(sb.config.hostsPath, recs); err != nil { - logrus.Warnf("Failed adding service host entries to the running container: %v", err) + log.G(context.TODO()).Warnf("Failed adding service host entries to the running container: %v", err) } } func (sb *Sandbox) deleteHostsEntries(recs []etchosts.Record) { if err := etchosts.Delete(sb.config.hostsPath, recs); err != nil { - logrus.Warnf("Failed deleting service host entries to the running container: %v", err) + log.G(context.TODO()).Warnf("Failed deleting service host entries to the running container: %v", err) } } @@ -213,7 +214,7 @@ func (sb *Sandbox) setupDNS() error { if !os.IsNotExist(err) { return fmt.Errorf("could not copy source resolv.conf file %s to %s: %v", sb.config.originResolvConfPath, sb.config.resolvConfPath, err) } - logrus.Infof("%s does not exist, we create an empty resolv.conf for container", sb.config.originResolvConfPath) + log.G(context.TODO()).Infof("%s does not exist, we create an empty resolv.conf for container", sb.config.originResolvConfPath) if err := createFile(sb.config.resolvConfPath); err != nil { return err } @@ -232,7 +233,7 @@ func (sb *Sandbox) setupDNS() error { return err } // No /etc/resolv.conf found: we'll use the default resolvers (Google's Public DNS). - logrus.WithField("path", originResolvConfPath).Infof("no resolv.conf found, falling back to defaults") + log.G(context.TODO()).WithField("path", originResolvConfPath).Infof("no resolv.conf found, falling back to defaults") } var newRC *resolvconf.File @@ -313,7 +314,7 @@ func (sb *Sandbox) updateDNS(ipv6Enabled bool) error { if len(currHash) > 0 && !bytes.Equal(currHash, currRC.Hash) { // Seems the user has changed the container resolv.conf since the last time // we checked so return without doing anything. 
- // logrus.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled) + // log.G(ctx).Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled) return nil } diff --git a/libnetwork/sandbox_externalkey_unix.go b/libnetwork/sandbox_externalkey_unix.go index 0138e6b7ed..d441e9d3a1 100644 --- a/libnetwork/sandbox_externalkey_unix.go +++ b/libnetwork/sandbox_externalkey_unix.go @@ -3,6 +3,7 @@ package libnetwork import ( + "context" "encoding/json" "flag" "fmt" @@ -11,11 +12,11 @@ import ( "os" "path/filepath" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" ) const ( @@ -130,10 +131,10 @@ func (c *Controller) acceptClientConnections(sock string, l net.Listener) { conn, err := l.Accept() if err != nil { if _, err1 := os.Stat(sock); os.IsNotExist(err1) { - logrus.Debugf("Unix socket %s doesn't exist. cannot accept client connections", sock) + log.G(context.TODO()).Debugf("Unix socket %s doesn't exist. 
cannot accept client connections", sock) return } - logrus.Errorf("Error accepting connection %v", err) + log.G(context.TODO()).Errorf("Error accepting connection %v", err) continue } go func() { @@ -147,7 +148,7 @@ func (c *Controller) acceptClientConnections(sock string, l net.Listener) { _, err = conn.Write([]byte(ret)) if err != nil { - logrus.Errorf("Error returning to the client %v", err) + log.G(context.TODO()).Errorf("Error returning to the client %v", err) } }() } diff --git a/libnetwork/sandbox_store.go b/libnetwork/sandbox_store.go index fc6b0071ff..7af9aba325 100644 --- a/libnetwork/sandbox_store.go +++ b/libnetwork/sandbox_store.go @@ -1,12 +1,13 @@ package libnetwork import ( + "context" "encoding/json" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/docker/libnetwork/osl" - "github.com/sirupsen/logrus" ) const ( @@ -176,13 +177,13 @@ func (sb *Sandbox) storeDelete() error { func (c *Controller) sandboxCleanup(activeSandboxes map[string]interface{}) { store := c.getStore() if store == nil { - logrus.Error("Could not find local scope store while trying to cleanup sandboxes") + log.G(context.TODO()).Error("Could not find local scope store while trying to cleanup sandboxes") return } kvol, err := store.List(datastore.Key(sandboxPrefix), &sbState{c: c}) if err != nil && err != datastore.ErrKeyNotFound { - logrus.Errorf("failed to get sandboxes for scope %s: %v", store.Scope(), err) + log.G(context.TODO()).Errorf("failed to get sandboxes for scope %s: %v", store.Scope(), err) return } @@ -220,7 +221,7 @@ func (c *Controller) sandboxCleanup(activeSandboxes map[string]interface{}) { } sb.osSbox, err = osl.NewSandbox(sb.Key(), create, isRestore) if err != nil { - logrus.Errorf("failed to create osl sandbox while trying to restore sandbox %.7s%s: %v", sb.ID(), msg, err) + log.G(context.TODO()).Errorf("failed to create osl sandbox while trying to restore sandbox %.7s%s: %v", sb.ID(), msg, err) 
continue } @@ -232,27 +233,27 @@ func (c *Controller) sandboxCleanup(activeSandboxes map[string]interface{}) { n, err := c.getNetworkFromStore(eps.Nid) var ep *Endpoint if err != nil { - logrus.Errorf("getNetworkFromStore for nid %s failed while trying to build sandbox for cleanup: %v", eps.Nid, err) + log.G(context.TODO()).Errorf("getNetworkFromStore for nid %s failed while trying to build sandbox for cleanup: %v", eps.Nid, err) n = &network{id: eps.Nid, ctrlr: c, drvOnce: &sync.Once{}, persist: true} ep = &Endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID} } else { ep, err = n.getEndpointFromStore(eps.Eid) if err != nil { - logrus.Errorf("getEndpointFromStore for eid %s failed while trying to build sandbox for cleanup: %v", eps.Eid, err) + log.G(context.TODO()).Errorf("getEndpointFromStore for eid %s failed while trying to build sandbox for cleanup: %v", eps.Eid, err) ep = &Endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID} } } if _, ok := activeSandboxes[sb.ID()]; ok && err != nil { - logrus.Errorf("failed to restore endpoint %s in %s for container %s due to %v", eps.Eid, eps.Nid, sb.ContainerID(), err) + log.G(context.TODO()).Errorf("failed to restore endpoint %s in %s for container %s due to %v", eps.Eid, eps.Nid, sb.ContainerID(), err) continue } sb.addEndpoint(ep) } if _, ok := activeSandboxes[sb.ID()]; !ok { - logrus.Infof("Removing stale sandbox %s (%s)", sb.id, sb.containerID) + log.G(context.TODO()).Infof("Removing stale sandbox %s (%s)", sb.id, sb.containerID) if err := sb.delete(true); err != nil { - logrus.Errorf("Failed to delete sandbox %s while trying to cleanup: %v", sb.id, err) + log.G(context.TODO()).Errorf("Failed to delete sandbox %s while trying to cleanup: %v", sb.id, err) } continue } @@ -260,7 +261,7 @@ func (c *Controller) sandboxCleanup(activeSandboxes map[string]interface{}) { // reconstruct osl sandbox field if !sb.config.useDefaultSandBox { if err := sb.restoreOslSandbox(); err != nil { - logrus.Errorf("failed to populate fields 
for osl sandbox %s: %v", sb.ID(), err) + log.G(context.TODO()).Errorf("failed to populate fields for osl sandbox %s: %v", sb.ID(), err) continue } } else { diff --git a/libnetwork/service_common.go b/libnetwork/service_common.go index 9c8b4c3dcf..1ae553610b 100644 --- a/libnetwork/service_common.go +++ b/libnetwork/service_common.go @@ -3,9 +3,10 @@ package libnetwork import ( + "context" "net" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) const maxSetStringLen = 350 @@ -16,7 +17,7 @@ func (c *Controller) addEndpointNameResolution(svcName, svcID, nID, eID, contain return err } - logrus.Debugf("addEndpointNameResolution %s %s add_service:%t sAliases:%v tAliases:%v", eID, svcName, addService, serviceAliases, taskAliases) + log.G(context.TODO()).Debugf("addEndpointNameResolution %s %s add_service:%t sAliases:%v tAliases:%v", eID, svcName, addService, serviceAliases, taskAliases) // Add container resolution mappings if err := c.addContainerNameResolution(nID, eID, containerName, taskAliases, ip, method); err != nil { @@ -58,7 +59,7 @@ func (c *Controller) addContainerNameResolution(nID, eID, containerName string, if err != nil { return err } - logrus.Debugf("addContainerNameResolution %s %s", eID, containerName) + log.G(context.TODO()).Debugf("addContainerNameResolution %s %s", eID, containerName) // Add resolution for container name n.(*network).addSvcRecords(eID, containerName, eID, ip, nil, true, method) @@ -77,11 +78,11 @@ func (c *Controller) deleteEndpointNameResolution(svcName, svcID, nID, eID, cont return err } - logrus.Debugf("deleteEndpointNameResolution %s %s rm_service:%t suppress:%t sAliases:%v tAliases:%v", eID, svcName, rmService, multipleEntries, serviceAliases, taskAliases) + log.G(context.TODO()).Debugf("deleteEndpointNameResolution %s %s rm_service:%t suppress:%t sAliases:%v tAliases:%v", eID, svcName, rmService, multipleEntries, serviceAliases, taskAliases) // Delete container resolution mappings if err := 
c.delContainerNameResolution(nID, eID, containerName, taskAliases, ip, method); err != nil { - logrus.WithError(err).Warn("Error delting container from resolver") + log.G(context.TODO()).WithError(err).Warn("Error deleting container from resolver") } serviceID := svcID @@ -122,7 +123,7 @@ func (c *Controller) delContainerNameResolution(nID, eID, containerName string, if err != nil { return err } - logrus.Debugf("delContainerNameResolution %s %s", eID, containerName) + log.G(context.TODO()).Debugf("delContainerNameResolution %s %s", eID, containerName) // Delete resolution for container name n.(*network).deleteSvcRecords(eID, containerName, eID, ip, nil, true, method) @@ -170,18 +171,18 @@ func (c *Controller) cleanupServiceDiscovery(cleanupNID string) { c.mu.Lock() defer c.mu.Unlock() if cleanupNID == "" { - logrus.Debugf("cleanupServiceDiscovery for all networks") + log.G(context.TODO()).Debugf("cleanupServiceDiscovery for all networks") c.svcRecords = make(map[string]*svcInfo) return } - logrus.Debugf("cleanupServiceDiscovery for network:%s", cleanupNID) + log.G(context.TODO()).Debugf("cleanupServiceDiscovery for network:%s", cleanupNID) delete(c.svcRecords, cleanupNID) } func (c *Controller) cleanupServiceBindings(cleanupNID string) { var cleanupFuncs []func() - logrus.Debugf("cleanupServiceBindings for %s", cleanupNID) + log.G(context.TODO()).Debugf("cleanupServiceBindings for %s", cleanupNID) c.mu.Lock() services := make([]*service, 0, len(c.serviceBindings)) for _, s := range c.serviceBindings { @@ -218,7 +219,7 @@ func makeServiceCleanupFunc(c *Controller, s *service, nID, eID string, vip net. // Balancer bookeeping, is to keep consistent the mapping of endpoint to IP. 
return func() { if err := c.rmServiceBinding(s.name, s.id, nID, eID, "", vip, s.ingressPorts, s.aliases, []string{}, ip, "cleanupServiceBindings", false, true); err != nil { - logrus.Errorf("Failed to remove service bindings for service %s network %s endpoint %s while cleanup: %v", s.id, nID, eID, err) + log.G(context.TODO()).Errorf("Failed to remove service bindings for service %s network %s endpoint %s while cleanup: %v", s.id, nID, eID, err) } } } @@ -262,7 +263,7 @@ func (c *Controller) addServiceBinding(svcName, svcID, nID, eID, containerName s } s.Unlock() } - logrus.Debugf("addServiceBinding from %s START for %s %s p:%p nid:%s skey:%v", method, svcName, eID, s, nID, skey) + log.G(context.TODO()).Debugf("addServiceBinding from %s START for %s %s p:%p nid:%s skey:%v", method, svcName, eID, s, nID, skey) defer s.Unlock() lb, ok := s.loadBalancers[nID] @@ -294,7 +295,7 @@ func (c *Controller) addServiceBinding(svcName, svcID, nID, eID, containerName s if len(setStr) > maxSetStringLen { setStr = setStr[:maxSetStringLen] } - logrus.Warnf("addServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr) + log.G(context.TODO()).Warnf("addServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr) } // Add loadbalancer service and backend to the network @@ -305,7 +306,7 @@ func (c *Controller) addServiceBinding(svcName, svcID, nID, eID, containerName s return err } - logrus.Debugf("addServiceBinding from %s END for %s %s", method, svcName, eID) + log.G(context.TODO()).Debugf("addServiceBinding from %s END for %s %s", method, svcName, eID) return nil } @@ -322,22 +323,22 @@ func (c *Controller) rmServiceBinding(svcName, svcID, nID, eID, containerName st s, ok := c.serviceBindings[skey] c.mu.Unlock() if !ok { - logrus.Warnf("rmServiceBinding %s %s %s aborted c.serviceBindings[skey] !ok", method, svcName, eID) + log.G(context.TODO()).Warnf("rmServiceBinding %s %s %s aborted 
c.serviceBindings[skey] !ok", method, svcName, eID) return nil } s.Lock() defer s.Unlock() - logrus.Debugf("rmServiceBinding from %s START for %s %s p:%p nid:%s sKey:%v deleteSvc:%t", method, svcName, eID, s, nID, skey, deleteSvcRecords) + log.G(context.TODO()).Debugf("rmServiceBinding from %s START for %s %s p:%p nid:%s sKey:%v deleteSvc:%t", method, svcName, eID, s, nID, skey, deleteSvcRecords) lb, ok := s.loadBalancers[nID] if !ok { - logrus.Warnf("rmServiceBinding %s %s %s aborted s.loadBalancers[nid] !ok", method, svcName, eID) + log.G(context.TODO()).Warnf("rmServiceBinding %s %s %s aborted s.loadBalancers[nid] !ok", method, svcName, eID) return nil } be, ok := lb.backEnds[eID] if !ok { - logrus.Warnf("rmServiceBinding %s %s %s aborted lb.backEnds[eid] && lb.disabled[eid] !ok", method, svcName, eID) + log.G(context.TODO()).Warnf("rmServiceBinding %s %s %s aborted lb.backEnds[eid] && lb.disabled[eid] !ok", method, svcName, eID) return nil } @@ -355,7 +356,7 @@ func (c *Controller) rmServiceBinding(svcName, svcID, nID, eID, containerName st rmService = true delete(s.loadBalancers, nID) - logrus.Debugf("rmServiceBinding %s delete %s, p:%p in loadbalancers len:%d", eID, nID, lb, len(s.loadBalancers)) + log.G(context.TODO()).Debugf("rmServiceBinding %s delete %s, p:%p in loadbalancers len:%d", eID, nID, lb, len(s.loadBalancers)) } ok, entries := s.removeIPToEndpoint(ip.String(), eID) @@ -364,7 +365,7 @@ func (c *Controller) rmServiceBinding(svcName, svcID, nID, eID, containerName st if len(setStr) > maxSetStringLen { setStr = setStr[:maxSetStringLen] } - logrus.Warnf("rmServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr) + log.G(context.TODO()).Warnf("rmServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr) } // Remove loadbalancer service(if needed) and backend in all @@ -405,6 +406,6 @@ func (c *Controller) rmServiceBinding(svcName, svcID, nID, eID, containerName st 
c.mu.Unlock() } - logrus.Debugf("rmServiceBinding from %s END for %s %s", method, svcName, eID) + log.G(context.TODO()).Debugf("rmServiceBinding from %s END for %s %s", method, svcName, eID) return nil } diff --git a/libnetwork/service_linux.go b/libnetwork/service_linux.go index 07f7e60ee2..2fcd5487c9 100644 --- a/libnetwork/service_linux.go +++ b/libnetwork/service_linux.go @@ -1,6 +1,7 @@ package libnetwork import ( + "context" "fmt" "io" "net" @@ -11,11 +12,11 @@ import ( "sync" "syscall" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/iptables" "github.com/docker/docker/libnetwork/ns" "github.com/ishidawataru/sctp" "github.com/moby/ipvs" - "github.com/sirupsen/logrus" "github.com/vishvananda/netlink/nl" ) @@ -32,7 +33,7 @@ func (sb *Sandbox) populateLoadBalancers(ep *Endpoint) { if n.ingress { if err := sb.addRedirectRules(eIP, ep.ingressPorts); err != nil { - logrus.Errorf("Failed to add redirect rules for ep %s (%.7s): %v", ep.Name(), ep.ID(), err) + log.G(context.TODO()).Errorf("Failed to add redirect rules for ep %s (%.7s): %v", ep.Name(), ep.ID(), err) } } } @@ -84,7 +85,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { } ep, sb, err := n.findLBEndpointSandbox() if err != nil { - logrus.Errorf("addLBBackend %s/%s: %v", n.ID(), n.Name(), err) + log.G(context.TODO()).Errorf("addLBBackend %s/%s: %v", n.ID(), n.Name(), err) return } if sb.osSbox == nil { @@ -95,7 +96,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { i, err := ipvs.New(sb.Key()) if err != nil { - logrus.Errorf("Failed to create an ipvs handle for sbox %.7s (%.7s,%s) for lb addition: %v", sb.ID(), sb.ContainerID(), sb.Key(), err) + log.G(context.TODO()).Errorf("Failed to create an ipvs handle for sbox %.7s (%.7s,%s) for lb addition: %v", sb.ID(), sb.ContainerID(), sb.Key(), err) return } defer i.Close() @@ -110,12 +111,12 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { // Add IP alias for the VIP to the endpoint 
ifName := findIfaceDstName(sb, ep) if ifName == "" { - logrus.Errorf("Failed find interface name for endpoint %s(%s) to create LB alias", ep.ID(), ep.Name()) + log.G(context.TODO()).Errorf("Failed find interface name for endpoint %s(%s) to create LB alias", ep.ID(), ep.Name()) return } err := sb.osSbox.AddAliasIP(ifName, &net.IPNet{IP: lb.vip, Mask: net.CIDRMask(32, 32)}) if err != nil { - logrus.Errorf("Failed add IP alias %s to network %s LB endpoint interface %s: %v", lb.vip, n.ID(), ifName, err) + log.G(context.TODO()).Errorf("Failed add IP alias %s to network %s LB endpoint interface %s: %v", lb.vip, n.ID(), ifName, err) return } @@ -125,19 +126,19 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { gwIP = ep.Iface().Address().IP } if err := programIngress(gwIP, lb.service.ingressPorts, false); err != nil { - logrus.Errorf("Failed to add ingress: %v", err) + log.G(context.TODO()).Errorf("Failed to add ingress: %v", err) return } } - logrus.Debugf("Creating service for vip %s fwMark %d ingressPorts %#v in sbox %.7s (%.7s)", lb.vip, lb.fwMark, lb.service.ingressPorts, sb.ID(), sb.ContainerID()) + log.G(context.TODO()).Debugf("Creating service for vip %s fwMark %d ingressPorts %#v in sbox %.7s (%.7s)", lb.vip, lb.fwMark, lb.service.ingressPorts, sb.ID(), sb.ContainerID()) if err := sb.configureFWMark(lb.vip, lb.fwMark, lb.service.ingressPorts, eIP, false, n.loadBalancerMode); err != nil { - logrus.Errorf("Failed to add firewall mark rule in sbox %.7s (%.7s): %v", sb.ID(), sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Failed to add firewall mark rule in sbox %.7s (%.7s): %v", sb.ID(), sb.ContainerID(), err) return } if err := i.NewService(s); err != nil && err != syscall.EEXIST { - logrus.Errorf("Failed to create a new service for vip %s fwmark %d in sbox %.7s (%.7s): %v", lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Failed to create a new service for vip %s fwmark %d in sbox %.7s (%.7s): %v", lb.vip, 
lb.fwMark, sb.ID(), sb.ContainerID(), err) return } } @@ -155,7 +156,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { // destination. s.SchedName = "" if err := i.NewDestination(s, d); err != nil && err != syscall.EEXIST { - logrus.Errorf("Failed to create real server %s for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Failed to create real server %s for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) } // Ensure that kernel tweaks are applied in case this is the first time @@ -173,7 +174,7 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR } ep, sb, err := n.findLBEndpointSandbox() if err != nil { - logrus.Debugf("rmLBBackend for %s/%s: %v -- probably transient state", n.ID(), n.Name(), err) + log.G(context.TODO()).Debugf("rmLBBackend for %s/%s: %v -- probably transient state", n.ID(), n.Name(), err) return } if sb.osSbox == nil { @@ -184,7 +185,7 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR i, err := ipvs.New(sb.Key()) if err != nil { - logrus.Errorf("Failed to create an ipvs handle for sbox %.7s (%.7s,%s) for lb removal: %v", sb.ID(), sb.ContainerID(), sb.Key(), err) + log.G(context.TODO()).Errorf("Failed to create an ipvs handle for sbox %.7s (%.7s,%s) for lb removal: %v", sb.ID(), sb.ContainerID(), sb.Key(), err) return } defer i.Close() @@ -205,19 +206,19 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR if fullRemove { if err := i.DelDestination(s, d); err != nil && err != syscall.ENOENT { - logrus.Errorf("Failed to delete real server %s for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Failed to delete real server %s for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) } } 
else { d.Weight = 0 if err := i.UpdateDestination(s, d); err != nil && err != syscall.ENOENT { - logrus.Errorf("Failed to set LB weight of real server %s to 0 for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Failed to set LB weight of real server %s to 0 for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) } } if rmService { s.SchedName = ipvs.RoundRobin if err := i.DelService(s); err != nil && err != syscall.ENOENT { - logrus.Errorf("Failed to delete service for vip %s fwmark %d in sbox %.7s (%.7s): %v", lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Failed to delete service for vip %s fwmark %d in sbox %.7s (%.7s): %v", lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err) } if sb.ingress { @@ -226,23 +227,23 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR gwIP = ep.Iface().Address().IP } if err := programIngress(gwIP, lb.service.ingressPorts, true); err != nil { - logrus.Errorf("Failed to delete ingress: %v", err) + log.G(context.TODO()).Errorf("Failed to delete ingress: %v", err) } } if err := sb.configureFWMark(lb.vip, lb.fwMark, lb.service.ingressPorts, eIP, true, n.loadBalancerMode); err != nil { - logrus.Errorf("Failed to delete firewall mark rule in sbox %.7s (%.7s): %v", sb.ID(), sb.ContainerID(), err) + log.G(context.TODO()).Errorf("Failed to delete firewall mark rule in sbox %.7s (%.7s): %v", sb.ID(), sb.ContainerID(), err) } // Remove IP alias from the VIP to the endpoint ifName := findIfaceDstName(sb, ep) if ifName == "" { - logrus.Errorf("Failed find interface name for endpoint %s(%s) to create LB alias", ep.ID(), ep.Name()) + log.G(context.TODO()).Errorf("Failed find interface name for endpoint %s(%s) to create LB alias", ep.ID(), ep.Name()) return } err := sb.osSbox.RemoveAliasIP(ifName, &net.IPNet{IP: lb.vip, Mask: net.CIDRMask(32, 32)}) if err != 
nil { - logrus.Errorf("Failed add IP alias %s to network %s LB endpoint interface %s: %v", lb.vip, n.ID(), ifName, err) + log.G(context.TODO()).Errorf("Failed add IP alias %s to network %s LB endpoint interface %s: %v", lb.vip, n.ID(), ifName, err) } } } @@ -316,12 +317,12 @@ func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) erro // exists. It might contain stale rules from previous life. if chainExists { if err := iptable.RawCombinedOutput("-t", "nat", "-F", ingressChain); err != nil { - logrus.Errorf("Could not flush nat table ingress chain rules during init: %v", err) + log.G(context.TODO()).Errorf("Could not flush nat table ingress chain rules during init: %v", err) } } if filterChainExists { if err := iptable.RawCombinedOutput("-F", ingressChain); err != nil { - logrus.Errorf("Could not flush filter table ingress chain rules during init: %v", err) + log.G(context.TODO()).Errorf("Could not flush filter table ingress chain rules during init: %v", err) } } }) @@ -392,7 +393,7 @@ func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) erro filterPortConfigs(filteredPorts, !isDelete) for _, rule := range rollbackRules { if err := iptable.RawCombinedOutput(rule...); err != nil { - logrus.Warnf("roll back rule failed, %v: %v", rule, err) + log.G(context.TODO()).Warnf("roll back rule failed, %v: %v", rule, err) } } } @@ -412,7 +413,7 @@ func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) erro if !isDelete { return err } - logrus.Info(err) + log.G(context.TODO()).Info(err) } rollbackRule := []string{"-t", "nat", rollbackAddDelOpt, ingressChain, "-p", protocol, "--dport", publishedPort, "-j", "DNAT", "--to-destination", destination} rollbackRules = append(rollbackRules, rollbackRule) @@ -427,7 +428,7 @@ func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) erro if !isDelete { return err } - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } rollbackRule := 
[]string{rollbackAddDelOpt, ingressChain, "-m", "state", "-p", protocol, "--sport", publishedPort, "--state", "ESTABLISHED,RELATED", "-j", "ACCEPT"} rollbackRules = append(rollbackRules, rollbackRule) @@ -438,13 +439,13 @@ func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) erro if !isDelete { return err } - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } rollbackRule = []string{rollbackAddDelOpt, ingressChain, "-p", protocol, "--dport", publishedPort, "-j", "ACCEPT"} rollbackRules = append(rollbackRules, rollbackRule) if err := plumbProxy(iPort, isDelete); err != nil { - logrus.Warnf("failed to create proxy for port %s: %v", publishedPort, err) + log.G(context.TODO()).Warnf("failed to create proxy for port %s: %v", publishedPort, err) } } @@ -462,11 +463,11 @@ func arrangeIngressFilterRule() { if iptable.ExistChain(ingressChain, iptables.Filter) { if iptable.Exists(iptables.Filter, "FORWARD", "-j", ingressChain) { if err := iptable.RawCombinedOutput("-D", "FORWARD", "-j", ingressChain); err != nil { - logrus.Warnf("failed to delete jump rule to ingressChain in filter table: %v", err) + log.G(context.TODO()).Warnf("failed to delete jump rule to ingressChain in filter table: %v", err) } } if err := iptable.RawCombinedOutput("-I", "FORWARD", "-j", ingressChain); err != nil { - logrus.Warnf("failed to add jump rule to ingressChain in filter table: %v", err) + log.G(context.TODO()).Warnf("failed to add jump rule to ingressChain in filter table: %v", err) } } } diff --git a/libnetwork/service_windows.go b/libnetwork/service_windows.go index af8475a950..123d162ddc 100644 --- a/libnetwork/service_windows.go +++ b/libnetwork/service_windows.go @@ -1,10 +1,11 @@ package libnetwork import ( + "context" "net" "github.com/Microsoft/hcsshim" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) type policyLists struct { @@ -38,7 +39,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { } if sourceVIP == "" { - 
logrus.Errorf("Failed to find load balancer IP for network %s", n.Name()) + log.G(context.TODO()).Errorf("Failed to find load balancer IP for network %s", n.Name()) return } @@ -51,7 +52,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { //Call HNS to get back ID (GUID) corresponding to the endpoint. hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid) if err != nil { - logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err) + log.G(context.TODO()).Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err) return } @@ -74,7 +75,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0) if err != nil { - logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v", + log.G(context.TODO()).Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v", lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err) return } @@ -109,7 +110,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) { lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort)) if err != nil { - logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v", + log.G(context.TODO()).Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v", lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err) return } @@ -127,26 +128,26 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR } else { lb.Lock() defer lb.Unlock() - logrus.Debugf("No more backends for service %s 
(ip:%s). Removing all policies", lb.service.name, lb.vip.String()) + log.G(context.TODO()).Debugf("No more backends for service %s (ip:%s). Removing all policies", lb.service.name, lb.vip.String()) if policyLists, ok := lbPolicylistMap[lb]; ok { if policyLists.ilb != nil { if _, err := policyLists.ilb.Delete(); err != nil { - logrus.Errorf("Failed to remove HNS ILB policylist %s: %s", policyLists.ilb.ID, err) + log.G(context.TODO()).Errorf("Failed to remove HNS ILB policylist %s: %s", policyLists.ilb.ID, err) } policyLists.ilb = nil } if policyLists.elb != nil { if _, err := policyLists.elb.Delete(); err != nil { - logrus.Errorf("Failed to remove HNS ELB policylist %s: %s", policyLists.elb.ID, err) + log.G(context.TODO()).Errorf("Failed to remove HNS ELB policylist %s: %s", policyLists.elb.ID, err) } policyLists.elb = nil } delete(lbPolicylistMap, lb) } else { - logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String()) + log.G(context.TODO()).Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String()) } } } diff --git a/libnetwork/store.go b/libnetwork/store.go index f64924d614..0eafad4065 100644 --- a/libnetwork/store.go +++ b/libnetwork/store.go @@ -1,12 +1,13 @@ package libnetwork import ( + "context" "fmt" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/libnetwork/datastore" "github.com/docker/libkv/store/boltdb" - "github.com/sirupsen/logrus" ) func registerKVStores() { @@ -75,7 +76,7 @@ func (c *Controller) getNetworks() ([]*network, error) { ec := &endpointCnt{n: n} err = store.GetObject(datastore.Key(ec.Key()...), ec) if err != nil && !n.inDelete { - logrus.Warnf("Could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err) + log.G(context.TODO()).Warnf("Could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err) continue } @@ -96,14 +97,14 @@ func (c 
*Controller) getNetworksFromStore() []*network { // FIXME: unify with c. kvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix), &network{ctrlr: c}) if err != nil { if err != datastore.ErrKeyNotFound { - logrus.Debugf("failed to get networks from store: %v", err) + log.G(context.TODO()).Debugf("failed to get networks from store: %v", err) } return nil } kvep, err := store.Map(datastore.Key(epCntKeyPrefix), &endpointCnt{}) if err != nil && err != datastore.ErrKeyNotFound { - logrus.Warnf("failed to get endpoint_count map from store: %v", err) + log.G(context.TODO()).Warnf("failed to get endpoint_count map from store: %v", err) } for _, kvo := range kvol { @@ -187,7 +188,7 @@ retry: if err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil { return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err) } - logrus.Warnf("Error (%v) deleting object %v, retrying....", err, kvObject.Key()) + log.G(context.TODO()).Warnf("Error (%v) deleting object %v, retrying....", err, kvObject.Key()) goto retry } return err @@ -232,7 +233,7 @@ func (c *Controller) networkWatchLoop(nw *netWatch, ep *Endpoint, ecCh <-chan da epl, err := ec.n.getEndpointsFromStore() if err != nil { - logrus.WithError(err).Debug("error getting endpoints from store") + log.G(context.TODO()).WithError(err).Debug("error getting endpoints from store") continue } @@ -342,7 +343,7 @@ func (c *Controller) processEndpointCreate(nmap map[string]*netWatch, ep *Endpoi ch, err := store.Watch(n.getEpCnt(), nw.stopCh) if err != nil { - logrus.Warnf("Error creating watch for network: %v", err) + log.G(context.TODO()).Warnf("Error creating watch for network: %v", err) return } @@ -409,9 +410,9 @@ func (c *Controller) startWatch() { func (c *Controller) networkCleanup() { for _, n := range c.getNetworksFromStore() { if n.inDelete { - logrus.Infof("Removing stale network %s (%s)", n.Name(), n.ID()) + log.G(context.TODO()).Infof("Removing stale network %s (%s)", 
n.Name(), n.ID()) if err := n.delete(true, true); err != nil { - logrus.Debugf("Error while removing stale network: %v", err) + log.G(context.TODO()).Debugf("Error while removing stale network: %v", err) } } } @@ -420,7 +421,7 @@ func (c *Controller) networkCleanup() { var populateSpecial NetworkWalker = func(nw Network) bool { if n := nw.(*network); n.hasSpecialDriver() && !n.ConfigOnly() { if err := n.getController().addNetwork(n); err != nil { - logrus.Warnf("Failed to populate network %q with driver %q", nw.Name(), nw.Type()) + log.G(context.TODO()).Warnf("Failed to populate network %q with driver %q", nw.Name(), nw.Type()) } } return false diff --git a/oci/caps/utils_linux.go b/oci/caps/utils_linux.go index 06dc3410fc..ad9c015246 100644 --- a/oci/caps/utils_linux.go +++ b/oci/caps/utils_linux.go @@ -1,9 +1,10 @@ package caps // import "github.com/docker/docker/oci/caps" import ( + "context" "sync" + "github.com/containerd/containerd/log" ccaps "github.com/containerd/containerd/pkg/cap" - "github.com/sirupsen/logrus" ) var initCapsOnce sync.Once @@ -13,7 +14,7 @@ func initCaps() { rawCaps := ccaps.Known() curCaps, err := ccaps.Current() if err != nil { - logrus.WithError(err).Error("failed to get capabilities from current environment") + log.G(context.TODO()).WithError(err).Error("failed to get capabilities from current environment") allCaps = rawCaps } else { allCaps = curCaps diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index d44920ba4f..e213837490 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -20,6 +20,7 @@ import ( "syscall" "time" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" @@ -205,21 +206,21 @@ func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { - 
logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") + log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { - logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { - logrus.Debugf("unpigz binary not found, falling back to go gzip library") + log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } - logrus.Debugf("Using %s to decompress", unpigzPath) + log.G(ctx).Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } @@ -754,7 +755,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o } case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") + log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored") return nil default: @@ -789,7 +790,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o } if len(xattrErrs) > 0 { - logrus.WithFields(logrus.Fields{ + log.G(context.TODO()).WithFields(logrus.Fields{ "errors": xattrErrs, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } @@ -907,13 +908,13 @@ func (t *Tarballer) Do() { defer func() { // Make sure to check the error on Close. 
if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) + log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) } if err := t.compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) + log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) } if err := t.pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) + log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) } }() @@ -936,7 +937,7 @@ func (t *Tarballer) Do() { // directory. So, we must split the source path and use the // basename as the include. if len(t.options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") + log.G(context.TODO()).Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(t.srcPath) @@ -961,7 +962,7 @@ func (t *Tarballer) Do() { walkRoot := getWalkRoot(t.srcPath, include) filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error { if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) + log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) return nil } @@ -1000,7 +1001,7 @@ func (t *Tarballer) Do() { skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) } if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) + log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err) return err } @@ -1061,7 +1062,7 @@ func (t *Tarballer) Do() { } if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err @@ -1098,7 +1099,7 @@ loop: // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == 
tar.TypeXGlobalHeader { - logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) + log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index 7f7242be50..6a3494491b 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -3,6 +3,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" + "context" "fmt" "io" "os" @@ -12,10 +13,10 @@ import ( "syscall" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // ChangeType represents the change type. @@ -371,7 +372,7 @@ func ChangesSize(newDir string, changes []Change) int64 { file := filepath.Join(newDir, change.Path) fileInfo, err := os.Lstat(file) if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) + log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) continue } @@ -420,22 +421,22 @@ func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) + log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) + log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. 
if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) + log.G(context.TODO()).Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) + log.G(context.TODO()).Debugf("failed close Changes writer: %s", err) } }() return reader, nil diff --git a/pkg/archive/copy.go b/pkg/archive/copy.go index 0ea1596278..7a5a16c821 100644 --- a/pkg/archive/copy.go +++ b/pkg/archive/copy.go @@ -2,14 +2,15 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "errors" "io" "os" "path/filepath" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // Errors used or returned by this file. @@ -107,7 +108,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er sourceDir, sourceBase := SplitPathDirEntry(sourcePath) opts := TarResourceRebaseOpts(sourceBase, rebaseName) - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir) return TarWithOptions(sourceDir, opts) } diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go index 92c1361478..2acd4ccc76 100644 --- a/pkg/archive/diff.go +++ b/pkg/archive/diff.go @@ -2,6 +2,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "fmt" "io" "os" @@ -9,9 +10,9 @@ import ( "runtime" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be @@ -67,7 +68,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // image but have it tagged as Windows inadvertently. 
if runtime.GOOS == "windows" { if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) continue } } } diff --git a/pkg/archive/example_changes.go b/pkg/archive/example_changes.go index 73caff86f4..e0ae3a74e8 100644 --- a/pkg/archive/example_changes.go +++ b/pkg/archive/example_changes.go @@ -13,14 +13,14 @@ import ( "path" "github.com/docker/docker/pkg/archive" - "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" ) var ( flDebug = flag.Bool("D", false, "debugging output") flNewDir = flag.String("newdir", "", "") flOldDir = flag.String("olddir", "", "") - log = logrus.New() + log = logrus.New() ) func main() { @@ -32,7 +32,7 @@ func main() { flag.Parse() log.Out = os.Stderr if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) + logrus.SetLevel(logrus.DebugLevel) } var newDir, oldDir string diff --git a/pkg/authorization/authz.go b/pkg/authorization/authz.go index 590ac8dddd..f1680c89ad 100644 --- a/pkg/authorization/authz.go +++ b/pkg/authorization/authz.go @@ -3,14 +3,15 @@ package authorization // import "github.com/docker/docker/pkg/authorization" import ( "bufio" "bytes" + "context" "fmt" "io" "mime" "net/http" "strings" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/ioutils" - "github.com/sirupsen/logrus" ) const maxBodySize = 1048576 // 1MB @@ -85,7 +86,7 @@ func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { } for _, plugin := range ctx.plugins { - logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + log.G(context.TODO()).Debugf("AuthZ request using plugin %s", plugin.Name()) authRes, err := plugin.AuthZRequest(ctx.authReq) if err != nil { @@ -110,7 +111,7 @@ func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { } for _, plugin := range ctx.plugins { - logrus.Debugf("AuthZ response
using plugin %s", plugin.Name()) + log.G(context.TODO()).Debugf("AuthZ response using plugin %s", plugin.Name()) authRes, err := plugin.AuthZResponse(ctx.authReq) if err != nil { @@ -135,7 +136,7 @@ func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { data, err := bufReader.Peek(maxBodySize) // Body size exceeds max body size if err == nil { - logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + log.G(context.TODO()).Warnf("Request body is larger than: '%d' skipping body", maxBodySize) return nil, newBody, nil } // Body size is less than maximum size diff --git a/pkg/authorization/middleware.go b/pkg/authorization/middleware.go index 39c2dce856..e39698ae76 100644 --- a/pkg/authorization/middleware.go +++ b/pkg/authorization/middleware.go @@ -5,8 +5,8 @@ import ( "net/http" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/plugingetter" - "github.com/sirupsen/logrus" ) // Middleware uses a list of plugins to @@ -74,7 +74,7 @@ func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.Respon authCtx := NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) if err := authCtx.AuthZRequest(w, r); err != nil { - logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + log.G(ctx).Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) return err } @@ -83,21 +83,21 @@ func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.Respon var errD error if errD = handler(ctx, rw, r, vars); errD != nil { - logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) + log.G(ctx).Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) } // There's a chance that the authCtx.plugins was updated. One of the reasons // this can happen is when an authzplugin is disabled. 
plugins = m.getAuthzPlugins() if len(plugins) == 0 { - logrus.Debug("There are no authz plugins in the chain") + log.G(ctx).Debug("There are no authz plugins in the chain") return nil } authCtx.plugins = plugins if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil { - logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + log.G(ctx).Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) return err } diff --git a/pkg/authorization/response.go b/pkg/authorization/response.go index c9e9a2c85f..735b397e7a 100644 --- a/pkg/authorization/response.go +++ b/pkg/authorization/response.go @@ -3,12 +3,13 @@ package authorization // import "github.com/docker/docker/pkg/authorization" import ( "bufio" "bytes" + "context" "encoding/json" "fmt" "net" "net/http" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) // ResponseModifier allows authorization plugins to read and modify the content of the http.response @@ -155,7 +156,7 @@ func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { func (rm *responseModifier) Flush() { flusher, ok := rm.rw.(http.Flusher) if !ok { - logrus.Error("Internal response writer doesn't support the Flusher interface") + log.G(context.TODO()).Error("Internal response writer doesn't support the Flusher interface") return } diff --git a/pkg/fileutils/fileutils_unix.go b/pkg/fileutils/fileutils_unix.go index fbdbfd04a2..ab8e03a9a0 100644 --- a/pkg/fileutils/fileutils_unix.go +++ b/pkg/fileutils/fileutils_unix.go @@ -3,17 +3,18 @@ package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( + "context" "fmt" "os" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) // GetTotalUsedFds Returns the number of used File Descriptors by // reading it via /proc filesystem. 
func GetTotalUsedFds() int { if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + log.G(context.TODO()).Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) } diff --git a/pkg/loopback/attach_loopback.go b/pkg/loopback/attach_loopback.go index 11de15236d..035fe5edf6 100644 --- a/pkg/loopback/attach_loopback.go +++ b/pkg/loopback/attach_loopback.go @@ -4,11 +4,12 @@ package loopback // import "github.com/docker/docker/pkg/loopback" import ( + "context" "errors" "fmt" "os" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "golang.org/x/sys/unix" ) @@ -43,20 +44,20 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil fi, err := os.Stat(target) if err != nil { if os.IsNotExist(err) { - logrus.Error("There are no more loopback devices available.") + log.G(context.TODO()).Error("There are no more loopback devices available.") } return nil, ErrAttachLoopbackDevice } if fi.Mode()&os.ModeDevice != os.ModeDevice { - logrus.Errorf("Loopback device %s is not a block device.", target) + log.G(context.TODO()).Errorf("Loopback device %s is not a block device.", target) continue } // OpenFile adds O_CLOEXEC loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) if err != nil { - logrus.Errorf("Error opening loopback device: %s", err) + log.G(context.TODO()).Errorf("Error opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice } @@ -66,7 +67,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil // If the error is EBUSY, then try the next loopback if err != unix.EBUSY { - logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + log.G(context.TODO()).Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice } @@ -79,7 +80,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil // This 
can't happen, but let's be sure if loopFile == nil { - logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + log.G(context.TODO()).Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) return nil, ErrAttachLoopbackDevice } @@ -94,13 +95,13 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { // loopback from index 0. startIndex, err := getNextFreeLoopbackIndex() if err != nil { - logrus.Debugf("Error retrieving the next available loopback: %s", err) + log.G(context.TODO()).Debugf("Error retrieving the next available loopback: %s", err) } // OpenFile adds O_CLOEXEC sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) if err != nil { - logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) + log.G(context.TODO()).Errorf("Error opening sparse file %s: %s", sparseName, err) return nil, ErrAttachLoopbackDevice } defer sparseFile.Close() @@ -118,11 +119,11 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { } if err = unix.IoctlLoopSetStatus64(int(loopFile.Fd()), loopInfo); err != nil { - logrus.Errorf("Cannot set up loopback device info: %s", err) + log.G(context.TODO()).Errorf("Cannot set up loopback device info: %s", err) // If the call failed, then free the loopback device if err = unix.IoctlSetInt(int(loopFile.Fd()), unix.LOOP_CLR_FD, 0); err != nil { - logrus.Error("Error while cleaning up the loopback device") + log.G(context.TODO()).Error("Error while cleaning up the loopback device") } loopFile.Close() return nil, ErrAttachLoopbackDevice diff --git a/pkg/loopback/loopback.go b/pkg/loopback/loopback.go index dc3fc4ae9c..7e7c3b96f3 100644 --- a/pkg/loopback/loopback.go +++ b/pkg/loopback/loopback.go @@ -4,17 +4,18 @@ package loopback // import "github.com/docker/docker/pkg/loopback" import ( + "context" "fmt" "os" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" "golang.org/x/sys/unix" ) 
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { loopInfo, err := unix.IoctlLoopGetStatus64(int(file.Fd())) if err != nil { - logrus.Errorf("Error get loopback backing file: %s", err) + log.G(context.TODO()).Errorf("Error get loopback backing file: %s", err) return 0, 0, ErrGetLoopbackBackingFile } return loopInfo.Device, loopInfo.Inode, nil @@ -23,7 +24,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { // SetCapacity reloads the size for the loopback device. func SetCapacity(file *os.File) error { if err := unix.IoctlSetInt(int(file.Fd()), unix.LOOP_SET_CAPACITY, 0); err != nil { - logrus.Errorf("Error loopbackSetCapacity: %s", err) + log.G(context.TODO()).Errorf("Error loopbackSetCapacity: %s", err) return ErrSetCapacity } return nil diff --git a/pkg/parsers/kernel/kernel_unix.go b/pkg/parsers/kernel/kernel_unix.go index 610b3184a6..f8657535f5 100644 --- a/pkg/parsers/kernel/kernel_unix.go +++ b/pkg/parsers/kernel/kernel_unix.go @@ -3,7 +3,9 @@ package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( - "github.com/sirupsen/logrus" + "context" + + "github.com/containerd/containerd/log" "golang.org/x/sys/unix" ) @@ -22,7 +24,7 @@ func GetKernelVersion() (*VersionInfo, error) { // the given version. 
func CheckKernelVersion(k, major, minor int) bool { if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) + log.G(context.TODO()).Warnf("error getting kernel version: %s", err) } else { if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { return false diff --git a/pkg/platform/platform.go b/pkg/platform/platform.go index 380cfd160e..875c5fe20f 100644 --- a/pkg/platform/platform.go +++ b/pkg/platform/platform.go @@ -3,7 +3,9 @@ package platform // import "github.com/docker/docker/pkg/platform" import ( - "github.com/sirupsen/logrus" + "context" + + "github.com/containerd/containerd/log" ) // Architecture holds the runtime architecture of the process. @@ -19,6 +21,6 @@ func init() { var err error Architecture, err = runtimeArchitecture() if err != nil { - logrus.WithError(err).Error("Could not read system architecture info") + log.G(context.TODO()).WithError(err).Error("Could not read system architecture info") } } diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go index 752fecd0ae..91303ab5cd 100644 --- a/pkg/plugins/client.go +++ b/pkg/plugins/client.go @@ -9,11 +9,11 @@ import ( "net/url" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" ) const ( @@ -116,7 +116,7 @@ func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret int defer body.Close() if ret != nil { if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + log.G(context.TODO()).Errorf("%s: error reading plugin resp: %v", serviceMethod, err) return err } } @@ -140,7 +140,7 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) } defer body.Close() if err := json.NewDecoder(body).Decode(&ret); 
err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + log.G(context.TODO()).Errorf("%s: error reading plugin resp: %v", serviceMethod, err) return err } return nil @@ -180,7 +180,7 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, return nil, err } retries++ - logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + log.G(context.TODO()).Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) time.Sleep(timeOff) continue } diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go index 7d9c5eac22..cc1f55b90f 100644 --- a/pkg/plugins/plugins.go +++ b/pkg/plugins/plugins.go @@ -23,13 +23,14 @@ package plugins // import "github.com/docker/docker/pkg/plugins" import ( + "context" "errors" "fmt" "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" ) // ProtocolSchemeHTTPV1 is the name of the protocol used for interacting with plugins using this package. 
@@ -217,7 +218,7 @@ func loadWithRetry(name string, retry bool) (*Plugin, error) { return nil, err } retries++ - logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + log.G(context.TODO()).Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) time.Sleep(timeOff) continue } @@ -262,7 +263,7 @@ func Get(name, imp string) (*Plugin, error) { return nil, err } if err := pl.waitActive(); err == nil && pl.implements(imp) { - logrus.Debugf("%s implements: %s", name, imp) + log.G(context.TODO()).Debugf("%s implements: %s", name, imp) return pl, nil } return nil, fmt.Errorf("%w: plugin=%q, requested implementation=%q", ErrNotImplements, name, imp) @@ -329,7 +330,7 @@ func (l *LocalRegistry) GetAll(imp string) ([]*Plugin, error) { var out []*Plugin for pl := range chPl { if pl.err != nil { - logrus.Error(pl.err) + log.G(context.TODO()).Error(pl.err) continue } if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { diff --git a/pkg/rootless/specconv/specconv_linux.go b/pkg/rootless/specconv/specconv_linux.go index 06f55ef13d..d66b67f381 100644 --- a/pkg/rootless/specconv/specconv_linux.go +++ b/pkg/rootless/specconv/specconv_linux.go @@ -1,6 +1,7 @@ package specconv // import "github.com/docker/docker/pkg/rootless/specconv" import ( + "context" "fmt" "os" "path" @@ -8,8 +9,8 @@ import ( "strconv" "strings" + "github.com/containerd/containerd/log" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" ) // ToRootless converts spec to be compatible with "rootless" runc. 
@@ -26,13 +27,13 @@ func ToRootless(spec *specs.Spec, v2Controllers []string) error { func getCurrentOOMScoreAdj() int { b, err := os.ReadFile("/proc/self/oom_score_adj") if err != nil { - logrus.WithError(err).Warn("failed to read /proc/self/oom_score_adj") + log.G(context.TODO()).WithError(err).Warn("failed to read /proc/self/oom_score_adj") return 0 } s := string(b) i, err := strconv.Atoi(strings.TrimSpace(s)) if err != nil { - logrus.WithError(err).Warnf("failed to parse /proc/self/oom_score_adj (%q)", s) + log.G(context.TODO()).WithError(err).Warnf("failed to parse /proc/self/oom_score_adj (%q)", s) return 0 } return i diff --git a/pkg/sysinfo/cgroup2_linux.go b/pkg/sysinfo/cgroup2_linux.go index 151179ca32..c2b60f3d49 100644 --- a/pkg/sysinfo/cgroup2_linux.go +++ b/pkg/sysinfo/cgroup2_linux.go @@ -1,14 +1,15 @@ package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( + "context" "os" "path" "strings" "github.com/containerd/cgroups/v3" cgroupsV2 "github.com/containerd/cgroups/v3/cgroup2" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/userns" - "github.com/sirupsen/logrus" ) func newV2(options ...Opt) *SysInfo { @@ -29,12 +30,12 @@ func newV2(options ...Opt) *SysInfo { m, err := cgroupsV2.Load(sysInfo.cg2GroupPath) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } else { sysInfo.cg2Controllers = make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } for _, c := range controllers { sysInfo.cg2Controllers[c] = struct{}{} diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go index 215d7ff302..377615c0ca 100644 --- a/pkg/sysinfo/sysinfo_linux.go +++ b/pkg/sysinfo/sysinfo_linux.go @@ -1,6 +1,7 @@ package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( + "context" "fmt" "os" "path" @@ -9,9 +10,9 @@ import ( "github.com/containerd/cgroups/v3" "github.com/containerd/cgroups/v3/cgroup1" + 
"github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/seccomp" "github.com/moby/sys/mountinfo" - "github.com/sirupsen/logrus" ) var ( @@ -107,7 +108,7 @@ func newV1() *SysInfo { sysInfo.cgMounts, err = findCgroupV1Mountpoints() if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } else { ops = append(ops, applyMemoryCgroupInfo, diff --git a/plugin/backend_linux.go b/plugin/backend_linux.go index cdc2831a1f..b01276f2fe 100644 --- a/plugin/backend_linux.go +++ b/plugin/backend_linux.go @@ -16,6 +16,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" @@ -37,7 +38,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var acceptedPluginFilterTags = map[string]bool{ @@ -384,7 +384,7 @@ func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header defer waitProgress() progressHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - logrus.WithField("mediaType", desc.MediaType).WithField("digest", desc.Digest.String()).Debug("Preparing to push plugin layer") + log.G(ctx).WithField("mediaType", desc.MediaType).WithField("digest", desc.Digest.String()).Debug("Preparing to push plugin layer") id := stringid.TruncateID(desc.Digest.String()) pj.add(remotes.MakeRefKey(ctx, desc), id) progress.Update(out, id, "Preparing") @@ -434,14 +434,14 @@ func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header if resolver != nil { pusher, _ := resolver.Pusher(ctx, ref.String()) if pusher != nil { - logrus.WithField("ref", ref).Debug("Re-attmpting push with http-fallback") + log.G(ctx).WithField("ref", 
ref).Debug("Re-attmpting push with http-fallback") err2 := remotes.PushContent(ctx, pusher, desc, pm.blobStore, nil, nil, func(h images.Handler) images.Handler { return images.Handlers(progressHandler, h) }) if err2 == nil { err = nil } else { - logrus.WithError(err2).WithField("ref", ref).Debug("Error while attempting push with http-fallback") + log.G(ctx).WithError(err2).WithField("ref", ref).Debug("Error while attempting push with http-fallback") } } } @@ -505,7 +505,7 @@ func buildManifest(ctx context.Context, s content.Manager, config digest.Digest, // getManifestDescriptor gets the OCI descriptor for a manifest // It will generate a manifest if one does not exist func (pm *Manager) getManifestDescriptor(ctx context.Context, p *v2.Plugin) (ocispec.Descriptor, error) { - logger := logrus.WithField("plugin", p.Name()).WithField("digest", p.Manifest) + logger := log.G(ctx).WithField("plugin", p.Name()).WithField("digest", p.Manifest) if p.Manifest != "" { info, err := pm.blobStore.Info(ctx, p.Manifest) if err == nil { @@ -579,7 +579,7 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { if p.IsEnabled() { if err := pm.disable(p, c); err != nil { - logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err) + log.G(context.TODO()).Errorf("failed to disable plugin '%s': %s", p.Name(), err) } } diff --git a/plugin/executor/containerd/containerd.go b/plugin/executor/containerd/containerd.go index cb874cab38..a9e939255d 100644 --- a/plugin/executor/containerd/containerd.go +++ b/plugin/executor/containerd/containerd.go @@ -9,6 +9,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" "github.com/docker/docker/libcontainerd" libcontainerdtypes "github.com/docker/docker/libcontainerd/types" @@ -75,7 +76,7 @@ func (p c8dPlugin) deleteTaskAndContainer(ctx context.Context) { // Create creates a new container func (e *Executor) 
Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error { ctx := context.Background() - log := logrus.WithField("plugin", id) + log := log.G(ctx).WithField("plugin", id) ctr, err := libcontainerd.ReplaceContainer(ctx, e.client, id, &spec, e.shim, e.shimOpts) if err != nil { return errors.Wrap(err, "error creating containerd container for plugin") @@ -96,7 +97,7 @@ func (e *Executor) Create(id string, spec specs.Spec, stdout, stderr io.WriteClo // Restore restores a container func (e *Executor) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) { ctx := context.Background() - p := c8dPlugin{log: logrus.WithField("plugin", id)} + p := c8dPlugin{log: log.G(ctx).WithField("plugin", id)} ctr, err := e.client.LoadContainer(ctx, id) if err != nil { if errdefs.IsNotFound(err) { @@ -164,7 +165,7 @@ func (e *Executor) ProcessEvent(id string, et libcontainerdtypes.EventType, ei l p := e.plugins[id] e.mu.Unlock() if p == nil { - logrus.WithField("id", id).Warn("Received exit event for an unknown plugin") + log.G(context.TODO()).WithField("id", id).Warn("Received exit event for an unknown plugin") } else { p.deleteTaskAndContainer(context.Background()) } diff --git a/plugin/fetch_linux.go b/plugin/fetch_linux.go index bfad62c315..35bd2719be 100644 --- a/plugin/fetch_linux.go +++ b/plugin/fetch_linux.go @@ -9,6 +9,7 @@ import ( "github.com/containerd/containerd/content" cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" @@ -21,7 +22,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const mediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" @@ -84,7 +84,7 @@ func (pm *Manager) fetch(ctx context.Context, ref 
reference.Named, auth *registr // This is perfectly fine, unless you are talking to an older registry which does not split the comma separated list, // so it is never able to match a media type and it falls back to schema1 (yuck) and fails because our manifest the // fallback does not support plugin configs... - logrus.WithError(err).WithField("ref", withDomain).Debug("Error while resolving reference, falling back to backwards compatible accept header format") + log.G(ctx).WithError(err).WithField("ref", withDomain).Debug("Error while resolving reference, falling back to backwards compatible accept header format") headers := http.Header{} headers.Add("Accept", images.MediaTypeDockerSchema2Manifest) headers.Add("Accept", images.MediaTypeDockerSchema2ManifestList) @@ -94,7 +94,7 @@ func (pm *Manager) fetch(ctx context.Context, ref reference.Named, auth *registr if resolver != nil { resolved, desc, err = resolver.Resolve(ctx, withDomain.String()) if err != nil { - logrus.WithError(err).WithField("ref", withDomain).Debug("Failed to resolve reference after falling back to backwards compatible accept header format") + log.G(ctx).WithError(err).WithField("ref", withDomain).Debug("Failed to resolve reference after falling back to backwards compatible accept header format") } } if err != nil { @@ -249,7 +249,7 @@ func withFetchProgress(cs content.Store, out progress.Output, ref reference.Name s, err := cs.Status(ctx, key) if err != nil { if !cerrdefs.IsNotFound(err) { - logrus.WithError(err).WithField("layerDigest", desc.Digest.String()).Error("Error looking up status of plugin layer pull") + log.G(ctx).WithError(err).WithField("layerDigest", desc.Digest.String()).Error("Error looking up status of plugin layer pull") progress.Update(out, id, err.Error()) return } diff --git a/plugin/manager.go b/plugin/manager.go index fc0eba04ee..7e03ed7a40 100644 --- a/plugin/manager.go +++ b/plugin/manager.go @@ -15,6 +15,7 @@ import ( "github.com/containerd/containerd/content" 
"github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/containerfs" @@ -131,7 +132,7 @@ func (pm *Manager) HandleExitEvent(id string) error { } if err := os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)); err != nil { - logrus.WithError(err).WithField("id", id).Error("Could not remove plugin bundle dir") + log.G(context.TODO()).WithError(err).WithField("id", id).Error("Could not remove plugin bundle dir") } pm.mu.RLock() @@ -155,7 +156,7 @@ func handleLoadError(err error, id string) { if err == nil { return } - logger := logrus.WithError(err).WithField("id", id) + logger := log.G(context.TODO()).WithError(err).WithField("id", id) if errors.Is(err, os.ErrNotExist) { // Likely some error while removing on an older version of docker logger.Warn("missing plugin config, skipping: this may be caused due to a failed remove and requires manual cleanup.") @@ -182,7 +183,7 @@ func (pm *Manager) reload() error { // todo: restore if validFullID.MatchString(strings.TrimSuffix(v.Name(), "-removing")) { // There was likely some error while removing this plugin, let's try to remove again here if err := containerfs.EnsureRemoveAll(v.Name()); err != nil { - logrus.WithError(err).WithField("id", v.Name()).Warn("error while attempting to clean up previously removed plugin") + log.G(context.TODO()).WithError(err).WithField("id", v.Name()).Warn("error while attempting to clean up previously removed plugin") } } } @@ -201,7 +202,7 @@ func (pm *Manager) reload() error { // todo: restore go func(p *v2.Plugin) { defer wg.Done() if err := pm.restorePlugin(p, c); err != nil { - logrus.WithError(err).WithField("id", p.GetID()).Error("Failed to restore plugin") + log.G(context.TODO()).WithError(err).WithField("id", p.GetID()).Error("Failed to restore plugin") return } @@ -221,13 +222,13 @@ func (pm *Manager) reload() error { // todo: restore 
rootfsProp := filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) if _, err := os.Stat(rootfsProp); err == nil { if err := os.Rename(rootfsProp, propRoot); err != nil { - logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") + log.G(context.TODO()).WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") } } } if err := os.MkdirAll(propRoot, 0755); err != nil { - logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + log.G(context.TODO()).Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) } } } @@ -239,7 +240,7 @@ func (pm *Manager) reload() error { // todo: restore if requiresManualRestore { // if liveRestore is not enabled, the plugin will be stopped now so we should enable it if err := pm.enable(p, c, true); err != nil { - logrus.WithError(err).WithField("id", p.GetID()).Error("failed to enable plugin") + log.G(context.TODO()).WithError(err).WithField("id", p.GetID()).Error("failed to enable plugin") } } }(p) diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go index 72eda2d4b4..a7cd10b374 100644 --- a/plugin/manager_linux.go +++ b/plugin/manager_linux.go @@ -9,6 +9,7 @@ import ( "time" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/errdefs" @@ -20,7 +21,6 @@ import ( "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -46,7 +46,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") if err := os.MkdirAll(propRoot, 0755); err != nil { - logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + 
log.G(context.TODO()).Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) } if err := mount.MakeRShared(propRoot); err != nil { @@ -63,7 +63,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { if err := pm.executor.Create(p.GetID(), *spec, stdout, stderr); err != nil { if p.PluginObj.Config.PropagatedMount != "" { if err := mount.Unmount(propRoot); err != nil { - logrus.WithField("plugin", p.Name()).WithError(err).Warn("Failed to unmount vplugin propagated mount root") + log.G(context.TODO()).WithField("plugin", p.Name()).WithError(err).Warn("Failed to unmount vplugin propagated mount root") } } return errors.WithStack(err) @@ -104,7 +104,7 @@ func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { retries++ if retries > maxRetries { - logrus.Debugf("error net dialing plugin: %v", err) + log.G(context.TODO()).Debugf("error net dialing plugin: %v", err) c.restart = false // While restoring plugins, we need to explicitly set the state to disabled pm.config.Store.SetState(p, false) @@ -153,7 +153,7 @@ func shutdownPlugin(p *v2.Plugin, ec chan bool, executor Executor) { pluginID := p.GetID() if err := executor.Signal(pluginID, unix.SIGTERM); err != nil { - logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err) + log.G(context.TODO()).Errorf("Sending SIGTERM to plugin failed with error: %v", err) return } @@ -162,20 +162,20 @@ func shutdownPlugin(p *v2.Plugin, ec chan bool, executor Executor) { select { case <-ec: - logrus.Debug("Clean shutdown of plugin") + log.G(context.TODO()).Debug("Clean shutdown of plugin") case <-timeout.C: - logrus.Debug("Force shutdown plugin") + log.G(context.TODO()).Debug("Force shutdown plugin") if err := executor.Signal(pluginID, unix.SIGKILL); err != nil { - logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err) + log.G(context.TODO()).Errorf("Sending SIGKILL to plugin failed with error: %v", err) } timeout.Reset(shutdownTimeout) select { 
case <-ec: - logrus.Debug("SIGKILL plugin shutdown") + log.G(context.TODO()).Debug("SIGKILL plugin shutdown") case <-timeout.C: - logrus.WithField("plugin", p.Name).Warn("Force shutdown plugin FAILED") + log.G(context.TODO()).WithField("plugin", p.Name).Warn("Force shutdown plugin FAILED") } } } @@ -200,7 +200,7 @@ func (pm *Manager) Shutdown() { pm.mu.RUnlock() if pm.config.LiveRestoreEnabled && p.IsEnabled() { - logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") + log.G(context.TODO()).Debug("Plugin active when liveRestore is set, skipping shutdown") continue } if pm.executor != nil && p.IsEnabled() { @@ -209,7 +209,7 @@ func (pm *Manager) Shutdown() { } } if err := mount.RecursiveUnmount(pm.config.Root); err != nil { - logrus.WithError(err).Warn("error cleaning up plugin mounts") + log.G(context.TODO()).WithError(err).Warn("error cleaning up plugin mounts") } } @@ -237,18 +237,18 @@ func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest, manifestDigest dige defer func() { if err != nil { if rmErr := os.RemoveAll(orig); rmErr != nil { - logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") + log.G(context.TODO()).WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") return } if mvErr := os.Rename(backup, orig); mvErr != nil { err = errors.Wrap(mvErr, "error restoring old plugin root on upgrade failure") } if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) + log.G(context.TODO()).WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) } } else { if rmErr := os.RemoveAll(backup); rmErr != nil { - logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade") + 
log.G(context.TODO()).WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade") } p.Config = configDigest diff --git a/plugin/registry.go b/plugin/registry.go index d4e8a55e0b..38525f720b 100644 --- a/plugin/registry.go +++ b/plugin/registry.go @@ -7,13 +7,13 @@ import ( "net/http" "time" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/dockerversion" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // scope builds the correct auth scope for the registry client to authorize against @@ -69,7 +69,7 @@ func (pm *Manager) registryHostsFn(auth *registry.AuthConfig, httpFallback bool) // pass to it. // So it is the callers responsibility to retry with this flag set. if httpFallback && ep.URL.Scheme != "http" { - logrus.WithField("registryHost", hostname).WithField("endpoint", ep).Debugf("Skipping non-http endpoint") + log.G(context.TODO()).WithField("registryHost", hostname).WithField("endpoint", ep).Debugf("Skipping non-http endpoint") continue } @@ -101,7 +101,7 @@ func (pm *Manager) registryHostsFn(auth *registry.AuthConfig, httpFallback bool) ), }) } - logrus.WithField("registryHost", hostname).WithField("hosts", hosts).Debug("Resolved registry hosts") + log.G(context.TODO()).WithField("registryHost", hostname).WithField("hosts", hosts).Debug("Resolved registry hosts") return hosts, nil } diff --git a/plugin/store.go b/plugin/store.go index b1acfc22db..70e2c84ccf 100644 --- a/plugin/store.go +++ b/plugin/store.go @@ -1,9 +1,11 @@ package plugin // import "github.com/docker/docker/plugin" import ( + "context" "fmt" "strings" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/plugingetter" @@ -11,7 +13,6 @@ import ( 
v2 "github.com/docker/docker/plugin/v2" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // allowV1PluginsFallback determines daemon's support for V1 plugins. @@ -259,7 +260,7 @@ func (ps *Store) resolvePluginID(idOrName string) (string, error) { return "", errors.WithStack(errNotFound(idOrName)) } if _, ok := ref.(reference.Canonical); ok { - logrus.Warnf("canonical references cannot be resolved: %v", reference.FamiliarString(ref)) + log.G(context.TODO()).Warnf("canonical references cannot be resolved: %v", reference.FamiliarString(ref)) return "", errors.WithStack(errNotFound(idOrName)) } diff --git a/quota/projectquota.go b/quota/projectquota.go index 8a9a1f579c..575e7a9d26 100644 --- a/quota/projectquota.go +++ b/quota/projectquota.go @@ -52,15 +52,16 @@ const int Q_XGETQSTAT_PRJQUOTA = QCMD(Q_XGETQSTAT, PRJQUOTA); */ import "C" import ( + "context" "os" "path" "path/filepath" "sync" "unsafe" + "github.com/containerd/containerd/log" "github.com/containerd/containerd/pkg/userns" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -179,7 +180,7 @@ func NewControl(basePath string) (*Control, error) { return nil, err } - logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, state.nextProjectID) + log.G(context.TODO()).Debugf("NewControl(%s): nextProjectID = %d", basePath, state.nextProjectID) return &q, nil } @@ -214,7 +215,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error { // // set the quota limit for the container's project id // - logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + log.G(context.TODO()).Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) return setProjectQuota(q.backingFsBlockDev, projectID, quota) } diff --git a/registry/auth.go b/registry/auth.go index dd75a49f38..d5ee9bd504 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -1,17 +1,18 @@ package registry // 
import "github.com/docker/docker/registry" import ( + "context" "net/http" "net/url" "strings" "time" + "github.com/containerd/containerd/log" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // AuthClientID is used the ClientID used for the token server @@ -74,7 +75,7 @@ func loginV2(authConfig *registry.AuthConfig, endpoint APIEndpoint, userAgent st creds = loginCredentialStore{authConfig: &credentialAuthConfig} ) - logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) + log.G(context.TODO()).Debugf("attempting v2 login to registry endpoint %s", endpointStr) loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) if err != nil { diff --git a/registry/endpoint_v1.go b/registry/endpoint_v1.go index 56257dc799..4382cdbc0a 100644 --- a/registry/endpoint_v1.go +++ b/registry/endpoint_v1.go @@ -1,6 +1,7 @@ package registry // import "github.com/docker/docker/registry" import ( + "context" "crypto/tls" "encoding/json" "io" @@ -8,9 +9,9 @@ import ( "net/url" "strings" + "github.com/containerd/containerd/log" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" ) // v1PingResult contains the information returned when pinging a registry. It @@ -55,7 +56,7 @@ func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint, } func validateEndpoint(endpoint *v1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) + log.G(context.TODO()).Debugf("pinging registry endpoint %s", endpoint) // Try HTTPS ping to registry endpoint.URL.Scheme = "https" @@ -67,7 +68,7 @@ func validateEndpoint(endpoint *v1Endpoint) error { } // If registry is insecure and HTTPS failed, fallback to HTTP. 
- logrus.WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint) + log.G(context.TODO()).WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint) endpoint.URL.Scheme = "http" var err2 error @@ -138,7 +139,7 @@ func (e *v1Endpoint) ping() (v1PingResult, error) { return v1PingResult{}, nil } - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + log.G(context.TODO()).Debugf("attempting v1 ping for registry endpoint %s", e) pingURL := e.String() + "_ping" req, err := http.NewRequest(http.MethodGet, pingURL, nil) if err != nil { @@ -163,13 +164,13 @@ func (e *v1Endpoint) ping() (v1PingResult, error) { Standalone: true, } if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.WithError(err).Debug("error unmarshaling _ping response") + log.G(context.TODO()).WithError(err).Debug("error unmarshaling _ping response") // don't stop here. Just assume sane defaults } if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { info.Version = hdr } - logrus.Debugf("v1PingResult.Version: %q", info.Version) + log.G(context.TODO()).Debugf("v1PingResult.Version: %q", info.Version) standalone := resp.Header.Get("X-Docker-Registry-Standalone") @@ -180,6 +181,6 @@ func (e *v1Endpoint) ping() (v1PingResult, error) { // there is a header set, and it is not "true" or "1", so assume fails info.Standalone = false } - logrus.Debugf("v1PingResult.Standalone: %t", info.Standalone) + log.G(context.TODO()).Debugf("v1PingResult.Standalone: %t", info.Standalone) return info, nil } diff --git a/registry/registry.go b/registry/registry.go index 5ff39ce5e7..83f712a0a0 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -2,6 +2,7 @@ package registry // import "github.com/docker/docker/registry" import ( + "context" "crypto/tls" "net" "net/http" @@ -10,9 +11,9 @@ import ( "strings" "time" + "github.com/containerd/containerd/log" 
"github.com/docker/distribution/registry/client/transport" "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" ) // HostCertsDir returns the config directory for a specific host. @@ -29,7 +30,7 @@ func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { if isSecure && CertsDir() != "" { hostDir := HostCertsDir(hostname) - logrus.Debugf("hostDir: %s", hostDir) + log.G(context.TODO()).Debugf("hostDir: %s", hostDir) if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { return nil, err } @@ -65,7 +66,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { } tlsConfig.RootCAs = systemPool } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + log.G(context.TODO()).Debugf("crt: %s", filepath.Join(directory, f.Name())) data, err := os.ReadFile(filepath.Join(directory, f.Name())) if err != nil { return err @@ -75,7 +76,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { if strings.HasSuffix(f.Name(), ".cert") { certName := f.Name() keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + log.G(context.TODO()).Debugf("cert: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, keyName) { return invalidParamf("missing key %s for client certificate %s. 
CA certificates must use the extension .crt", keyName, certName) } @@ -88,7 +89,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + log.G(context.TODO()).Debugf("key: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, certName) { return invalidParamf("missing client certificate %s for key %s", certName, keyName) } diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index 6e4f172375..779df0bb5d 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -1,6 +1,7 @@ package registry // import "github.com/docker/docker/registry" import ( + "context" "encoding/json" "errors" "io" @@ -9,8 +10,8 @@ import ( "net/http/httptest" "testing" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" "gotest.tools/v3/assert" ) @@ -60,7 +61,7 @@ func init() { func handlerAccessLog(handler http.Handler) http.Handler { logHandler := func(w http.ResponseWriter, r *http.Request) { - logrus.Debugf(`%s "%s %s"`, r.RemoteAddr, r.Method, r.URL) + log.G(context.TODO()).Debugf(`%s "%s %s"`, r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) } return http.HandlerFunc(logHandler) diff --git a/registry/resumable/resumablerequestreader.go b/registry/resumable/resumablerequestreader.go index 3649f36ede..b52459d739 100644 --- a/registry/resumable/resumablerequestreader.go +++ b/registry/resumable/resumablerequestreader.go @@ -1,12 +1,13 @@ package resumable // import "github.com/docker/docker/registry/resumable" import ( + "context" "fmt" "io" "net/http" "time" - "github.com/sirupsen/logrus" + "github.com/containerd/containerd/log" ) type requestReader struct { @@ -75,7 +76,7 @@ func (r *requestReader) Read(p []byte) (n int, err error) { r.cleanUpResponse() } if err != nil && err 
!= io.EOF { - logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + log.G(context.TODO()).Infof("encountered error during pull and clearing it before resume: %s", err) err = nil } return n, err diff --git a/registry/search.go b/registry/search.go index 60b86ea228..621a6ed7a4 100644 --- a/registry/search.go +++ b/registry/search.go @@ -10,9 +10,9 @@ import ( "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" + "github.com/containerd/containerd/log" "github.com/docker/distribution/registry/client/auth" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var acceptedSearchFilterTags = map[string]bool{ @@ -126,7 +126,7 @@ func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int, v2Client.CheckRedirect = endpoint.client.CheckRedirect v2Client.Jar = endpoint.client.Jar - logrus.Debugf("using v2 client for search to %s", endpoint.URL) + log.G(ctx).Debugf("using v2 client for search to %s", endpoint.URL) client = v2Client } else { client = endpoint.client diff --git a/registry/service.go b/registry/service.go index afc896acf2..23cb888176 100644 --- a/registry/service.go +++ b/registry/service.go @@ -7,10 +7,10 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" - "github.com/sirupsen/logrus" ) // Service is a registry service. It tracks configuration data such as a list @@ -85,7 +85,7 @@ func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, use // Failed to authenticate; don't continue with (non-TLS) endpoints. 
return status, token, err } - logrus.WithError(err).Infof("Error logging in to endpoint, trying next endpoint") + log.G(ctx).WithError(err).Infof("Error logging in to endpoint, trying next endpoint") } return "", "", err diff --git a/registry/session.go b/registry/session.go index 86a5cd9edf..81713671cc 100644 --- a/registry/session.go +++ b/registry/session.go @@ -2,6 +2,7 @@ package registry // import "github.com/docker/docker/registry" import ( // this is required for some certificates + "context" _ "crypto/sha512" "encoding/json" "fmt" @@ -11,12 +12,12 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonmessage" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // A session is used to communicate with a V1 registry @@ -155,7 +156,7 @@ func authorizeClient(client *http.Client, authConfig *registry.AuthConfig, endpo return err } if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + log.G(context.TODO()).Debugf("Endpoint %s is eligible for private registry. 
Enabling decorator.", endpoint.String()) alwaysSetBasicAuth = true } } @@ -191,7 +192,7 @@ func (r *session) searchRepositories(term string, limit int) (*registry.SearchRe if limit < 1 || limit > 100 { return nil, invalidParamf("limit %d is outside the range of [1, 100]", limit) } - logrus.Debugf("Index server: %s", r.indexEndpoint) + log.G(context.TODO()).Debugf("Index server: %s", r.indexEndpoint) u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) req, err := http.NewRequest(http.MethodGet, u, nil) diff --git a/volume/drivers/adapter.go b/volume/drivers/adapter.go index f6ee07a006..cd8f739d78 100644 --- a/volume/drivers/adapter.go +++ b/volume/drivers/adapter.go @@ -1,12 +1,13 @@ package drivers // import "github.com/docker/docker/volume/drivers" import ( + "context" "errors" "strings" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/volume" - "github.com/sirupsen/logrus" ) var ( @@ -94,7 +95,7 @@ func (a *volumeDriverAdapter) getCapabilities() volume.Capability { if err != nil { // `GetCapabilities` is a not a required endpoint. 
// On error assume it's a local-only driver - logrus.WithError(err).WithField("driver", a.name).Debug("Volume driver returned an error while trying to query its capabilities, using default capabilities") + log.G(context.TODO()).WithError(err).WithField("driver", a.name).Debug("Volume driver returned an error while trying to query its capabilities, using default capabilities") return volume.Capability{Scope: volume.LocalScope} } @@ -105,7 +106,7 @@ func (a *volumeDriverAdapter) getCapabilities() volume.Capability { cap.Scope = strings.ToLower(cap.Scope) if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope { - logrus.WithField("driver", a.Name()).WithField("scope", a.Scope).Warn("Volume driver returned an invalid scope") + log.G(context.TODO()).WithField("driver", a.Name()).WithField("scope", a.Scope).Warn("Volume driver returned an invalid scope") cap.Scope = volume.LocalScope } diff --git a/volume/drivers/extpoint.go b/volume/drivers/extpoint.go index 46a438612e..7b604b4e13 100644 --- a/volume/drivers/extpoint.go +++ b/volume/drivers/extpoint.go @@ -3,17 +3,18 @@ package drivers // import "github.com/docker/docker/volume/drivers" import ( + "context" "fmt" "sort" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" getter "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/volume" "github.com/moby/locker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const extName = "VolumeDriver" @@ -97,7 +98,7 @@ func (s *Store) lookup(name string, mode int) (volume.Driver, error) { if mode > 0 { // Undo any reference count changes from the initial `Get` if _, err := s.pluginGetter.Get(name, extName, mode*-1); err != nil { - logrus.WithError(err).WithField("action", "validate-driver").WithField("plugin", name).Error("error releasing reference to plugin") + log.G(context.TODO()).WithError(err).WithField("action", "validate-driver").WithField("plugin", 
name).Error("error releasing reference to plugin") } } return nil, err diff --git a/volume/local/local.go b/volume/local/local.go index 512e666eb8..306e974e57 100644 --- a/volume/local/local.go +++ b/volume/local/local.go @@ -4,6 +4,7 @@ package local // import "github.com/docker/docker/volume/local" import ( + "context" "encoding/json" "os" "path/filepath" @@ -11,13 +12,13 @@ import ( "strings" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/daemon/names" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/quota" "github.com/docker/docker/volume" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -62,7 +63,7 @@ func New(scope string, rootIdentity idtools.Identity) (*Root, error) { } if r.quotaCtl, err = quota.NewControl(r.path); err != nil { - logrus.Debugf("No quota support for local volumes in %s: %v", r.path, err) + log.G(context.TODO()).Debugf("No quota support for local volumes in %s: %v", r.path, err) } for _, d := range dirs { @@ -340,7 +341,7 @@ func (v *localVolume) loadOpts() error { b, err := os.ReadFile(filepath.Join(v.rootPath, "opts.json")) if err != nil { if !errors.Is(err, os.ErrNotExist) { - logrus.WithError(err).Warnf("error while loading volume options for volume: %s", v.name) + log.G(context.TODO()).WithError(err).Warnf("error while loading volume options for volume: %s", v.name) } return nil } diff --git a/volume/service/convert.go b/volume/service/convert.go index b109ce277e..f988811ace 100644 --- a/volume/service/convert.go +++ b/volume/service/convert.go @@ -6,12 +6,12 @@ import ( "strconv" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types/filters" volumetypes "github.com/docker/docker/api/types/volume" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/volume" - "github.com/sirupsen/logrus" ) // convertOpts are used to pass options to `volumeToAPI` @@ -69,7 +69,7 @@ 
func (s *VolumesService) volumesToAPI(ctx context.Context, volumes []volume.Volu } sz, err := directory.Size(ctx, p) if err != nil { - logrus.WithError(err).WithField("volume", v.Name()).Warnf("Failed to determine size of volume") + log.G(ctx).WithError(err).WithField("volume", v.Name()).Warnf("Failed to determine size of volume") sz = -1 } apiV.UsageData = &volumetypes.UsageData{Size: sz, RefCount: int64(s.vs.CountReferences(v))} diff --git a/volume/service/db.go b/volume/service/db.go index d48ae544a4..070d5c34ba 100644 --- a/volume/service/db.go +++ b/volume/service/db.go @@ -1,11 +1,12 @@ package service // import "github.com/docker/docker/volume/service" import ( + "context" "encoding/json" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" "github.com/pkg/errors" - "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) @@ -85,7 +86,7 @@ func listMeta(tx *bolt.Tx) []volumeMetadata { var m volumeMetadata if err := json.Unmarshal(v, &m); err != nil { // Just log the error - logrus.Errorf("Error while reading volume metadata for volume %q: %v", string(k), err) + log.G(context.TODO()).Errorf("Error while reading volume metadata for volume %q: %v", string(k), err) return nil } ls = append(ls, m) diff --git a/volume/service/restore.go b/volume/service/restore.go index 6741f9ec5f..f345a65c7e 100644 --- a/volume/service/restore.go +++ b/volume/service/restore.go @@ -4,8 +4,8 @@ import ( "context" "sync" + "github.com/containerd/containerd/log" "github.com/docker/docker/volume" - "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) @@ -36,7 +36,7 @@ func (s *VolumeStore) restore() { if meta.Driver != "" { v, err = lookupVolume(ctx, s.drivers, meta.Driver, meta.Name) if err != nil && err != errNoSuchVolume { - logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring volume") + log.G(ctx).WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring 
volume") return } if v == nil { @@ -55,7 +55,7 @@ func (s *VolumeStore) restore() { meta.Driver = v.DriverName() if err := s.setMeta(v.Name(), meta); err != nil { - logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore") + log.G(ctx).WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore") } } @@ -77,7 +77,7 @@ func (s *VolumeStore) restore() { s.db.Update(func(tx *bolt.Tx) error { for meta := range chRemove { if err := removeMeta(tx, meta.Name); err != nil { - logrus.WithField("volume", meta.Name).Warnf("Error removing stale entry from volume db: %v", err) + log.G(ctx).WithField("volume", meta.Name).Warnf("Error removing stale entry from volume db: %v", err) } } return nil diff --git a/volume/service/service.go b/volume/service/service.go index 7030b2a32b..cfac31823b 100644 --- a/volume/service/service.go +++ b/volume/service/service.go @@ -5,6 +5,7 @@ import ( "strconv" "sync/atomic" + "github.com/containerd/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" volumetypes "github.com/docker/docker/api/types/volume" @@ -17,7 +18,6 @@ import ( "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type ds interface { @@ -239,10 +239,10 @@ func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types vSize, err := directory.Size(ctx, v.Path()) if err != nil { - logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") + log.G(ctx).WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { - logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") + log.G(ctx).WithError(err).WithField("volume", v.Name()).Warnf("Could 
not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) diff --git a/volume/service/store.go b/volume/service/store.go index 8926866e1c..a7b8a707d9 100644 --- a/volume/service/store.go +++ b/volume/service/store.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "github.com/containerd/containerd/log" "github.com/docker/docker/errdefs" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" @@ -16,7 +17,6 @@ import ( "github.com/docker/docker/volume/service/opts" "github.com/moby/locker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) @@ -185,11 +185,11 @@ func (s *VolumeStore) purge(ctx context.Context, name string) error { if exists { driverName := v.DriverName() if _, err := s.drivers.ReleaseDriver(driverName); err != nil { - logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") + log.G(ctx).WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") } } if err := s.removeMeta(name); err != nil { - logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err) + log.G(ctx).Errorf("Error removing volume metadata for volume %q: %v", name, err) } delete(s.names, name) delete(s.refs, name) @@ -337,7 +337,7 @@ func unique(ls *[]volume.Volume) { // If a driver returns a volume that has name which conflicts with another volume from a different driver, // the first volume is chosen and the conflicting volume is dropped. 
func (s *VolumeStore) Find(ctx context.Context, by By) (vols []volume.Volume, warnings []string, err error) { - logrus.WithField("ByType", fmt.Sprintf("%T", by)).WithField("ByValue", fmt.Sprintf("%+v", by)).Debug("VolumeStore.Find") + log.G(ctx).WithField("ByType", fmt.Sprintf("%T", by)).WithField("ByValue", fmt.Sprintf("%+v", by)).Debug("VolumeStore.Find") switch f := by.(type) { case nil, orCombinator, andCombinator, byDriver, ByReferenced, CustomFilter: warnings, err = s.filter(ctx, &vols, by) @@ -361,7 +361,7 @@ func (s *VolumeStore) Find(ctx context.Context, by By) (vols []volume.Volume, wa // Note: it's not safe to populate the cache here because the volume may have been // deleted before we acquire a lock on its name if exists && storedV.DriverName() != v.DriverName() { - logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) + log.G(ctx).Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) s.locks.Unlock(v.Name()) continue } @@ -613,12 +613,12 @@ func (s *VolumeStore) create(ctx context.Context, name, driverName string, opts, return nil, false, &OpErr{Op: "create", Name: name, Err: err} } - logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + log.G(ctx).Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) if v, _ = vd.Get(name); v == nil { v, err = vd.Create(name, opts) if err != nil { if _, err := s.drivers.ReleaseDriver(driverName); err != nil { - logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") + log.G(ctx).WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") } return nil, false, err } @@ -722,7 +722,7 @@ func (s *VolumeStore) getVolume(ctx context.Context, name, driverName string) (v return volumeWrapper{vol, 
meta.Labels, scope, meta.Options}, nil } - logrus.Debugf("Probing all drivers for volume with name: %s", name) + log.G(ctx).Debugf("Probing all drivers for volume with name: %s", name) drivers, err := s.drivers.GetAllDrivers() if err != nil { return nil, err @@ -774,7 +774,7 @@ func lookupVolume(ctx context.Context, store *drivers.Store, driverName, volumeN // At this point, the error could be anything from the driver, such as "no such volume" // Let's not check an error here, and instead check if the driver returned a volume - logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Debug("Error while looking up volume") + log.G(ctx).WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Debug("Error while looking up volume") } return v, nil } @@ -810,7 +810,7 @@ func (s *VolumeStore) Remove(ctx context.Context, v volume.Volume, rmOpts ...opt return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"} } - logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + log.G(ctx).Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) vol := unwrapVolume(v) err = vd.Remove(vol)