View Source

Revert "Merge pull request #16228 from duglin/ContextualizeEvents"

Although having a request ID available throughout the codebase is very
valuable, the impact of requiring a Context as an argument to every
function in the codepath of an API request, is too significant and was
not properly understood at the time of the review.

Furthermore, mixing API-layer code with non-API-layer code makes the
latter usable only by API-layer code (one that has a notion of Context).

This reverts commit de4164043546d2b9ee3bf323dbc41f4979c84480, reversing
changes made to 7daeecd42d7bb112bfe01532c8c9a962bb0c7967.

Signed-off-by: Tibor Vass <tibor@docker.com>

Conflicts:
	api/server/container.go
	builder/internals.go
	daemon/container_unix.go
	daemon/create.go
Tibor Vass 9 years ago
parent
commit
b08f071e18
68 changed files with 564 additions and 736 deletions
  1. 22 22
      api/server/container.go
  2. 4 4
      api/server/copy.go
  3. 2 2
      api/server/daemon.go
  4. 4 4
      api/server/exec.go
  5. 14 14
      api/server/image.go
  6. 3 3
      api/server/inspect.go
  7. 6 8
      api/server/server.go
  8. 2 6
      api/server/server_experimental_unix.go
  9. 1 5
      api/server/server_stub.go
  10. 2 3
      api/server/server_unix.go
  11. 2 3
      api/server/server_windows.go
  12. 4 4
      api/server/volume.go
  13. 41 42
      builder/dispatchers.go
  14. 8 9
      builder/evaluator.go
  15. 31 32
      builder/internals.go
  16. 10 11
      builder/job.go
  17. 24 25
      daemon/archive.go
  18. 6 7
      daemon/attach.go
  19. 3 6
      daemon/changes.go
  20. 4 5
      daemon/commit.go
  21. 57 59
      daemon/container.go
  22. 24 25
      daemon/container_unix.go
  23. 5 6
      daemon/container_windows.go
  24. 16 17
      daemon/create.go
  25. 44 42
      daemon/daemon.go
  26. 9 14
      daemon/daemon_test.go
  27. 7 8
      daemon/daemon_unix.go
  28. 3 4
      daemon/daemon_windows.go
  29. 11 13
      daemon/delete.go
  30. 2 4
      daemon/events/events.go
  31. 4 8
      daemon/events/events_test.go
  32. 10 11
      daemon/exec.go
  33. 3 4
      daemon/execdriver/driver.go
  34. 3 4
      daemon/execdriver/lxc/driver.go
  35. 4 5
      daemon/execdriver/native/create.go
  36. 3 4
      daemon/execdriver/native/driver.go
  37. 2 3
      daemon/execdriver/native/exec.go
  38. 2 3
      daemon/execdriver/windows/exec.go
  39. 2 3
      daemon/execdriver/windows/run.go
  40. 3 4
      daemon/export.go
  41. 40 41
      daemon/image_delete.go
  42. 6 7
      daemon/info.go
  43. 10 11
      daemon/inspect.go
  44. 4 7
      daemon/inspect_unix.go
  45. 3 6
      daemon/inspect_windows.go
  46. 5 9
      daemon/kill.go
  47. 23 24
      daemon/list.go
  48. 1 2
      daemon/logs.go
  49. 10 11
      daemon/monitor.go
  50. 3 4
      daemon/pause.go
  51. 5 6
      daemon/rename.go
  52. 4 8
      daemon/resize.go
  53. 3 4
      daemon/restart.go
  54. 5 6
      daemon/start.go
  55. 2 3
      daemon/stats.go
  56. 3 4
      daemon/stop.go
  57. 4 5
      daemon/top_unix.go
  58. 1 2
      daemon/top_windows.go
  59. 3 4
      daemon/unpause.go
  60. 2 3
      daemon/volumes_unix.go
  61. 1 2
      daemon/volumes_windows.go
  62. 3 7
      daemon/wait.go
  63. 9 15
      docker/daemon.go
  64. 2 3
      graph/import.go
  65. 2 3
      graph/pull.go
  66. 2 3
      graph/push.go
  67. 1 76
      integration-cli/docker_cli_events_test.go
  68. 0 4
      pkg/jsonmessage/jsonmessage.go

+ 22 - 22
api/server/container.go

@@ -45,7 +45,7 @@ func (s *Server) getContainersJSON(ctx context.Context, w http.ResponseWriter, r
 		config.Limit = limit
 	}
 
-	containers, err := s.daemon.Containers(ctx, config)
+	containers, err := s.daemon.Containers(config)
 	if err != nil {
 		return err
 	}
@@ -83,7 +83,7 @@ func (s *Server) getContainersStats(ctx context.Context, w http.ResponseWriter,
 		Version:   version,
 	}
 
-	return s.daemon.ContainerStats(ctx, vars["name"], config)
+	return s.daemon.ContainerStats(vars["name"], config)
 }
 
 func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -118,7 +118,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
 		closeNotifier = notifier.CloseNotify()
 	}
 
-	c, err := s.daemon.Get(ctx, vars["name"])
+	c, err := s.daemon.Get(vars["name"])
 	if err != nil {
 		return err
 	}
@@ -140,7 +140,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
 		Stop:       closeNotifier,
 	}
 
-	if err := s.daemon.ContainerLogs(ctx, c, logsConfig); err != nil {
+	if err := s.daemon.ContainerLogs(c, logsConfig); err != nil {
 		// The client may be expecting all of the data we're sending to
 		// be multiplexed, so send it through OutStream, which will
 		// have been set up to handle that if needed.
@@ -155,7 +155,7 @@ func (s *Server) getContainersExport(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Missing parameter")
 	}
 
-	return s.daemon.ContainerExport(ctx, vars["name"], w)
+	return s.daemon.ContainerExport(vars["name"], w)
 }
 
 func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -183,7 +183,7 @@ func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter,
 		hostConfig = c
 	}
 
-	if err := s.daemon.ContainerStart(ctx, vars["name"], hostConfig); err != nil {
+	if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -200,7 +200,7 @@ func (s *Server) postContainersStop(ctx context.Context, w http.ResponseWriter,
 
 	seconds, _ := strconv.Atoi(r.Form.Get("t"))
 
-	if err := s.daemon.ContainerStop(ctx, vars["name"], seconds); err != nil {
+	if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -227,7 +227,7 @@ func (s *Server) postContainersKill(ctx context.Context, w http.ResponseWriter,
 		}
 	}
 
-	if err := s.daemon.ContainerKill(ctx, name, uint64(sig)); err != nil {
+	if err := s.daemon.ContainerKill(name, uint64(sig)); err != nil {
 		theErr, isDerr := err.(errcode.ErrorCoder)
 		isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning
 
@@ -254,7 +254,7 @@ func (s *Server) postContainersRestart(ctx context.Context, w http.ResponseWrite
 
 	timeout, _ := strconv.Atoi(r.Form.Get("t"))
 
-	if err := s.daemon.ContainerRestart(ctx, vars["name"], timeout); err != nil {
+	if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil {
 		return err
 	}
 
@@ -271,7 +271,7 @@ func (s *Server) postContainersPause(ctx context.Context, w http.ResponseWriter,
 		return err
 	}
 
-	if err := s.daemon.ContainerPause(ctx, vars["name"]); err != nil {
+	if err := s.daemon.ContainerPause(vars["name"]); err != nil {
 		return err
 	}
 
@@ -288,7 +288,7 @@ func (s *Server) postContainersUnpause(ctx context.Context, w http.ResponseWrite
 		return err
 	}
 
-	if err := s.daemon.ContainerUnpause(ctx, vars["name"]); err != nil {
+	if err := s.daemon.ContainerUnpause(vars["name"]); err != nil {
 		return err
 	}
 
@@ -302,7 +302,7 @@ func (s *Server) postContainersWait(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Missing parameter")
 	}
 
-	status, err := s.daemon.ContainerWait(ctx, vars["name"], -1*time.Second)
+	status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second)
 	if err != nil {
 		return err
 	}
@@ -317,7 +317,7 @@ func (s *Server) getContainersChanges(ctx context.Context, w http.ResponseWriter
 		return fmt.Errorf("Missing parameter")
 	}
 
-	changes, err := s.daemon.ContainerChanges(ctx, vars["name"])
+	changes, err := s.daemon.ContainerChanges(vars["name"])
 	if err != nil {
 		return err
 	}
@@ -334,7 +334,7 @@ func (s *Server) getContainersTop(ctx context.Context, w http.ResponseWriter, r
 		return err
 	}
 
-	procList, err := s.daemon.ContainerTop(ctx, vars["name"], r.Form.Get("ps_args"))
+	procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args"))
 	if err != nil {
 		return err
 	}
@@ -352,7 +352,7 @@ func (s *Server) postContainerRename(ctx context.Context, w http.ResponseWriter,
 
 	name := vars["name"]
 	newName := r.Form.Get("name")
-	if err := s.daemon.ContainerRename(ctx, name, newName); err != nil {
+	if err := s.daemon.ContainerRename(name, newName); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -376,7 +376,7 @@ func (s *Server) postContainersCreate(ctx context.Context, w http.ResponseWriter
 	version := ctx.Version()
 	adjustCPUShares := version.LessThan("1.19")
 
-	ccr, err := s.daemon.ContainerCreate(ctx, name, config, hostConfig, adjustCPUShares)
+	ccr, err := s.daemon.ContainerCreate(name, config, hostConfig, adjustCPUShares)
 	if err != nil {
 		return err
 	}
@@ -399,7 +399,7 @@ func (s *Server) deleteContainers(ctx context.Context, w http.ResponseWriter, r
 		RemoveLink:   boolValue(r, "link"),
 	}
 
-	if err := s.daemon.ContainerRm(ctx, name, config); err != nil {
+	if err := s.daemon.ContainerRm(name, config); err != nil {
 		// Force a 404 for the empty string
 		if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") {
 			return fmt.Errorf("no such id: \"\"")
@@ -429,7 +429,7 @@ func (s *Server) postContainersResize(ctx context.Context, w http.ResponseWriter
 		return err
 	}
 
-	return s.daemon.ContainerResize(ctx, vars["name"], height, width)
+	return s.daemon.ContainerResize(vars["name"], height, width)
 }
 
 func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -441,7 +441,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
 	}
 	containerName := vars["name"]
 
-	if !s.daemon.Exists(ctx, containerName) {
+	if !s.daemon.Exists(containerName) {
 		return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
 	}
 
@@ -467,7 +467,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
 		Stream:    boolValue(r, "stream"),
 	}
 
-	if err := s.daemon.ContainerAttachWithLogs(ctx, containerName, attachWithLogsConfig); err != nil {
+	if err := s.daemon.ContainerAttachWithLogs(containerName, attachWithLogsConfig); err != nil {
 		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
 	}
 
@@ -483,7 +483,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
 	}
 	containerName := vars["name"]
 
-	if !s.daemon.Exists(ctx, containerName) {
+	if !s.daemon.Exists(containerName) {
 		return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
 	}
 
@@ -498,7 +498,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
 			Stream:    boolValue(r, "stream"),
 		}
 
-		if err := s.daemon.ContainerWsAttachWithLogs(ctx, containerName, wsAttachWithLogsConfig); err != nil {
+		if err := s.daemon.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil {
 			logrus.Errorf("Error attaching websocket: %s", err)
 		}
 	})

+ 4 - 4
api/server/copy.go

@@ -32,7 +32,7 @@ func (s *Server) postContainersCopy(ctx context.Context, w http.ResponseWriter,
 		return fmt.Errorf("Path cannot be empty")
 	}
 
-	data, err := s.daemon.ContainerCopy(ctx, vars["name"], cfg.Resource)
+	data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource)
 	if err != nil {
 		if strings.Contains(strings.ToLower(err.Error()), "no such id") {
 			w.WriteHeader(http.StatusNotFound)
@@ -74,7 +74,7 @@ func (s *Server) headContainersArchive(ctx context.Context, w http.ResponseWrite
 		return err
 	}
 
-	stat, err := s.daemon.ContainerStatPath(ctx, v.name, v.path)
+	stat, err := s.daemon.ContainerStatPath(v.name, v.path)
 	if err != nil {
 		return err
 	}
@@ -88,7 +88,7 @@ func (s *Server) getContainersArchive(ctx context.Context, w http.ResponseWriter
 		return err
 	}
 
-	tarArchive, stat, err := s.daemon.ContainerArchivePath(ctx, v.name, v.path)
+	tarArchive, stat, err := s.daemon.ContainerArchivePath(v.name, v.path)
 	if err != nil {
 		return err
 	}
@@ -111,5 +111,5 @@ func (s *Server) putContainersArchive(ctx context.Context, w http.ResponseWriter
 	}
 
 	noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir")
-	return s.daemon.ContainerExtractToDir(ctx, v.name, v.path, noOverwriteDirNonDir, r.Body)
+	return s.daemon.ContainerExtractToDir(v.name, v.path, noOverwriteDirNonDir, r.Body)
 }

+ 2 - 2
api/server/daemon.go

@@ -45,7 +45,7 @@ func (s *Server) getVersion(ctx context.Context, w http.ResponseWriter, r *http.
 }
 
 func (s *Server) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	info, err := s.daemon.SystemInfo(ctx)
+	info, err := s.daemon.SystemInfo()
 	if err != nil {
 		return err
 	}
@@ -120,7 +120,7 @@ func (s *Server) getEvents(ctx context.Context, w http.ResponseWriter, r *http.R
 	enc := json.NewEncoder(outStream)
 
 	getContainerID := func(cn string) string {
-		c, err := d.Get(ctx, cn)
+		c, err := d.Get(cn)
 		if err != nil {
 			return ""
 		}

+ 4 - 4
api/server/exec.go

@@ -19,7 +19,7 @@ func (s *Server) getExecByID(ctx context.Context, w http.ResponseWriter, r *http
 		return fmt.Errorf("Missing parameter 'id'")
 	}
 
-	eConfig, err := s.daemon.ContainerExecInspect(ctx, vars["id"])
+	eConfig, err := s.daemon.ContainerExecInspect(vars["id"])
 	if err != nil {
 		return err
 	}
@@ -47,7 +47,7 @@ func (s *Server) postContainerExecCreate(ctx context.Context, w http.ResponseWri
 	}
 
 	// Register an instance of Exec in container.
-	id, err := s.daemon.ContainerExecCreate(ctx, execConfig)
+	id, err := s.daemon.ContainerExecCreate(execConfig)
 	if err != nil {
 		logrus.Errorf("Error setting up exec command in container %s: %s", name, err)
 		return err
@@ -100,7 +100,7 @@ func (s *Server) postContainerExecStart(ctx context.Context, w http.ResponseWrit
 	}
 
 	// Now run the user process in container.
-	if err := s.daemon.ContainerExecStart(ctx, execName, stdin, stdout, stderr); err != nil {
+	if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil {
 		fmt.Fprintf(outStream, "Error running exec in container: %v\n", err)
 	}
 	return nil
@@ -123,5 +123,5 @@ func (s *Server) postContainerExecResize(ctx context.Context, w http.ResponseWri
 		return err
 	}
 
-	return s.daemon.ContainerExecResize(ctx, vars["name"], height, width)
+	return s.daemon.ContainerExecResize(vars["name"], height, width)
 }

+ 14 - 14
api/server/image.go

@@ -55,7 +55,7 @@ func (s *Server) postCommit(ctx context.Context, w http.ResponseWriter, r *http.
 		Config:  c,
 	}
 
-	imgID, err := builder.Commit(ctx, cname, s.daemon, commitCfg)
+	imgID, err := builder.Commit(cname, s.daemon, commitCfg)
 	if err != nil {
 		return err
 	}
@@ -112,7 +112,7 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
 			OutStream:   output,
 		}
 
-		err = s.daemon.Repositories(ctx).Pull(ctx, image, tag, imagePullConfig)
+		err = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
 	} else { //import
 		if tag == "" {
 			repo, tag = parsers.ParseRepositoryTag(repo)
@@ -124,12 +124,12 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
 		// generated from the download to be available to the output
 		// stream processing below
 		var newConfig *runconfig.Config
-		newConfig, err = builder.BuildFromConfig(ctx, s.daemon, &runconfig.Config{}, r.Form["changes"])
+		newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, r.Form["changes"])
 		if err != nil {
 			return err
 		}
 
-		err = s.daemon.Repositories(ctx).Import(ctx, src, repo, tag, message, r.Body, output, newConfig)
+		err = s.daemon.Repositories().Import(src, repo, tag, message, r.Body, output, newConfig)
 	}
 	if err != nil {
 		if !output.Flushed() {
@@ -184,7 +184,7 @@ func (s *Server) postImagesPush(ctx context.Context, w http.ResponseWriter, r *h
 
 	w.Header().Set("Content-Type", "application/json")
 
-	if err := s.daemon.Repositories(ctx).Push(ctx, name, imagePushConfig); err != nil {
+	if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil {
 		if !output.Flushed() {
 			return err
 		}
@@ -212,7 +212,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
 		names = r.Form["names"]
 	}
 
-	if err := s.daemon.Repositories(ctx).ImageExport(names, output); err != nil {
+	if err := s.daemon.Repositories().ImageExport(names, output); err != nil {
 		if !output.Flushed() {
 			return err
 		}
@@ -223,7 +223,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
 }
 
 func (s *Server) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return s.daemon.Repositories(ctx).Load(r.Body, w)
+	return s.daemon.Repositories().Load(r.Body, w)
 }
 
 func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -243,7 +243,7 @@ func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *htt
 	force := boolValue(r, "force")
 	prune := !boolValue(r, "noprune")
 
-	list, err := s.daemon.ImageDelete(ctx, name, force, prune)
+	list, err := s.daemon.ImageDelete(name, force, prune)
 	if err != nil {
 		return err
 	}
@@ -256,7 +256,7 @@ func (s *Server) getImagesByName(ctx context.Context, w http.ResponseWriter, r *
 		return fmt.Errorf("Missing parameter")
 	}
 
-	imageInspect, err := s.daemon.Repositories(ctx).Lookup(vars["name"])
+	imageInspect, err := s.daemon.Repositories().Lookup(vars["name"])
 	if err != nil {
 		return err
 	}
@@ -346,7 +346,7 @@ func (s *Server) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
 		}()
 	}
 
-	if err := builder.Build(ctx, s.daemon, buildConfig); err != nil {
+	if err := builder.Build(s.daemon, buildConfig); err != nil {
 		// Do not write the error in the http output if it's still empty.
 		// This prevents from writing a 200(OK) when there is an interal error.
 		if !output.Flushed() {
@@ -364,7 +364,7 @@ func (s *Server) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *ht
 	}
 
 	// FIXME: The filter parameter could just be a match filter
-	images, err := s.daemon.Repositories(ctx).Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
+	images, err := s.daemon.Repositories().Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
 	if err != nil {
 		return err
 	}
@@ -378,7 +378,7 @@ func (s *Server) getImagesHistory(ctx context.Context, w http.ResponseWriter, r
 	}
 
 	name := vars["name"]
-	history, err := s.daemon.Repositories(ctx).History(name)
+	history, err := s.daemon.Repositories().History(name)
 	if err != nil {
 		return err
 	}
@@ -398,10 +398,10 @@ func (s *Server) postImagesTag(ctx context.Context, w http.ResponseWriter, r *ht
 	tag := r.Form.Get("tag")
 	force := boolValue(r, "force")
 	name := vars["name"]
-	if err := s.daemon.Repositories(ctx).Tag(repo, tag, name, force); err != nil {
+	if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil {
 		return err
 	}
-	s.daemon.EventsService.Log(ctx, "tag", utils.ImageReference(repo, tag), "")
+	s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "")
 	w.WriteHeader(http.StatusCreated)
 	return nil
 }

+ 3 - 3
api/server/inspect.go

@@ -20,11 +20,11 @@ func (s *Server) getContainersByName(ctx context.Context, w http.ResponseWriter,
 
 	switch {
 	case version.LessThan("1.20"):
-		json, err = s.daemon.ContainerInspectPre120(ctx, vars["name"])
+		json, err = s.daemon.ContainerInspectPre120(vars["name"])
 	case version.Equal("1.20"):
-		json, err = s.daemon.ContainerInspect120(ctx, vars["name"])
+		json, err = s.daemon.ContainerInspect120(vars["name"])
 	default:
-		json, err = s.daemon.ContainerInspect(ctx, vars["name"])
+		json, err = s.daemon.ContainerInspect(vars["name"])
 	}
 
 	if err != nil {

+ 6 - 8
api/server/server.go

@@ -18,7 +18,6 @@ import (
 	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/sockets"
-	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/utils"
 )
 
@@ -42,12 +41,12 @@ type Server struct {
 }
 
 // New returns a new instance of the server based on the specified configuration.
-func New(ctx context.Context, cfg *Config) *Server {
+func New(cfg *Config) *Server {
 	srv := &Server{
 		cfg:   cfg,
 		start: make(chan struct{}),
 	}
-	srv.router = createRouter(ctx, srv)
+	srv.router = createRouter(srv)
 	return srv
 }
 
@@ -291,7 +290,7 @@ func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) {
 	return
 }
 
-func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
+func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// log the handler generation
 		logrus.Debugf("Calling %s %s", localMethod, localRoute)
@@ -303,8 +302,7 @@ func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localR
 		// apply to all requests. Data that is specific to the
 		// immediate function being called should still be passed
 		// as 'args' on the function call.
-		reqID := stringid.TruncateID(stringid.GenerateNonCryptoID())
-		ctx = context.WithValue(ctx, context.RequestID, reqID)
+		ctx := context.Background()
 		handlerFunc := s.handleWithGlobalMiddlewares(localHandler)
 
 		if err := handlerFunc(ctx, w, r, mux.Vars(r)); err != nil {
@@ -316,7 +314,7 @@ func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localR
 
 // createRouter initializes the main router the server uses.
 // we keep enableCors just for legacy usage, need to be removed in the future
-func createRouter(ctx context.Context, s *Server) *mux.Router {
+func createRouter(s *Server) *mux.Router {
 	r := mux.NewRouter()
 	if os.Getenv("DEBUG") != "" {
 		profilerSetup(r, "/debug/")
@@ -396,7 +394,7 @@ func createRouter(ctx context.Context, s *Server) *mux.Router {
 			localMethod := method
 
 			// build the handler function
-			f := s.makeHTTPHandler(ctx, localMethod, localRoute, localFct)
+			f := s.makeHTTPHandler(localMethod, localRoute, localFct)
 
 			// add the new route
 			if localRoute == "" {

+ 2 - 6
api/server/server_experimental_unix.go

@@ -2,12 +2,8 @@
 
 package server
 
-import (
-	"github.com/docker/docker/context"
-)
-
-func (s *Server) registerSubRouter(ctx context.Context) {
-	httpHandler := s.daemon.NetworkAPIRouter(ctx)
+func (s *Server) registerSubRouter() {
+	httpHandler := s.daemon.NetworkAPIRouter()
 
 	subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter()
 	subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler)

+ 1 - 5
api/server/server_stub.go

@@ -2,9 +2,5 @@
 
 package server
 
-import (
-	"github.com/docker/docker/context"
-)
-
-func (s *Server) registerSubRouter(ctx context.Context) {
+func (s *Server) registerSubRouter() {
 }

+ 2 - 3
api/server/server_unix.go

@@ -8,7 +8,6 @@ import (
 	"net/http"
 	"strconv"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/sockets"
 	"github.com/docker/libnetwork/portallocator"
@@ -64,10 +63,10 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
 // AcceptConnections allows clients to connect to the API server.
 // Referenced Daemon is notified about this server, and waits for the
 // daemon acknowledgement before the incoming connections are accepted.
-func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
+func (s *Server) AcceptConnections(d *daemon.Daemon) {
 	// Tell the init daemon we are accepting requests
 	s.daemon = d
-	s.registerSubRouter(ctx)
+	s.registerSubRouter()
 	go systemdDaemon.SdNotify("READY=1")
 	// close the lock so the listeners start accepting connections
 	select {

+ 2 - 3
api/server/server_windows.go

@@ -7,7 +7,6 @@ import (
 	"net"
 	"net/http"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 )
 
@@ -43,9 +42,9 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
 }
 
 // AcceptConnections allows router to start listening for the incoming requests.
-func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
+func (s *Server) AcceptConnections(d *daemon.Daemon) {
 	s.daemon = d
-	s.registerSubRouter(ctx)
+	s.registerSubRouter()
 	// close the lock so the listeners start accepting connections
 	select {
 	case <-s.start:

+ 4 - 4
api/server/volume.go

@@ -13,7 +13,7 @@ func (s *Server) getVolumesList(ctx context.Context, w http.ResponseWriter, r *h
 		return err
 	}
 
-	volumes, err := s.daemon.Volumes(ctx, r.Form.Get("filters"))
+	volumes, err := s.daemon.Volumes(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -25,7 +25,7 @@ func (s *Server) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *
 		return err
 	}
 
-	v, err := s.daemon.VolumeInspect(ctx, vars["name"])
+	v, err := s.daemon.VolumeInspect(vars["name"])
 	if err != nil {
 		return err
 	}
@@ -46,7 +46,7 @@ func (s *Server) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r
 		return err
 	}
 
-	volume, err := s.daemon.VolumeCreate(ctx, req.Name, req.Driver, req.DriverOpts)
+	volume, err := s.daemon.VolumeCreate(req.Name, req.Driver, req.DriverOpts)
 	if err != nil {
 		return err
 	}
@@ -57,7 +57,7 @@ func (s *Server) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *ht
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	if err := s.daemon.VolumeRm(ctx, vars["name"]); err != nil {
+	if err := s.daemon.VolumeRm(vars["name"]); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)

+ 41 - 42
builder/dispatchers.go

@@ -18,7 +18,6 @@ import (
 	"strings"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/nat"
@@ -44,7 +43,7 @@ func nullDispatch(b *builder, args []string, attributes map[string]bool, origina
 // Sets the environment variable foo to bar, also makes interpolation
 // in the dockerfile available from the next statement on via ${foo}.
 //
-func env(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func env(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV")
 	}
@@ -97,13 +96,13 @@ func env(ctx context.Context, b *builder, args []string, attributes map[string]b
 		j++
 	}
 
-	return b.commit(ctx, "", b.Config.Cmd, commitStr)
+	return b.commit("", b.Config.Cmd, commitStr)
 }
 
 // MAINTAINER some text <maybe@an.email.address>
 //
 // Sets the maintainer metadata.
-func maintainer(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func maintainer(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER")
 	}
@@ -113,14 +112,14 @@ func maintainer(ctx context.Context, b *builder, args []string, attributes map[s
 	}
 
 	b.maintainer = args[0]
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }
 
 // LABEL some json data describing the image
 //
 // Sets the Label variable foo to bar,
 //
-func label(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func label(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL")
 	}
@@ -148,7 +147,7 @@ func label(ctx context.Context, b *builder, args []string, attributes map[string
 		b.Config.Labels[args[j]] = args[j+1]
 		j++
 	}
-	return b.commit(ctx, "", b.Config.Cmd, commitStr)
+	return b.commit("", b.Config.Cmd, commitStr)
 }
 
 // ADD foo /path
@@ -156,7 +155,7 @@ func label(ctx context.Context, b *builder, args []string, attributes map[string
 // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
 // exist here. If you do not wish to have this automatic handling, use COPY.
 //
-func add(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func add(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) < 2 {
 		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD")
 	}
@@ -165,14 +164,14 @@ func add(ctx context.Context, b *builder, args []string, attributes map[string]b
 		return err
 	}
 
-	return b.runContextCommand(ctx, args, true, true, "ADD")
+	return b.runContextCommand(args, true, true, "ADD")
 }
 
 // COPY foo /path
 //
 // Same as 'ADD' but without the tar and remote url handling.
 //
-func dispatchCopy(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) < 2 {
 		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY")
 	}
@@ -181,14 +180,14 @@ func dispatchCopy(ctx context.Context, b *builder, args []string, attributes map
 		return err
 	}
 
-	return b.runContextCommand(ctx, args, false, false, "COPY")
+	return b.runContextCommand(args, false, false, "COPY")
 }
 
 // FROM imagename
 //
 // This sets the image the dockerfile will build on top of.
 //
-func from(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func from(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("FROM")
 	}
@@ -209,16 +208,16 @@ func from(ctx context.Context, b *builder, args []string, attributes map[string]
 		return nil
 	}
 
-	image, err := b.Daemon.Repositories(ctx).LookupImage(name)
+	image, err := b.Daemon.Repositories().LookupImage(name)
 	if b.Pull {
-		image, err = b.pullImage(ctx, name)
+		image, err = b.pullImage(name)
 		if err != nil {
 			return err
 		}
 	}
 	if err != nil {
-		if b.Daemon.Graph(ctx).IsNotExist(err, name) {
-			image, err = b.pullImage(ctx, name)
+		if b.Daemon.Graph().IsNotExist(err, name) {
+			image, err = b.pullImage(name)
 		}
 
 		// note that the top level err will still be !nil here if IsNotExist is
@@ -228,7 +227,7 @@ func from(ctx context.Context, b *builder, args []string, attributes map[string]
 		}
 	}
 
-	return b.processImageFrom(ctx, image)
+	return b.processImageFrom(image)
 }
 
 // ONBUILD RUN echo yo
@@ -240,7 +239,7 @@ func from(ctx context.Context, b *builder, args []string, attributes map[string]
 // special cases. search for 'OnBuild' in internals.go for additional special
 // cases.
 //
-func onbuild(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func onbuild(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD")
 	}
@@ -260,14 +259,14 @@ func onbuild(ctx context.Context, b *builder, args []string, attributes map[stri
 	original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")
 
 	b.Config.OnBuild = append(b.Config.OnBuild, original)
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
 }
 
 // WORKDIR /tmp
 //
 // Set the working directory for future RUN/CMD/etc statements.
 //
-func workdir(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func workdir(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
 	}
@@ -287,7 +286,7 @@ func workdir(ctx context.Context, b *builder, args []string, attributes map[stri
 
 	b.Config.WorkingDir = workdir
 
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
 }
 
 // RUN some command yo
@@ -300,7 +299,7 @@ func workdir(ctx context.Context, b *builder, args []string, attributes map[stri
 // RUN echo hi          # cmd /S /C echo hi   (Windows)
 // RUN [ "echo", "hi" ] # echo hi
 //
-func run(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func run(b *builder, args []string, attributes map[string]bool, original string) error {
 	if b.image == "" && !b.noBaseImage {
 		return derr.ErrorCodeMissingFrom
 	}
@@ -381,7 +380,7 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b
 	}
 
 	b.Config.Cmd = saveCmd
-	hit, err := b.probeCache(ctx)
+	hit, err := b.probeCache()
 	if err != nil {
 		return err
 	}
@@ -396,17 +395,17 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b
 
 	logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
 
-	c, err := b.create(ctx)
+	c, err := b.create()
 	if err != nil {
 		return err
 	}
 
 	// Ensure that we keep the container mounted until the commit
 	// to avoid unmounting and then mounting directly again
-	c.Mount(ctx)
-	defer c.Unmount(ctx)
+	c.Mount()
+	defer c.Unmount()
 
-	err = b.run(ctx, c)
+	err = b.run(c)
 	if err != nil {
 		return err
 	}
@@ -416,7 +415,7 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b
 	// properly match it.
 	b.Config.Env = env
 	b.Config.Cmd = saveCmd
-	if err := b.commit(ctx, c.ID, cmd, "run"); err != nil {
+	if err := b.commit(c.ID, cmd, "run"); err != nil {
 		return err
 	}
 
@@ -428,7 +427,7 @@ func run(ctx context.Context, b *builder, args []string, attributes map[string]b
 // Set the default command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
 //
-func cmd(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func cmd(b *builder, args []string, attributes map[string]bool, original string) error {
 	if err := b.BuilderFlags.Parse(); err != nil {
 		return err
 	}
@@ -445,7 +444,7 @@ func cmd(ctx context.Context, b *builder, args []string, attributes map[string]b
 
 	b.Config.Cmd = stringutils.NewStrSlice(cmdSlice...)
 
-	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
 		return err
 	}
 
@@ -464,7 +463,7 @@ func cmd(ctx context.Context, b *builder, args []string, attributes map[string]b
 // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
 // is initialized at NewBuilder time instead of through argument parsing.
 //
-func entrypoint(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error {
 	if err := b.BuilderFlags.Parse(); err != nil {
 		return err
 	}
@@ -493,7 +492,7 @@ func entrypoint(ctx context.Context, b *builder, args []string, attributes map[s
 		b.Config.Cmd = nil
 	}
 
-	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
 		return err
 	}
 
@@ -505,7 +504,7 @@ func entrypoint(ctx context.Context, b *builder, args []string, attributes map[s
 // Expose ports for links and port mappings. This all ends up in
 // b.Config.ExposedPorts for runconfig.
 //
-func expose(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func expose(b *builder, args []string, attributes map[string]bool, original string) error {
 	portsTab := args
 
 	if len(args) == 0 {
@@ -538,7 +537,7 @@ func expose(ctx context.Context, b *builder, args []string, attributes map[strin
 		i++
 	}
 	sort.Strings(portList)
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
 }
 
 // USER foo
@@ -546,7 +545,7 @@ func expose(ctx context.Context, b *builder, args []string, attributes map[strin
 // Set the user to 'foo' for future commands and when running the
 // ENTRYPOINT/CMD at container run time.
 //
-func user(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func user(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return derr.ErrorCodeExactlyOneArg.WithArgs("USER")
 	}
@@ -556,14 +555,14 @@ func user(ctx context.Context, b *builder, args []string, attributes map[string]
 	}
 
 	b.Config.User = args[0]
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("USER %v", args))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
 }
 
 // VOLUME /foo
 //
 // Expose the volume /foo for use. Will also accept the JSON array form.
 //
-func volume(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func volume(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 		return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME")
 	}
@@ -582,7 +581,7 @@ func volume(ctx context.Context, b *builder, args []string, attributes map[strin
 		}
 		b.Config.Volumes[v] = struct{}{}
 	}
-	if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
 		return err
 	}
 	return nil
@@ -591,7 +590,7 @@ func volume(ctx context.Context, b *builder, args []string, attributes map[strin
 // STOPSIGNAL signal
 //
 // Set the signal that will be used to kill the container.
-func stopSignal(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func stopSignal(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("STOPSIGNAL requires exactly one argument")
 	}
@@ -603,7 +602,7 @@ func stopSignal(ctx context.Context, b *builder, args []string, attributes map[s
 	}
 
 	b.Config.StopSignal = sig
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
 }
 
 // ARG name[=value]
@@ -611,7 +610,7 @@ func stopSignal(ctx context.Context, b *builder, args []string, attributes map[s
 // Adds the variable foo to the trusted list of variables that can be passed
 // to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
 // Dockerfile author may optionally set a default value of this variable.
-func arg(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
+func arg(b *builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("ARG requires exactly one argument definition")
 	}
@@ -647,5 +646,5 @@ func arg(ctx context.Context, b *builder, args []string, attributes map[string]b
 		b.buildArgs[name] = value
 	}
 
-	return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
 }

+ 8 - 9
builder/evaluator.go

@@ -32,7 +32,6 @@ import (
 	"github.com/docker/docker/builder/command"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/streamformatter"
@@ -58,10 +57,10 @@ var replaceEnvAllowed = map[string]struct{}{
 	command.Arg:        {},
 }
 
-var evaluateTable map[string]func(context.Context, *builder, []string, map[string]bool, string) error
+var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error
 
 func init() {
-	evaluateTable = map[string]func(context.Context, *builder, []string, map[string]bool, string) error{
+	evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{
 		command.Env:        env,
 		command.Label:      label,
 		command.Maintainer: maintainer,
@@ -159,7 +158,7 @@ type builder struct {
 //   processing.
 // * Print a happy message and return the image ID.
 //
-func (b *builder) Run(ctx context.Context, context io.Reader) (string, error) {
+func (b *builder) Run(context io.Reader) (string, error) {
 	if err := b.readContext(context); err != nil {
 		return "", err
 	}
@@ -188,15 +187,15 @@ func (b *builder) Run(ctx context.Context, context io.Reader) (string, error) {
 		default:
 			// Not cancelled yet, keep going...
 		}
-		if err := b.dispatch(ctx, i, n); err != nil {
+		if err := b.dispatch(i, n); err != nil {
 			if b.ForceRemove {
-				b.clearTmp(ctx)
+				b.clearTmp()
 			}
 			return "", err
 		}
 		fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
 		if b.Remove {
-			b.clearTmp(ctx)
+			b.clearTmp()
 		}
 	}
 
@@ -312,7 +311,7 @@ func (b *builder) isBuildArgAllowed(arg string) bool {
 // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
 // deal with that, at least until it becomes more of a general concern with new
 // features.
-func (b *builder) dispatch(ctx context.Context, stepN int, ast *parser.Node) error {
+func (b *builder) dispatch(stepN int, ast *parser.Node) error {
 	cmd := ast.Value
 
 	// To ensure the user is given a decent error message if the platform
@@ -405,7 +404,7 @@ func (b *builder) dispatch(ctx context.Context, stepN int, ast *parser.Node) err
 	if f, ok := evaluateTable[cmd]; ok {
 		b.BuilderFlags = NewBFlags()
 		b.BuilderFlags.Args = flags
-		return f(ctx, b, strList, attrs, original)
+		return f(b, strList, attrs, original)
 	}
 
 	return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))

+ 31 - 32
builder/internals.go

@@ -22,7 +22,6 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
@@ -76,7 +75,7 @@ func (b *builder) readContext(context io.Reader) (err error) {
 	return
 }
 
-func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.StrSlice, comment string) error {
+func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
 	if b.disableCommit {
 		return nil
 	}
@@ -93,7 +92,7 @@ func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.St
 		}
 		defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
 
-		hit, err := b.probeCache(ctx)
+		hit, err := b.probeCache()
 		if err != nil {
 			return err
 		}
@@ -101,18 +100,18 @@ func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.St
 			return nil
 		}
 
-		container, err := b.create(ctx)
+		container, err := b.create()
 		if err != nil {
 			return err
 		}
 		id = container.ID
 
-		if err := container.Mount(ctx); err != nil {
+		if err := container.Mount(); err != nil {
 			return err
 		}
-		defer container.Unmount(ctx)
+		defer container.Unmount()
 	}
-	container, err := b.Daemon.Get(ctx, id)
+	container, err := b.Daemon.Get(id)
 	if err != nil {
 		return err
 	}
@@ -128,11 +127,11 @@ func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.St
 	}
 
 	// Commit the container
-	image, err := b.Daemon.Commit(ctx, container, commitCfg)
+	image, err := b.Daemon.Commit(container, commitCfg)
 	if err != nil {
 		return err
 	}
-	b.Daemon.Graph(ctx).Retain(b.id, image.ID)
+	b.Daemon.Graph().Retain(b.id, image.ID)
 	b.activeImages = append(b.activeImages, image.ID)
 	b.image = image.ID
 	return nil
@@ -146,7 +145,7 @@ type copyInfo struct {
 	tmpDir     string
 }
 
-func (b *builder) runContextCommand(ctx context.Context, args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
+func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	if b.context == nil {
 		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
 	}
@@ -224,7 +223,7 @@ func (b *builder) runContextCommand(ctx context.Context, args []string, allowRem
 	}
 	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
 
-	hit, err := b.probeCache(ctx)
+	hit, err := b.probeCache()
 	if err != nil {
 		return err
 	}
@@ -233,21 +232,21 @@ func (b *builder) runContextCommand(ctx context.Context, args []string, allowRem
 		return nil
 	}
 
-	ccr, err := b.Daemon.ContainerCreate(ctx, "", b.Config, nil, true)
+	ccr, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
 	if err != nil {
 		return err
 	}
-	container, err := b.Daemon.Get(ctx, ccr.ID)
+	container, err := b.Daemon.Get(ccr.ID)
 	if err != nil {
 		return err
 	}
 
 	b.TmpContainers[container.ID] = struct{}{}
 
-	if err := container.Mount(ctx); err != nil {
+	if err := container.Mount(); err != nil {
 		return err
 	}
-	defer container.Unmount(ctx)
+	defer container.Unmount()
 
 	for _, ci := range copyInfos {
 		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
@@ -255,7 +254,7 @@ func (b *builder) runContextCommand(ctx context.Context, args []string, allowRem
 		}
 	}
 
-	if err := b.commit(ctx, container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
+	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
 		return err
 	}
 	return nil
@@ -490,7 +489,7 @@ func containsWildcards(name string) bool {
 	return false
 }
 
-func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, error) {
+func (b *builder) pullImage(name string) (*image.Image, error) {
 	remote, tag := parsers.ParseRepositoryTag(name)
 	if tag == "" {
 		tag = "latest"
@@ -516,11 +515,11 @@ func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, err
 		OutStream:  ioutils.NopWriteCloser(b.OutOld),
 	}
 
-	if err := b.Daemon.Repositories(ctx).Pull(ctx, remote, tag, imagePullConfig); err != nil {
+	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
 		return nil, err
 	}
 
-	image, err := b.Daemon.Repositories(ctx).LookupImage(name)
+	image, err := b.Daemon.Repositories().LookupImage(name)
 	if err != nil {
 		return nil, err
 	}
@@ -528,7 +527,7 @@ func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, err
 	return image, nil
 }
 
-func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error {
+func (b *builder) processImageFrom(img *image.Image) error {
 	b.image = img.ID
 
 	if img.Config != nil {
@@ -568,7 +567,7 @@ func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error
 				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
 			}
 
-			if err := b.dispatch(ctx, i, n); err != nil {
+			if err := b.dispatch(i, n); err != nil {
 				return err
 			}
 		}
@@ -582,12 +581,12 @@ func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error
 // in the current server `b.Daemon`. If an image is found, probeCache returns
 // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
 // is any error, it returns `(false, err)`.
-func (b *builder) probeCache(ctx context.Context) (bool, error) {
+func (b *builder) probeCache() (bool, error) {
 	if !b.UtilizeCache || b.cacheBusted {
 		return false, nil
 	}
 
-	cache, err := b.Daemon.ImageGetCached(ctx, b.image, b.Config)
+	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
 	if err != nil {
 		return false, err
 	}
@@ -600,12 +599,12 @@ func (b *builder) probeCache(ctx context.Context) (bool, error) {
 	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
 	logrus.Debugf("[BUILDER] Use cached version")
 	b.image = cache.ID
-	b.Daemon.Graph(ctx).Retain(b.id, cache.ID)
+	b.Daemon.Graph().Retain(b.id, cache.ID)
 	b.activeImages = append(b.activeImages, cache.ID)
 	return true, nil
 }
 
-func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
+func (b *builder) create() (*daemon.Container, error) {
 	if b.image == "" && !b.noBaseImage {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
@@ -626,14 +625,14 @@ func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
 	config := *b.Config
 
 	// Create the container
-	ccr, err := b.Daemon.ContainerCreate(ctx, "", b.Config, hostConfig, true)
+	ccr, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
 	if err != nil {
 		return nil, err
 	}
 	for _, warning := range ccr.Warnings {
 		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
 	}
-	c, err := b.Daemon.Get(ctx, ccr.ID)
+	c, err := b.Daemon.Get(ccr.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -653,14 +652,14 @@ func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
 	return c, nil
 }
 
-func (b *builder) run(ctx context.Context, c *daemon.Container) error {
+func (b *builder) run(c *daemon.Container) error {
 	var errCh chan error
 	if b.Verbose {
 		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
 	}
 
 	//start the container
-	if err := c.Start(ctx); err != nil {
+	if err := c.Start(); err != nil {
 		return err
 	}
 
@@ -670,7 +669,7 @@ func (b *builder) run(ctx context.Context, c *daemon.Container) error {
 		select {
 		case <-b.cancelled:
 			logrus.Debugln("Build cancelled, killing container:", c.ID)
-			c.Kill(ctx)
+			c.Kill()
 		case <-finished:
 		}
 	}()
@@ -801,13 +800,13 @@ func copyAsDirectory(source, destination string, destExisted bool) error {
 	return fixPermissions(source, destination, 0, 0, destExisted)
 }
 
-func (b *builder) clearTmp(ctx context.Context) {
+func (b *builder) clearTmp() {
 	for c := range b.TmpContainers {
 		rmConfig := &daemon.ContainerRmConfig{
 			ForceRemove:  true,
 			RemoveVolume: true,
 		}
-		if err := b.Daemon.ContainerRm(ctx, c, rmConfig); err != nil {
+		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
 			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
 			return
 		}

+ 10 - 11
builder/job.go

@@ -14,7 +14,6 @@ import (
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/pkg/archive"
@@ -113,7 +112,7 @@ func NewBuildConfig() *Config {
 
 // Build is the main interface of the package, it gathers the Builder
 // struct and calls builder.Run() to do all the real build job.
-func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
+func Build(d *daemon.Daemon, buildConfig *Config) error {
 	var (
 		repoName string
 		tag      string
@@ -230,15 +229,15 @@ func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
 	}
 
 	defer func() {
-		builder.Daemon.Graph(ctx).Release(builder.id, builder.activeImages...)
+		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
 	}()
 
-	id, err := builder.Run(ctx, context)
+	id, err := builder.Run(context)
 	if err != nil {
 		return err
 	}
 	if repoName != "" {
-		return d.Repositories(ctx).Tag(repoName, tag, id, true)
+		return d.Repositories().Tag(repoName, tag, id, true)
 	}
 	return nil
 }
@@ -248,7 +247,7 @@ func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
 //
 // - call parse.Parse() to get AST root from Dockerfile entries
 // - do build by calling builder.dispatch() to call all entries' handling routines
-func BuildFromConfig(ctx context.Context, d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
+func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
 	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
 	if err != nil {
 		return nil, err
@@ -270,7 +269,7 @@ func BuildFromConfig(ctx context.Context, d *daemon.Daemon, c *runconfig.Config,
 	}
 
 	for i, n := range ast.Children {
-		if err := builder.dispatch(ctx, i, n); err != nil {
+		if err := builder.dispatch(i, n); err != nil {
 			return nil, err
 		}
 	}
@@ -290,8 +289,8 @@ type CommitConfig struct {
 }
 
 // Commit will create a new image from a container's changes
-func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
-	container, err := d.Get(ctx, name)
+func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
+	container, err := d.Get(name)
 	if err != nil {
 		return "", err
 	}
@@ -305,7 +304,7 @@ func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig)
 		c.Config = &runconfig.Config{}
 	}
 
-	newConfig, err := BuildFromConfig(ctx, d, c.Config, c.Changes)
+	newConfig, err := BuildFromConfig(d, c.Config, c.Changes)
 	if err != nil {
 		return "", err
 	}
@@ -323,7 +322,7 @@ func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig)
 		Config:  newConfig,
 	}
 
-	img, err := d.Commit(ctx, container, commitCfg)
+	img, err := d.Commit(container, commitCfg)
 	if err != nil {
 		return "", err
 	}

+ 24 - 25
daemon/archive.go

@@ -8,7 +8,6 @@ import (
 	"strings"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/ioutils"
@@ -21,8 +20,8 @@ var ErrExtractPointNotDirectory = errors.New("extraction point is not a director
 
 // ContainerCopy performs a deprecated operation of archiving the resource at
 // the specified path in the conatiner identified by the given name.
-func (daemon *Daemon) ContainerCopy(ctx context.Context, name string, res string) (io.ReadCloser, error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
 	}
@@ -31,30 +30,30 @@ func (daemon *Daemon) ContainerCopy(ctx context.Context, name string, res string
 		res = res[1:]
 	}
 
-	return container.copy(ctx, res)
+	return container.copy(res)
 }
 
 // ContainerStatPath stats the filesystem resource at the specified path in the
 // container identified by the given name.
-func (daemon *Daemon) ContainerStatPath(ctx context.Context, name string, path string) (stat *types.ContainerPathStat, err error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
 	}
 
-	return container.StatPath(ctx, path)
+	return container.StatPath(path)
 }
 
 // ContainerArchivePath creates an archive of the filesystem resource at the
 // specified path in the container identified by the given name. Returns a
 // tar archive of the resource and whether it was a directory or a single file.
-func (daemon *Daemon) ContainerArchivePath(ctx context.Context, name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	return container.ArchivePath(ctx, path)
+	return container.ArchivePath(path)
 }
 
 // ContainerExtractToDir extracts the given archive to the specified location
@@ -63,13 +62,13 @@ func (daemon *Daemon) ContainerArchivePath(ctx context.Context, name string, pat
 // be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will
 // be an error if unpacking the given content would cause an existing directory
 // to be replaced with a non-directory and vice versa.
-func (daemon *Daemon) ContainerExtractToDir(ctx context.Context, name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
-	return container.ExtractToDir(ctx, path, noOverwriteDirNonDir, content)
+	return container.ExtractToDir(path, noOverwriteDirNonDir, content)
 }
 
 // resolvePath resolves the given path in the container to a resource on the
@@ -134,14 +133,14 @@ func (container *Container) statPath(resolvedPath, absPath string) (stat *types.
 
 // StatPath stats the filesystem resource at the specified path in this
 // container. Returns stat info about the resource.
-func (container *Container) StatPath(ctx context.Context, path string) (stat *types.ContainerPathStat, err error) {
+func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {
 	container.Lock()
 	defer container.Unlock()
 
-	if err = container.Mount(ctx); err != nil {
+	if err = container.Mount(); err != nil {
 		return nil, err
 	}
-	defer container.Unmount(ctx)
+	defer container.Unmount()
 
 	err = container.mountVolumes()
 	defer container.unmountVolumes(true)
@@ -160,7 +159,7 @@ func (container *Container) StatPath(ctx context.Context, path string) (stat *ty
 // ArchivePath creates an archive of the filesystem resource at the specified
 // path in this container. Returns a tar archive of the resource and stat info
 // about the resource.
-func (container *Container) ArchivePath(ctx context.Context, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
+func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
 	container.Lock()
 
 	defer func() {
@@ -172,7 +171,7 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
 		}
 	}()
 
-	if err = container.Mount(ctx); err != nil {
+	if err = container.Mount(); err != nil {
 		return nil, nil, err
 	}
 
@@ -181,7 +180,7 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
 			// unmount any volumes
 			container.unmountVolumes(true)
 			// unmount the container's rootfs
-			container.Unmount(ctx)
+			container.Unmount()
 		}
 	}()
 
@@ -215,12 +214,12 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
 	content = ioutils.NewReadCloserWrapper(data, func() error {
 		err := data.Close()
 		container.unmountVolumes(true)
-		container.Unmount(ctx)
+		container.Unmount()
 		container.Unlock()
 		return err
 	})
 
-	container.logEvent(ctx, "archive-path")
+	container.logEvent("archive-path")
 
 	return content, stat, nil
 }
@@ -231,14 +230,14 @@ func (container *Container) ArchivePath(ctx context.Context, path string) (conte
 // noOverwriteDirNonDir is true then it will be an error if unpacking the
 // given content would cause an existing directory to be replaced with a non-
 // directory and vice versa.
-func (container *Container) ExtractToDir(ctx context.Context, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
+func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
 	container.Lock()
 	defer container.Unlock()
 
-	if err = container.Mount(ctx); err != nil {
+	if err = container.Mount(); err != nil {
 		return err
 	}
-	defer container.Unmount(ctx)
+	defer container.Unmount()
 
 	err = container.mountVolumes()
 	defer container.unmountVolumes(true)
@@ -319,7 +318,7 @@ func (container *Container) ExtractToDir(ctx context.Context, path string, noOve
 		return err
 	}
 
-	container.logEvent(ctx, "extract-to-dir")
+	container.logEvent("extract-to-dir")
 
 	return nil
 }

+ 6 - 7
daemon/attach.go

@@ -3,7 +3,6 @@ package daemon
 import (
 	"io"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/stdcopy"
 )
 
@@ -16,8 +15,8 @@ type ContainerAttachWithLogsConfig struct {
 }
 
 // ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig.
-func (daemon *Daemon) ContainerAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerAttachWithLogsConfig) error {
-	container, err := daemon.Get(ctx, prefixOrName)
+func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerAttachWithLogsConfig) error {
+	container, err := daemon.Get(prefixOrName)
 	if err != nil {
 		return err
 	}
@@ -44,7 +43,7 @@ func (daemon *Daemon) ContainerAttachWithLogs(ctx context.Context, prefixOrName
 		stderr = errStream
 	}
 
-	return container.attachWithLogs(ctx, stdin, stdout, stderr, c.Logs, c.Stream)
+	return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
 }
 
 // ContainerWsAttachWithLogsConfig attach with websockets, since all
@@ -56,10 +55,10 @@ type ContainerWsAttachWithLogsConfig struct {
 }
 
 // ContainerWsAttachWithLogs websocket connection
-func (daemon *Daemon) ContainerWsAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
-	container, err := daemon.Get(ctx, prefixOrName)
+func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
+	container, err := daemon.Get(prefixOrName)
 	if err != nil {
 		return err
 	}
-	return container.attachWithLogs(ctx, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
+	return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
 }

+ 3 - 6
daemon/changes.go

@@ -1,13 +1,10 @@
 package daemon
 
-import (
-	"github.com/docker/docker/context"
-	"github.com/docker/docker/pkg/archive"
-)
+import "github.com/docker/docker/pkg/archive"
 
 // ContainerChanges returns a list of container fs changes
-func (daemon *Daemon) ContainerChanges(ctx context.Context, name string) ([]archive.Change, error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
 	}

+ 4 - 5
daemon/commit.go

@@ -1,7 +1,6 @@
 package daemon
 
 import (
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/runconfig"
 )
@@ -19,10 +18,10 @@ type ContainerCommitConfig struct {
 
 // Commit creates a new filesystem image from the current state of a container.
 // The image can optionally be tagged into a repository.
-func (daemon *Daemon) Commit(ctx context.Context, container *Container, c *ContainerCommitConfig) (*image.Image, error) {
+func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
 	if c.Pause && !container.isPaused() {
-		container.pause(ctx)
-		defer container.unpause(ctx)
+		container.pause()
+		defer container.unpause()
 	}
 
 	rwTar, err := container.exportContainerRw()
@@ -47,6 +46,6 @@ func (daemon *Daemon) Commit(ctx context.Context, container *Container, c *Conta
 			return img, err
 		}
 	}
-	container.logEvent(ctx, "commit")
+	container.logEvent("commit")
 	return img, nil
 }

+ 57 - 59
daemon/container.go

@@ -15,7 +15,6 @@ import (
 	"github.com/opencontainers/runc/libcontainer/label"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/logger/jsonfilelog"
@@ -171,10 +170,9 @@ func (container *Container) writeHostConfig() error {
 	return ioutil.WriteFile(pth, data, 0666)
 }
 
-func (container *Container) logEvent(ctx context.Context, action string) {
+func (container *Container) logEvent(action string) {
 	d := container.daemon
 	d.EventsService.Log(
-		ctx,
 		action,
 		container.ID,
 		container.Config.Image,
@@ -240,7 +238,7 @@ func (container *Container) exportContainerRw() (archive.Archive, error) {
 // container needs, such as storage and networking, as well as links
 // between containers. The container is left waiting for a signal to
 // begin running.
-func (container *Container) Start(ctx context.Context) (err error) {
+func (container *Container) Start() (err error) {
 	container.Lock()
 	defer container.Unlock()
 
@@ -262,12 +260,12 @@ func (container *Container) Start(ctx context.Context) (err error) {
 				container.ExitCode = 128
 			}
 			container.toDisk()
-			container.cleanup(ctx)
-			container.logEvent(ctx, "die")
+			container.cleanup()
+			container.logEvent("die")
 		}
 	}()
 
-	if err := container.Mount(ctx); err != nil {
+	if err := container.Mount(); err != nil {
 		return err
 	}
 
@@ -275,10 +273,10 @@ func (container *Container) Start(ctx context.Context) (err error) {
 	// backwards API compatibility.
 	container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig)
 
-	if err := container.initializeNetworking(ctx); err != nil {
+	if err := container.initializeNetworking(); err != nil {
 		return err
 	}
-	linkedEnv, err := container.setupLinkedContainers(ctx)
+	linkedEnv, err := container.setupLinkedContainers()
 	if err != nil {
 		return err
 	}
@@ -286,7 +284,7 @@ func (container *Container) Start(ctx context.Context) (err error) {
 		return err
 	}
 	env := container.createDaemonEnvironment(linkedEnv)
-	if err := populateCommand(ctx, container, env); err != nil {
+	if err := populateCommand(container, env); err != nil {
 		return err
 	}
 
@@ -303,7 +301,7 @@ func (container *Container) Start(ctx context.Context) (err error) {
 	mounts = append(mounts, container.ipcMounts()...)
 
 	container.command.Mounts = mounts
-	return container.waitForStart(ctx)
+	return container.waitForStart()
 }
 
 // streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
@@ -336,14 +334,14 @@ func (container *Container) isNetworkAllocated() bool {
 
 // cleanup releases any network resources allocated to the container along with any rules
 // around how containers are linked together.  It also unmounts the container's root filesystem.
-func (container *Container) cleanup(ctx context.Context) {
+func (container *Container) cleanup() {
 	container.releaseNetwork()
 
 	if err := container.unmountIpcMounts(); err != nil {
 		logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
 	}
 
-	if err := container.Unmount(ctx); err != nil {
+	if err := container.Unmount(); err != nil {
 		logrus.Errorf("%s: Failed to umount filesystem: %v", container.ID, err)
 	}
 
@@ -359,7 +357,7 @@ func (container *Container) cleanup(ctx context.Context) {
 // to send the signal. An error is returned if the container is paused
 // or not running, or if there is a problem returned from the
 // underlying kill command.
-func (container *Container) killSig(ctx context.Context, sig int) error {
+func (container *Container) killSig(sig int) error {
 	logrus.Debugf("Sending %d to %s", sig, container.ID)
 	container.Lock()
 	defer container.Unlock()
@@ -387,13 +385,13 @@ func (container *Container) killSig(ctx context.Context, sig int) error {
 	if err := container.daemon.kill(container, sig); err != nil {
 		return err
 	}
-	container.logEvent(ctx, "kill")
+	container.logEvent("kill")
 	return nil
 }
 
 // Wrapper aroung killSig() suppressing "no such process" error.
-func (container *Container) killPossiblyDeadProcess(ctx context.Context, sig int) error {
-	err := container.killSig(ctx, sig)
+func (container *Container) killPossiblyDeadProcess(sig int) error {
+	err := container.killSig(sig)
 	if err == syscall.ESRCH {
 		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
 		return nil
@@ -401,7 +399,7 @@ func (container *Container) killPossiblyDeadProcess(ctx context.Context, sig int
 	return err
 }
 
-func (container *Container) pause(ctx context.Context) error {
+func (container *Container) pause() error {
 	container.Lock()
 	defer container.Unlock()
 
@@ -419,11 +417,11 @@ func (container *Container) pause(ctx context.Context) error {
 		return err
 	}
 	container.Paused = true
-	container.logEvent(ctx, "pause")
+	container.logEvent("pause")
 	return nil
 }
 
-func (container *Container) unpause(ctx context.Context) error {
+func (container *Container) unpause() error {
 	container.Lock()
 	defer container.Unlock()
 
@@ -441,18 +439,18 @@ func (container *Container) unpause(ctx context.Context) error {
 		return err
 	}
 	container.Paused = false
-	container.logEvent(ctx, "unpause")
+	container.logEvent("unpause")
 	return nil
 }
 
 // Kill forcefully terminates a container.
-func (container *Container) Kill(ctx context.Context) error {
+func (container *Container) Kill() error {
 	if !container.IsRunning() {
 		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
 	}
 
 	// 1. Send SIGKILL
-	if err := container.killPossiblyDeadProcess(ctx, int(syscall.SIGKILL)); err != nil {
+	if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
 		// While normally we might "return err" here we're not going to
 		// because if we can't stop the container by this point then
 		// its probably because its already stopped. Meaning, between
@@ -486,15 +484,15 @@ func (container *Container) Kill(ctx context.Context) error {
 // process to exit. If a negative duration is given, Stop will wait
 // for the initial signal forever. If the container is not running Stop returns
 // immediately.
-func (container *Container) Stop(ctx context.Context, seconds int) error {
+func (container *Container) Stop(seconds int) error {
 	if !container.IsRunning() {
 		return nil
 	}
 
 	// 1. Send a SIGTERM
-	if err := container.killPossiblyDeadProcess(ctx, container.stopSignal()); err != nil {
+	if err := container.killPossiblyDeadProcess(container.stopSignal()); err != nil {
 		logrus.Infof("Failed to send SIGTERM to the process, force killing")
-		if err := container.killPossiblyDeadProcess(ctx, 9); err != nil {
+		if err := container.killPossiblyDeadProcess(9); err != nil {
 			return err
 		}
 	}
@@ -503,13 +501,13 @@ func (container *Container) Stop(ctx context.Context, seconds int) error {
 	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
 		logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
 		// 3. If it doesn't, then send SIGKILL
-		if err := container.Kill(ctx); err != nil {
+		if err := container.Kill(); err != nil {
 			container.WaitStop(-1 * time.Second)
 			return err
 		}
 	}
 
-	container.logEvent(ctx, "stop")
+	container.logEvent("stop")
 	return nil
 }
 
@@ -517,61 +515,61 @@ func (container *Container) Stop(ctx context.Context, seconds int) error {
 // container. When stopping, wait for the given duration in seconds to
 // gracefully stop, before forcefully terminating the container. If
 // given a negative duration, wait forever for a graceful stop.
-func (container *Container) Restart(ctx context.Context, seconds int) error {
+func (container *Container) Restart(seconds int) error {
 	// Avoid unnecessarily unmounting and then directly mounting
 	// the container when the container stops and then starts
 	// again
-	if err := container.Mount(ctx); err == nil {
-		defer container.Unmount(ctx)
+	if err := container.Mount(); err == nil {
+		defer container.Unmount()
 	}
 
-	if err := container.Stop(ctx, seconds); err != nil {
+	if err := container.Stop(seconds); err != nil {
 		return err
 	}
 
-	if err := container.Start(ctx); err != nil {
+	if err := container.Start(); err != nil {
 		return err
 	}
 
-	container.logEvent(ctx, "restart")
+	container.logEvent("restart")
 	return nil
 }
 
 // Resize changes the TTY of the process running inside the container
 // to the given height and width. The container must be running.
-func (container *Container) Resize(ctx context.Context, h, w int) error {
+func (container *Container) Resize(h, w int) error {
 	if !container.IsRunning() {
 		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
 	}
 	if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
 		return err
 	}
-	container.logEvent(ctx, "resize")
+	container.logEvent("resize")
 	return nil
 }
 
-func (container *Container) export(ctx context.Context) (archive.Archive, error) {
-	if err := container.Mount(ctx); err != nil {
+func (container *Container) export() (archive.Archive, error) {
+	if err := container.Mount(); err != nil {
 		return nil, err
 	}
 
 	archive, err := archive.Tar(container.basefs, archive.Uncompressed)
 	if err != nil {
-		container.Unmount(ctx)
+		container.Unmount()
 		return nil, err
 	}
 	arch := ioutils.NewReadCloserWrapper(archive, func() error {
 		err := archive.Close()
-		container.Unmount(ctx)
+		container.Unmount()
 		return err
 	})
-	container.logEvent(ctx, "export")
+	container.logEvent("export")
 	return arch, err
 }
 
 // Mount sets container.basefs
-func (container *Container) Mount(ctx context.Context) error {
-	return container.daemon.Mount(ctx, container)
+func (container *Container) Mount() error {
+	return container.daemon.Mount(container)
 }
 
 func (container *Container) changes() ([]archive.Change, error) {
@@ -580,7 +578,7 @@ func (container *Container) changes() ([]archive.Change, error) {
 	return container.daemon.changes(container)
 }
 
-func (container *Container) getImage(ctx context.Context) (*image.Image, error) {
+func (container *Container) getImage() (*image.Image, error) {
 	if container.daemon == nil {
 		return nil, derr.ErrorCodeImageUnregContainer
 	}
@@ -589,7 +587,7 @@ func (container *Container) getImage(ctx context.Context) (*image.Image, error)
 
 // Unmount asks the daemon to release the layered filesystems that are
 // mounted by the container.
-func (container *Container) Unmount(ctx context.Context) error {
+func (container *Container) Unmount() error {
 	return container.daemon.unmount(container)
 }
 
@@ -614,7 +612,7 @@ func validateID(id string) error {
 	return nil
 }
 
-func (container *Container) copy(ctx context.Context, resource string) (rc io.ReadCloser, err error) {
+func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
 	container.Lock()
 
 	defer func() {
@@ -626,7 +624,7 @@ func (container *Container) copy(ctx context.Context, resource string) (rc io.Re
 		}
 	}()
 
-	if err := container.Mount(ctx); err != nil {
+	if err := container.Mount(); err != nil {
 		return nil, err
 	}
 
@@ -635,7 +633,7 @@ func (container *Container) copy(ctx context.Context, resource string) (rc io.Re
 			// unmount any volumes
 			container.unmountVolumes(true)
 			// unmount the container's rootfs
-			container.Unmount(ctx)
+			container.Unmount()
 		}
 	}()
 
@@ -671,11 +669,11 @@ func (container *Container) copy(ctx context.Context, resource string) (rc io.Re
 	reader := ioutils.NewReadCloserWrapper(archive, func() error {
 		err := archive.Close()
 		container.unmountVolumes(true)
-		container.Unmount(ctx)
+		container.Unmount()
 		container.Unlock()
 		return err
 	})
-	container.logEvent(ctx, "copy")
+	container.logEvent("copy")
 	return reader, nil
 }
 
@@ -754,14 +752,14 @@ func (container *Container) startLogging() error {
 	return nil
 }
 
-func (container *Container) waitForStart(ctx context.Context) error {
+func (container *Container) waitForStart() error {
 	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
 
 	// block until we either receive an error from the initial start of the container's
 	// process or until the process is running in the container
 	select {
 	case <-container.monitor.startSignal:
-	case err := <-promise.Go(func() error { return container.monitor.Start(ctx) }):
+	case err := <-promise.Go(container.monitor.Start):
 		return err
 	}
 
@@ -792,11 +790,11 @@ func (container *Container) getExecIDs() []string {
 	return container.execCommands.List()
 }
 
-func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) error {
+func (container *Container) exec(ExecConfig *ExecConfig) error {
 	container.Lock()
 	defer container.Unlock()
 
-	callback := func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
+	callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
 		if processConfig.Tty {
 			// The callback is called after the process Start()
 			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
@@ -811,7 +809,7 @@ func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) er
 
 	// We use a callback here instead of a goroutine and an chan for
 	// synchronization purposes
-	cErr := promise.Go(func() error { return container.monitorExec(ctx, ExecConfig, callback) })
+	cErr := promise.Go(func() error { return container.monitorExec(ExecConfig, callback) })
 
 	// Exec should not return until the process is actually running
 	select {
@@ -823,13 +821,13 @@ func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) er
 	return nil
 }
 
-func (container *Container) monitorExec(ctx context.Context, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
+func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
 	var (
 		err      error
 		exitCode int
 	)
 	pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
-	exitCode, err = container.daemon.Exec(ctx, container, ExecConfig, pipes, callback)
+	exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
 	if err != nil {
 		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
 	}
@@ -862,7 +860,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr
 	return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
 }
 
-func (container *Container) attachWithLogs(ctx context.Context, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
+func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
 	if logs {
 		logDriver, err := container.getLogger()
 		if err != nil {
@@ -894,7 +892,7 @@ func (container *Container) attachWithLogs(ctx context.Context, stdin io.ReadClo
 		}
 	}
 
-	container.logEvent(ctx, "attach")
+	container.logEvent("attach")
 
 	//stream
 	if stream {

+ 24 - 25
daemon/container_unix.go

@@ -15,7 +15,6 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/links"
 	"github.com/docker/docker/daemon/network"
@@ -78,12 +77,12 @@ func killProcessDirectly(container *Container) error {
 	return nil
 }
 
-func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
+func (container *Container) setupLinkedContainers() ([]string, error) {
 	var (
 		env    []string
 		daemon = container.daemon
 	)
-	children, err := daemon.children(ctx, container.Name)
+	children, err := daemon.children(container.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -176,7 +175,7 @@ func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.
 	return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err)
 }
 
-func populateCommand(ctx context.Context, c *Container, env []string) error {
+func populateCommand(c *Container, env []string) error {
 	var en *execdriver.Network
 	if !c.Config.NetworkDisabled {
 		en = &execdriver.Network{}
@@ -186,7 +185,7 @@ func populateCommand(ctx context.Context, c *Container, env []string) error {
 
 		parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
 		if parts[0] == "container" {
-			nc, err := c.getNetworkedContainer(ctx)
+			nc, err := c.getNetworkedContainer()
 			if err != nil {
 				return err
 			}
@@ -207,7 +206,7 @@ func populateCommand(ctx context.Context, c *Container, env []string) error {
 	}
 
 	if c.hostConfig.IpcMode.IsContainer() {
-		ic, err := c.getIpcContainer(ctx)
+		ic, err := c.getIpcContainer()
 		if err != nil {
 			return err
 		}
@@ -350,18 +349,18 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi
 }
 
 // GetSize returns the real size & virtual size of the container.
-func (container *Container) getSize(ctx context.Context) (int64, int64) {
+func (container *Container) getSize() (int64, int64) {
 	var (
 		sizeRw, sizeRootfs int64
 		err                error
 		driver             = container.daemon.driver
 	)
 
-	if err := container.Mount(ctx); err != nil {
+	if err := container.Mount(); err != nil {
 		logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
 		return sizeRw, sizeRootfs
 	}
-	defer container.Unmount(ctx)
+	defer container.Unmount()
 
 	initID := fmt.Sprintf("%s-init", container.ID)
 	sizeRw, err = driver.DiffSize(container.ID, initID)
@@ -413,7 +412,7 @@ func (container *Container) buildHostnameFile() error {
 	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
 }
 
-func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwork.SandboxOption, error) {
+func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, error) {
 	var (
 		sboxOptions []libnetwork.SandboxOption
 		err         error
@@ -490,7 +489,7 @@ func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwo
 
 	var childEndpoints, parentEndpoints []string
 
-	children, err := container.daemon.children(ctx, container.Name)
+	children, err := container.daemon.children(container.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -521,7 +520,7 @@ func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwo
 			continue
 		}
 
-		c, err := container.daemon.Get(ctx, ref.ParentID)
+		c, err := container.daemon.Get(ref.ParentID)
 		if err != nil {
 			logrus.Error(err)
 		}
@@ -680,7 +679,7 @@ func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox)
 
 // UpdateNetwork is used to update the container's network (e.g. when linked containers
 // get removed/unlinked).
-func (container *Container) updateNetwork(ctx context.Context) error {
+func (container *Container) updateNetwork() error {
 	ctrl := container.daemon.netController
 	sid := container.NetworkSettings.SandboxID
 
@@ -689,7 +688,7 @@ func (container *Container) updateNetwork(ctx context.Context) error {
 		return derr.ErrorCodeNoSandbox.WithArgs(sid, err)
 	}
 
-	options, err := container.buildSandboxOptions(ctx)
+	options, err := container.buildSandboxOptions()
 	if err != nil {
 		return derr.ErrorCodeNetworkUpdate.WithArgs(err)
 	}
@@ -813,7 +812,7 @@ func createNetwork(controller libnetwork.NetworkController, dnet string, driver
 	return controller.NewNetwork(driver, dnet, createOptions...)
 }
 
-func (container *Container) allocateNetwork(ctx context.Context) error {
+func (container *Container) allocateNetwork() error {
 	mode := container.hostConfig.NetworkMode
 	controller := container.daemon.netController
 	if container.Config.NetworkDisabled || mode.IsContainer() {
@@ -847,14 +846,14 @@ func (container *Container) allocateNetwork(ctx context.Context) error {
 		service = strings.Replace(service, "/", "", -1)
 	}
 
-	if err := container.configureNetwork(ctx, networkName, service, networkDriver, mode.IsDefault()); err != nil {
+	if err := container.configureNetwork(networkName, service, networkDriver, mode.IsDefault()); err != nil {
 		return err
 	}
 
 	return container.writeHostConfig()
 }
 
-func (container *Container) configureNetwork(ctx context.Context, networkName, service, networkDriver string, canCreateNetwork bool) error {
+func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
 	controller := container.daemon.netController
 
 	n, err := controller.NetworkByName(networkName)
@@ -898,7 +897,7 @@ func (container *Container) configureNetwork(ctx context.Context, networkName, s
 		return false
 	})
 	if sb == nil {
-		options, err := container.buildSandboxOptions(ctx)
+		options, err := container.buildSandboxOptions()
 		if err != nil {
 			return err
 		}
@@ -921,12 +920,12 @@ func (container *Container) configureNetwork(ctx context.Context, networkName, s
 	return nil
 }
 
-func (container *Container) initializeNetworking(ctx context.Context) error {
+func (container *Container) initializeNetworking() error {
 	var err error
 
 	if container.hostConfig.NetworkMode.IsContainer() {
 		// we need to get the hosts files from the container to join
-		nc, err := container.getNetworkedContainer(ctx)
+		nc, err := container.getNetworkedContainer()
 		if err != nil {
 			return err
 		}
@@ -952,7 +951,7 @@ func (container *Container) initializeNetworking(ctx context.Context) error {
 
 	}
 
-	if err := container.allocateNetwork(ctx); err != nil {
+	if err := container.allocateNetwork(); err != nil {
 		return err
 	}
 
@@ -973,9 +972,9 @@ func (container *Container) setNetworkNamespaceKey(pid int) error {
 	return sandbox.SetKey(path)
 }
 
-func (container *Container) getIpcContainer(ctx context.Context) (*Container, error) {
+func (container *Container) getIpcContainer() (*Container, error) {
 	containerID := container.hostConfig.IpcMode.Container()
-	c, err := container.daemon.Get(ctx, containerID)
+	c, err := container.daemon.Get(containerID)
 	if err != nil {
 		return nil, err
 	}
@@ -1011,14 +1010,14 @@ func (container *Container) setupWorkingDirectory() error {
 	return nil
 }
 
-func (container *Container) getNetworkedContainer(ctx context.Context) (*Container, error) {
+func (container *Container) getNetworkedContainer() (*Container, error) {
 	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
 	switch parts[0] {
 	case "container":
 		if len(parts) != 2 {
 			return nil, derr.ErrorCodeParseContainer
 		}
-		nc, err := container.daemon.Get(ctx, parts[1])
+		nc, err := container.daemon.Get(parts[1])
 		if err != nil {
 			return nil, err
 		}

+ 5 - 6
daemon/container_windows.go

@@ -5,7 +5,6 @@ package daemon
 import (
 	"strings"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	derr "github.com/docker/docker/errors"
 )
@@ -26,7 +25,7 @@ func killProcessDirectly(container *Container) error {
 	return nil
 }
 
-func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
+func (container *Container) setupLinkedContainers() ([]string, error) {
 	return nil, nil
 }
 
@@ -35,7 +34,7 @@ func (container *Container) createDaemonEnvironment(linkedEnv []string) []string
 	return container.Config.Env
 }
 
-func (container *Container) initializeNetworking(ctx context.Context) error {
+func (container *Container) initializeNetworking() error {
 	return nil
 }
 
@@ -43,7 +42,7 @@ func (container *Container) setupWorkingDirectory() error {
 	return nil
 }
 
-func populateCommand(ctx context.Context, c *Container, env []string) error {
+func populateCommand(c *Container, env []string) error {
 	en := &execdriver.Network{
 		Interface: nil,
 	}
@@ -136,7 +135,7 @@ func populateCommand(ctx context.Context, c *Container, env []string) error {
 }
 
 // GetSize returns real size & virtual size
-func (container *Container) getSize(ctx context.Context) (int64, int64) {
+func (container *Container) getSize() (int64, int64) {
 	// TODO Windows
 	return 0, 0
 }
@@ -151,7 +150,7 @@ func (container *Container) allocateNetwork() error {
 	return nil
 }
 
-func (container *Container) updateNetwork(ctx context.Context) error {
+func (container *Container) updateNetwork() error {
 	return nil
 }
 

+ 16 - 17
daemon/create.go

@@ -5,7 +5,6 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/image"
@@ -16,21 +15,21 @@ import (
 )
 
 // ContainerCreate takes configs and creates a container.
-func (daemon *Daemon) ContainerCreate(ctx context.Context, name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (types.ContainerCreateResponse, error) {
+func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (types.ContainerCreateResponse, error) {
 	if config == nil {
 		return types.ContainerCreateResponse{}, derr.ErrorCodeEmptyConfig
 	}
 
-	warnings, err := daemon.verifyContainerSettings(ctx, hostConfig, config)
+	warnings, err := daemon.verifyContainerSettings(hostConfig, config)
 	if err != nil {
 		return types.ContainerCreateResponse{"", warnings}, err
 	}
 
 	daemon.adaptContainerSettings(hostConfig, adjustCPUShares)
 
-	container, buildWarnings, err := daemon.Create(ctx, config, hostConfig, name)
+	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
 	if err != nil {
-		if daemon.Graph(ctx).IsNotExist(err, config.Image) {
+		if daemon.Graph().IsNotExist(err, config.Image) {
 			if strings.Contains(config.Image, "@") {
 				return types.ContainerCreateResponse{"", warnings}, derr.ErrorCodeNoSuchImageHash.WithArgs(config.Image)
 			}
@@ -49,7 +48,7 @@ func (daemon *Daemon) ContainerCreate(ctx context.Context, name string, config *
 }
 
 // Create creates a new container from the given configuration with a given name.
-func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
+func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
 	var (
 		container *Container
 		warnings  []string
@@ -77,29 +76,29 @@ func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, host
 		hostConfig = &runconfig.HostConfig{}
 	}
 	if hostConfig.SecurityOpt == nil {
-		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(ctx, hostConfig.IpcMode, hostConfig.PidMode)
+		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
 		if err != nil {
 			return nil, nil, err
 		}
 	}
-	if container, err = daemon.newContainer(ctx, name, config, imgID); err != nil {
+	if container, err = daemon.newContainer(name, config, imgID); err != nil {
 		return nil, nil, err
 	}
 	defer func() {
 		if retErr != nil {
-			if err := daemon.rm(ctx, container, false); err != nil {
+			if err := daemon.rm(container, false); err != nil {
 				logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
 			}
 		}
 	}()
 
-	if err := daemon.Register(ctx, container); err != nil {
+	if err := daemon.Register(container); err != nil {
 		return nil, nil, err
 	}
 	if err := daemon.createRootfs(container); err != nil {
 		return nil, nil, err
 	}
-	if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
+	if err := daemon.setHostConfig(container, hostConfig); err != nil {
 		return nil, nil, err
 	}
 	defer func() {
@@ -109,10 +108,10 @@ func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, host
 			}
 		}
 	}()
-	if err := container.Mount(ctx); err != nil {
+	if err := container.Mount(); err != nil {
 		return nil, nil, err
 	}
-	defer container.Unmount(ctx)
+	defer container.Unmount()
 
 	if err := createContainerPlatformSpecificSettings(container, config, hostConfig, img); err != nil {
 		return nil, nil, err
@@ -122,16 +121,16 @@ func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, host
 		logrus.Errorf("Error saving new container to disk: %v", err)
 		return nil, nil, err
 	}
-	container.logEvent(ctx, "create")
+	container.logEvent("create")
 	return container, warnings, nil
 }
 
-func (daemon *Daemon) generateSecurityOpt(ctx context.Context, ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
+func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
 	if ipcMode.IsHost() || pidMode.IsHost() {
 		return label.DisableSecOpt(), nil
 	}
 	if ipcContainer := ipcMode.Container(); ipcContainer != "" {
-		c, err := daemon.Get(ctx, ipcContainer)
+		c, err := daemon.Get(ipcContainer)
 		if err != nil {
 			return nil, err
 		}
@@ -143,7 +142,7 @@ func (daemon *Daemon) generateSecurityOpt(ctx context.Context, ipcMode runconfig
 
 // VolumeCreate creates a volume with the specified name, driver, and opts
 // This is called directly from the remote API
-func (daemon *Daemon) VolumeCreate(ctx context.Context, name, driverName string, opts map[string]string) (*types.Volume, error) {
+func (daemon *Daemon) VolumeCreate(name, driverName string, opts map[string]string) (*types.Volume, error) {
 	if name == "" {
 		name = stringid.GenerateNonCryptoID()
 	}

+ 44 - 42
daemon/daemon.go

@@ -20,7 +20,6 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
@@ -129,14 +128,14 @@ type Daemon struct {
 //  - A partial container ID prefix (e.g. short ID) of any length that is
 //    unique enough to only return a single container object
 //  If none of these searches succeed, an error is returned
-func (daemon *Daemon) Get(ctx context.Context, prefixOrName string) (*Container, error) {
+func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
 	if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
 		// prefix is an exact match to a full container ID
 		return containerByID, nil
 	}
 
 	// GetByName will match only an exact name provided; we ignore errors
-	if containerByName, _ := daemon.GetByName(ctx, prefixOrName); containerByName != nil {
+	if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
 		// prefix is an exact match to a full container Name
 		return containerByName, nil
 	}
@@ -154,8 +153,8 @@ func (daemon *Daemon) Get(ctx context.Context, prefixOrName string) (*Container,
 
 // Exists returns a true if a container of the specified ID or name exists,
 // false otherwise.
-func (daemon *Daemon) Exists(ctx context.Context, id string) bool {
-	c, _ := daemon.Get(ctx, id)
+func (daemon *Daemon) Exists(id string) bool {
+	c, _ := daemon.Get(id)
 	return c != nil
 }
 
@@ -180,8 +179,8 @@ func (daemon *Daemon) load(id string) (*Container, error) {
 }
 
 // Register makes a container object usable by the daemon as <container.ID>
-func (daemon *Daemon) Register(ctx context.Context, container *Container) error {
-	if container.daemon != nil || daemon.Exists(ctx, container.ID) {
+func (daemon *Daemon) Register(container *Container) error {
+	if container.daemon != nil || daemon.Exists(container.ID) {
 		return fmt.Errorf("Container is already loaded")
 	}
 	if err := validateID(container.ID); err != nil {
@@ -219,7 +218,10 @@ func (daemon *Daemon) Register(ctx context.Context, container *Container) error
 		}
 		daemon.execDriver.Terminate(cmd)
 
-		if err := container.Unmount(ctx); err != nil {
+		if err := container.unmountIpcMounts(); err != nil {
+			logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
+		}
+		if err := container.Unmount(); err != nil {
 			logrus.Debugf("unmount error %s", err)
 		}
 		if err := container.toDiskLocking(); err != nil {
@@ -253,7 +255,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
 	return nil
 }
 
-func (daemon *Daemon) restore(ctx context.Context) error {
+func (daemon *Daemon) restore() error {
 	type cr struct {
 		container  *Container
 		registered bool
@@ -323,7 +325,7 @@ func (daemon *Daemon) restore(ctx context.Context) error {
 				}
 			}
 
-			if err := daemon.Register(ctx, container); err != nil {
+			if err := daemon.Register(container); err != nil {
 				logrus.Errorf("Failed to register container %s: %s", container.ID, err)
 				// The container register failed should not be started.
 				return
@@ -334,7 +336,7 @@ func (daemon *Daemon) restore(ctx context.Context) error {
 			if daemon.configStore.AutoRestart && container.shouldRestart() {
 				logrus.Debugf("Starting container %s", container.ID)
 
-				if err := container.Start(ctx); err != nil {
+				if err := container.Start(); err != nil {
 					logrus.Errorf("Failed to start container %s: %s", container.ID, err)
 				}
 			}
@@ -364,7 +366,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
 	return nil
 }
 
-func (daemon *Daemon) generateIDAndName(ctx context.Context, name string) (string, string, error) {
+func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
 	var (
 		err error
 		id  = stringid.GenerateNonCryptoID()
@@ -377,14 +379,14 @@ func (daemon *Daemon) generateIDAndName(ctx context.Context, name string) (strin
 		return id, name, nil
 	}
 
-	if name, err = daemon.reserveName(ctx, id, name); err != nil {
+	if name, err = daemon.reserveName(id, name); err != nil {
 		return "", "", err
 	}
 
 	return id, name, nil
 }
 
-func (daemon *Daemon) reserveName(ctx context.Context, id, name string) (string, error) {
+func (daemon *Daemon) reserveName(id, name string) (string, error) {
 	if !validContainerNamePattern.MatchString(name) {
 		return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
 	}
@@ -398,7 +400,7 @@ func (daemon *Daemon) reserveName(ctx context.Context, id, name string) (string,
 			return "", err
 		}
 
-		conflictingContainer, err := daemon.GetByName(ctx, name)
+		conflictingContainer, err := daemon.GetByName(name)
 		if err != nil {
 			if strings.Contains(err.Error(), "Could not find entity") {
 				return "", err
@@ -468,12 +470,12 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlic
 	return entrypoint, args
 }
 
-func (daemon *Daemon) newContainer(ctx context.Context, name string, config *runconfig.Config, imgID string) (*Container, error) {
+func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) {
 	var (
 		id  string
 		err error
 	)
-	id, name, err = daemon.generateIDAndName(ctx, name)
+	id, name, err = daemon.generateIDAndName(name)
 	if err != nil {
 		return nil, err
 	}
@@ -510,7 +512,7 @@ func GetFullContainerName(name string) (string, error) {
 }
 
 // GetByName returns a container given a name.
-func (daemon *Daemon) GetByName(ctx context.Context, name string) (*Container, error) {
+func (daemon *Daemon) GetByName(name string) (*Container, error) {
 	fullName, err := GetFullContainerName(name)
 	if err != nil {
 		return nil, err
@@ -529,7 +531,7 @@ func (daemon *Daemon) GetByName(ctx context.Context, name string) (*Container, e
 // children returns all child containers of the container with the
 // given name. The containers are returned as a map from the container
 // name to a pointer to Container.
-func (daemon *Daemon) children(ctx context.Context, name string) (map[string]*Container, error) {
+func (daemon *Daemon) children(name string) (map[string]*Container, error) {
 	name, err := GetFullContainerName(name)
 	if err != nil {
 		return nil, err
@@ -537,7 +539,7 @@ func (daemon *Daemon) children(ctx context.Context, name string) (map[string]*Co
 	children := make(map[string]*Container)
 
 	err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
-		c, err := daemon.Get(ctx, e.ID())
+		c, err := daemon.Get(e.ID())
 		if err != nil {
 			return err
 		}
@@ -573,7 +575,7 @@ func (daemon *Daemon) registerLink(parent, child *Container, alias string) error
 
 // NewDaemon sets up everything for the daemon to be able to service
 // requests from the webserver.
-func NewDaemon(ctx context.Context, config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
+func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
 	setDefaultMtu(config)
 
 	// Ensure we have compatible configuration options
@@ -641,7 +643,7 @@ func NewDaemon(ctx context.Context, config *Config, registryService *registry.Se
 	// Ensure the graph driver is shutdown at a later point
 	defer func() {
 		if err != nil {
-			if err := d.Shutdown(ctx); err != nil {
+			if err := d.Shutdown(); err != nil {
 				logrus.Error(err)
 			}
 		}
@@ -785,7 +787,7 @@ func NewDaemon(ctx context.Context, config *Config, registryService *registry.Se
 
 	go d.execCommandGC()
 
-	if err := d.restore(ctx); err != nil {
+	if err := d.restore(); err != nil {
 		return nil, err
 	}
 
@@ -793,12 +795,12 @@ func NewDaemon(ctx context.Context, config *Config, registryService *registry.Se
 }
 
 // Shutdown stops the daemon.
-func (daemon *Daemon) Shutdown(ctx context.Context) error {
+func (daemon *Daemon) Shutdown() error {
 	daemon.shutdown = true
 	if daemon.containers != nil {
 		group := sync.WaitGroup{}
 		logrus.Debug("starting clean shutdown of all containers...")
-		for _, container := range daemon.List(ctx) {
+		for _, container := range daemon.List() {
 			c := container
 			if c.IsRunning() {
 				logrus.Debugf("stopping %s", c.ID)
@@ -821,7 +823,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 							logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err)
 							return
 						}
-						if err := c.unpause(ctx); err != nil {
+						if err := c.unpause(); err != nil {
 							logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err)
 							return
 						}
@@ -836,7 +838,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 						}
 					} else {
 						// If container failed to exit in 10 seconds of SIGTERM, then using the force
-						if err := c.Stop(ctx, 10); err != nil {
+						if err := c.Stop(10); err != nil {
 							logrus.Errorf("Stop container %s with error: %v", c.ID, err)
 						}
 					}
@@ -874,7 +876,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 
 // Mount sets container.basefs
 // (is it not set coming in? why is it unset?)
-func (daemon *Daemon) Mount(ctx context.Context, container *Container) error {
+func (daemon *Daemon) Mount(container *Container) error {
 	dir, err := daemon.driver.Get(container.ID, container.getMountLabel())
 	if err != nil {
 		return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
@@ -899,14 +901,14 @@ func (daemon *Daemon) unmount(container *Container) error {
 	return nil
 }
 
-func (daemon *Daemon) run(ctx context.Context, c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
+func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
 	hooks := execdriver.Hooks{
 		Start: startCallback,
 	}
-	hooks.PreStart = append(hooks.PreStart, func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
+	hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
 		return c.setNetworkNamespaceKey(pid)
 	})
-	return daemon.execDriver.Run(ctx, c.command, pipes, hooks)
+	return daemon.execDriver.Run(c.command, pipes, hooks)
 }
 
 func (daemon *Daemon) kill(c *Container, sig int) error {
@@ -973,12 +975,12 @@ func (daemon *Daemon) createRootfs(container *Container) error {
 // which need direct access to daemon.graph.
 // Once the tests switch to using engine and jobs, this method
 // can go away.
-func (daemon *Daemon) Graph(ctx context.Context) *graph.Graph {
+func (daemon *Daemon) Graph() *graph.Graph {
 	return daemon.graph
 }
 
 // Repositories returns all repositories.
-func (daemon *Daemon) Repositories(ctx context.Context) *graph.TagStore {
+func (daemon *Daemon) Repositories() *graph.TagStore {
 	return daemon.repositories
 }
 
@@ -992,13 +994,13 @@ func (daemon *Daemon) systemInitPath() string {
 
 // GraphDriver returns the currently used driver for processing
 // container layers.
-func (daemon *Daemon) GraphDriver(ctx context.Context) graphdriver.Driver {
+func (daemon *Daemon) GraphDriver() graphdriver.Driver {
 	return daemon.driver
 }
 
 // ExecutionDriver returns the currently used driver for creating and
 // starting execs in a container.
-func (daemon *Daemon) ExecutionDriver(ctx context.Context) execdriver.Driver {
+func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
 	return daemon.execDriver
 }
 
@@ -1010,9 +1012,9 @@ func (daemon *Daemon) containerGraph() *graphdb.Database {
 // of the image with imgID, that had the same config when it was
 // created. nil is returned if a child cannot be found. An error is
 // returned if the parent image cannot be found.
-func (daemon *Daemon) ImageGetCached(ctx context.Context, imgID string, config *runconfig.Config) (*image.Image, error) {
+func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
 	// Retrieve all images
-	images := daemon.Graph(ctx).Map()
+	images := daemon.Graph().Map()
 
 	// Store the tree in a map of map (map[parentId][childId])
 	imageMap := make(map[string]map[string]struct{})
@@ -1048,7 +1050,7 @@ func tempDir(rootDir string) (string, error) {
 	return tmpDir, system.MkdirAll(tmpDir, 0700)
 }
 
-func (daemon *Daemon) setHostConfig(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
+func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
 	container.Lock()
 	if err := parseSecurityOpt(container, hostConfig); err != nil {
 		container.Unlock()
@@ -1058,14 +1060,14 @@ func (daemon *Daemon) setHostConfig(ctx context.Context, container *Container, h
 
 	// Do not lock while creating volumes since this could be calling out to external plugins
 	// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
-	if err := daemon.registerMountPoints(ctx, container, hostConfig); err != nil {
+	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
 		return err
 	}
 
 	container.Lock()
 	defer container.Unlock()
 	// Register any links from the host config before starting the container
-	if err := daemon.registerLinks(ctx, container, hostConfig); err != nil {
+	if err := daemon.registerLinks(container, hostConfig); err != nil {
 		return err
 	}
 
@@ -1089,7 +1091,7 @@ var errNoDefaultRoute = errors.New("no default route was found")
 
 // verifyContainerSettings performs validation of the hostconfig and config
 // structures.
-func (daemon *Daemon) verifyContainerSettings(ctx context.Context, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
+func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
 
 	// First perform verification of settings common across all platforms.
 	if config != nil {
@@ -1126,7 +1128,7 @@ func (daemon *Daemon) verifyContainerSettings(ctx context.Context, hostConfig *r
 	}
 
 	// Now do platform-specific verification
-	return verifyPlatformContainerSettings(ctx, daemon, hostConfig, config)
+	return verifyPlatformContainerSettings(daemon, hostConfig, config)
 }
 
 func configureVolumes(config *Config) (*store.VolumeStore, error) {

+ 9 - 14
daemon/daemon_test.go

@@ -8,7 +8,6 @@ import (
 	"path/filepath"
 	"testing"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/truncindex"
@@ -93,34 +92,32 @@ func TestGet(t *testing.T) {
 		containerGraphDB: graph,
 	}
 
-	ctx := context.Background()
-
-	if container, _ := daemon.Get(ctx, "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
+	if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
 		t.Fatal("Should explicitly match full container IDs")
 	}
 
-	if container, _ := daemon.Get(ctx, "75fb0b8009"); container != c4 {
+	if container, _ := daemon.Get("75fb0b8009"); container != c4 {
 		t.Fatal("Should match a partial ID")
 	}
 
-	if container, _ := daemon.Get(ctx, "drunk_hawking"); container != c2 {
+	if container, _ := daemon.Get("drunk_hawking"); container != c2 {
 		t.Fatal("Should match a full name")
 	}
 
 	// c3.Name is a partial match for both c3.ID and c2.ID
-	if c, _ := daemon.Get(ctx, "3cdbd1aa"); c != c3 {
+	if c, _ := daemon.Get("3cdbd1aa"); c != c3 {
 		t.Fatal("Should match a full name even though it collides with another container's ID")
 	}
 
-	if container, _ := daemon.Get(ctx, "d22d69a2b896"); container != c5 {
+	if container, _ := daemon.Get("d22d69a2b896"); container != c5 {
 		t.Fatal("Should match a container where the provided prefix is an exact match to the it's name, and is also a prefix for it's ID")
 	}
 
-	if _, err := daemon.Get(ctx, "3cdbd1"); err == nil {
+	if _, err := daemon.Get("3cdbd1"); err == nil {
 		t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's")
 	}
 
-	if _, err := daemon.Get(ctx, "nothing"); err == nil {
+	if _, err := daemon.Get("nothing"); err == nil {
 		t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID")
 	}
 
@@ -489,15 +486,13 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
 		t.Fatalf("Expected 1 volume mounted, was 0\n")
 	}
 
-	ctx := context.Background()
-
 	m := c.MountPoints["/vol1"]
-	_, err = daemon.VolumeCreate(ctx, m.Name, m.Driver, nil)
+	_, err = daemon.VolumeCreate(m.Name, m.Driver, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if err := daemon.VolumeRm(ctx, m.Name); err != nil {
+	if err := daemon.VolumeRm(m.Name); err != nil {
 		t.Fatal(err)
 	}
 

+ 7 - 8
daemon/daemon_unix.go

@@ -13,7 +13,6 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/parsers"
@@ -119,12 +118,12 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a
 
 // verifyPlatformContainerSettings performs platform-specific validation of the
 // hostconfig and config structures.
-func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
 	warnings := []string{}
 	sysInfo := sysinfo.New(true)
 
-	if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver(ctx).Name(), "lxc") {
-		return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver(ctx).Name())
+	if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
+		return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
 	}
 
 	// memory subsystem checks and adjustments
@@ -492,12 +491,12 @@ func setupInitLayer(initLayer string) error {
 
 // NetworkAPIRouter implements a feature for server-experimental,
 // directly calling into libnetwork.
-func (daemon *Daemon) NetworkAPIRouter(ctx context.Context) func(w http.ResponseWriter, req *http.Request) {
+func (daemon *Daemon) NetworkAPIRouter() func(w http.ResponseWriter, req *http.Request) {
 	return nwapi.NewHTTPHandler(daemon.netController)
 }
 
 // registerLinks writes the links to a file.
-func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	if hostConfig == nil || hostConfig.Links == nil {
 		return nil
 	}
@@ -507,14 +506,14 @@ func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, h
 		if err != nil {
 			return err
 		}
-		child, err := daemon.Get(ctx, name)
+		child, err := daemon.Get(name)
 		if err != nil {
 			//An error from daemon.Get() means this name could not be found
 			return fmt.Errorf("Could not get container for %s", name)
 		}
 		for child.hostConfig.NetworkMode.IsContainer() {
 			parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
-			child, err = daemon.Get(ctx, parts[1])
+			child, err = daemon.Get(parts[1])
 			if err != nil {
 				return fmt.Errorf("Could not get container for %s", parts[1])
 			}

+ 3 - 4
daemon/daemon_windows.go

@@ -6,7 +6,6 @@ import (
 	"syscall"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/graphdriver"
 	// register the windows graph driver
 	_ "github.com/docker/docker/daemon/graphdriver/windows"
@@ -48,7 +47,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a
 
 // verifyPlatformContainerSettings performs platform-specific validation of the
 // hostconfig and config structures.
-func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
 	return nil, nil
 }
 
@@ -105,7 +104,7 @@ func initNetworkController(config *Config) (libnetwork.NetworkController, error)
 
 // registerLinks sets up links between containers and writes the
 // configuration out for persistence.
-func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	// TODO Windows. Factored out for network modes. There may be more
 	// refactoring required here.
 
@@ -118,7 +117,7 @@ func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, h
 		if err != nil {
 			return err
 		}
-		child, err := daemon.Get(ctx, name)
+		child, err := daemon.Get(name)
 		if err != nil {
 			//An error from daemon.Get() means this name could not be found
 			return fmt.Errorf("Could not get container for %s", name)

+ 11 - 13
daemon/delete.go

@@ -5,8 +5,6 @@ import (
 	"os"
 	"path"
 
-	"github.com/docker/docker/context"
-
 	"github.com/Sirupsen/logrus"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/volume/store"
@@ -21,8 +19,8 @@ type ContainerRmConfig struct {
 // is returned if the container is not found, or if the remove
 // fails. If the remove succeeds, the container name is released, and
 // network links are removed.
-func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *ContainerRmConfig) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
@@ -45,9 +43,9 @@ func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *Cont
 			return err
 		}
 
-		parentContainer, _ := daemon.Get(ctx, pe.ID())
+		parentContainer, _ := daemon.Get(pe.ID())
 		if parentContainer != nil {
-			if err := parentContainer.updateNetwork(ctx); err != nil {
+			if err := parentContainer.updateNetwork(); err != nil {
 				logrus.Debugf("Could not update network to remove link %s: %v", n, err)
 			}
 		}
@@ -55,7 +53,7 @@ func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *Cont
 		return nil
 	}
 
-	if err := daemon.rm(ctx, container, config.ForceRemove); err != nil {
+	if err := daemon.rm(container, config.ForceRemove); err != nil {
 		// return derr.ErrorCodeCantDestroy.WithArgs(name, utils.GetErrorMessage(err))
 		return err
 	}
@@ -68,12 +66,12 @@ func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *Cont
 }
 
 // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove bool) (err error) {
+func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
 	if container.IsRunning() {
 		if !forceRemove {
 			return derr.ErrorCodeRmRunning
 		}
-		if err := container.Kill(ctx); err != nil {
+		if err := container.Kill(); err != nil {
 			return derr.ErrorCodeRmFailed.WithArgs(err)
 		}
 	}
@@ -94,7 +92,7 @@ func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove
 
 	defer container.resetRemovalInProgress()
 
-	if err = container.Stop(ctx, 3); err != nil {
+	if err = container.Stop(3); err != nil {
 		return err
 	}
 
@@ -115,7 +113,7 @@ func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove
 			daemon.idIndex.Delete(container.ID)
 			daemon.containers.Delete(container.ID)
 			os.RemoveAll(container.root)
-			container.logEvent(ctx, "destroy")
+			container.logEvent("destroy")
 		}
 	}()
 
@@ -144,14 +142,14 @@ func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove
 	daemon.idIndex.Delete(container.ID)
 	daemon.containers.Delete(container.ID)
 
-	container.logEvent(ctx, "destroy")
+	container.logEvent("destroy")
 	return nil
 }
 
 // VolumeRm removes the volume with the given name.
 // If the volume is referenced by a container it is not removed
 // This is called directly from the remote API
-func (daemon *Daemon) VolumeRm(ctx context.Context, name string) error {
+func (daemon *Daemon) VolumeRm(name string) error {
 	v, err := daemon.volumes.Get(name)
 	if err != nil {
 		return err

+ 2 - 4
daemon/events/events.go

@@ -4,8 +4,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/docker/docker/context"
-
 	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/pubsub"
 )
@@ -46,9 +44,9 @@ func (e *Events) Evict(l chan interface{}) {
 
 // Log broadcasts event to listeners. Each listener has 100 millisecond for
 // receiving event or it will be skipped.
-func (e *Events) Log(ctx context.Context, action, id, from string) {
+func (e *Events) Log(action, id, from string) {
 	now := time.Now().UTC()
-	jm := &jsonmessage.JSONMessage{RequestID: ctx.RequestID(), Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
+	jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
 	e.mu.Lock()
 	if len(e.events) == cap(e.events) {
 		// discard oldest event

+ 4 - 8
daemon/events/events_test.go

@@ -5,12 +5,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/jsonmessage"
 )
 
 func TestEventsLog(t *testing.T) {
-	ctx := context.Background()
 	e := New()
 	_, l1 := e.Subscribe()
 	_, l2 := e.Subscribe()
@@ -20,7 +18,7 @@ func TestEventsLog(t *testing.T) {
 	if count != 2 {
 		t.Fatalf("Must be 2 subscribers, got %d", count)
 	}
-	e.Log(ctx, "test", "cont", "image")
+	e.Log("test", "cont", "image")
 	select {
 	case msg := <-l1:
 		jmsg, ok := msg.(*jsonmessage.JSONMessage)
@@ -66,14 +64,13 @@ func TestEventsLog(t *testing.T) {
 }
 
 func TestEventsLogTimeout(t *testing.T) {
-	ctx := context.Background()
 	e := New()
 	_, l := e.Subscribe()
 	defer e.Evict(l)
 
 	c := make(chan struct{})
 	go func() {
-		e.Log(ctx, "test", "cont", "image")
+		e.Log("test", "cont", "image")
 		close(c)
 	}()
 
@@ -85,14 +82,13 @@ func TestEventsLogTimeout(t *testing.T) {
 }
 
 func TestLogEvents(t *testing.T) {
-	ctx := context.Background()
 	e := New()
 
 	for i := 0; i < eventsLimit+16; i++ {
 		action := fmt.Sprintf("action_%d", i)
 		id := fmt.Sprintf("cont_%d", i)
 		from := fmt.Sprintf("image_%d", i)
-		e.Log(ctx, action, id, from)
+		e.Log(action, id, from)
 	}
 	time.Sleep(50 * time.Millisecond)
 	current, l := e.Subscribe()
@@ -101,7 +97,7 @@ func TestLogEvents(t *testing.T) {
 		action := fmt.Sprintf("action_%d", num)
 		id := fmt.Sprintf("cont_%d", num)
 		from := fmt.Sprintf("image_%d", num)
-		e.Log(ctx, action, id, from)
+		e.Log(action, id, from)
 	}
 	if len(e.events) != eventsLimit {
 		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))

+ 10 - 11
daemon/exec.go

@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/pkg/broadcastwriter"
@@ -118,8 +117,8 @@ func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
 	d.execCommands.Delete(ExecConfig.ID)
 }
 
-func (d *Daemon) getActiveContainer(ctx context.Context, name string) (*Container, error) {
-	container, err := d.Get(ctx, name)
+func (d *Daemon) getActiveContainer(name string) (*Container, error) {
+	container, err := d.Get(name)
 	if err != nil {
 		return nil, err
 	}
@@ -134,13 +133,13 @@ func (d *Daemon) getActiveContainer(ctx context.Context, name string) (*Containe
 }
 
 // ContainerExecCreate sets up an exec in a running container.
-func (d *Daemon) ContainerExecCreate(ctx context.Context, config *runconfig.ExecConfig) (string, error) {
+func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
 	// Not all drivers support Exec (LXC for example)
 	if err := checkExecSupport(d.execDriver.Name()); err != nil {
 		return "", err
 	}
 
-	container, err := d.getActiveContainer(ctx, config.Container)
+	container, err := d.getActiveContainer(config.Container)
 	if err != nil {
 		return "", err
 	}
@@ -175,14 +174,14 @@ func (d *Daemon) ContainerExecCreate(ctx context.Context, config *runconfig.Exec
 
 	d.registerExecCommand(ExecConfig)
 
-	container.logEvent(ctx, "exec_create: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
+	container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
 
 	return ExecConfig.ID, nil
 }
 
 // ContainerExecStart starts a previously set up exec instance. The
 // std streams are set up.
-func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
+func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
 	var (
 		cStdin           io.ReadCloser
 		cStdout, cStderr io.Writer
@@ -208,7 +207,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin
 	logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
 	container := ExecConfig.Container
 
-	container.logEvent(ctx, "exec_start: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
+	container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
 
 	if ExecConfig.OpenStdin {
 		r, w := io.Pipe()
@@ -244,7 +243,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin
 	// the exitStatus) even after the cmd is done running.
 
 	go func() {
-		if err := container.exec(ctx, ExecConfig); err != nil {
+		if err := container.exec(ExecConfig); err != nil {
 			execErr <- derr.ErrorCodeExecCantRun.WithArgs(execName, container.ID, err)
 		}
 	}()
@@ -268,11 +267,11 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin
 }
 
 // Exec calls the underlying exec driver to run
-func (d *Daemon) Exec(ctx context.Context, c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
+func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
 	hooks := execdriver.Hooks{
 		Start: startCallback,
 	}
-	exitStatus, err := d.execDriver.Exec(ctx, c.command, ExecConfig.ProcessConfig, pipes, hooks)
+	exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, hooks)
 
 	// On err, make sure we don't leave ExitCode at zero
 	if err != nil && exitStatus == 0 {

+ 3 - 4
daemon/execdriver/driver.go

@@ -7,7 +7,6 @@ import (
 	"time"
 
 	// TODO Windows: Factor out ulimit
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/ulimit"
 	"github.com/opencontainers/runc/libcontainer"
 	"github.com/opencontainers/runc/libcontainer/configs"
@@ -30,7 +29,7 @@ var (
 // through PreStart, Start and PostStop events.
 // Callbacks are provided a processConfig pointer and the pid of the child.
 // The channel will be used to notify the OOM events.
-type DriverCallback func(ctx context.Context, processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
+type DriverCallback func(processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
 
 // Hooks is a struct containing function pointers to callbacks
 // used by any execdriver implementation exploiting hooks capabilities
@@ -70,11 +69,11 @@ type ExitStatus struct {
 type Driver interface {
 	// Run executes the process, blocks until the process exits and returns
 	// the exit code. It's the last stage on Docker side for running a container.
-	Run(ctx context.Context, c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error)
+	Run(c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error)
 
 	// Exec executes the process in an existing container, blocks until the
 	// process exits and returns the exit code.
-	Exec(ctx context.Context, c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error)
+	Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error)
 
 	// Kill sends signals to process in container.
 	Kill(c *Command, sig int) error

+ 3 - 4
daemon/execdriver/lxc/driver.go

@@ -20,7 +20,6 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/stringutils"
 	sysinfo "github.com/docker/docker/pkg/system"
@@ -126,7 +125,7 @@ func killNetNsProc(proc *os.Process) {
 
 // Run implements the exec driver Driver interface,
 // it calls 'exec.Cmd' to launch lxc commands to run a container.
-func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
+func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
 	var (
 		term     execdriver.Terminal
 		err      error
@@ -330,7 +329,7 @@ func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriv
 
 	if hooks.Start != nil {
 		logrus.Debugf("Invoking startCallback")
-		hooks.Start(ctx, &c.ProcessConfig, pid, oomKillNotification)
+		hooks.Start(&c.ProcessConfig, pid, oomKillNotification)
 
 	}
 
@@ -872,7 +871,7 @@ func (t *TtyConsole) Close() error {
 
 // Exec implements the exec driver Driver interface,
 // it is not implemented by lxc.
-func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
+func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
 	return -1, ErrExec
 }
 

+ 4 - 5
daemon/execdriver/native/create.go

@@ -9,7 +9,6 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/opencontainers/runc/libcontainer/apparmor"
 	"github.com/opencontainers/runc/libcontainer/configs"
@@ -19,7 +18,7 @@ import (
 
 // createContainer populates and configures the container type with the
 // data provided by the execdriver.Command
-func (d *Driver) createContainer(ctx context.Context, c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
+func (d *Driver) createContainer(c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
 	container := execdriver.InitContainer(c)
 
 	if err := d.createIpc(container, c); err != nil {
@@ -34,7 +33,7 @@ func (d *Driver) createContainer(ctx context.Context, c *execdriver.Command, hoo
 		return nil, err
 	}
 
-	if err := d.createNetwork(ctx, container, c, hooks); err != nil {
+	if err := d.createNetwork(container, c, hooks); err != nil {
 		return nil, err
 	}
 
@@ -120,7 +119,7 @@ func generateIfaceName() (string, error) {
 	return "", errors.New("Failed to find name for new interface")
 }
 
-func (d *Driver) createNetwork(ctx context.Context, container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
+func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
 	if c.Network == nil {
 		return nil
 	}
@@ -157,7 +156,7 @@ func (d *Driver) createNetwork(ctx context.Context, container *configs.Config, c
 						// non-blocking and return the correct result when read.
 						chOOM := make(chan struct{})
 						close(chOOM)
-						if err := fnHook(ctx, &c.ProcessConfig, s.Pid, chOOM); err != nil {
+						if err := fnHook(&c.ProcessConfig, s.Pid, chOOM); err != nil {
 							return err
 						}
 					}

+ 3 - 4
daemon/execdriver/native/driver.go

@@ -14,7 +14,6 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/pools"
@@ -132,9 +131,9 @@ type execOutput struct {
 
 // Run implements the exec driver Driver interface,
 // it calls libcontainer APIs to run a container.
-func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
+func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
 	// take the Command and populate the libcontainer.Config from it
-	container, err := d.createContainer(ctx, c, hooks)
+	container, err := d.createContainer(c, hooks)
 	if err != nil {
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
@@ -175,7 +174,7 @@ func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriv
 			p.Wait()
 			return execdriver.ExitStatus{ExitCode: -1}, err
 		}
-		hooks.Start(ctx, &c.ProcessConfig, pid, oom)
+		hooks.Start(&c.ProcessConfig, pid, oom)
 	}
 
 	waitF := p.Wait

+ 2 - 3
daemon/execdriver/native/exec.go

@@ -9,7 +9,6 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/opencontainers/runc/libcontainer"
 	// Blank import 'nsenter' so that init in that package will call c
@@ -21,7 +20,7 @@ import (
 
 // Exec implements the exec driver Driver interface,
 // it calls libcontainer APIs to execute a container.
-func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
+func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
 	active := d.activeContainers[c.ID]
 	if active == nil {
 		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
@@ -66,7 +65,7 @@ func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig
 		// non-blocking and return the correct result when read.
 		chOOM := make(chan struct{})
 		close(chOOM)
-		hooks.Start(ctx, &c.ProcessConfig, pid, chOOM)
+		hooks.Start(&c.ProcessConfig, pid, chOOM)
 	}
 
 	ps, err := p.Wait()

+ 2 - 3
daemon/execdriver/windows/exec.go

@@ -7,13 +7,12 @@ import (
 	"fmt"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/microsoft/hcsshim"
 )
 
 // Exec implements the exec driver Driver interface.
-func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
+func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
 
 	var (
 		term     execdriver.Terminal
@@ -75,7 +74,7 @@ func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig
 		// non-blocking and return the correct result when read.
 		chOOM := make(chan struct{})
 		close(chOOM)
-		hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM)
+		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
 	}
 
 	if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil {

+ 2 - 3
daemon/execdriver/windows/run.go

@@ -15,7 +15,6 @@ import (
 	"syscall"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/microsoft/hcsshim"
 )
@@ -80,7 +79,7 @@ type containerInit struct {
 const defaultOwner = "docker"
 
 // Run implements the exec driver Driver interface
-func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
+func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
 
 	var (
 		term execdriver.Terminal
@@ -299,7 +298,7 @@ func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriv
 		// non-blocking and return the correct result when read.
 		chOOM := make(chan struct{})
 		close(chOOM)
-		hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM)
+		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
 	}
 
 	var exitCode int32

+ 3 - 4
daemon/export.go

@@ -3,19 +3,18 @@ package daemon
 import (
 	"io"
 
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
 // ContainerExport writes the contents of the container to the given
 // writer. An error is returned if the container cannot be found.
-func (daemon *Daemon) ContainerExport(ctx context.Context, name string, out io.Writer) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
-	data, err := container.export(ctx)
+	data, err := container.export()
 	if err != nil {
 		return derr.ErrorCodeExportFailed.WithArgs(name, err)
 	}

+ 40 - 41
daemon/image_delete.go

@@ -5,7 +5,6 @@ import (
 	"strings"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/image"
@@ -51,10 +50,10 @@ import (
 // FIXME: remove ImageDelete's dependency on Daemon, then move to the graph
 // package. This would require that we no longer need the daemon to determine
 // whether images are being used by a stopped or running container.
-func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDelete, error) {
+func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) {
 	records := []types.ImageDelete{}
 
-	img, err := daemon.Repositories(ctx).LookupImage(imageRef)
+	img, err := daemon.Repositories().LookupImage(imageRef)
 	if err != nil {
 		return nil, err
 	}
@@ -65,8 +64,8 @@ func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, p
 		// first. We can only remove this reference if either force is
 		// true, there are multiple repository references to this
 		// image, or there are no containers using the given reference.
-		if !(force || daemon.imageHasMultipleRepositoryReferences(ctx, img.ID)) {
-			if container := daemon.getContainerUsingImage(ctx, img.ID); container != nil {
+		if !(force || daemon.imageHasMultipleRepositoryReferences(img.ID)) {
+			if container := daemon.getContainerUsingImage(img.ID); container != nil {
 				// If we removed the repository reference then
 				// this image would remain "dangling" and since
 				// we really want to avoid that the client must
@@ -75,14 +74,14 @@ func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, p
 			}
 		}
 
-		parsedRef, err := daemon.removeImageRef(ctx, imageRef)
+		parsedRef, err := daemon.removeImageRef(imageRef)
 		if err != nil {
 			return nil, err
 		}
 
 		untaggedRecord := types.ImageDelete{Untagged: parsedRef}
 
-		daemon.EventsService.Log(ctx, "untag", img.ID, "")
+		daemon.EventsService.Log("untag", img.ID, "")
 		records = append(records, untaggedRecord)
 
 		removedRepositoryRef = true
@@ -91,21 +90,21 @@ func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, p
 		// repository reference to the image then we will want to
 		// remove that reference.
 		// FIXME: Is this the behavior we want?
-		repoRefs := daemon.Repositories(ctx).ByID()[img.ID]
+		repoRefs := daemon.Repositories().ByID()[img.ID]
 		if len(repoRefs) == 1 {
-			parsedRef, err := daemon.removeImageRef(ctx, repoRefs[0])
+			parsedRef, err := daemon.removeImageRef(repoRefs[0])
 			if err != nil {
 				return nil, err
 			}
 
 			untaggedRecord := types.ImageDelete{Untagged: parsedRef}
 
-			daemon.EventsService.Log(ctx, "untag", img.ID, "")
+			daemon.EventsService.Log("untag", img.ID, "")
 			records = append(records, untaggedRecord)
 		}
 	}
 
-	return records, daemon.imageDeleteHelper(ctx, img, &records, force, prune, removedRepositoryRef)
+	return records, daemon.imageDeleteHelper(img, &records, force, prune, removedRepositoryRef)
 }
 
 // isImageIDPrefix returns whether the given possiblePrefix is a prefix of the
@@ -116,14 +115,14 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
 
 // imageHasMultipleRepositoryReferences returns whether there are multiple
 // repository references to the given imageID.
-func (daemon *Daemon) imageHasMultipleRepositoryReferences(ctx context.Context, imageID string) bool {
-	return len(daemon.Repositories(ctx).ByID()[imageID]) > 1
+func (daemon *Daemon) imageHasMultipleRepositoryReferences(imageID string) bool {
+	return len(daemon.Repositories().ByID()[imageID]) > 1
 }
 
 // getContainerUsingImage returns a container that was created using the given
 // imageID. Returns nil if there is no such container.
-func (daemon *Daemon) getContainerUsingImage(ctx context.Context, imageID string) *Container {
-	for _, container := range daemon.List(ctx) {
+func (daemon *Daemon) getContainerUsingImage(imageID string) *Container {
+	for _, container := range daemon.List() {
 		if container.ImageID == imageID {
 			return container
 		}
@@ -137,7 +136,7 @@ func (daemon *Daemon) getContainerUsingImage(ctx context.Context, imageID string
 // repositoryRef must not be an image ID but a repository name followed by an
 // optional tag or digest reference. If tag or digest is omitted, the default
 // tag is used. Returns the resolved image reference and an error.
-func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string) (string, error) {
+func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
 	repository, ref := parsers.ParseRepositoryTag(repositoryRef)
 	if ref == "" {
 		ref = tags.DefaultTag
@@ -146,7 +145,7 @@ func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string)
 	// Ignore the boolean value returned, as far as we're concerned, this
 	// is an idempotent operation and it's okay if the reference didn't
 	// exist in the first place.
-	_, err := daemon.Repositories(ctx).Delete(repository, ref)
+	_, err := daemon.Repositories().Delete(repository, ref)
 
 	return utils.ImageReference(repository, ref), err
 }
@@ -156,18 +155,18 @@ func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string)
 // on the first encountered error. Removed references are logged to this
 // daemon's event service. An "Untagged" types.ImageDelete is added to the
 // given list of records.
-func (daemon *Daemon) removeAllReferencesToImageID(ctx context.Context, imgID string, records *[]types.ImageDelete) error {
-	imageRefs := daemon.Repositories(ctx).ByID()[imgID]
+func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]types.ImageDelete) error {
+	imageRefs := daemon.Repositories().ByID()[imgID]
 
 	for _, imageRef := range imageRefs {
-		parsedRef, err := daemon.removeImageRef(ctx, imageRef)
+		parsedRef, err := daemon.removeImageRef(imageRef)
 		if err != nil {
 			return err
 		}
 
 		untaggedRecord := types.ImageDelete{Untagged: parsedRef}
 
-		daemon.EventsService.Log(ctx, "untag", imgID, "")
+		daemon.EventsService.Log("untag", imgID, "")
 		*records = append(*records, untaggedRecord)
 	}
 
@@ -204,11 +203,11 @@ func (idc *imageDeleteConflict) Error() string {
 // conflict is encountered, it will be returned immediately without deleting
 // the image. If quiet is true, any encountered conflicts will be ignored and
 // the function will return nil immediately without deleting the image.
-func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
+func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
 	// First, determine if this image has any conflicts. Ignore soft conflicts
 	// if force is true.
-	if conflict := daemon.checkImageDeleteConflict(ctx, img, force); conflict != nil {
-		if quiet && !daemon.imageIsDangling(ctx, img) {
+	if conflict := daemon.checkImageDeleteConflict(img, force); conflict != nil {
+		if quiet && !daemon.imageIsDangling(img) {
 			// Ignore conflicts UNLESS the image is "dangling" in
 			// which case we want the user to know.
 			return nil
@@ -220,15 +219,15 @@ func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, r
 	}
 
 	// Delete all repository tag/digest references to this image.
-	if err := daemon.removeAllReferencesToImageID(ctx, img.ID, records); err != nil {
+	if err := daemon.removeAllReferencesToImageID(img.ID, records); err != nil {
 		return err
 	}
 
-	if err := daemon.Graph(ctx).Delete(img.ID); err != nil {
+	if err := daemon.Graph().Delete(img.ID); err != nil {
 		return err
 	}
 
-	daemon.EventsService.Log(ctx, "delete", img.ID, "")
+	daemon.EventsService.Log("delete", img.ID, "")
 	*records = append(*records, types.ImageDelete{Deleted: img.ID})
 
 	if !prune || img.Parent == "" {
@@ -238,14 +237,14 @@ func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, r
 	// We need to prune the parent image. This means delete it if there are
 	// no tags/digests referencing it and there are no containers using it (
 	// either running or stopped).
-	parentImg, err := daemon.Graph(ctx).Get(img.Parent)
+	parentImg, err := daemon.Graph().Get(img.Parent)
 	if err != nil {
 		return derr.ErrorCodeImgNoParent.WithArgs(err)
 	}
 
 	// Do not force prunings, but do so quietly (stopping on any encountered
 	// conflicts).
-	return daemon.imageDeleteHelper(ctx, parentImg, records, false, true, true)
+	return daemon.imageDeleteHelper(parentImg, records, false, true, true)
 }
 
 // checkImageDeleteConflict determines whether there are any conflicts
@@ -254,9 +253,9 @@ func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, r
 // using the image. A soft conflict is any tags/digest referencing the given
 // image or any stopped container using the image. If ignoreSoftConflicts is
 // true, this function will not check for soft conflict conditions.
-func (daemon *Daemon) checkImageDeleteConflict(ctx context.Context, img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
+func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
 	// Check for hard conflicts first.
-	if conflict := daemon.checkImageDeleteHardConflict(ctx, img); conflict != nil {
+	if conflict := daemon.checkImageDeleteHardConflict(img); conflict != nil {
 		return conflict
 	}
 
@@ -266,12 +265,12 @@ func (daemon *Daemon) checkImageDeleteConflict(ctx context.Context, img *image.I
 		return nil
 	}
 
-	return daemon.checkImageDeleteSoftConflict(ctx, img)
+	return daemon.checkImageDeleteSoftConflict(img)
 }
 
-func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
+func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDeleteConflict {
 	// Check if the image ID is being used by a pull or build.
-	if daemon.Graph(ctx).IsHeld(img.ID) {
+	if daemon.Graph().IsHeld(img.ID) {
 		return &imageDeleteConflict{
 			hard:    true,
 			imgID:   img.ID,
@@ -280,7 +279,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *ima
 	}
 
 	// Check if the image has any descendent images.
-	if daemon.Graph(ctx).HasChildren(img) {
+	if daemon.Graph().HasChildren(img) {
 		return &imageDeleteConflict{
 			hard:    true,
 			imgID:   img.ID,
@@ -289,7 +288,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *ima
 	}
 
 	// Check if any running container is using the image.
-	for _, container := range daemon.List(ctx) {
+	for _, container := range daemon.List() {
 		if !container.IsRunning() {
 			// Skip this until we check for soft conflicts later.
 			continue
@@ -307,9 +306,9 @@ func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *ima
 	return nil
 }
 
-func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
+func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDeleteConflict {
 	// Check if any repository tags/digest reference this image.
-	if daemon.Repositories(ctx).HasReferences(img) {
+	if daemon.Repositories().HasReferences(img) {
 		return &imageDeleteConflict{
 			imgID:   img.ID,
 			message: "image is referenced in one or more repositories",
@@ -317,7 +316,7 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *ima
 	}
 
 	// Check if any stopped containers reference this image.
-	for _, container := range daemon.List(ctx) {
+	for _, container := range daemon.List() {
 		if container.IsRunning() {
 			// Skip this as it was checked above in hard conflict conditions.
 			continue
@@ -337,6 +336,6 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *ima
 // imageIsDangling returns whether the given image is "dangling" which means
 // that there are no repository references to the given image and it has no
 // child images.
-func (daemon *Daemon) imageIsDangling(ctx context.Context, img *image.Image) bool {
-	return !(daemon.Repositories(ctx).HasReferences(img) || daemon.Graph(ctx).HasChildren(img))
+func (daemon *Daemon) imageIsDangling(img *image.Image) bool {
+	return !(daemon.Repositories().HasReferences(img) || daemon.Graph().HasChildren(img))
 }

+ 6 - 7
daemon/info.go

@@ -8,7 +8,6 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/parsers/operatingsystem"
@@ -19,8 +18,8 @@ import (
 )
 
 // SystemInfo returns information about the host server the daemon is running on.
-func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
-	images := daemon.Graph(ctx).Map()
+func (daemon *Daemon) SystemInfo() (*types.Info, error) {
+	images := daemon.Graph().Map()
 	var imgcount int
 	if images == nil {
 		imgcount = 0
@@ -66,10 +65,10 @@ func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
 
 	v := &types.Info{
 		ID:                 daemon.ID,
-		Containers:         len(daemon.List(ctx)),
+		Containers:         len(daemon.List()),
 		Images:             imgcount,
-		Driver:             daemon.GraphDriver(ctx).String(),
-		DriverStatus:       daemon.GraphDriver(ctx).Status(),
+		Driver:             daemon.GraphDriver().String(),
+		DriverStatus:       daemon.GraphDriver().Status(),
 		IPv4Forwarding:     !sysInfo.IPv4ForwardingDisabled,
 		BridgeNfIptables:   !sysInfo.BridgeNfCallIptablesDisabled,
 		BridgeNfIP6tables:  !sysInfo.BridgeNfCallIP6tablesDisabled,
@@ -77,7 +76,7 @@ func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
 		NFd:                fileutils.GetTotalUsedFds(),
 		NGoroutines:        runtime.NumGoroutine(),
 		SystemTime:         time.Now().Format(time.RFC3339Nano),
-		ExecutionDriver:    daemon.ExecutionDriver(ctx).Name(),
+		ExecutionDriver:    daemon.ExecutionDriver().Name(),
 		LoggingDriver:      daemon.defaultLogConfig.Type,
 		NEventsListener:    daemon.EventsService.SubscribersCount(),
 		KernelVersion:      kernelVersion,

+ 10 - 11
daemon/inspect.go

@@ -5,14 +5,13 @@ import (
 	"time"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 )
 
 // ContainerInspect returns low-level information about a
 // container. Returns an error if the container cannot be found, or if
 // there is an error getting the data.
-func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types.ContainerJSON, error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
 	}
@@ -20,7 +19,7 @@ func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types
 	container.Lock()
 	defer container.Unlock()
 
-	base, err := daemon.getInspectData(ctx, container)
+	base, err := daemon.getInspectData(container)
 	if err != nil {
 		return nil, err
 	}
@@ -31,8 +30,8 @@ func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types
 }
 
 // ContainerInspect120 serializes the master version of a container into a json type.
-func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*types.ContainerJSON120, error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120, error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
 	}
@@ -40,7 +39,7 @@ func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*ty
 	container.Lock()
 	defer container.Unlock()
 
-	base, err := daemon.getInspectData(ctx, container)
+	base, err := daemon.getInspectData(container)
 	if err != nil {
 		return nil, err
 	}
@@ -54,11 +53,11 @@ func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*ty
 	return &types.ContainerJSON120{base, mountPoints, config}, nil
 }
 
-func (daemon *Daemon) getInspectData(ctx context.Context, container *Container) (*types.ContainerJSONBase, error) {
+func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) {
 	// make a copy to play with
 	hostConfig := *container.hostConfig
 
-	if children, err := daemon.children(ctx, container.Name); err == nil {
+	if children, err := daemon.children(container.Name); err == nil {
 		for linkAlias, child := range children {
 			hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
 		}
@@ -121,7 +120,7 @@ func (daemon *Daemon) getInspectData(ctx context.Context, container *Container)
 
 // ContainerExecInspect returns low-level information about the exec
 // command. An error is returned if the exec cannot be found.
-func (daemon *Daemon) ContainerExecInspect(ctx context.Context, id string) (*ExecConfig, error) {
+func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
 	eConfig, err := daemon.getExecConfig(id)
 	if err != nil {
 		return nil, err
@@ -131,7 +130,7 @@ func (daemon *Daemon) ContainerExecInspect(ctx context.Context, id string) (*Exe
 
 // VolumeInspect looks up a volume by name. An error is returned if
 // the volume cannot be found.
-func (daemon *Daemon) VolumeInspect(ctx context.Context, name string) (*types.Volume, error) {
+func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
 	v, err := daemon.volumes.Get(name)
 	if err != nil {
 		return nil, err

+ 4 - 7
daemon/inspect_unix.go

@@ -2,10 +2,7 @@
 
 package daemon
 
-import (
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
-)
+import "github.com/docker/docker/api/types"
 
 // This sets platform-specific fields
 func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
@@ -18,8 +15,8 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type
 }
 
 // ContainerInspectPre120 gets containers for pre 1.20 APIs.
-func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSONPre120, error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
 	}
@@ -27,7 +24,7 @@ func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (
 	container.Lock()
 	defer container.Unlock()
 
-	base, err := daemon.getInspectData(ctx, container)
+	base, err := daemon.getInspectData(container)
 	if err != nil {
 		return nil, err
 	}

+ 3 - 6
daemon/inspect_windows.go

@@ -1,9 +1,6 @@
 package daemon
 
-import (
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
-)
+import "github.com/docker/docker/api/types"
 
 // This sets platform-specific fields
 func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
@@ -15,6 +12,6 @@ func addMountPoints(container *Container) []types.MountPoint {
 }
 
 // ContainerInspectPre120 get containers for pre 1.20 APIs.
-func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSON, error) {
-	return daemon.ContainerInspect(ctx, name)
+func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSON, error) {
+	return daemon.ContainerInspect(name)
 }

+ 5 - 9
daemon/kill.go

@@ -1,29 +1,25 @@
 package daemon
 
-import (
-	"syscall"
-
-	"github.com/docker/docker/context"
-)
+import "syscall"
 
 // ContainerKill send signal to the container
 // If no signal is given (sig 0), then Kill with SIGKILL and wait
 // for the container to exit.
 // If a signal is given, then just send it to the container and return.
-func (daemon *Daemon) ContainerKill(ctx context.Context, name string, sig uint64) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
 	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
 	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
-		if err := container.Kill(ctx); err != nil {
+		if err := container.Kill(); err != nil {
 			return err
 		}
 	} else {
 		// Otherwise, just send the requested signal
-		if err := container.killSig(ctx, int(sig)); err != nil {
+		if err := container.killSig(int(sig)); err != nil {
 			return err
 		}
 	}

+ 23 - 24
daemon/list.go

@@ -8,7 +8,6 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/graphdb"
@@ -21,7 +20,7 @@ type iterationAction int
 
 // containerReducer represents a reducer for a container.
 // Returns the object to serialize by the api.
-type containerReducer func(context.Context, *Container, *listContext) (*types.Container, error)
+type containerReducer func(*Container, *listContext) (*types.Container, error)
 
 const (
 	// includeContainer is the action to include a container in the reducer.
@@ -36,7 +35,7 @@ const (
 var errStopIteration = errors.New("container list iteration stopped")
 
 // List returns an array of all containers registered in the daemon.
-func (daemon *Daemon) List(ctx context.Context) []*Container {
+func (daemon *Daemon) List() []*Container {
 	return daemon.containers.List()
 }
 
@@ -80,21 +79,21 @@ type listContext struct {
 }
 
 // Containers returns the list of containers to show given the user's filtering.
-func (daemon *Daemon) Containers(ctx context.Context, config *ContainersConfig) ([]*types.Container, error) {
-	return daemon.reduceContainers(ctx, config, daemon.transformContainer)
+func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
+	return daemon.reduceContainers(config, daemon.transformContainer)
 }
 
 // reduceContainer parses the user filtering and generates the list of containers to return based on a reducer.
-func (daemon *Daemon) reduceContainers(ctx context.Context, config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
+func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
 	containers := []*types.Container{}
 
-	fctx, err := daemon.foldFilter(ctx, config)
+	ctx, err := daemon.foldFilter(config)
 	if err != nil {
 		return nil, err
 	}
 
-	for _, container := range daemon.List(ctx) {
-		t, err := daemon.reducePsContainer(ctx, container, fctx, reducer)
+	for _, container := range daemon.List() {
+		t, err := daemon.reducePsContainer(container, ctx, reducer)
 		if err != nil {
 			if err != errStopIteration {
 				return nil, err
@@ -103,19 +102,19 @@ func (daemon *Daemon) reduceContainers(ctx context.Context, config *ContainersCo
 		}
 		if t != nil {
 			containers = append(containers, t)
-			fctx.idx++
+			ctx.idx++
 		}
 	}
 	return containers, nil
 }
 
 // reducePsContainer is the basic representation for a container as expected by the ps command.
-func (daemon *Daemon) reducePsContainer(ctx context.Context, container *Container, lctx *listContext, reducer containerReducer) (*types.Container, error) {
+func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
 	container.Lock()
 	defer container.Unlock()
 
 	// filter containers to return
-	action := includeContainerInList(container, lctx)
+	action := includeContainerInList(container, ctx)
 	switch action {
 	case excludeContainer:
 		return nil, nil
@@ -124,11 +123,11 @@ func (daemon *Daemon) reducePsContainer(ctx context.Context, container *Containe
 	}
 
 	// transform internal container struct into api structs
-	return reducer(ctx, container, lctx)
+	return reducer(container, ctx)
 }
 
 // foldFilter generates the container filter based in the user's filtering options.
-func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig) (*listContext, error) {
+func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) {
 	psFilters, err := filters.FromParam(config.Filters)
 	if err != nil {
 		return nil, err
@@ -160,11 +159,11 @@ func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig)
 	var ancestorFilter bool
 	if ancestors, ok := psFilters["ancestor"]; ok {
 		ancestorFilter = true
-		byParents := daemon.Graph(ctx).ByParent()
+		byParents := daemon.Graph().ByParent()
 		// The idea is to walk the graph down the most "efficient" way.
 		for _, ancestor := range ancestors {
 			// First, get the imageId of the ancestor filter (yay)
-			image, err := daemon.Repositories(ctx).LookupImage(ancestor)
+			image, err := daemon.Repositories().LookupImage(ancestor)
 			if err != nil {
 				logrus.Warnf("Error while looking up for image %v", ancestor)
 				continue
@@ -186,14 +185,14 @@ func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig)
 
 	var beforeCont, sinceCont *Container
 	if config.Before != "" {
-		beforeCont, err = daemon.Get(ctx, config.Before)
+		beforeCont, err = daemon.Get(config.Before)
 		if err != nil {
 			return nil, err
 		}
 	}
 
 	if config.Since != "" {
-		sinceCont, err = daemon.Get(ctx, config.Since)
+		sinceCont, err = daemon.Get(config.Since)
 		if err != nil {
 			return nil, err
 		}
@@ -287,13 +286,13 @@ func includeContainerInList(container *Container, ctx *listContext) iterationAct
 }
 
 // transformContainer generates the container type expected by the docker ps command.
-func (daemon *Daemon) transformContainer(ctx context.Context, container *Container, lctx *listContext) (*types.Container, error) {
+func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) {
 	newC := &types.Container{
 		ID:    container.ID,
-		Names: lctx.names[container.ID],
+		Names: ctx.names[container.ID],
 	}
 
-	img, err := daemon.Repositories(ctx).LookupImage(container.Config.Image)
+	img, err := daemon.Repositories().LookupImage(container.Config.Image)
 	if err != nil {
 		// If the image can no longer be found by its original reference,
 		// it makes sense to show the ID instead of a stale reference.
@@ -350,8 +349,8 @@ func (daemon *Daemon) transformContainer(ctx context.Context, container *Contain
 		}
 	}
 
-	if lctx.Size {
-		sizeRw, sizeRootFs := container.getSize(ctx)
+	if ctx.Size {
+		sizeRw, sizeRootFs := container.getSize()
 		newC.SizeRw = sizeRw
 		newC.SizeRootFs = sizeRootFs
 	}
@@ -362,7 +361,7 @@ func (daemon *Daemon) transformContainer(ctx context.Context, container *Contain
 
 // Volumes lists known volumes, using the filter to restrict the range
 // of volumes returned.
-func (daemon *Daemon) Volumes(ctx context.Context, filter string) ([]*types.Volume, error) {
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) {
 	var volumesOut []*types.Volume
 	volFilters, err := filters.FromParam(filter)
 	if err != nil {

+ 1 - 2
daemon/logs.go

@@ -6,7 +6,6 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/logger"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/pkg/stdcopy"
@@ -31,7 +30,7 @@ type ContainerLogsConfig struct {
 
 // ContainerLogs hooks up a container's stdout and stderr streams
 // configured with the given struct.
-func (daemon *Daemon) ContainerLogs(ctx context.Context, container *Container, config *ContainerLogsConfig) error {
+func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
 	if !(config.UseStdout || config.UseStderr) {
 		return derr.ErrorCodeNeedStream
 	}

+ 10 - 11
daemon/monitor.go

@@ -7,7 +7,6 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
@@ -85,9 +84,9 @@ func (m *containerMonitor) ExitOnNext() {
 
 // Close closes the container's resources such as networking allocations and
 // unmounts the contatiner's root filesystem
-func (m *containerMonitor) Close(ctx context.Context) error {
+func (m *containerMonitor) Close() error {
 	// Cleanup networking and mounts
-	m.container.cleanup(ctx)
+	m.container.cleanup()
 
 	// FIXME: here is race condition between two RUN instructions in Dockerfile
 	// because they share same runconfig and change image. Must be fixed
@@ -102,7 +101,7 @@ func (m *containerMonitor) Close(ctx context.Context) error {
 }
 
 // Start starts the containers process and monitors it according to the restart policy
-func (m *containerMonitor) Start(ctx context.Context) error {
+func (m *containerMonitor) Start() error {
 	var (
 		err        error
 		exitStatus execdriver.ExitStatus
@@ -118,7 +117,7 @@ func (m *containerMonitor) Start(ctx context.Context) error {
 			m.container.setStopped(&exitStatus)
 			defer m.container.Unlock()
 		}
-		m.Close(ctx)
+		m.Close()
 	}()
 	// reset stopped flag
 	if m.container.HasBeenManuallyStopped {
@@ -139,11 +138,11 @@ func (m *containerMonitor) Start(ctx context.Context) error {
 
 		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
 
-		m.container.logEvent(ctx, "start")
+		m.container.logEvent("start")
 
 		m.lastStartTime = time.Now()
 
-		if exitStatus, err = m.container.daemon.run(ctx, m.container, pipes, m.callback); err != nil {
+		if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
 			// if we receive an internal error from the initial start of a container then lets
 			// return it instead of entering the restart loop
 			if m.container.RestartCount == 0 {
@@ -163,7 +162,7 @@ func (m *containerMonitor) Start(ctx context.Context) error {
 
 		if m.shouldRestart(exitStatus.ExitCode) {
 			m.container.setRestarting(&exitStatus)
-			m.container.logEvent(ctx, "die")
+			m.container.logEvent("die")
 			m.resetContainer(true)
 
 			// sleep with a small time increment between each restart to help avoid issues cased by quickly
@@ -178,7 +177,7 @@ func (m *containerMonitor) Start(ctx context.Context) error {
 			continue
 		}
 
-		m.container.logEvent(ctx, "die")
+		m.container.logEvent("die")
 		m.resetContainer(true)
 		return err
 	}
@@ -246,11 +245,11 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
 
 // callback ensures that the container's state is properly updated after we
 // received ack from the execution drivers
-func (m *containerMonitor) callback(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
+func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
 	go func() {
 		_, ok := <-chOOM
 		if ok {
-			m.container.logEvent(ctx, "oom")
+			m.container.logEvent("oom")
 		}
 	}()
 

+ 3 - 4
daemon/pause.go

@@ -1,18 +1,17 @@
 package daemon
 
 import (
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
 // ContainerPause pauses a container
-func (daemon *Daemon) ContainerPause(ctx context.Context, name string) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerPause(name string) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
-	if err := container.pause(ctx); err != nil {
+	if err := container.pause(); err != nil {
 		return derr.ErrorCodePauseError.WithArgs(name, err)
 	}
 

+ 5 - 6
daemon/rename.go

@@ -1,19 +1,18 @@
 package daemon
 
 import (
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
 // ContainerRename changes the name of a container, using the oldName
 // to find the container. An error is returned if newName is already
 // reserved.
-func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName string) error {
+func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 	if oldName == "" || newName == "" {
 		return derr.ErrorCodeEmptyRename
 	}
 
-	container, err := daemon.Get(ctx, oldName)
+	container, err := daemon.Get(oldName)
 	if err != nil {
 		return err
 	}
@@ -22,7 +21,7 @@ func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName stri
 
 	container.Lock()
 	defer container.Unlock()
-	if newName, err = daemon.reserveName(ctx, container.ID, newName); err != nil {
+	if newName, err = daemon.reserveName(container.ID, newName); err != nil {
 		return derr.ErrorCodeRenameTaken.WithArgs(err)
 	}
 
@@ -30,7 +29,7 @@ func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName stri
 
 	undo := func() {
 		container.Name = oldName
-		daemon.reserveName(ctx, container.ID, oldName)
+		daemon.reserveName(container.ID, oldName)
 		daemon.containerGraphDB.Delete(newName)
 	}
 
@@ -44,6 +43,6 @@ func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName stri
 		return err
 	}
 
-	container.logEvent(ctx, "rename")
+	container.logEvent("rename")
 	return nil
 }

+ 4 - 8
daemon/resize.go

@@ -1,24 +1,20 @@
 package daemon
 
-import (
-	"github.com/docker/docker/context"
-)
-
 // ContainerResize changes the size of the TTY of the process running
 // in the container with the given name to the given height and width.
-func (daemon *Daemon) ContainerResize(ctx context.Context, name string, height, width int) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerResize(name string, height, width int) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
-	return container.Resize(ctx, height, width)
+	return container.Resize(height, width)
 }
 
 // ContainerExecResize changes the size of the TTY of the process
 // running in the exec with the given name to the given height and
 // width.
-func (daemon *Daemon) ContainerExecResize(ctx context.Context, name string, height, width int) error {
+func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
 	ExecConfig, err := daemon.getExecConfig(name)
 	if err != nil {
 		return err

+ 3 - 4
daemon/restart.go

@@ -1,7 +1,6 @@
 package daemon
 
 import (
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
@@ -11,12 +10,12 @@ import (
 // timeout, ContainerRestart will wait forever until a graceful
 // stop. Returns an error if the container cannot be found, or if
 // there is an underlying error at any stage of the restart.
-func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, seconds int) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
-	if err := container.Restart(ctx, seconds); err != nil {
+	if err := container.Restart(seconds); err != nil {
 		return derr.ErrorCodeCantRestart.WithArgs(name, err)
 	}
 	return nil

+ 5 - 6
daemon/start.go

@@ -3,15 +3,14 @@ package daemon
 import (
 	"runtime"
 
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 )
 
 // ContainerStart starts a container.
-func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *runconfig.HostConfig) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
@@ -29,7 +28,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
 		// This is kept for backward compatibility - hostconfig should be passed when
 		// creating a container, not during start.
 		if hostConfig != nil {
-			if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
+			if err := daemon.setHostConfig(container, hostConfig); err != nil {
 				return err
 			}
 		}
@@ -41,11 +40,11 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
 
 	// check if hostConfig is in line with the current system settings.
 	// It may happen cgroups are umounted or the like.
-	if _, err = daemon.verifyContainerSettings(ctx, container.hostConfig, nil); err != nil {
+	if _, err = daemon.verifyContainerSettings(container.hostConfig, nil); err != nil {
 		return err
 	}
 
-	if err := container.Start(ctx); err != nil {
+	if err := container.Start(); err != nil {
 		return derr.ErrorCodeCantStart.WithArgs(name, utils.GetErrorMessage(err))
 	}
 

+ 2 - 3
daemon/stats.go

@@ -5,7 +5,6 @@ import (
 	"io"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/version"
 	"github.com/docker/libnetwork/osl"
@@ -23,9 +22,9 @@ type ContainerStatsConfig struct {
 
 // ContainerStats writes information about the container to the stream
 // given in the config object.
-func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *ContainerStatsConfig) error {
+func (daemon *Daemon) ContainerStats(prefixOrName string, config *ContainerStatsConfig) error {
 
-	container, err := daemon.Get(ctx, prefixOrName)
+	container, err := daemon.Get(prefixOrName)
 	if err != nil {
 		return err
 	}

+ 3 - 4
daemon/stop.go

@@ -1,7 +1,6 @@
 package daemon
 
 import (
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
@@ -11,15 +10,15 @@ import (
 // will wait for a graceful termination. An error is returned if the
 // container is not found, is already stopped, or if there is a
 // problem stopping the container.
-func (daemon *Daemon) ContainerStop(ctx context.Context, name string, seconds int) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerStop(name string, seconds int) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 	if !container.IsRunning() {
 		return derr.ErrorCodeStopped
 	}
-	if err := container.Stop(ctx, seconds); err != nil {
+	if err := container.Stop(seconds); err != nil {
 		return derr.ErrorCodeCantStop.WithArgs(name, err)
 	}
 	return nil

+ 4 - 5
daemon/top_unix.go

@@ -8,7 +8,6 @@ import (
 	"strings"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
@@ -17,12 +16,12 @@ import (
 // "-ef" if no args are given.  An error is returned if the container
 // is not found, or is not running, or if there are any problems
 // running ps, or parsing the output.
-func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
 	if psArgs == "" {
 		psArgs = "-ef"
 	}
 
-	container, err := daemon.Get(ctx, name)
+	container, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
 	}
@@ -31,7 +30,7 @@ func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs stri
 		return nil, derr.ErrorCodeNotRunning.WithArgs(name)
 	}
 
-	pids, err := daemon.ExecutionDriver(ctx).GetPidsForContainer(container.ID)
+	pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -77,6 +76,6 @@ func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs stri
 			}
 		}
 	}
-	container.logEvent(ctx, "top")
+	container.logEvent("top")
 	return procList, nil
 }

+ 1 - 2
daemon/top_windows.go

@@ -2,11 +2,10 @@ package daemon
 
 import (
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
 // ContainerTop is not supported on Windows and returns an error.
-func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
 	return nil, derr.ErrorCodeNoTop
 }

+ 3 - 4
daemon/unpause.go

@@ -1,18 +1,17 @@
 package daemon
 
 import (
-	"github.com/docker/docker/context"
 	derr "github.com/docker/docker/errors"
 )
 
 // ContainerUnpause unpauses a container
-func (daemon *Daemon) ContainerUnpause(ctx context.Context, name string) error {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerUnpause(name string) error {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
-	if err := container.unpause(ctx); err != nil {
+	if err := container.unpause(); err != nil {
 		return derr.ErrorCodeCantUnpause.WithArgs(name, err)
 	}
 

+ 2 - 3
daemon/volumes_unix.go

@@ -10,7 +10,6 @@ import (
 	"strings"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	derr "github.com/docker/docker/errors"
 	"github.com/docker/docker/pkg/system"
@@ -286,7 +285,7 @@ func parseVolumesFrom(spec string) (string, string, error) {
 // 1. Select the previously configured mount points for the containers, if any.
 // 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
 // 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
-func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
 	binds := map[string]bool{}
 	mountPoints := map[string]*mountPoint{}
 
@@ -302,7 +301,7 @@ func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Contai
 			return err
 		}
 
-		c, err := daemon.Get(ctx, containerID)
+		c, err := daemon.Get(containerID)
 		if err != nil {
 			return err
 		}

+ 1 - 2
daemon/volumes_windows.go

@@ -3,7 +3,6 @@
 package daemon
 
 import (
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/runconfig"
 )
@@ -32,6 +31,6 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
 // registerMountPoints initializes the container mount points with the
 // configured volumes and bind mounts. Windows does not support volumes or
 // mount points.
-func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
 	return nil
 }

+ 3 - 7
daemon/wait.go

@@ -1,18 +1,14 @@
 package daemon
 
-import (
-	"time"
-
-	"github.com/docker/docker/context"
-)
+import "time"
 
 // ContainerWait stops processing until the given container is
 // stopped. If the container is not found, an error is returned. On a
 // successful stop, the exit code of the container is returned. On a
 // timeout, an error is returned. If you want to wait forever, supply
 // a negative duration for the timeout.
-func (daemon *Daemon) ContainerWait(ctx context.Context, name string, timeout time.Duration) (int, error) {
-	container, err := daemon.Get(ctx, name)
+func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
+	container, err := daemon.Get(name)
 	if err != nil {
 		return -1, err
 	}

+ 9 - 15
docker/daemon.go

@@ -17,7 +17,6 @@ import (
 	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/opts"
@@ -151,11 +150,6 @@ func getGlobalFlag() (globalFlag *flag.Flag) {
 
 // CmdDaemon is the daemon command, called the raw arguments after `docker daemon`.
 func (cli *DaemonCli) CmdDaemon(args ...string) error {
-	// This may need to be made even more global - it all depends
-	// on whether we want the CLI to have a context object too.
-	// For now we'll leave it as a daemon-side object only.
-	ctx := context.Background()
-
 	// warn from uuid package when running the daemon
 	uuid.Loggerf = logrus.Warnf
 
@@ -230,7 +224,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
 		serverConfig.TLSConfig = tlsConfig
 	}
 
-	api := apiserver.New(ctx, serverConfig)
+	api := apiserver.New(serverConfig)
 
 	// The serve API routine never exits unless an error occurs
 	// We need to start it as a goroutine and wait on it so
@@ -251,7 +245,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
 	cli.TrustKeyPath = commonFlags.TrustKey
 
 	registryService := registry.NewService(cli.registryOptions)
-	d, err := daemon.NewDaemon(ctx, cli.Config, registryService)
+	d, err := daemon.NewDaemon(cli.Config, registryService)
 	if err != nil {
 		if pfile != nil {
 			if err := pfile.Remove(); err != nil {
@@ -266,14 +260,14 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
 	logrus.WithFields(logrus.Fields{
 		"version":     dockerversion.VERSION,
 		"commit":      dockerversion.GITCOMMIT,
-		"execdriver":  d.ExecutionDriver(ctx).Name(),
-		"graphdriver": d.GraphDriver(ctx).String(),
+		"execdriver":  d.ExecutionDriver().Name(),
+		"graphdriver": d.GraphDriver().String(),
 	}).Info("Docker daemon")
 
 	signal.Trap(func() {
 		api.Close()
 		<-serveAPIWait
-		shutdownDaemon(ctx, d, 15)
+		shutdownDaemon(d, 15)
 		if pfile != nil {
 			if err := pfile.Remove(); err != nil {
 				logrus.Error(err)
@@ -283,12 +277,12 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
 
 	// after the daemon is done setting up we can tell the api to start
 	// accepting connections with specified daemon
-	api.AcceptConnections(ctx, d)
+	api.AcceptConnections(d)
 
 	// Daemon is fully initialized and handling API traffic
 	// Wait for serve API to complete
 	errAPI := <-serveAPIWait
-	shutdownDaemon(ctx, d, 15)
+	shutdownDaemon(d, 15)
 	if errAPI != nil {
 		if pfile != nil {
 			if err := pfile.Remove(); err != nil {
@@ -303,10 +297,10 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
 // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
 // d.Shutdown() is waiting too long to kill container or worst it's
 // blocked there
-func shutdownDaemon(ctx context.Context, d *daemon.Daemon, timeout time.Duration) {
+func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
 	ch := make(chan struct{})
 	go func() {
-		d.Shutdown(ctx)
+		d.Shutdown()
 		close(ch)
 	}()
 	select {

+ 2 - 3
graph/import.go

@@ -5,7 +5,6 @@ import (
 	"net/http"
 	"net/url"
 
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/streamformatter"
@@ -17,7 +16,7 @@ import (
 // inConfig (if src is "-"), or from a URI specified in src. Progress output is
 // written to outStream. Repository and tag names can optionally be given in
 // the repo and tag arguments, respectively.
-func (s *TagStore) Import(ctx context.Context, src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
+func (s *TagStore) Import(src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
 	var (
 		sf      = streamformatter.NewJSONStreamFormatter()
 		archive io.ReadCloser
@@ -75,6 +74,6 @@ func (s *TagStore) Import(ctx context.Context, src string, repo string, tag stri
 		logID = utils.ImageReference(logID, tag)
 	}
 
-	s.eventsService.Log(ctx, "import", logID, "")
+	s.eventsService.Log("import", logID, "")
 	return nil
 }

+ 2 - 3
graph/pull.go

@@ -6,7 +6,6 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/utils"
@@ -63,7 +62,7 @@ func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.Re
 
 // Pull initiates a pull operation. image is the repository name to pull, and
 // tag may be either empty, or indicate a specific tag to pull.
-func (s *TagStore) Pull(ctx context.Context, image string, tag string, imagePullConfig *ImagePullConfig) error {
+func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error {
 	var sf = streamformatter.NewJSONStreamFormatter()
 
 	// Resolve the Repository name from fqn to RepositoryInfo
@@ -132,7 +131,7 @@ func (s *TagStore) Pull(ctx context.Context, image string, tag string, imagePull
 
 		}
 
-		s.eventsService.Log(ctx, "pull", logName, "")
+		s.eventsService.Log("pull", logName, "")
 		return nil
 	}
 

+ 2 - 3
graph/push.go

@@ -7,7 +7,6 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/context"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/registry"
 )
@@ -68,7 +67,7 @@ func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository
 }
 
 // Push initiates a push operation on the repository named localName.
-func (s *TagStore) Push(ctx context.Context, localName string, imagePushConfig *ImagePushConfig) error {
+func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
 	// FIXME: Allow to interrupt current push when new push of same image is done.
 
 	var sf = streamformatter.NewJSONStreamFormatter()
@@ -116,7 +115,7 @@ func (s *TagStore) Push(ctx context.Context, localName string, imagePushConfig *
 
 		}
 
-		s.eventsService.Log(ctx, "push", repoInfo.LocalName, "")
+		s.eventsService.Log("push", repoInfo.LocalName, "")
 		return nil
 	}
 

+ 1 - 76
integration-cli/docker_cli_events_test.go

@@ -410,7 +410,7 @@ func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
 			}
 
 			// Check the id
-			parsedID := strings.TrimSuffix(e[3], ":")
+			parsedID := strings.TrimSuffix(e[1], ":")
 			if parsedID != id {
 				return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, parsedID)
 			}
@@ -686,78 +686,3 @@ func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) {
 		c.Fatalf("Missing 'push' log event for image %s\n%s", repoName, out)
 	}
 }
-
-func (s *DockerSuite) TestEventsReqID(c *check.C) {
-	// Tests for the "[reqid: xxx]" field in Events
-	testRequires(c, DaemonIsLinux)
-
-	reqIDMatch := `[^ ]+ \[reqid: ([0-9a-z]{12})\] [0-9a-z]+: `
-	reqIDRE := regexp.MustCompile(reqIDMatch)
-
-	// Simple test just to make sure it works at all
-	dockerCmd(c, "create", "busybox", "true")
-
-	out, _ := dockerCmd(c, "events", "--since=0", "--until=0s")
-	events := strings.Split(strings.TrimSpace(out), "\n")
-
-	if len(events) == 0 {
-		c.Fatalf("Wrong # of events, should just be one, got:\n%v\n", events)
-	}
-
-	createEvent := events[len(events)-1]
-
-	matched, err := regexp.MatchString(reqIDMatch, createEvent)
-	if err != nil || !matched {
-		c.Fatalf("Error finding reqID in event: %v\n", createEvent)
-	}
-
-	reqID1 := reqIDRE.FindStringSubmatch(createEvent)[1]
-
-	// Now make sure another cmd doesn't get the same reqID
-	dockerCmd(c, "create", "busybox", "true")
-
-	out, _ = dockerCmd(c, "events", "--since=0", "--until=0s")
-	events = strings.Split(strings.TrimSpace(out), "\n")
-	createEvent = events[len(events)-1]
-
-	matched, err = regexp.MatchString(reqIDMatch, createEvent)
-	if err != nil || !matched {
-		c.Fatalf("Error finding reqID in event: %v\n", createEvent)
-	}
-
-	reqID2 := reqIDRE.FindStringSubmatch(createEvent)[1]
-
-	if reqID1 == reqID2 {
-		c.Fatalf("Should not have the same reqID(%s):\n%v\n", reqID1, createEvent)
-	}
-
-	// Now make sure a build **does** use the same reqID for all
-	// 4 events that are generated
-	_, err = buildImage("reqidimg", `
-		  FROM busybox
-		  RUN echo HI`, true)
-	if err != nil {
-		c.Fatalf("Couldn't create image: %q", err)
-	}
-
-	out, _ = dockerCmd(c, "events", "--since=0", "--until=0s")
-	events = strings.Split(strings.TrimSpace(out), "\n")
-
-	// Get last event's reqID - will use it to find other matching events
-	lastEvent := events[len(events)-1]
-	reqID := reqIDRE.FindStringSubmatch(lastEvent)[1]
-
-	// Find all events with this same reqID
-	eventList := []string{lastEvent}
-	for i := len(events) - 2; i >= 0; i-- {
-		tmpID := reqIDRE.FindStringSubmatch(events[i])[1]
-		if tmpID != reqID {
-			break
-		}
-		eventList = append(eventList, events[i])
-	}
-
-	if len(eventList) != 5 { // create, start, die, commit, destroy
-		c.Fatalf("Wrong # of matching events - should be 5:\n%q\n", eventList)
-	}
-}

+ 0 - 4
pkg/jsonmessage/jsonmessage.go

@@ -92,7 +92,6 @@ func (p *JSONProgress) String() string {
 // the created time, where it from, status, ID of the
 // message. It's used for docker events.
 type JSONMessage struct {
-	RequestID       string        `json:"reqid,omitempty"`
 	Stream          string        `json:"stream,omitempty"`
 	Status          string        `json:"status,omitempty"`
 	Progress        *JSONProgress `json:"progressDetail,omitempty"`
@@ -128,9 +127,6 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
 	} else if jm.Time != 0 {
 		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
 	}
-	if jm.RequestID != "" {
-		fmt.Fprintf(out, "[reqid: %s] ", jm.RequestID)
-	}
 	if jm.ID != "" {
 		fmt.Fprintf(out, "%s: ", jm.ID)
 	}