Merge pull request #16228 from duglin/ContextualizeEvents

Add context.RequestID to event stream
David Calavera 2015-09-24 14:16:22 -07:00
commit de41640435
68 changed files with 737 additions and 565 deletions
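
The core of the change is in api/server/server.go: makeHTTPHandler now receives a base context, generates a short request ID for every incoming API call via stringid.TruncateID(stringid.GenerateNonCryptoID()), stores it under context.RequestID, and hands the resulting context to the handler; each daemon, builder, and events entry point then gains a leading ctx parameter so the ID stays available wherever an event may be emitted. What follows is a minimal, self-contained sketch of that pattern only, using the standard library context package plus a hypothetical ctxKey/requestIDKey and newRequestID in place of Docker's internal github.com/docker/docker/context and pkg/stringid helpers.

package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"net/http"
)

// ctxKey/requestIDKey are stand-ins for Docker's context.RequestID key.
type ctxKey string

const requestIDKey ctxKey = "request-id"

// newRequestID stands in for stringid.TruncateID(stringid.GenerateNonCryptoID()).
func newRequestID() string {
	b := make([]byte, 6)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

// makeHandler mirrors makeHTTPHandler: every request gets a fresh ID in its
// context before the real handler runs.
func makeHandler(ctx context.Context, h func(context.Context, http.ResponseWriter, *http.Request)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		reqCtx := context.WithValue(ctx, requestIDKey, newRequestID())
		h(reqCtx, w, r)
	}
}

func main() {
	getContainers := func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		// Anything called from here receives ctx and can read the request ID.
		fmt.Fprintf(w, "request %v\n", ctx.Value(requestIDKey))
	}
	http.Handle("/containers/json", makeHandler(context.Background(), getContainers))
	// http.ListenAndServe(":2375", nil) // left out of the sketch
}

The bulk of the diff below is the mechanical consequence of this: every call chain that can end in an event now threads the request-scoped context through.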


@ -45,7 +45,7 @@ func (s *Server) getContainersJSON(ctx context.Context, w http.ResponseWriter, r
config.Limit = limit
}
containers, err := s.daemon.Containers(config)
containers, err := s.daemon.Containers(ctx, config)
if err != nil {
return err
}
@ -83,7 +83,7 @@ func (s *Server) getContainersStats(ctx context.Context, w http.ResponseWriter,
Version: version,
}
return s.daemon.ContainerStats(vars["name"], config)
return s.daemon.ContainerStats(ctx, vars["name"], config)
}
func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -118,7 +118,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
closeNotifier = notifier.CloseNotify()
}
c, err := s.daemon.Get(vars["name"])
c, err := s.daemon.Get(ctx, vars["name"])
if err != nil {
return err
}
@ -140,7 +140,7 @@ func (s *Server) getContainersLogs(ctx context.Context, w http.ResponseWriter, r
Stop: closeNotifier,
}
if err := s.daemon.ContainerLogs(c, logsConfig); err != nil {
if err := s.daemon.ContainerLogs(ctx, c, logsConfig); err != nil {
// The client may be expecting all of the data we're sending to
// be multiplexed, so send it through OutStream, which will
// have been set up to handle that if needed.
@ -155,7 +155,7 @@ func (s *Server) getContainersExport(ctx context.Context, w http.ResponseWriter,
return fmt.Errorf("Missing parameter")
}
return s.daemon.ContainerExport(vars["name"], w)
return s.daemon.ContainerExport(ctx, vars["name"], w)
}
func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -183,7 +183,7 @@ func (s *Server) postContainersStart(ctx context.Context, w http.ResponseWriter,
hostConfig = c
}
if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil {
if err := s.daemon.ContainerStart(ctx, vars["name"], hostConfig); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
@ -200,7 +200,7 @@ func (s *Server) postContainersStop(ctx context.Context, w http.ResponseWriter,
seconds, _ := strconv.Atoi(r.Form.Get("t"))
if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil {
if err := s.daemon.ContainerStop(ctx, vars["name"], seconds); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
@ -227,7 +227,7 @@ func (s *Server) postContainersKill(ctx context.Context, w http.ResponseWriter,
}
}
if err := s.daemon.ContainerKill(name, uint64(sig)); err != nil {
if err := s.daemon.ContainerKill(ctx, name, uint64(sig)); err != nil {
theErr, isDerr := err.(errcode.ErrorCoder)
isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning
@ -254,7 +254,7 @@ func (s *Server) postContainersRestart(ctx context.Context, w http.ResponseWrite
timeout, _ := strconv.Atoi(r.Form.Get("t"))
if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil {
if err := s.daemon.ContainerRestart(ctx, vars["name"], timeout); err != nil {
return err
}
@ -271,7 +271,7 @@ func (s *Server) postContainersPause(ctx context.Context, w http.ResponseWriter,
return err
}
if err := s.daemon.ContainerPause(vars["name"]); err != nil {
if err := s.daemon.ContainerPause(ctx, vars["name"]); err != nil {
return err
}
@ -288,7 +288,7 @@ func (s *Server) postContainersUnpause(ctx context.Context, w http.ResponseWrite
return err
}
if err := s.daemon.ContainerUnpause(vars["name"]); err != nil {
if err := s.daemon.ContainerUnpause(ctx, vars["name"]); err != nil {
return err
}
@ -302,7 +302,7 @@ func (s *Server) postContainersWait(ctx context.Context, w http.ResponseWriter,
return fmt.Errorf("Missing parameter")
}
status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second)
status, err := s.daemon.ContainerWait(ctx, vars["name"], -1*time.Second)
if err != nil {
return err
}
@ -317,7 +317,7 @@ func (s *Server) getContainersChanges(ctx context.Context, w http.ResponseWriter
return fmt.Errorf("Missing parameter")
}
changes, err := s.daemon.ContainerChanges(vars["name"])
changes, err := s.daemon.ContainerChanges(ctx, vars["name"])
if err != nil {
return err
}
@ -334,7 +334,7 @@ func (s *Server) getContainersTop(ctx context.Context, w http.ResponseWriter, r
return err
}
procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args"))
procList, err := s.daemon.ContainerTop(ctx, vars["name"], r.Form.Get("ps_args"))
if err != nil {
return err
}
@ -352,7 +352,7 @@ func (s *Server) postContainerRename(ctx context.Context, w http.ResponseWriter,
name := vars["name"]
newName := r.Form.Get("name")
if err := s.daemon.ContainerRename(name, newName); err != nil {
if err := s.daemon.ContainerRename(ctx, name, newName); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
@ -378,7 +378,7 @@ func (s *Server) postContainersCreate(ctx context.Context, w http.ResponseWriter
version := ctx.Version()
adjustCPUShares := version.LessThan("1.19")
container, warnings, err := s.daemon.ContainerCreate(name, config, hostConfig, adjustCPUShares)
container, warnings, err := s.daemon.ContainerCreate(ctx, name, config, hostConfig, adjustCPUShares)
if err != nil {
return err
}
@ -404,7 +404,7 @@ func (s *Server) deleteContainers(ctx context.Context, w http.ResponseWriter, r
RemoveLink: boolValue(r, "link"),
}
if err := s.daemon.ContainerRm(name, config); err != nil {
if err := s.daemon.ContainerRm(ctx, name, config); err != nil {
// Force a 404 for the empty string
if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") {
return fmt.Errorf("no such id: \"\"")
@ -434,7 +434,7 @@ func (s *Server) postContainersResize(ctx context.Context, w http.ResponseWriter
return err
}
return s.daemon.ContainerResize(vars["name"], height, width)
return s.daemon.ContainerResize(ctx, vars["name"], height, width)
}
func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -446,7 +446,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
}
containerName := vars["name"]
if !s.daemon.Exists(containerName) {
if !s.daemon.Exists(ctx, containerName) {
return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
}
@ -472,7 +472,7 @@ func (s *Server) postContainersAttach(ctx context.Context, w http.ResponseWriter
Stream: boolValue(r, "stream"),
}
if err := s.daemon.ContainerAttachWithLogs(containerName, attachWithLogsConfig); err != nil {
if err := s.daemon.ContainerAttachWithLogs(ctx, containerName, attachWithLogsConfig); err != nil {
fmt.Fprintf(outStream, "Error attaching: %s\n", err)
}
@ -488,7 +488,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
}
containerName := vars["name"]
if !s.daemon.Exists(containerName) {
if !s.daemon.Exists(ctx, containerName) {
return derr.ErrorCodeNoSuchContainer.WithArgs(containerName)
}
@ -503,7 +503,7 @@ func (s *Server) wsContainersAttach(ctx context.Context, w http.ResponseWriter,
Stream: boolValue(r, "stream"),
}
if err := s.daemon.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil {
if err := s.daemon.ContainerWsAttachWithLogs(ctx, containerName, wsAttachWithLogsConfig); err != nil {
logrus.Errorf("Error attaching websocket: %s", err)
}
})


@ -32,7 +32,7 @@ func (s *Server) postContainersCopy(ctx context.Context, w http.ResponseWriter,
return fmt.Errorf("Path cannot be empty")
}
data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource)
data, err := s.daemon.ContainerCopy(ctx, vars["name"], cfg.Resource)
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "no such id") {
w.WriteHeader(http.StatusNotFound)
@ -74,7 +74,7 @@ func (s *Server) headContainersArchive(ctx context.Context, w http.ResponseWrite
return err
}
stat, err := s.daemon.ContainerStatPath(v.name, v.path)
stat, err := s.daemon.ContainerStatPath(ctx, v.name, v.path)
if err != nil {
return err
}
@ -88,7 +88,7 @@ func (s *Server) getContainersArchive(ctx context.Context, w http.ResponseWriter
return err
}
tarArchive, stat, err := s.daemon.ContainerArchivePath(v.name, v.path)
tarArchive, stat, err := s.daemon.ContainerArchivePath(ctx, v.name, v.path)
if err != nil {
return err
}
@ -111,5 +111,5 @@ func (s *Server) putContainersArchive(ctx context.Context, w http.ResponseWriter
}
noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir")
return s.daemon.ContainerExtractToDir(v.name, v.path, noOverwriteDirNonDir, r.Body)
return s.daemon.ContainerExtractToDir(ctx, v.name, v.path, noOverwriteDirNonDir, r.Body)
}


@ -45,7 +45,7 @@ func (s *Server) getVersion(ctx context.Context, w http.ResponseWriter, r *http.
}
func (s *Server) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
info, err := s.daemon.SystemInfo()
info, err := s.daemon.SystemInfo(ctx)
if err != nil {
return err
}
@ -120,7 +120,7 @@ func (s *Server) getEvents(ctx context.Context, w http.ResponseWriter, r *http.R
enc := json.NewEncoder(outStream)
getContainerID := func(cn string) string {
c, err := d.Get(cn)
c, err := d.Get(ctx, cn)
if err != nil {
return ""
}


@ -19,7 +19,7 @@ func (s *Server) getExecByID(ctx context.Context, w http.ResponseWriter, r *http
return fmt.Errorf("Missing parameter 'id'")
}
eConfig, err := s.daemon.ContainerExecInspect(vars["id"])
eConfig, err := s.daemon.ContainerExecInspect(ctx, vars["id"])
if err != nil {
return err
}
@ -47,7 +47,7 @@ func (s *Server) postContainerExecCreate(ctx context.Context, w http.ResponseWri
}
// Register an instance of Exec in container.
id, err := s.daemon.ContainerExecCreate(execConfig)
id, err := s.daemon.ContainerExecCreate(ctx, execConfig)
if err != nil {
logrus.Errorf("Error setting up exec command in container %s: %s", name, err)
return err
@ -100,7 +100,7 @@ func (s *Server) postContainerExecStart(ctx context.Context, w http.ResponseWrit
}
// Now run the user process in container.
if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil {
if err := s.daemon.ContainerExecStart(ctx, execName, stdin, stdout, stderr); err != nil {
fmt.Fprintf(outStream, "Error running exec in container: %v\n", err)
}
return nil
@ -123,5 +123,5 @@ func (s *Server) postContainerExecResize(ctx context.Context, w http.ResponseWri
return err
}
return s.daemon.ContainerExecResize(vars["name"], height, width)
return s.daemon.ContainerExecResize(ctx, vars["name"], height, width)
}


@ -55,7 +55,7 @@ func (s *Server) postCommit(ctx context.Context, w http.ResponseWriter, r *http.
Config: c,
}
imgID, err := builder.Commit(cname, s.daemon, commitCfg)
imgID, err := builder.Commit(ctx, cname, s.daemon, commitCfg)
if err != nil {
return err
}
@ -112,7 +112,7 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
OutStream: output,
}
err = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
err = s.daemon.Repositories(ctx).Pull(ctx, image, tag, imagePullConfig)
} else { //import
if tag == "" {
repo, tag = parsers.ParseRepositoryTag(repo)
@ -124,12 +124,12 @@ func (s *Server) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
// generated from the download to be available to the output
// stream processing below
var newConfig *runconfig.Config
newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, r.Form["changes"])
newConfig, err = builder.BuildFromConfig(ctx, s.daemon, &runconfig.Config{}, r.Form["changes"])
if err != nil {
return err
}
err = s.daemon.Repositories().Import(src, repo, tag, message, r.Body, output, newConfig)
err = s.daemon.Repositories(ctx).Import(ctx, src, repo, tag, message, r.Body, output, newConfig)
}
if err != nil {
if !output.Flushed() {
@ -184,7 +184,7 @@ func (s *Server) postImagesPush(ctx context.Context, w http.ResponseWriter, r *h
w.Header().Set("Content-Type", "application/json")
if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil {
if err := s.daemon.Repositories(ctx).Push(ctx, name, imagePushConfig); err != nil {
if !output.Flushed() {
return err
}
@ -212,7 +212,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
names = r.Form["names"]
}
if err := s.daemon.Repositories().ImageExport(names, output); err != nil {
if err := s.daemon.Repositories(ctx).ImageExport(names, output); err != nil {
if !output.Flushed() {
return err
}
@ -223,7 +223,7 @@ func (s *Server) getImagesGet(ctx context.Context, w http.ResponseWriter, r *htt
}
func (s *Server) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return s.daemon.Repositories().Load(r.Body, w)
return s.daemon.Repositories(ctx).Load(r.Body, w)
}
func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -243,7 +243,7 @@ func (s *Server) deleteImages(ctx context.Context, w http.ResponseWriter, r *htt
force := boolValue(r, "force")
prune := !boolValue(r, "noprune")
list, err := s.daemon.ImageDelete(name, force, prune)
list, err := s.daemon.ImageDelete(ctx, name, force, prune)
if err != nil {
return err
}
@ -256,7 +256,7 @@ func (s *Server) getImagesByName(ctx context.Context, w http.ResponseWriter, r *
return fmt.Errorf("Missing parameter")
}
imageInspect, err := s.daemon.Repositories().Lookup(vars["name"])
imageInspect, err := s.daemon.Repositories(ctx).Lookup(vars["name"])
if err != nil {
return err
}
@ -346,7 +346,7 @@ func (s *Server) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
}()
}
if err := builder.Build(s.daemon, buildConfig); err != nil {
if err := builder.Build(ctx, s.daemon, buildConfig); err != nil {
// Do not write the error in the http output if it's still empty.
// This prevents from writing a 200(OK) when there is an interal error.
if !output.Flushed() {
@ -364,7 +364,7 @@ func (s *Server) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *ht
}
// FIXME: The filter parameter could just be a match filter
images, err := s.daemon.Repositories().Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
images, err := s.daemon.Repositories(ctx).Images(r.Form.Get("filters"), r.Form.Get("filter"), boolValue(r, "all"))
if err != nil {
return err
}
@ -378,7 +378,7 @@ func (s *Server) getImagesHistory(ctx context.Context, w http.ResponseWriter, r
}
name := vars["name"]
history, err := s.daemon.Repositories().History(name)
history, err := s.daemon.Repositories(ctx).History(name)
if err != nil {
return err
}
@ -398,10 +398,10 @@ func (s *Server) postImagesTag(ctx context.Context, w http.ResponseWriter, r *ht
tag := r.Form.Get("tag")
force := boolValue(r, "force")
name := vars["name"]
if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil {
if err := s.daemon.Repositories(ctx).Tag(repo, tag, name, force); err != nil {
return err
}
s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "")
s.daemon.EventsService.Log(ctx, "tag", utils.ImageReference(repo, tag), "")
w.WriteHeader(http.StatusCreated)
return nil
}
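
The tag handler above now calls s.daemon.EventsService.Log(ctx, "tag", ...), passing the request context along with the event data. The events service itself is not part of this excerpt, so the sketch below is a hypothetical illustration, assuming a simplified Events/Message pair and the same stand-in requestIDKey as before, of how a logger can copy the request ID out of the context and attach it to each emitted event.

package events

import (
	"context"
	"time"
)

// ctxKey/requestIDKey are assumptions standing in for Docker's
// context.RequestID; they are not part of this diff.
type ctxKey string

const requestIDKey ctxKey = "request-id"

// Message is a simplified event record; the real event type carries more fields.
type Message struct {
	RequestID string
	Action    string
	ID        string
	From      string
	Time      int64
}

// Events is a minimal stand-in for the daemon's events service.
type Events struct {
	stream chan Message
}

// New returns an events service with a small buffered stream.
func New() *Events {
	return &Events{stream: make(chan Message, 64)}
}

// Log mirrors the new EventsService.Log(ctx, action, id, from) shape: it copies
// the request ID out of the context and stores it on the emitted event.
func (e *Events) Log(ctx context.Context, action, id, from string) {
	reqID, _ := ctx.Value(requestIDKey).(string)
	e.stream <- Message{
		RequestID: reqID,
		Action:    action,
		ID:        id,
		From:      from,
		Time:      time.Now().Unix(),
	}
}

// Subscribe exposes the stream so an events API handler could forward messages.
func (e *Events) Subscribe() <-chan Message {
	return e.stream
}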


@ -20,11 +20,11 @@ func (s *Server) getContainersByName(ctx context.Context, w http.ResponseWriter,
switch {
case version.LessThan("1.20"):
json, err = s.daemon.ContainerInspectPre120(vars["name"])
json, err = s.daemon.ContainerInspectPre120(ctx, vars["name"])
case version.Equal("1.20"):
json, err = s.daemon.ContainerInspect120(vars["name"])
json, err = s.daemon.ContainerInspect120(ctx, vars["name"])
default:
json, err = s.daemon.ContainerInspect(vars["name"])
json, err = s.daemon.ContainerInspect(ctx, vars["name"])
}
if err != nil {


@ -18,6 +18,7 @@ import (
"github.com/docker/docker/context"
"github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/sockets"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/utils"
)
@ -41,12 +42,12 @@ type Server struct {
}
// New returns a new instance of the server based on the specified configuration.
func New(cfg *Config) *Server {
func New(ctx context.Context, cfg *Config) *Server {
srv := &Server{
cfg: cfg,
start: make(chan struct{}),
}
srv.router = createRouter(srv)
srv.router = createRouter(ctx, srv)
return srv
}
@ -290,7 +291,7 @@ func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) {
return
}
func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
func (s *Server) makeHTTPHandler(ctx context.Context, localMethod string, localRoute string, localHandler HTTPAPIFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// log the handler generation
logrus.Debugf("Calling %s %s", localMethod, localRoute)
@ -302,7 +303,8 @@ func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHan
// apply to all requests. Data that is specific to the
// immediate function being called should still be passed
// as 'args' on the function call.
ctx := context.Background()
reqID := stringid.TruncateID(stringid.GenerateNonCryptoID())
ctx = context.WithValue(ctx, context.RequestID, reqID)
handlerFunc := s.handleWithGlobalMiddlewares(localHandler)
if err := handlerFunc(ctx, w, r, mux.Vars(r)); err != nil {
@ -314,7 +316,7 @@ func (s *Server) makeHTTPHandler(localMethod string, localRoute string, localHan
// createRouter initializes the main router the server uses.
// we keep enableCors just for legacy usage, need to be removed in the future
func createRouter(s *Server) *mux.Router {
func createRouter(ctx context.Context, s *Server) *mux.Router {
r := mux.NewRouter()
if os.Getenv("DEBUG") != "" {
profilerSetup(r, "/debug/")
@ -394,7 +396,7 @@ func createRouter(s *Server) *mux.Router {
localMethod := method
// build the handler function
f := s.makeHTTPHandler(localMethod, localRoute, localFct)
f := s.makeHTTPHandler(ctx, localMethod, localRoute, localFct)
// add the new route
if localRoute == "" {


@ -2,8 +2,12 @@
package server
func (s *Server) registerSubRouter() {
httpHandler := s.daemon.NetworkAPIRouter()
import (
"github.com/docker/docker/context"
)
func (s *Server) registerSubRouter(ctx context.Context) {
httpHandler := s.daemon.NetworkAPIRouter(ctx)
subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter()
subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler)


@ -2,5 +2,9 @@
package server
func (s *Server) registerSubRouter() {
import (
"github.com/docker/docker/context"
)
func (s *Server) registerSubRouter(ctx context.Context) {
}


@ -8,6 +8,7 @@ import (
"net/http"
"strconv"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/sockets"
"github.com/docker/libnetwork/portallocator"
@ -63,10 +64,10 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
// AcceptConnections allows clients to connect to the API server.
// Referenced Daemon is notified about this server, and waits for the
// daemon acknowledgement before the incoming connections are accepted.
func (s *Server) AcceptConnections(d *daemon.Daemon) {
func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
// Tell the init daemon we are accepting requests
s.daemon = d
s.registerSubRouter()
s.registerSubRouter(ctx)
go systemdDaemon.SdNotify("READY=1")
// close the lock so the listeners start accepting connections
select {


@ -7,6 +7,7 @@ import (
"net"
"net/http"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon"
)
@ -42,9 +43,9 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
}
// AcceptConnections allows router to start listening for the incoming requests.
func (s *Server) AcceptConnections(d *daemon.Daemon) {
func (s *Server) AcceptConnections(ctx context.Context, d *daemon.Daemon) {
s.daemon = d
s.registerSubRouter()
s.registerSubRouter(ctx)
// close the lock so the listeners start accepting connections
select {
case <-s.start:


@ -13,7 +13,7 @@ func (s *Server) getVolumesList(ctx context.Context, w http.ResponseWriter, r *h
return err
}
volumes, err := s.daemon.Volumes(r.Form.Get("filters"))
volumes, err := s.daemon.Volumes(ctx, r.Form.Get("filters"))
if err != nil {
return err
}
@ -25,7 +25,7 @@ func (s *Server) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *
return err
}
v, err := s.daemon.VolumeInspect(vars["name"])
v, err := s.daemon.VolumeInspect(ctx, vars["name"])
if err != nil {
return err
}
@ -46,7 +46,7 @@ func (s *Server) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r
return err
}
volume, err := s.daemon.VolumeCreate(req.Name, req.Driver, req.DriverOpts)
volume, err := s.daemon.VolumeCreate(ctx, req.Name, req.Driver, req.DriverOpts)
if err != nil {
return err
}
@ -57,7 +57,7 @@ func (s *Server) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *ht
if err := parseForm(r); err != nil {
return err
}
if err := s.daemon.VolumeRm(vars["name"]); err != nil {
if err := s.daemon.VolumeRm(ctx, vars["name"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)


@ -18,6 +18,7 @@ import (
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/nat"
@ -43,7 +44,7 @@ func nullDispatch(b *builder, args []string, attributes map[string]bool, origina
// Sets the environment variable foo to bar, also makes interpolation
// in the dockerfile available from the next statement on via ${foo}.
//
func env(b *builder, args []string, attributes map[string]bool, original string) error {
func env(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) == 0 {
return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV")
}
@ -96,13 +97,13 @@ func env(b *builder, args []string, attributes map[string]bool, original string)
j++
}
return b.commit("", b.Config.Cmd, commitStr)
return b.commit(ctx, "", b.Config.Cmd, commitStr)
}
// MAINTAINER some text <maybe@an.email.address>
//
// Sets the maintainer metadata.
func maintainer(b *builder, args []string, attributes map[string]bool, original string) error {
func maintainer(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER")
}
@ -112,14 +113,14 @@ func maintainer(b *builder, args []string, attributes map[string]bool, original
}
b.maintainer = args[0]
return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
}
// LABEL some json data describing the image
//
// Sets the Label variable foo to bar,
//
func label(b *builder, args []string, attributes map[string]bool, original string) error {
func label(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) == 0 {
return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL")
}
@ -147,7 +148,7 @@ func label(b *builder, args []string, attributes map[string]bool, original strin
b.Config.Labels[args[j]] = args[j+1]
j++
}
return b.commit("", b.Config.Cmd, commitStr)
return b.commit(ctx, "", b.Config.Cmd, commitStr)
}
// ADD foo /path
@ -155,7 +156,7 @@ func label(b *builder, args []string, attributes map[string]bool, original strin
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
// exist here. If you do not wish to have this automatic handling, use COPY.
//
func add(b *builder, args []string, attributes map[string]bool, original string) error {
func add(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) < 2 {
return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD")
}
@ -164,14 +165,14 @@ func add(b *builder, args []string, attributes map[string]bool, original string)
return err
}
return b.runContextCommand(args, true, true, "ADD")
return b.runContextCommand(ctx, args, true, true, "ADD")
}
// COPY foo /path
//
// Same as 'ADD' but without the tar and remote url handling.
//
func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error {
func dispatchCopy(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) < 2 {
return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY")
}
@ -180,14 +181,14 @@ func dispatchCopy(b *builder, args []string, attributes map[string]bool, origina
return err
}
return b.runContextCommand(args, false, false, "COPY")
return b.runContextCommand(ctx, args, false, false, "COPY")
}
// FROM imagename
//
// This sets the image the dockerfile will build on top of.
//
func from(b *builder, args []string, attributes map[string]bool, original string) error {
func from(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return derr.ErrorCodeExactlyOneArg.WithArgs("FROM")
}
@ -208,16 +209,16 @@ func from(b *builder, args []string, attributes map[string]bool, original string
return nil
}
image, err := b.Daemon.Repositories().LookupImage(name)
image, err := b.Daemon.Repositories(ctx).LookupImage(name)
if b.Pull {
image, err = b.pullImage(name)
image, err = b.pullImage(ctx, name)
if err != nil {
return err
}
}
if err != nil {
if b.Daemon.Graph().IsNotExist(err, name) {
image, err = b.pullImage(name)
if b.Daemon.Graph(ctx).IsNotExist(err, name) {
image, err = b.pullImage(ctx, name)
}
// note that the top level err will still be !nil here if IsNotExist is
@ -227,7 +228,7 @@ func from(b *builder, args []string, attributes map[string]bool, original string
}
}
return b.processImageFrom(image)
return b.processImageFrom(ctx, image)
}
// ONBUILD RUN echo yo
@ -239,7 +240,7 @@ func from(b *builder, args []string, attributes map[string]bool, original string
// special cases. search for 'OnBuild' in internals.go for additional special
// cases.
//
func onbuild(b *builder, args []string, attributes map[string]bool, original string) error {
func onbuild(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) == 0 {
return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD")
}
@ -259,14 +260,14 @@ func onbuild(b *builder, args []string, attributes map[string]bool, original str
original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")
b.Config.OnBuild = append(b.Config.OnBuild, original)
return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
}
// WORKDIR /tmp
//
// Set the working directory for future RUN/CMD/etc statements.
//
func workdir(b *builder, args []string, attributes map[string]bool, original string) error {
func workdir(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
}
@ -286,7 +287,7 @@ func workdir(b *builder, args []string, attributes map[string]bool, original str
b.Config.WorkingDir = workdir
return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
}
// RUN some command yo
@ -299,7 +300,7 @@ func workdir(b *builder, args []string, attributes map[string]bool, original str
// RUN echo hi # cmd /S /C echo hi (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *builder, args []string, attributes map[string]bool, original string) error {
func run(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if b.image == "" && !b.noBaseImage {
return derr.ErrorCodeMissingFrom
}
@ -380,7 +381,7 @@ func run(b *builder, args []string, attributes map[string]bool, original string)
}
b.Config.Cmd = saveCmd
hit, err := b.probeCache()
hit, err := b.probeCache(ctx)
if err != nil {
return err
}
@ -395,17 +396,17 @@ func run(b *builder, args []string, attributes map[string]bool, original string)
logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
c, err := b.create()
c, err := b.create(ctx)
if err != nil {
return err
}
// Ensure that we keep the container mounted until the commit
// to avoid unmounting and then mounting directly again
c.Mount()
defer c.Unmount()
c.Mount(ctx)
defer c.Unmount(ctx)
err = b.run(c)
err = b.run(ctx, c)
if err != nil {
return err
}
@ -415,7 +416,7 @@ func run(b *builder, args []string, attributes map[string]bool, original string)
// properly match it.
b.Config.Env = env
b.Config.Cmd = saveCmd
if err := b.commit(c.ID, cmd, "run"); err != nil {
if err := b.commit(ctx, c.ID, cmd, "run"); err != nil {
return err
}
@ -427,7 +428,7 @@ func run(b *builder, args []string, attributes map[string]bool, original string)
// Set the default command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
func cmd(b *builder, args []string, attributes map[string]bool, original string) error {
func cmd(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if err := b.BuilderFlags.Parse(); err != nil {
return err
}
@ -444,7 +445,7 @@ func cmd(b *builder, args []string, attributes map[string]bool, original string)
b.Config.Cmd = stringutils.NewStrSlice(cmdSlice...)
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
return err
}
@ -463,7 +464,7 @@ func cmd(b *builder, args []string, attributes map[string]bool, original string)
// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
// is initialized at NewBuilder time instead of through argument parsing.
//
func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error {
func entrypoint(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if err := b.BuilderFlags.Parse(); err != nil {
return err
}
@ -492,7 +493,7 @@ func entrypoint(b *builder, args []string, attributes map[string]bool, original
b.Config.Cmd = nil
}
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
return err
}
@ -504,7 +505,7 @@ func entrypoint(b *builder, args []string, attributes map[string]bool, original
// Expose ports for links and port mappings. This all ends up in
// b.Config.ExposedPorts for runconfig.
//
func expose(b *builder, args []string, attributes map[string]bool, original string) error {
func expose(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
portsTab := args
if len(args) == 0 {
@ -537,7 +538,7 @@ func expose(b *builder, args []string, attributes map[string]bool, original stri
i++
}
sort.Strings(portList)
return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
}
// USER foo
@ -545,7 +546,7 @@ func expose(b *builder, args []string, attributes map[string]bool, original stri
// Set the user to 'foo' for future commands and when running the
// ENTRYPOINT/CMD at container run time.
//
func user(b *builder, args []string, attributes map[string]bool, original string) error {
func user(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return derr.ErrorCodeExactlyOneArg.WithArgs("USER")
}
@ -555,14 +556,14 @@ func user(b *builder, args []string, attributes map[string]bool, original string
}
b.Config.User = args[0]
return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("USER %v", args))
}
// VOLUME /foo
//
// Expose the volume /foo for use. Will also accept the JSON array form.
//
func volume(b *builder, args []string, attributes map[string]bool, original string) error {
func volume(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) == 0 {
return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME")
}
@ -581,7 +582,7 @@ func volume(b *builder, args []string, attributes map[string]bool, original stri
}
b.Config.Volumes[v] = struct{}{}
}
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
if err := b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
return err
}
return nil
@ -590,7 +591,7 @@ func volume(b *builder, args []string, attributes map[string]bool, original stri
// STOPSIGNAL signal
//
// Set the signal that will be used to kill the container.
func stopSignal(b *builder, args []string, attributes map[string]bool, original string) error {
func stopSignal(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return fmt.Errorf("STOPSIGNAL requires exactly one argument")
}
@ -602,7 +603,7 @@ func stopSignal(b *builder, args []string, attributes map[string]bool, original
}
b.Config.StopSignal = sig
return b.commit("", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
}
// ARG name[=value]
@ -610,7 +611,7 @@ func stopSignal(b *builder, args []string, attributes map[string]bool, original
// Adds the variable foo to the trusted list of variables that can be passed
// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
// Dockerfile author may optionally set a default value of this variable.
func arg(b *builder, args []string, attributes map[string]bool, original string) error {
func arg(ctx context.Context, b *builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return fmt.Errorf("ARG requires exactly one argument definition")
}
@ -646,5 +647,5 @@ func arg(b *builder, args []string, attributes map[string]bool, original string)
b.buildArgs[name] = value
}
return b.commit("", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
return b.commit(ctx, "", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
}


@ -32,6 +32,7 @@ import (
"github.com/docker/docker/builder/command"
"github.com/docker/docker/builder/parser"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/streamformatter"
@ -57,10 +58,10 @@ var replaceEnvAllowed = map[string]struct{}{
command.Arg: {},
}
var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error
var evaluateTable map[string]func(context.Context, *builder, []string, map[string]bool, string) error
func init() {
evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{
evaluateTable = map[string]func(context.Context, *builder, []string, map[string]bool, string) error{
command.Env: env,
command.Label: label,
command.Maintainer: maintainer,
@ -158,7 +159,7 @@ type builder struct {
// processing.
// * Print a happy message and return the image ID.
//
func (b *builder) Run(context io.Reader) (string, error) {
func (b *builder) Run(ctx context.Context, context io.Reader) (string, error) {
if err := b.readContext(context); err != nil {
return "", err
}
@ -187,15 +188,15 @@ func (b *builder) Run(context io.Reader) (string, error) {
default:
// Not cancelled yet, keep going...
}
if err := b.dispatch(i, n); err != nil {
if err := b.dispatch(ctx, i, n); err != nil {
if b.ForceRemove {
b.clearTmp()
b.clearTmp(ctx)
}
return "", err
}
fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
if b.Remove {
b.clearTmp()
b.clearTmp(ctx)
}
}
@ -311,7 +312,7 @@ func (b *builder) isBuildArgAllowed(arg string) bool {
// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
// deal with that, at least until it becomes more of a general concern with new
// features.
func (b *builder) dispatch(stepN int, ast *parser.Node) error {
func (b *builder) dispatch(ctx context.Context, stepN int, ast *parser.Node) error {
cmd := ast.Value
// To ensure the user is give a decent error message if the platform
@ -404,7 +405,7 @@ func (b *builder) dispatch(stepN int, ast *parser.Node) error {
if f, ok := evaluateTable[cmd]; ok {
b.BuilderFlags = NewBFlags()
b.BuilderFlags.Args = flags
return f(b, strList, attrs, original)
return f(ctx, b, strList, attrs, original)
}
return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
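
In the builder, evaluateTable maps each Dockerfile instruction to its handler, and the change above widens every handler's signature so the context comes first and dispatch simply forwards it. Below is a small illustrative sketch of that ctx-first dispatch-table shape, with a made-up two-entry table rather than the builder's real instruction set and an assumed ctxKey type; it is not the builder's actual code.

package main

import (
	"context"
	"fmt"
	"strings"
)

// ctxKey is an assumed key type standing in for Docker's context.RequestID.
type ctxKey string

// step is the shape each handler takes after this change: the context comes
// first, followed by the parsed arguments (builder state elided here).
type step func(ctx context.Context, args []string) error

// dispatchTable mirrors evaluateTable: Dockerfile keyword -> handler.
var dispatchTable = map[string]step{
	"env": func(ctx context.Context, args []string) error {
		fmt.Printf("ENV %v (request %v)\n", args, ctx.Value(ctxKey("request-id")))
		return nil
	},
	"maintainer": func(ctx context.Context, args []string) error {
		fmt.Printf("MAINTAINER %v\n", args)
		return nil
	},
}

// dispatch forwards the request-scoped context to whichever handler matches.
func dispatch(ctx context.Context, cmd string, args []string) error {
	if f, ok := dispatchTable[cmd]; ok {
		return f(ctx, args)
	}
	return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey("request-id"), "4e1f7a")
	_ = dispatch(ctx, "env", []string{"foo", "bar"})
}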


@ -22,6 +22,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/builder/parser"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
@ -75,7 +76,7 @@ func (b *builder) readContext(context io.Reader) (err error) {
return
}
func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.StrSlice, comment string) error {
if b.disableCommit {
return nil
}
@ -92,7 +93,7 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin
}
defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
hit, err := b.probeCache()
hit, err := b.probeCache(ctx)
if err != nil {
return err
}
@ -100,18 +101,18 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin
return nil
}
container, err := b.create()
container, err := b.create(ctx)
if err != nil {
return err
}
id = container.ID
if err := container.Mount(); err != nil {
if err := container.Mount(ctx); err != nil {
return err
}
defer container.Unmount()
defer container.Unmount(ctx)
}
container, err := b.Daemon.Get(id)
container, err := b.Daemon.Get(ctx, id)
if err != nil {
return err
}
@ -127,11 +128,11 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin
}
// Commit the container
image, err := b.Daemon.Commit(container, commitCfg)
image, err := b.Daemon.Commit(ctx, container, commitCfg)
if err != nil {
return err
}
b.Daemon.Graph().Retain(b.id, image.ID)
b.Daemon.Graph(ctx).Retain(b.id, image.ID)
b.activeImages = append(b.activeImages, image.ID)
b.image = image.ID
return nil
@ -145,7 +146,7 @@ type copyInfo struct {
tmpDir string
}
func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
func (b *builder) runContextCommand(ctx context.Context, args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
if b.context == nil {
return fmt.Errorf("No context given. Impossible to use %s", cmdName)
}
@ -223,7 +224,7 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp
}
defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
hit, err := b.probeCache()
hit, err := b.probeCache(ctx)
if err != nil {
return err
}
@ -232,16 +233,16 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp
return nil
}
container, _, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
container, _, err := b.Daemon.ContainerCreate(ctx, "", b.Config, nil, true)
if err != nil {
return err
}
b.TmpContainers[container.ID] = struct{}{}
if err := container.Mount(); err != nil {
if err := container.Mount(ctx); err != nil {
return err
}
defer container.Unmount()
defer container.Unmount(ctx)
for _, ci := range copyInfos {
if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
@ -249,7 +250,7 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp
}
}
if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
if err := b.commit(ctx, container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
return err
}
return nil
@ -484,7 +485,7 @@ func containsWildcards(name string) bool {
return false
}
func (b *builder) pullImage(name string) (*image.Image, error) {
func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, error) {
remote, tag := parsers.ParseRepositoryTag(name)
if tag == "" {
tag = "latest"
@ -510,11 +511,11 @@ func (b *builder) pullImage(name string) (*image.Image, error) {
OutStream: ioutils.NopWriteCloser(b.OutOld),
}
if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
if err := b.Daemon.Repositories(ctx).Pull(ctx, remote, tag, imagePullConfig); err != nil {
return nil, err
}
image, err := b.Daemon.Repositories().LookupImage(name)
image, err := b.Daemon.Repositories(ctx).LookupImage(name)
if err != nil {
return nil, err
}
@ -522,7 +523,7 @@ func (b *builder) pullImage(name string) (*image.Image, error) {
return image, nil
}
func (b *builder) processImageFrom(img *image.Image) error {
func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error {
b.image = img.ID
if img.Config != nil {
@ -562,7 +563,7 @@ func (b *builder) processImageFrom(img *image.Image) error {
return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
}
if err := b.dispatch(i, n); err != nil {
if err := b.dispatch(ctx, i, n); err != nil {
return err
}
}
@ -576,12 +577,12 @@ func (b *builder) processImageFrom(img *image.Image) error {
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *builder) probeCache() (bool, error) {
func (b *builder) probeCache(ctx context.Context) (bool, error) {
if !b.UtilizeCache || b.cacheBusted {
return false, nil
}
cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
cache, err := b.Daemon.ImageGetCached(ctx, b.image, b.Config)
if err != nil {
return false, err
}
@ -594,12 +595,12 @@ func (b *builder) probeCache() (bool, error) {
fmt.Fprintf(b.OutStream, " ---> Using cache\n")
logrus.Debugf("[BUILDER] Use cached version")
b.image = cache.ID
b.Daemon.Graph().Retain(b.id, cache.ID)
b.Daemon.Graph(ctx).Retain(b.id, cache.ID)
b.activeImages = append(b.activeImages, cache.ID)
return true, nil
}
func (b *builder) create() (*daemon.Container, error) {
func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
if b.image == "" && !b.noBaseImage {
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
}
@ -620,7 +621,7 @@ func (b *builder) create() (*daemon.Container, error) {
config := *b.Config
// Create the container
c, warnings, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
c, warnings, err := b.Daemon.ContainerCreate(ctx, "", b.Config, hostConfig, true)
if err != nil {
return nil, err
}
@ -643,14 +644,14 @@ func (b *builder) create() (*daemon.Container, error) {
return c, nil
}
func (b *builder) run(c *daemon.Container) error {
func (b *builder) run(ctx context.Context, c *daemon.Container) error {
var errCh chan error
if b.Verbose {
errCh = c.Attach(nil, b.OutStream, b.ErrStream)
}
//start the container
if err := c.Start(); err != nil {
if err := c.Start(ctx); err != nil {
return err
}
@ -660,7 +661,7 @@ func (b *builder) run(c *daemon.Container) error {
select {
case <-b.cancelled:
logrus.Debugln("Build cancelled, killing container:", c.ID)
c.Kill()
c.Kill(ctx)
case <-finished:
}
}()
@ -791,13 +792,13 @@ func copyAsDirectory(source, destination string, destExisted bool) error {
return fixPermissions(source, destination, 0, 0, destExisted)
}
func (b *builder) clearTmp() {
func (b *builder) clearTmp(ctx context.Context) {
for c := range b.TmpContainers {
rmConfig := &daemon.ContainerRmConfig{
ForceRemove: true,
RemoveVolume: true,
}
if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
if err := b.Daemon.ContainerRm(ctx, c, rmConfig); err != nil {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
return
}


@ -14,6 +14,7 @@ import (
"github.com/docker/docker/api"
"github.com/docker/docker/builder/parser"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon"
"github.com/docker/docker/graph/tags"
"github.com/docker/docker/pkg/archive"
@ -112,7 +113,7 @@ func NewBuildConfig() *Config {
// Build is the main interface of the package, it gathers the Builder
// struct and calls builder.Run() to do all the real build job.
func Build(d *daemon.Daemon, buildConfig *Config) error {
func Build(ctx context.Context, d *daemon.Daemon, buildConfig *Config) error {
var (
repoName string
tag string
@ -229,15 +230,15 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
}
defer func() {
builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
builder.Daemon.Graph(ctx).Release(builder.id, builder.activeImages...)
}()
id, err := builder.Run(context)
id, err := builder.Run(ctx, context)
if err != nil {
return err
}
if repoName != "" {
return d.Repositories().Tag(repoName, tag, id, true)
return d.Repositories(ctx).Tag(repoName, tag, id, true)
}
return nil
}
@ -247,7 +248,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
//
// - call parse.Parse() to get AST root from Dockerfile entries
// - do build by calling builder.dispatch() to call all entries' handling routines
func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
func BuildFromConfig(ctx context.Context, d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
if err != nil {
return nil, err
@ -269,7 +270,7 @@ func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*
}
for i, n := range ast.Children {
if err := builder.dispatch(i, n); err != nil {
if err := builder.dispatch(ctx, i, n); err != nil {
return nil, err
}
}
@ -289,8 +290,8 @@ type CommitConfig struct {
}
// Commit will create a new image from a container's changes
func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
container, err := d.Get(name)
func Commit(ctx context.Context, name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
container, err := d.Get(ctx, name)
if err != nil {
return "", err
}
@ -304,7 +305,7 @@ func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
c.Config = &runconfig.Config{}
}
newConfig, err := BuildFromConfig(d, c.Config, c.Changes)
newConfig, err := BuildFromConfig(ctx, d, c.Config, c.Changes)
if err != nil {
return "", err
}
@ -322,7 +323,7 @@ func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
Config: newConfig,
}
img, err := d.Commit(container, commitCfg)
img, err := d.Commit(ctx, container, commitCfg)
if err != nil {
return "", err
}


@ -8,6 +8,7 @@ import (
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/ioutils"
@ -20,8 +21,8 @@ var ErrExtractPointNotDirectory = errors.New("extraction point is not a director
// ContainerCopy performs a deprecated operation of archiving the resource at
// the specified path in the conatiner identified by the given name.
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerCopy(ctx context.Context, name string, res string) (io.ReadCloser, error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, err
}
@ -30,30 +31,30 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err
res = res[1:]
}
return container.copy(res)
return container.copy(ctx, res)
}
// ContainerStatPath stats the filesystem resource at the specified path in the
// container identified by the given name.
func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerStatPath(ctx context.Context, name string, path string) (stat *types.ContainerPathStat, err error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, err
}
return container.StatPath(path)
return container.StatPath(ctx, path)
}
// ContainerArchivePath creates an archive of the filesystem resource at the
// specified path in the container identified by the given name. Returns a
// tar archive of the resource and whether it was a directory or a single file.
func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerArchivePath(ctx context.Context, name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, nil, err
}
return container.ArchivePath(path)
return container.ArchivePath(ctx, path)
}
// ContainerExtractToDir extracts the given archive to the specified location
@ -62,13 +63,13 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io
// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will
// be an error if unpacking the given content would cause an existing directory
// to be replaced with a non-directory and vice versa.
func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerExtractToDir(ctx context.Context, name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
return container.ExtractToDir(path, noOverwriteDirNonDir, content)
return container.ExtractToDir(ctx, path, noOverwriteDirNonDir, content)
}
// resolvePath resolves the given path in the container to a resource on the
@ -133,14 +134,14 @@ func (container *Container) statPath(resolvedPath, absPath string) (stat *types.
// StatPath stats the filesystem resource at the specified path in this
// container. Returns stat info about the resource.
func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {
func (container *Container) StatPath(ctx context.Context, path string) (stat *types.ContainerPathStat, err error) {
container.Lock()
defer container.Unlock()
if err = container.Mount(); err != nil {
if err = container.Mount(ctx); err != nil {
return nil, err
}
defer container.Unmount()
defer container.Unmount(ctx)
err = container.mountVolumes()
defer container.unmountVolumes(true)
@ -159,7 +160,7 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat
// ArchivePath creates an archive of the filesystem resource at the specified
// path in this container. Returns a tar archive of the resource and stat info
// about the resource.
func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
func (container *Container) ArchivePath(ctx context.Context, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container.Lock()
defer func() {
@ -171,7 +172,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
}
}()
if err = container.Mount(); err != nil {
if err = container.Mount(ctx); err != nil {
return nil, nil, err
}
@ -180,7 +181,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
// unmount any volumes
container.unmountVolumes(true)
// unmount the container's rootfs
container.Unmount()
container.Unmount(ctx)
}
}()
@ -214,12 +215,12 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
content = ioutils.NewReadCloserWrapper(data, func() error {
err := data.Close()
container.unmountVolumes(true)
container.Unmount()
container.Unmount(ctx)
container.Unlock()
return err
})
container.logEvent("archive-path")
container.logEvent(ctx, "archive-path")
return content, stat, nil
}
@ -230,14 +231,14 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
// noOverwriteDirNonDir is true then it will be an error if unpacking the
// given content would cause an existing directory to be replaced with a non-
// directory and vice versa.
func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
func (container *Container) ExtractToDir(ctx context.Context, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
container.Lock()
defer container.Unlock()
if err = container.Mount(); err != nil {
if err = container.Mount(ctx); err != nil {
return err
}
defer container.Unmount()
defer container.Unmount(ctx)
err = container.mountVolumes()
defer container.unmountVolumes(true)
@ -318,7 +319,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
return err
}
container.logEvent("extract-to-dir")
container.logEvent(ctx, "extract-to-dir")
return nil
}


@ -3,6 +3,7 @@ package daemon
import (
"io"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/stdcopy"
)
@ -15,8 +16,8 @@ type ContainerAttachWithLogsConfig struct {
}
// ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig.
func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerAttachWithLogsConfig) error {
container, err := daemon.Get(prefixOrName)
func (daemon *Daemon) ContainerAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerAttachWithLogsConfig) error {
container, err := daemon.Get(ctx, prefixOrName)
if err != nil {
return err
}
@ -43,7 +44,7 @@ func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerA
stderr = errStream
}
return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
return container.attachWithLogs(ctx, stdin, stdout, stderr, c.Logs, c.Stream)
}
// ContainerWsAttachWithLogsConfig attach with websockets, since all
@ -55,10 +56,10 @@ type ContainerWsAttachWithLogsConfig struct {
}
// ContainerWsAttachWithLogs websocket connection
func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
container, err := daemon.Get(prefixOrName)
func (daemon *Daemon) ContainerWsAttachWithLogs(ctx context.Context, prefixOrName string, c *ContainerWsAttachWithLogsConfig) error {
container, err := daemon.Get(ctx, prefixOrName)
if err != nil {
return err
}
return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
return container.attachWithLogs(ctx, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
}


@ -1,10 +1,13 @@
package daemon
import "github.com/docker/docker/pkg/archive"
import (
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/archive"
)
// ContainerChanges returns a list of container fs changes
func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerChanges(ctx context.Context, name string) ([]archive.Change, error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, err
}


@ -1,6 +1,7 @@
package daemon
import (
"github.com/docker/docker/context"
"github.com/docker/docker/image"
"github.com/docker/docker/runconfig"
)
@ -18,10 +19,10 @@ type ContainerCommitConfig struct {
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository.
func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
func (daemon *Daemon) Commit(ctx context.Context, container *Container, c *ContainerCommitConfig) (*image.Image, error) {
if c.Pause && !container.isPaused() {
container.pause()
defer container.unpause()
container.pause(ctx)
defer container.unpause(ctx)
}
rwTar, err := container.exportContainerRw()
@ -46,6 +47,6 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i
return img, err
}
}
container.logEvent("commit")
container.logEvent(ctx, "commit")
return img, nil
}


@ -15,6 +15,7 @@ import (
"github.com/opencontainers/runc/libcontainer/label"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/logger/jsonfilelog"
@ -170,9 +171,10 @@ func (container *Container) writeHostConfig() error {
return ioutil.WriteFile(pth, data, 0666)
}
func (container *Container) logEvent(action string) {
func (container *Container) logEvent(ctx context.Context, action string) {
d := container.daemon
d.EventsService.Log(
ctx,
action,
container.ID,
container.Config.Image,
@ -238,7 +240,7 @@ func (container *Container) exportContainerRw() (archive.Archive, error) {
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (container *Container) Start() (err error) {
func (container *Container) Start(ctx context.Context) (err error) {
container.Lock()
defer container.Unlock()
@ -260,12 +262,12 @@ func (container *Container) Start() (err error) {
container.ExitCode = 128
}
container.toDisk()
container.cleanup()
container.logEvent("die")
container.cleanup(ctx)
container.logEvent(ctx, "die")
}
}()
if err := container.Mount(); err != nil {
if err := container.Mount(ctx); err != nil {
return err
}
@ -273,10 +275,10 @@ func (container *Container) Start() (err error) {
// backwards API compatibility.
container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig)
if err := container.initializeNetworking(); err != nil {
if err := container.initializeNetworking(ctx); err != nil {
return err
}
linkedEnv, err := container.setupLinkedContainers()
linkedEnv, err := container.setupLinkedContainers(ctx)
if err != nil {
return err
}
@ -284,7 +286,7 @@ func (container *Container) Start() (err error) {
return err
}
env := container.createDaemonEnvironment(linkedEnv)
if err := populateCommand(container, env); err != nil {
if err := populateCommand(ctx, container, env); err != nil {
return err
}
@ -301,7 +303,7 @@ func (container *Container) Start() (err error) {
mounts = append(mounts, container.ipcMounts()...)
container.command.Mounts = mounts
return container.waitForStart()
return container.waitForStart(ctx)
}
// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
@ -334,14 +336,14 @@ func (container *Container) isNetworkAllocated() bool {
// cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (container *Container) cleanup() {
func (container *Container) cleanup(ctx context.Context) {
container.releaseNetwork()
if err := container.unmountIpcMounts(); err != nil {
logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
}
if err := container.Unmount(); err != nil {
if err := container.Unmount(ctx); err != nil {
logrus.Errorf("%s: Failed to umount filesystem: %v", container.ID, err)
}
@ -357,7 +359,7 @@ func (container *Container) cleanup() {
// to send the signal. An error is returned if the container is paused
// or not running, or if there is a problem returned from the
// underlying kill command.
func (container *Container) killSig(sig int) error {
func (container *Container) killSig(ctx context.Context, sig int) error {
logrus.Debugf("Sending %d to %s", sig, container.ID)
container.Lock()
defer container.Unlock()
@ -385,13 +387,13 @@ func (container *Container) killSig(sig int) error {
if err := container.daemon.kill(container, sig); err != nil {
return err
}
container.logEvent("kill")
container.logEvent(ctx, "kill")
return nil
}
// Wrapper around killSig() suppressing "no such process" error.
func (container *Container) killPossiblyDeadProcess(sig int) error {
err := container.killSig(sig)
func (container *Container) killPossiblyDeadProcess(ctx context.Context, sig int) error {
err := container.killSig(ctx, sig)
if err == syscall.ESRCH {
logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
return nil
@ -399,7 +401,7 @@ func (container *Container) killPossiblyDeadProcess(sig int) error {
return err
}
func (container *Container) pause() error {
func (container *Container) pause(ctx context.Context) error {
container.Lock()
defer container.Unlock()
@ -417,11 +419,11 @@ func (container *Container) pause() error {
return err
}
container.Paused = true
container.logEvent("pause")
container.logEvent(ctx, "pause")
return nil
}
func (container *Container) unpause() error {
func (container *Container) unpause(ctx context.Context) error {
container.Lock()
defer container.Unlock()
@ -439,18 +441,18 @@ func (container *Container) unpause() error {
return err
}
container.Paused = false
container.logEvent("unpause")
container.logEvent(ctx, "unpause")
return nil
}
// Kill forcefully terminates a container.
func (container *Container) Kill() error {
func (container *Container) Kill(ctx context.Context) error {
if !container.IsRunning() {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// 1. Send SIGKILL
if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
if err := container.killPossiblyDeadProcess(ctx, int(syscall.SIGKILL)); err != nil {
// While normally we might "return err" here we're not going to
// because if we can't stop the container by this point then
// it's probably because it's already stopped. Meaning, between
@ -484,15 +486,15 @@ func (container *Container) Kill() error {
// process to exit. If a negative duration is given, Stop will wait
// for the initial signal forever. If the container is not running Stop returns
// immediately.
func (container *Container) Stop(seconds int) error {
func (container *Container) Stop(ctx context.Context, seconds int) error {
if !container.IsRunning() {
return nil
}
// 1. Send a SIGTERM
if err := container.killPossiblyDeadProcess(container.stopSignal()); err != nil {
if err := container.killPossiblyDeadProcess(ctx, container.stopSignal()); err != nil {
logrus.Infof("Failed to send SIGTERM to the process, force killing")
if err := container.killPossiblyDeadProcess(9); err != nil {
if err := container.killPossiblyDeadProcess(ctx, 9); err != nil {
return err
}
}
@ -501,13 +503,13 @@ func (container *Container) Stop(seconds int) error {
if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := container.Kill(); err != nil {
if err := container.Kill(ctx); err != nil {
container.WaitStop(-1 * time.Second)
return err
}
}
container.logEvent("stop")
container.logEvent(ctx, "stop")
return nil
}
@ -515,61 +517,61 @@ func (container *Container) Stop(seconds int) error {
// container. When stopping, wait for the given duration in seconds to
// gracefully stop, before forcefully terminating the container. If
// given a negative duration, wait forever for a graceful stop.
func (container *Container) Restart(seconds int) error {
func (container *Container) Restart(ctx context.Context, seconds int) error {
// Avoid unnecessarily unmounting and then directly mounting
// the container when the container stops and then starts
// again
if err := container.Mount(); err == nil {
defer container.Unmount()
if err := container.Mount(ctx); err == nil {
defer container.Unmount(ctx)
}
if err := container.Stop(seconds); err != nil {
if err := container.Stop(ctx, seconds); err != nil {
return err
}
if err := container.Start(); err != nil {
if err := container.Start(ctx); err != nil {
return err
}
container.logEvent("restart")
container.logEvent(ctx, "restart")
return nil
}
// Resize changes the TTY of the process running inside the container
// to the given height and width. The container must be running.
func (container *Container) Resize(h, w int) error {
func (container *Container) Resize(ctx context.Context, h, w int) error {
if !container.IsRunning() {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
return err
}
container.logEvent("resize")
container.logEvent(ctx, "resize")
return nil
}
func (container *Container) export() (archive.Archive, error) {
if err := container.Mount(); err != nil {
func (container *Container) export(ctx context.Context) (archive.Archive, error) {
if err := container.Mount(ctx); err != nil {
return nil, err
}
archive, err := archive.Tar(container.basefs, archive.Uncompressed)
if err != nil {
container.Unmount()
container.Unmount(ctx)
return nil, err
}
arch := ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.Unmount()
container.Unmount(ctx)
return err
})
container.logEvent("export")
container.logEvent(ctx, "export")
return arch, err
}
// Mount sets container.basefs
func (container *Container) Mount() error {
return container.daemon.Mount(container)
func (container *Container) Mount(ctx context.Context) error {
return container.daemon.Mount(ctx, container)
}
func (container *Container) changes() ([]archive.Change, error) {
@ -578,7 +580,7 @@ func (container *Container) changes() ([]archive.Change, error) {
return container.daemon.changes(container)
}
func (container *Container) getImage() (*image.Image, error) {
func (container *Container) getImage(ctx context.Context) (*image.Image, error) {
if container.daemon == nil {
return nil, derr.ErrorCodeImageUnregContainer
}
@ -587,7 +589,7 @@ func (container *Container) getImage() (*image.Image, error) {
// Unmount asks the daemon to release the layered filesystems that are
// mounted by the container.
func (container *Container) Unmount() error {
func (container *Container) Unmount(ctx context.Context) error {
return container.daemon.unmount(container)
}
@ -612,7 +614,7 @@ func validateID(id string) error {
return nil
}
func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
func (container *Container) copy(ctx context.Context, resource string) (rc io.ReadCloser, err error) {
container.Lock()
defer func() {
@ -624,7 +626,7 @@ func (container *Container) copy(resource string) (rc io.ReadCloser, err error)
}
}()
if err := container.Mount(); err != nil {
if err := container.Mount(ctx); err != nil {
return nil, err
}
@ -633,7 +635,7 @@ func (container *Container) copy(resource string) (rc io.ReadCloser, err error)
// unmount any volumes
container.unmountVolumes(true)
// unmount the container's rootfs
container.Unmount()
container.Unmount(ctx)
}
}()
@ -669,11 +671,11 @@ func (container *Container) copy(resource string) (rc io.ReadCloser, err error)
reader := ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.unmountVolumes(true)
container.Unmount()
container.Unmount(ctx)
container.Unlock()
return err
})
container.logEvent("copy")
container.logEvent(ctx, "copy")
return reader, nil
}
@ -752,14 +754,14 @@ func (container *Container) startLogging() error {
return nil
}
func (container *Container) waitForStart() error {
func (container *Container) waitForStart(ctx context.Context) error {
container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
// block until we either receive an error from the initial start of the container's
// process or until the process is running in the container
select {
case <-container.monitor.startSignal:
case err := <-promise.Go(container.monitor.Start):
case err := <-promise.Go(func() error { return container.monitor.Start(ctx) }):
return err
}
@ -790,11 +792,11 @@ func (container *Container) getExecIDs() []string {
return container.execCommands.List()
}
func (container *Container) exec(ExecConfig *ExecConfig) error {
func (container *Container) exec(ctx context.Context, ExecConfig *ExecConfig) error {
container.Lock()
defer container.Unlock()
callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
callback := func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
if processConfig.Tty {
// The callback is called after the process Start()
// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
@ -809,7 +811,7 @@ func (container *Container) exec(ExecConfig *ExecConfig) error {
// We use a callback here instead of a goroutine and a chan for
// synchronization purposes
cErr := promise.Go(func() error { return container.monitorExec(ExecConfig, callback) })
cErr := promise.Go(func() error { return container.monitorExec(ctx, ExecConfig, callback) })
// Exec should not return until the process is actually running
select {
@ -821,13 +823,13 @@ func (container *Container) exec(ExecConfig *ExecConfig) error {
return nil
}
func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
func (container *Container) monitorExec(ctx context.Context, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
var (
err error
exitCode int
)
pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
exitCode, err = container.daemon.Exec(ctx, container, ExecConfig, pipes, callback)
if err != nil {
logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
}
@ -860,7 +862,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr
return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
}
func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
func (container *Container) attachWithLogs(ctx context.Context, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
if logs {
logDriver, err := container.getLogger()
if err != nil {
@ -892,7 +894,7 @@ func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr i
}
}
container.logEvent("attach")
container.logEvent(ctx, "attach")
//stream
if stream {
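
Illustrative sketch, not part of this diff: with Stop and Start now taking the context, the "stop", "die" and "restart" events emitted along a restart path all carry the same request ID. The helper name below is hypothetical.

package daemon

import "github.com/docker/docker/context"

// restartForRequest is purely illustrative: it mirrors Restart's shape to show
// how one request-scoped context flows through every event logged on the way.
func restartForRequest(ctx context.Context, container *Container, timeout int) error {
	if err := container.Stop(ctx, timeout); err != nil {
		return err
	}
	return container.Start(ctx)
}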

View file

@ -15,6 +15,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/links"
"github.com/docker/docker/daemon/network"
@ -77,12 +78,12 @@ func killProcessDirectly(container *Container) error {
return nil
}
func (container *Container) setupLinkedContainers() ([]string, error) {
func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
var (
env []string
daemon = container.daemon
)
children, err := daemon.children(container.Name)
children, err := daemon.children(ctx, container.Name)
if err != nil {
return nil, err
}
@ -175,7 +176,7 @@ func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.
return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err)
}
func populateCommand(c *Container, env []string) error {
func populateCommand(ctx context.Context, c *Container, env []string) error {
var en *execdriver.Network
if !c.Config.NetworkDisabled {
en = &execdriver.Network{}
@ -185,7 +186,7 @@ func populateCommand(c *Container, env []string) error {
parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
if parts[0] == "container" {
nc, err := c.getNetworkedContainer()
nc, err := c.getNetworkedContainer(ctx)
if err != nil {
return err
}
@ -206,7 +207,7 @@ func populateCommand(c *Container, env []string) error {
}
if c.hostConfig.IpcMode.IsContainer() {
ic, err := c.getIpcContainer()
ic, err := c.getIpcContainer(ctx)
if err != nil {
return err
}
@ -349,18 +350,18 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi
}
// GetSize returns the real size & virtual size of the container.
func (container *Container) getSize() (int64, int64) {
func (container *Container) getSize(ctx context.Context) (int64, int64) {
var (
sizeRw, sizeRootfs int64
err error
driver = container.daemon.driver
)
if err := container.Mount(); err != nil {
if err := container.Mount(ctx); err != nil {
logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
return sizeRw, sizeRootfs
}
defer container.Unmount()
defer container.Unmount(ctx)
initID := fmt.Sprintf("%s-init", container.ID)
sizeRw, err = driver.DiffSize(container.ID, initID)
@ -412,7 +413,7 @@ func (container *Container) buildHostnameFile() error {
return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
}
func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, error) {
func (container *Container) buildSandboxOptions(ctx context.Context) ([]libnetwork.SandboxOption, error) {
var (
sboxOptions []libnetwork.SandboxOption
err error
@ -489,7 +490,7 @@ func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, e
var childEndpoints, parentEndpoints []string
children, err := container.daemon.children(container.Name)
children, err := container.daemon.children(ctx, container.Name)
if err != nil {
return nil, err
}
@ -520,7 +521,7 @@ func (container *Container) buildSandboxOptions() ([]libnetwork.SandboxOption, e
continue
}
c, err := container.daemon.Get(ref.ParentID)
c, err := container.daemon.Get(ctx, ref.ParentID)
if err != nil {
logrus.Error(err)
}
@ -679,7 +680,7 @@ func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox)
// UpdateNetwork is used to update the container's network (e.g. when linked containers
// get removed/unlinked).
func (container *Container) updateNetwork() error {
func (container *Container) updateNetwork(ctx context.Context) error {
ctrl := container.daemon.netController
sid := container.NetworkSettings.SandboxID
@ -688,7 +689,7 @@ func (container *Container) updateNetwork() error {
return derr.ErrorCodeNoSandbox.WithArgs(sid, err)
}
options, err := container.buildSandboxOptions()
options, err := container.buildSandboxOptions(ctx)
if err != nil {
return derr.ErrorCodeNetworkUpdate.WithArgs(err)
}
@ -812,7 +813,7 @@ func createNetwork(controller libnetwork.NetworkController, dnet string, driver
return controller.NewNetwork(driver, dnet, createOptions...)
}
func (container *Container) secondaryNetworkRequired(primaryNetworkType string) bool {
func (container *Container) secondaryNetworkRequired(ctx context.Context, primaryNetworkType string) bool {
switch primaryNetworkType {
case "bridge", "none", "host", "container":
return false
@ -831,7 +832,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string)
return false
}
func (container *Container) allocateNetwork() error {
func (container *Container) allocateNetwork(ctx context.Context) error {
mode := container.hostConfig.NetworkMode
controller := container.daemon.netController
if container.Config.NetworkDisabled || mode.IsContainer() {
@ -865,21 +866,21 @@ func (container *Container) allocateNetwork() error {
service = strings.Replace(service, "/", "", -1)
}
if container.secondaryNetworkRequired(networkDriver) {
if container.secondaryNetworkRequired(ctx, networkDriver) {
// Configure Bridge as secondary network for port binding purposes
if err := container.configureNetwork("bridge", service, "bridge", false); err != nil {
if err := container.configureNetwork(ctx, "bridge", service, "bridge", false); err != nil {
return err
}
}
if err := container.configureNetwork(networkName, service, networkDriver, mode.IsDefault()); err != nil {
if err := container.configureNetwork(ctx, networkName, service, networkDriver, mode.IsDefault()); err != nil {
return err
}
return container.writeHostConfig()
}
func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
func (container *Container) configureNetwork(ctx context.Context, networkName, service, networkDriver string, canCreateNetwork bool) error {
controller := container.daemon.netController
n, err := controller.NetworkByName(networkName)
@ -923,7 +924,7 @@ func (container *Container) configureNetwork(networkName, service, networkDriver
return false
})
if sb == nil {
options, err := container.buildSandboxOptions()
options, err := container.buildSandboxOptions(ctx)
if err != nil {
return err
}
@ -946,12 +947,12 @@ func (container *Container) configureNetwork(networkName, service, networkDriver
return nil
}
func (container *Container) initializeNetworking() error {
func (container *Container) initializeNetworking(ctx context.Context) error {
var err error
if container.hostConfig.NetworkMode.IsContainer() {
// we need to get the hosts files from the container to join
nc, err := container.getNetworkedContainer()
nc, err := container.getNetworkedContainer(ctx)
if err != nil {
return err
}
@ -977,7 +978,7 @@ func (container *Container) initializeNetworking() error {
}
if err := container.allocateNetwork(); err != nil {
if err := container.allocateNetwork(ctx); err != nil {
return err
}
@ -998,9 +999,9 @@ func (container *Container) setNetworkNamespaceKey(pid int) error {
return sandbox.SetKey(path)
}
func (container *Container) getIpcContainer() (*Container, error) {
func (container *Container) getIpcContainer(ctx context.Context) (*Container, error) {
containerID := container.hostConfig.IpcMode.Container()
c, err := container.daemon.Get(containerID)
c, err := container.daemon.Get(ctx, containerID)
if err != nil {
return nil, err
}
@ -1036,14 +1037,14 @@ func (container *Container) setupWorkingDirectory() error {
return nil
}
func (container *Container) getNetworkedContainer() (*Container, error) {
func (container *Container) getNetworkedContainer(ctx context.Context) (*Container, error) {
parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
switch parts[0] {
case "container":
if len(parts) != 2 {
return nil, derr.ErrorCodeParseContainer
}
nc, err := container.daemon.Get(parts[1])
nc, err := container.daemon.Get(ctx, parts[1])
if err != nil {
return nil, err
}

View file

@ -5,6 +5,7 @@ package daemon
import (
"strings"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
derr "github.com/docker/docker/errors"
)
@ -25,7 +26,7 @@ func killProcessDirectly(container *Container) error {
return nil
}
func (container *Container) setupLinkedContainers() ([]string, error) {
func (container *Container) setupLinkedContainers(ctx context.Context) ([]string, error) {
return nil, nil
}
@ -34,7 +35,7 @@ func (container *Container) createDaemonEnvironment(linkedEnv []string) []string
return container.Config.Env
}
func (container *Container) initializeNetworking() error {
func (container *Container) initializeNetworking(ctx context.Context) error {
return nil
}
@ -42,7 +43,7 @@ func (container *Container) setupWorkingDirectory() error {
return nil
}
func populateCommand(c *Container, env []string) error {
func populateCommand(ctx context.Context, c *Container, env []string) error {
en := &execdriver.Network{
Interface: nil,
}
@ -135,7 +136,7 @@ func populateCommand(c *Container, env []string) error {
}
// GetSize returns real size & virtual size
func (container *Container) getSize() (int64, int64) {
func (container *Container) getSize(ctx context.Context) (int64, int64) {
// TODO Windows
return 0, 0
}
@ -150,7 +151,7 @@ func (container *Container) allocateNetwork() error {
return nil
}
func (container *Container) updateNetwork() error {
func (container *Container) updateNetwork(ctx context.Context) error {
return nil
}

View file

@ -5,6 +5,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/graph/tags"
"github.com/docker/docker/image"
@ -15,21 +16,21 @@ import (
)
// ContainerCreate takes configs and creates a container.
func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) {
func (daemon *Daemon) ContainerCreate(ctx context.Context, name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) {
if config == nil {
return nil, nil, derr.ErrorCodeEmptyConfig
}
warnings, err := daemon.verifyContainerSettings(hostConfig, config)
warnings, err := daemon.verifyContainerSettings(ctx, hostConfig, config)
if err != nil {
return nil, warnings, err
}
daemon.adaptContainerSettings(hostConfig, adjustCPUShares)
container, buildWarnings, err := daemon.Create(config, hostConfig, name)
container, buildWarnings, err := daemon.Create(ctx, config, hostConfig, name)
if err != nil {
if daemon.Graph().IsNotExist(err, config.Image) {
if daemon.Graph(ctx).IsNotExist(err, config.Image) {
if strings.Contains(config.Image, "@") {
return nil, warnings, derr.ErrorCodeNoSuchImageHash.WithArgs(config.Image)
}
@ -48,7 +49,7 @@ func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hos
}
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
func (daemon *Daemon) Create(ctx context.Context, config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
var (
container *Container
warnings []string
@ -76,29 +77,29 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
hostConfig = &runconfig.HostConfig{}
}
if hostConfig.SecurityOpt == nil {
hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(ctx, hostConfig.IpcMode, hostConfig.PidMode)
if err != nil {
return nil, nil, err
}
}
if container, err = daemon.newContainer(name, config, imgID); err != nil {
if container, err = daemon.newContainer(ctx, name, config, imgID); err != nil {
return nil, nil, err
}
defer func() {
if retErr != nil {
if err := daemon.rm(container, false); err != nil {
if err := daemon.rm(ctx, container, false); err != nil {
logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
}
}
}()
if err := daemon.Register(container); err != nil {
if err := daemon.Register(ctx, container); err != nil {
return nil, nil, err
}
if err := daemon.createRootfs(container); err != nil {
return nil, nil, err
}
if err := daemon.setHostConfig(container, hostConfig); err != nil {
if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
return nil, nil, err
}
defer func() {
@ -108,10 +109,10 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
}
}
}()
if err := container.Mount(); err != nil {
if err := container.Mount(ctx); err != nil {
return nil, nil, err
}
defer container.Unmount()
defer container.Unmount(ctx)
if err := createContainerPlatformSpecificSettings(container, config, hostConfig, img); err != nil {
return nil, nil, err
@ -121,16 +122,16 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
logrus.Errorf("Error saving new container to disk: %v", err)
return nil, nil, err
}
container.logEvent("create")
container.logEvent(ctx, "create")
return container, warnings, nil
}
func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
func (daemon *Daemon) generateSecurityOpt(ctx context.Context, ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
if ipcMode.IsHost() || pidMode.IsHost() {
return label.DisableSecOpt(), nil
}
if ipcContainer := ipcMode.Container(); ipcContainer != "" {
c, err := daemon.Get(ipcContainer)
c, err := daemon.Get(ctx, ipcContainer)
if err != nil {
return nil, err
}
@ -142,7 +143,7 @@ func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run
// VolumeCreate creates a volume with the specified name, driver, and opts
// This is called directly from the remote API
func (daemon *Daemon) VolumeCreate(name, driverName string, opts map[string]string) (*types.Volume, error) {
func (daemon *Daemon) VolumeCreate(ctx context.Context, name, driverName string, opts map[string]string) (*types.Volume, error) {
if name == "" {
name = stringid.GenerateNonCryptoID()
}
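
A minimal sketch, not part of this diff: ContainerCreate now takes the request-scoped context as its first argument, so the "create" event logged at the end of Create is stamped with the caller's request ID. The wrapper function below is hypothetical.

package daemon

import (
	"github.com/docker/docker/context"
	"github.com/docker/docker/runconfig"
)

// createForRequest is a hypothetical wrapper showing the new call shape;
// warnings are returned to the API client in the real code path.
func createForRequest(ctx context.Context, d *Daemon, name string, cfg *runconfig.Config) (*Container, []string, error) {
	return d.ContainerCreate(ctx, name, cfg, &runconfig.HostConfig{}, false)
}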

View file

@ -20,6 +20,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/execdrivers"
@ -127,14 +128,14 @@ type Daemon struct {
// - A partial container ID prefix (e.g. short ID) of any length that is
// unique enough to only return a single container object
// If none of these searches succeed, an error is returned
func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
func (daemon *Daemon) Get(ctx context.Context, prefixOrName string) (*Container, error) {
if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
// prefix is an exact match to a full container ID
return containerByID, nil
}
// GetByName will match only an exact name provided; we ignore errors
if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
if containerByName, _ := daemon.GetByName(ctx, prefixOrName); containerByName != nil {
// prefix is an exact match to a full container Name
return containerByName, nil
}
@ -152,8 +153,8 @@ func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
// Exists returns a true if a container of the specified ID or name exists,
// false otherwise.
func (daemon *Daemon) Exists(id string) bool {
c, _ := daemon.Get(id)
func (daemon *Daemon) Exists(ctx context.Context, id string) bool {
c, _ := daemon.Get(ctx, id)
return c != nil
}
@ -178,8 +179,8 @@ func (daemon *Daemon) load(id string) (*Container, error) {
}
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(container *Container) error {
if container.daemon != nil || daemon.Exists(container.ID) {
func (daemon *Daemon) Register(ctx context.Context, container *Container) error {
if container.daemon != nil || daemon.Exists(ctx, container.ID) {
return fmt.Errorf("Container is already loaded")
}
if err := validateID(container.ID); err != nil {
@ -217,10 +218,7 @@ func (daemon *Daemon) Register(container *Container) error {
}
daemon.execDriver.Terminate(cmd)
if err := container.unmountIpcMounts(); err != nil {
logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
}
if err := container.Unmount(); err != nil {
if err := container.Unmount(ctx); err != nil {
logrus.Debugf("unmount error %s", err)
}
if err := container.toDiskLocking(); err != nil {
@ -254,7 +252,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
return nil
}
func (daemon *Daemon) restore() error {
func (daemon *Daemon) restore(ctx context.Context) error {
type cr struct {
container *Container
registered bool
@ -324,7 +322,7 @@ func (daemon *Daemon) restore() error {
}
}
if err := daemon.Register(container); err != nil {
if err := daemon.Register(ctx, container); err != nil {
logrus.Errorf("Failed to register container %s: %s", container.ID, err)
// A container that failed to register should not be started.
return
@ -335,7 +333,7 @@ func (daemon *Daemon) restore() error {
if daemon.configStore.AutoRestart && container.shouldRestart() {
logrus.Debugf("Starting container %s", container.ID)
if err := container.Start(); err != nil {
if err := container.Start(ctx); err != nil {
logrus.Errorf("Failed to start container %s: %s", container.ID, err)
}
}
@ -365,7 +363,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
return nil
}
func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
func (daemon *Daemon) generateIDAndName(ctx context.Context, name string) (string, string, error) {
var (
err error
id = stringid.GenerateNonCryptoID()
@ -378,14 +376,14 @@ func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
return id, name, nil
}
if name, err = daemon.reserveName(id, name); err != nil {
if name, err = daemon.reserveName(ctx, id, name); err != nil {
return "", "", err
}
return id, name, nil
}
func (daemon *Daemon) reserveName(id, name string) (string, error) {
func (daemon *Daemon) reserveName(ctx context.Context, id, name string) (string, error) {
if !validContainerNamePattern.MatchString(name) {
return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
}
@ -399,7 +397,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
return "", err
}
conflictingContainer, err := daemon.GetByName(name)
conflictingContainer, err := daemon.GetByName(ctx, name)
if err != nil {
if strings.Contains(err.Error(), "Could not find entity") {
return "", err
@ -469,12 +467,12 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlic
return entrypoint, args
}
func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) {
func (daemon *Daemon) newContainer(ctx context.Context, name string, config *runconfig.Config, imgID string) (*Container, error) {
var (
id string
err error
)
id, name, err = daemon.generateIDAndName(name)
id, name, err = daemon.generateIDAndName(ctx, name)
if err != nil {
return nil, err
}
@ -511,7 +509,7 @@ func GetFullContainerName(name string) (string, error) {
}
// GetByName returns a container given a name.
func (daemon *Daemon) GetByName(name string) (*Container, error) {
func (daemon *Daemon) GetByName(ctx context.Context, name string) (*Container, error) {
fullName, err := GetFullContainerName(name)
if err != nil {
return nil, err
@ -530,7 +528,7 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) {
// children returns all child containers of the container with the
// given name. The containers are returned as a map from the container
// name to a pointer to Container.
func (daemon *Daemon) children(name string) (map[string]*Container, error) {
func (daemon *Daemon) children(ctx context.Context, name string) (map[string]*Container, error) {
name, err := GetFullContainerName(name)
if err != nil {
return nil, err
@ -538,7 +536,7 @@ func (daemon *Daemon) children(name string) (map[string]*Container, error) {
children := make(map[string]*Container)
err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
c, err := daemon.Get(e.ID())
c, err := daemon.Get(ctx, e.ID())
if err != nil {
return err
}
@ -574,7 +572,7 @@ func (daemon *Daemon) registerLink(parent, child *Container, alias string) error
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
func NewDaemon(ctx context.Context, config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
setDefaultMtu(config)
// Ensure we have compatible configuration options
@ -642,7 +640,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
// Ensure the graph driver is shutdown at a later point
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
if err := d.Shutdown(ctx); err != nil {
logrus.Error(err)
}
}
@ -776,7 +774,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
go d.execCommandGC()
if err := d.restore(); err != nil {
if err := d.restore(ctx); err != nil {
return nil, err
}
@ -784,12 +782,12 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
func (daemon *Daemon) Shutdown(ctx context.Context) error {
daemon.shutdown = true
if daemon.containers != nil {
group := sync.WaitGroup{}
logrus.Debug("starting clean shutdown of all containers...")
for _, container := range daemon.List() {
for _, container := range daemon.List(ctx) {
c := container
if c.IsRunning() {
logrus.Debugf("stopping %s", c.ID)
@ -812,7 +810,7 @@ func (daemon *Daemon) Shutdown() error {
logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err)
return
}
if err := c.unpause(); err != nil {
if err := c.unpause(ctx); err != nil {
logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err)
return
}
@ -827,7 +825,7 @@ func (daemon *Daemon) Shutdown() error {
}
} else {
// If the container failed to exit within 10 seconds of SIGTERM, then use the force
if err := c.Stop(10); err != nil {
if err := c.Stop(ctx, 10); err != nil {
logrus.Errorf("Stop container %s with error: %v", c.ID, err)
}
}
@ -865,7 +863,7 @@ func (daemon *Daemon) Shutdown() error {
// Mount sets container.basefs
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *Container) error {
func (daemon *Daemon) Mount(ctx context.Context, container *Container) error {
dir, err := daemon.driver.Get(container.ID, container.getMountLabel())
if err != nil {
return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
@ -890,14 +888,14 @@ func (daemon *Daemon) unmount(container *Container) error {
return nil
}
func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
func (daemon *Daemon) run(ctx context.Context, c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
hooks := execdriver.Hooks{
Start: startCallback,
}
hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
hooks.PreStart = append(hooks.PreStart, func(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
return c.setNetworkNamespaceKey(pid)
})
return daemon.execDriver.Run(c.command, pipes, hooks)
return daemon.execDriver.Run(ctx, c.command, pipes, hooks)
}
func (daemon *Daemon) kill(c *Container, sig int) error {
@ -964,12 +962,12 @@ func (daemon *Daemon) createRootfs(container *Container) error {
// which need direct access to daemon.graph.
// Once the tests switch to using engine and jobs, this method
// can go away.
func (daemon *Daemon) Graph() *graph.Graph {
func (daemon *Daemon) Graph(ctx context.Context) *graph.Graph {
return daemon.graph
}
// Repositories returns all repositories.
func (daemon *Daemon) Repositories() *graph.TagStore {
func (daemon *Daemon) Repositories(ctx context.Context) *graph.TagStore {
return daemon.repositories
}
@ -983,13 +981,13 @@ func (daemon *Daemon) systemInitPath() string {
// GraphDriver returns the currently used driver for processing
// container layers.
func (daemon *Daemon) GraphDriver() graphdriver.Driver {
func (daemon *Daemon) GraphDriver(ctx context.Context) graphdriver.Driver {
return daemon.driver
}
// ExecutionDriver returns the currently used driver for creating and
// starting execs in a container.
func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
func (daemon *Daemon) ExecutionDriver(ctx context.Context) execdriver.Driver {
return daemon.execDriver
}
@ -1001,9 +999,9 @@ func (daemon *Daemon) containerGraph() *graphdb.Database {
// of the image with imgID, that had the same config when it was
// created. nil is returned if a child cannot be found. An error is
// returned if the parent image cannot be found.
func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
func (daemon *Daemon) ImageGetCached(ctx context.Context, imgID string, config *runconfig.Config) (*image.Image, error) {
// Retrieve all images
images := daemon.Graph().Map()
images := daemon.Graph(ctx).Map()
// Store the tree in a map of map (map[parentId][childId])
imageMap := make(map[string]map[string]struct{})
@ -1039,7 +1037,7 @@ func tempDir(rootDir string) (string, error) {
return tmpDir, system.MkdirAll(tmpDir, 0700)
}
func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
func (daemon *Daemon) setHostConfig(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
container.Lock()
if err := parseSecurityOpt(container, hostConfig); err != nil {
container.Unlock()
@ -1049,14 +1047,14 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
// Do not lock while creating volumes since this could be calling out to external plugins
// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
if err := daemon.registerMountPoints(container, hostConfig); err != nil {
if err := daemon.registerMountPoints(ctx, container, hostConfig); err != nil {
return err
}
container.Lock()
defer container.Unlock()
// Register any links from the host config before starting the container
if err := daemon.registerLinks(container, hostConfig); err != nil {
if err := daemon.registerLinks(ctx, container, hostConfig); err != nil {
return err
}
@ -1094,7 +1092,7 @@ func getDefaultRouteMtu() (int, error) {
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
func (daemon *Daemon) verifyContainerSettings(ctx context.Context, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
// First perform verification of settings common across all platforms.
if config != nil {
@ -1131,7 +1129,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig,
}
// Now do platform-specific verification
return verifyPlatformContainerSettings(daemon, hostConfig, config)
return verifyPlatformContainerSettings(ctx, daemon, hostConfig, config)
}
func configureVolumes(config *Config) (*store.VolumeStore, error) {

View file

@ -8,6 +8,7 @@ import (
"path/filepath"
"testing"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/truncindex"
@ -92,32 +93,34 @@ func TestGet(t *testing.T) {
containerGraphDB: graph,
}
if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
ctx := context.Background()
if container, _ := daemon.Get(ctx, "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
t.Fatal("Should explicitly match full container IDs")
}
if container, _ := daemon.Get("75fb0b8009"); container != c4 {
if container, _ := daemon.Get(ctx, "75fb0b8009"); container != c4 {
t.Fatal("Should match a partial ID")
}
if container, _ := daemon.Get("drunk_hawking"); container != c2 {
if container, _ := daemon.Get(ctx, "drunk_hawking"); container != c2 {
t.Fatal("Should match a full name")
}
// c3.Name is a partial match for both c3.ID and c2.ID
if c, _ := daemon.Get("3cdbd1aa"); c != c3 {
if c, _ := daemon.Get(ctx, "3cdbd1aa"); c != c3 {
t.Fatal("Should match a full name even though it collides with another container's ID")
}
if container, _ := daemon.Get("d22d69a2b896"); container != c5 {
if container, _ := daemon.Get(ctx, "d22d69a2b896"); container != c5 {
t.Fatal("Should match a container where the provided prefix is an exact match to the it's name, and is also a prefix for it's ID")
}
if _, err := daemon.Get("3cdbd1"); err == nil {
if _, err := daemon.Get(ctx, "3cdbd1"); err == nil {
t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's")
}
if _, err := daemon.Get("nothing"); err == nil {
if _, err := daemon.Get(ctx, "nothing"); err == nil {
t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID")
}
@ -486,13 +489,15 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
t.Fatalf("Expected 1 volume mounted, was 0\n")
}
ctx := context.Background()
m := c.MountPoints["/vol1"]
_, err = daemon.VolumeCreate(m.Name, m.Driver, nil)
_, err = daemon.VolumeCreate(ctx, m.Name, m.Driver, nil)
if err != nil {
t.Fatal(err)
}
if err := daemon.VolumeRm(m.Name); err != nil {
if err := daemon.VolumeRm(ctx, m.Name); err != nil {
t.Fatal(err)
}

View file

@ -13,6 +13,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers"
@ -117,12 +118,12 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
warnings := []string{}
sysInfo := sysinfo.New(true)
if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver(ctx).Name(), "lxc") {
return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver(ctx).Name())
}
// memory subsystem checks and adjustments
@ -495,12 +496,12 @@ func setupInitLayer(initLayer string) error {
// NetworkAPIRouter implements a feature for server-experimental,
// directly calling into libnetwork.
func (daemon *Daemon) NetworkAPIRouter() func(w http.ResponseWriter, req *http.Request) {
func (daemon *Daemon) NetworkAPIRouter(ctx context.Context) func(w http.ResponseWriter, req *http.Request) {
return nwapi.NewHTTPHandler(daemon.netController)
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
if hostConfig == nil || hostConfig.Links == nil {
return nil
}
@ -510,14 +511,14 @@ func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.
if err != nil {
return err
}
child, err := daemon.Get(name)
child, err := daemon.Get(ctx, name)
if err != nil {
//An error from daemon.Get() means this name could not be found
return fmt.Errorf("Could not get container for %s", name)
}
for child.hostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
child, err = daemon.Get(parts[1])
child, err = daemon.Get(ctx, parts[1])
if err != nil {
return fmt.Errorf("Could not get container for %s", parts[1])
}

View file

@ -6,6 +6,7 @@ import (
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/graphdriver"
// register the windows graph driver
_ "github.com/docker/docker/daemon/graphdriver/windows"
@ -47,7 +48,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
func verifyPlatformContainerSettings(ctx context.Context, daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {
return nil, nil
}
@ -104,7 +105,7 @@ func initNetworkController(config *Config) (libnetwork.NetworkController, error)
// registerLinks sets up links between containers and writes the
// configuration out for persistence.
func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
func (daemon *Daemon) registerLinks(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
// TODO Windows. Factored out for network modes. There may be more
// refactoring required here.
@ -117,7 +118,7 @@ func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.
if err != nil {
return err
}
child, err := daemon.Get(name)
child, err := daemon.Get(ctx, name)
if err != nil {
//An error from daemon.Get() means this name could not be found
return fmt.Errorf("Could not get container for %s", name)

View file

@ -5,6 +5,8 @@ import (
"os"
"path"
"github.com/docker/docker/context"
"github.com/Sirupsen/logrus"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/volume/store"
@ -19,8 +21,8 @@ type ContainerRmConfig struct {
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *ContainerRmConfig) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
@ -43,9 +45,9 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
return err
}
parentContainer, _ := daemon.Get(pe.ID())
parentContainer, _ := daemon.Get(ctx, pe.ID())
if parentContainer != nil {
if err := parentContainer.updateNetwork(); err != nil {
if err := parentContainer.updateNetwork(ctx); err != nil {
logrus.Debugf("Could not update network to remove link %s: %v", n, err)
}
}
@ -53,7 +55,7 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
return nil
}
if err := daemon.rm(container, config.ForceRemove); err != nil {
if err := daemon.rm(ctx, container, config.ForceRemove); err != nil {
// return derr.ErrorCodeCantDestroy.WithArgs(name, utils.GetErrorMessage(err))
return err
}
@ -66,12 +68,12 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
}
// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove bool) (err error) {
if container.IsRunning() {
if !forceRemove {
return derr.ErrorCodeRmRunning
}
if err := container.Kill(); err != nil {
if err := container.Kill(ctx); err != nil {
return derr.ErrorCodeRmFailed.WithArgs(err)
}
}
@ -92,7 +94,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
defer container.resetRemovalInProgress()
if err = container.Stop(3); err != nil {
if err = container.Stop(ctx, 3); err != nil {
return err
}
@ -113,7 +115,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID)
os.RemoveAll(container.root)
container.logEvent("destroy")
container.logEvent(ctx, "destroy")
}
}()
@ -142,14 +144,14 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID)
container.logEvent("destroy")
container.logEvent(ctx, "destroy")
return nil
}
// VolumeRm removes the volume with the given name.
// If the volume is referenced by a container it is not removed
// This is called directly from the remote API
func (daemon *Daemon) VolumeRm(name string) error {
func (daemon *Daemon) VolumeRm(ctx context.Context, name string) error {
v, err := daemon.volumes.Get(name)
if err != nil {
return err

View file

@ -4,6 +4,8 @@ import (
"sync"
"time"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/pubsub"
)
@ -44,9 +46,9 @@ func (e *Events) Evict(l chan interface{}) {
// Log broadcasts event to listeners. Each listener has 100 millisecond for
// receiving event or it will be skipped.
func (e *Events) Log(action, id, from string) {
func (e *Events) Log(ctx context.Context, action, id, from string) {
now := time.Now().UTC()
jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
jm := &jsonmessage.JSONMessage{RequestID: ctx.RequestID(), Status: action, ID: id, From: from, Time: now.Unix(), TimeNano: now.UnixNano()}
e.mu.Lock()
if len(e.events) == cap(e.events) {
// discard oldest event
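
A minimal usage sketch, not part of this diff, showing how an event logged with a context surfaces the request ID on the broadcast JSONMessage. With context.Background() the RequestID is simply whatever that context carries (typically empty here); in the daemon the API layer is expected to hand down a context identifying the originating request.

package main

import (
	"fmt"

	"github.com/docker/docker/context"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/pkg/jsonmessage"
)

func main() {
	e := events.New()
	_, l := e.Subscribe()
	defer e.Evict(l)

	ctx := context.Background()
	e.Log(ctx, "create", "cont_id", "image_name")

	// Each subscriber receives the event as a *jsonmessage.JSONMessage,
	// which now includes the RequestID taken from ctx.RequestID().
	msg := <-l
	if jm, ok := msg.(*jsonmessage.JSONMessage); ok {
		fmt.Println(jm.Status, jm.ID, jm.From, jm.RequestID)
	}
}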

View file

@ -5,10 +5,12 @@ import (
"testing"
"time"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/jsonmessage"
)
func TestEventsLog(t *testing.T) {
ctx := context.Background()
e := New()
_, l1 := e.Subscribe()
_, l2 := e.Subscribe()
@ -18,7 +20,7 @@ func TestEventsLog(t *testing.T) {
if count != 2 {
t.Fatalf("Must be 2 subscribers, got %d", count)
}
e.Log("test", "cont", "image")
e.Log(ctx, "test", "cont", "image")
select {
case msg := <-l1:
jmsg, ok := msg.(*jsonmessage.JSONMessage)
@ -64,13 +66,14 @@ func TestEventsLog(t *testing.T) {
}
func TestEventsLogTimeout(t *testing.T) {
ctx := context.Background()
e := New()
_, l := e.Subscribe()
defer e.Evict(l)
c := make(chan struct{})
go func() {
e.Log("test", "cont", "image")
e.Log(ctx, "test", "cont", "image")
close(c)
}()
@ -82,13 +85,14 @@ func TestEventsLogTimeout(t *testing.T) {
}
func TestLogEvents(t *testing.T) {
ctx := context.Background()
e := New()
for i := 0; i < eventsLimit+16; i++ {
action := fmt.Sprintf("action_%d", i)
id := fmt.Sprintf("cont_%d", i)
from := fmt.Sprintf("image_%d", i)
e.Log(action, id, from)
e.Log(ctx, action, id, from)
}
time.Sleep(50 * time.Millisecond)
current, l := e.Subscribe()
@ -97,7 +101,7 @@ func TestLogEvents(t *testing.T) {
action := fmt.Sprintf("action_%d", num)
id := fmt.Sprintf("cont_%d", num)
from := fmt.Sprintf("image_%d", num)
e.Log(action, id, from)
e.Log(ctx, action, id, from)
}
if len(e.events) != eventsLimit {
t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))

View file

@ -8,6 +8,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/broadcastwriter"
@ -117,8 +118,8 @@ func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
d.execCommands.Delete(ExecConfig.ID)
}
func (d *Daemon) getActiveContainer(name string) (*Container, error) {
container, err := d.Get(name)
func (d *Daemon) getActiveContainer(ctx context.Context, name string) (*Container, error) {
container, err := d.Get(ctx, name)
if err != nil {
return nil, err
}
@ -133,13 +134,13 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
}
// ContainerExecCreate sets up an exec in a running container.
func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
func (d *Daemon) ContainerExecCreate(ctx context.Context, config *runconfig.ExecConfig) (string, error) {
// Not all drivers support Exec (LXC for example)
if err := checkExecSupport(d.execDriver.Name()); err != nil {
return "", err
}
container, err := d.getActiveContainer(config.Container)
container, err := d.getActiveContainer(ctx, config.Container)
if err != nil {
return "", err
}
@ -174,14 +175,14 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
d.registerExecCommand(ExecConfig)
container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
container.logEvent(ctx, "exec_create: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
return ExecConfig.ID, nil
}
// ContainerExecStart starts a previously set up exec instance. The
// std streams are set up.
func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
func (d *Daemon) ContainerExecStart(ctx context.Context, execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
var (
cStdin io.ReadCloser
cStdout, cStderr io.Writer
@ -207,7 +208,7 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
container := ExecConfig.Container
container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
container.logEvent(ctx, "exec_start: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
if ExecConfig.OpenStdin {
r, w := io.Pipe()
@ -243,7 +244,7 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
// the exitStatus) even after the cmd is done running.
go func() {
if err := container.exec(ExecConfig); err != nil {
if err := container.exec(ctx, ExecConfig); err != nil {
execErr <- derr.ErrorCodeExecCantRun.WithArgs(execName, container.ID, err)
}
}()
@ -267,11 +268,11 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
}
// Exec calls the underlying exec driver to run
func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
func (d *Daemon) Exec(ctx context.Context, c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
hooks := execdriver.Hooks{
Start: startCallback,
}
exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, hooks)
exitStatus, err := d.execDriver.Exec(ctx, c.command, ExecConfig.ProcessConfig, pipes, hooks)
// On err, make sure we don't leave ExitCode at zero
if err != nil && exitStatus == 0 {

View file

@ -7,6 +7,7 @@ import (
"time"
// TODO Windows: Factor out ulimit
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/ulimit"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
@ -29,7 +30,7 @@ var (
// through PreStart, Start and PostStop events.
// Callbacks are provided a processConfig pointer and the pid of the child.
// The channel will be used to notify the OOM events.
type DriverCallback func(processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
type DriverCallback func(ctx context.Context, processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error
// Hooks is a struct containing function pointers to callbacks
// used by any execdriver implementation exploiting hooks capabilities
@ -69,11 +70,11 @@ type ExitStatus struct {
type Driver interface {
// Run executes the process, blocks until the process exits and returns
// the exit code. It's the last stage on Docker side for running a container.
Run(c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error)
Run(ctx context.Context, c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error)
// Exec executes the process in an existing container, blocks until the
// process exits and returns the exit code.
Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error)
Exec(ctx context.Context, c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error)
// Kill sends signals to process in container.
Kill(c *Command, sig int) error
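The interface change above threads the request context through every hook invocation. Below is a minimal, self-contained sketch of what a callback looks like with the new shape; Context, ProcessConfig and Hooks here are local stand-ins for the real docker/docker/context and execdriver types, not the actual implementations.

package main

import "fmt"

// Stand-in types: the real ones live in docker/docker/context and
// daemon/execdriver; they are reduced here to what the sketch needs.
type Context interface{}
type ProcessConfig struct{ Entrypoint string }

// DriverCallback mirrors the updated signature: hooks now receive the
// request context alongside the process config, the pid and the OOM channel.
type DriverCallback func(ctx Context, processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error

// Hooks bundles the optional callbacks a driver may invoke.
type Hooks struct {
	Start DriverCallback
}

func main() {
	hooks := Hooks{
		Start: func(ctx Context, pc *ProcessConfig, pid int, chOOM <-chan struct{}) error {
			// Anything logged here can now be tagged with request data carried by ctx.
			fmt.Printf("started %s as pid %d\n", pc.Entrypoint, pid)
			return nil
		},
	}
	chOOM := make(chan struct{})
	close(chOOM) // closed channel: the OOM read never blocks
	_ = hooks.Start(nil, &ProcessConfig{Entrypoint: "/bin/sh"}, 4242, chOOM)
}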

View file

@ -20,6 +20,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/stringutils"
sysinfo "github.com/docker/docker/pkg/system"
@ -125,7 +126,7 @@ func killNetNsProc(proc *os.Process) {
// Run implements the exec driver Driver interface,
// it calls 'exec.Cmd' to launch lxc commands to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
var (
term execdriver.Terminal
err error
@ -329,7 +330,7 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd
if hooks.Start != nil {
logrus.Debugf("Invoking startCallback")
hooks.Start(&c.ProcessConfig, pid, oomKillNotification)
hooks.Start(ctx, &c.ProcessConfig, pid, oomKillNotification)
}
@ -871,7 +872,7 @@ func (t *TtyConsole) Close() error {
// Exec implements the exec driver Driver interface,
// it is not implemented by lxc.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
return -1, ErrExec
}

View file

@ -9,6 +9,7 @@ import (
"strings"
"syscall"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/opencontainers/runc/libcontainer/configs"
@ -18,7 +19,7 @@ import (
// createContainer populates and configures the container type with the
// data provided by the execdriver.Command
func (d *Driver) createContainer(c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
func (d *Driver) createContainer(ctx context.Context, c *execdriver.Command, hooks execdriver.Hooks) (*configs.Config, error) {
container := execdriver.InitContainer(c)
if err := d.createIpc(container, c); err != nil {
@ -33,7 +34,7 @@ func (d *Driver) createContainer(c *execdriver.Command, hooks execdriver.Hooks)
return nil, err
}
if err := d.createNetwork(container, c, hooks); err != nil {
if err := d.createNetwork(ctx, container, c, hooks); err != nil {
return nil, err
}
@ -113,7 +114,7 @@ func generateIfaceName() (string, error) {
return "", errors.New("Failed to find name for new interface")
}
func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
func (d *Driver) createNetwork(ctx context.Context, container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
if c.Network == nil {
return nil
}
@ -150,7 +151,7 @@ func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command,
// non-blocking and return the correct result when read.
chOOM := make(chan struct{})
close(chOOM)
if err := fnHook(&c.ProcessConfig, s.Pid, chOOM); err != nil {
if err := fnHook(ctx, &c.ProcessConfig, s.Pid, chOOM); err != nil {
return err
}
}
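Several drivers hand the hook a channel that is already closed when they cannot deliver OOM notifications. A tiny sketch of why that works: receiving from a closed channel returns immediately with ok == false, so the callback neither blocks nor mistakes the close for a real OOM event.

package main

import "fmt"

func main() {
	chOOM := make(chan struct{})
	close(chOOM) // "no OOM notifications available"

	// A receive on a closed channel is non-blocking and reports ok == false.
	_, ok := <-chOOM
	fmt.Println("oom notified:", ok) // prints: oom notified: false
}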

View file

@ -14,6 +14,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/pools"
@ -131,9 +132,9 @@ type execOutput struct {
// Run implements the exec driver Driver interface,
// it calls libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
// take the Command and populate the libcontainer.Config from it
container, err := d.createContainer(c, hooks)
container, err := d.createContainer(ctx, c, hooks)
if err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
@ -174,7 +175,7 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd
p.Wait()
return execdriver.ExitStatus{ExitCode: -1}, err
}
hooks.Start(&c.ProcessConfig, pid, oom)
hooks.Start(ctx, &c.ProcessConfig, pid, oom)
}
waitF := p.Wait

View file

@ -8,6 +8,7 @@ import (
"os/exec"
"syscall"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/opencontainers/runc/libcontainer"
// Blank import 'nsenter' so that init in that package will call c
@ -19,7 +20,7 @@ import (
// Exec implements the exec driver Driver interface,
// it calls libcontainer APIs to execute a container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
active := d.activeContainers[c.ID]
if active == nil {
return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
@ -57,7 +58,7 @@ func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo
// non-blocking and return the correct result when read.
chOOM := make(chan struct{})
close(chOOM)
hooks.Start(&c.ProcessConfig, pid, chOOM)
hooks.Start(ctx, &c.ProcessConfig, pid, chOOM)
}
ps, err := p.Wait()

View file

@ -7,12 +7,13 @@ import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/microsoft/hcsshim"
)
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
func (d *Driver) Exec(ctx context.Context, c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
var (
term execdriver.Terminal
@ -74,7 +75,7 @@ func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo
// non-blocking and return the correct result when read.
chOOM := make(chan struct{})
close(chOOM)
hooks.Start(&c.ProcessConfig, int(pid), chOOM)
hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM)
}
if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil {

View file

@ -15,6 +15,7 @@ import (
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/microsoft/hcsshim"
)
@ -79,7 +80,7 @@ type containerInit struct {
const defaultOwner = "docker"
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
func (d *Driver) Run(ctx context.Context, c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
var (
term execdriver.Terminal
@ -298,7 +299,7 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd
// non-blocking and return the correct result when read.
chOOM := make(chan struct{})
close(chOOM)
hooks.Start(&c.ProcessConfig, int(pid), chOOM)
hooks.Start(ctx, &c.ProcessConfig, int(pid), chOOM)
}
var exitCode int32

View file

@ -3,18 +3,19 @@ package daemon
import (
"io"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
// ContainerExport writes the contents of the container to the given
// writer. An error is returned if the container cannot be found.
func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerExport(ctx context.Context, name string, out io.Writer) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
data, err := container.export()
data, err := container.export(ctx)
if err != nil {
return derr.ErrorCodeExportFailed.WithArgs(name, err)
}
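The pattern in this file repeats across the daemon: the public entry point now takes a context and passes it to both the container lookup and the container method, so the request ID can follow the call all the way to the event stream. A rough sketch of that shape, using the standard library context as a stand-in for docker/docker/context and placeholder daemon/container types:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
)

type reqIDKey struct{}

// Placeholder types; the real Daemon and Container carry far more state.
type container struct{ id string }

func (c *container) export(ctx context.Context, out io.Writer) error {
	// The context rides along so export-related events can carry the request ID.
	_, err := fmt.Fprintf(out, "tarball for %s (req %v)\n", c.id, ctx.Value(reqIDKey{}))
	return err
}

type daemon struct{ containers map[string]*container }

func (d *daemon) get(ctx context.Context, name string) (*container, error) {
	if c, ok := d.containers[name]; ok {
		return c, nil
	}
	return nil, errors.New("no such container: " + name)
}

func (d *daemon) containerExport(ctx context.Context, name string, out io.Writer) error {
	c, err := d.get(ctx, name)
	if err != nil {
		return err
	}
	return c.export(ctx, out)
}

func main() {
	d := &daemon{containers: map[string]*container{"web": {id: "web"}}}
	ctx := context.WithValue(context.Background(), reqIDKey{}, "abc123def456")
	_ = d.containerExport(ctx, "web", os.Stdout)
}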

View file

@ -5,6 +5,7 @@ import (
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/graph/tags"
"github.com/docker/docker/image"
@ -50,10 +51,10 @@ import (
// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph
// package. This would require that we no longer need the daemon to determine
// whether images are being used by a stopped or running container.
func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) {
func (daemon *Daemon) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDelete, error) {
records := []types.ImageDelete{}
img, err := daemon.Repositories().LookupImage(imageRef)
img, err := daemon.Repositories(ctx).LookupImage(imageRef)
if err != nil {
return nil, err
}
@ -64,8 +65,8 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
// first. We can only remove this reference if either force is
// true, there are multiple repository references to this
// image, or there are no containers using the given reference.
if !(force || daemon.imageHasMultipleRepositoryReferences(img.ID)) {
if container := daemon.getContainerUsingImage(img.ID); container != nil {
if !(force || daemon.imageHasMultipleRepositoryReferences(ctx, img.ID)) {
if container := daemon.getContainerUsingImage(ctx, img.ID); container != nil {
// If we removed the repository reference then
// this image would remain "dangling" and since
// we really want to avoid that the client must
@ -74,14 +75,14 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
}
}
parsedRef, err := daemon.removeImageRef(imageRef)
parsedRef, err := daemon.removeImageRef(ctx, imageRef)
if err != nil {
return nil, err
}
untaggedRecord := types.ImageDelete{Untagged: parsedRef}
daemon.EventsService.Log("untag", img.ID, "")
daemon.EventsService.Log(ctx, "untag", img.ID, "")
records = append(records, untaggedRecord)
removedRepositoryRef = true
@ -90,21 +91,21 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
// repository reference to the image then we will want to
// remove that reference.
// FIXME: Is this the behavior we want?
repoRefs := daemon.Repositories().ByID()[img.ID]
repoRefs := daemon.Repositories(ctx).ByID()[img.ID]
if len(repoRefs) == 1 {
parsedRef, err := daemon.removeImageRef(repoRefs[0])
parsedRef, err := daemon.removeImageRef(ctx, repoRefs[0])
if err != nil {
return nil, err
}
untaggedRecord := types.ImageDelete{Untagged: parsedRef}
daemon.EventsService.Log("untag", img.ID, "")
daemon.EventsService.Log(ctx, "untag", img.ID, "")
records = append(records, untaggedRecord)
}
}
return records, daemon.imageDeleteHelper(img, &records, force, prune, removedRepositoryRef)
return records, daemon.imageDeleteHelper(ctx, img, &records, force, prune, removedRepositoryRef)
}
// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the
@ -115,14 +116,14 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
// imageHasMultipleRepositoryReferences returns whether there are multiple
// repository references to the given imageID.
func (daemon *Daemon) imageHasMultipleRepositoryReferences(imageID string) bool {
return len(daemon.Repositories().ByID()[imageID]) > 1
func (daemon *Daemon) imageHasMultipleRepositoryReferences(ctx context.Context, imageID string) bool {
return len(daemon.Repositories(ctx).ByID()[imageID]) > 1
}
// getContainerUsingImage returns a container that was created using the given
// imageID. Returns nil if there is no such container.
func (daemon *Daemon) getContainerUsingImage(imageID string) *Container {
for _, container := range daemon.List() {
func (daemon *Daemon) getContainerUsingImage(ctx context.Context, imageID string) *Container {
for _, container := range daemon.List(ctx) {
if container.ImageID == imageID {
return container
}
@ -136,7 +137,7 @@ func (daemon *Daemon) getContainerUsingImage(imageID string) *Container {
// repositoryRef must not be an image ID but a repository name followed by an
// optional tag or digest reference. If tag or digest is omitted, the default
// tag is used. Returns the resolved image reference and an error.
func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
func (daemon *Daemon) removeImageRef(ctx context.Context, repositoryRef string) (string, error) {
repository, ref := parsers.ParseRepositoryTag(repositoryRef)
if ref == "" {
ref = tags.DefaultTag
@ -145,7 +146,7 @@ func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
// Ignore the boolean value returned, as far as we're concerned, this
// is an idempotent operation and it's okay if the reference didn't
// exist in the first place.
_, err := daemon.Repositories().Delete(repository, ref)
_, err := daemon.Repositories(ctx).Delete(repository, ref)
return utils.ImageReference(repository, ref), err
}
@ -155,18 +156,18 @@ func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) {
// on the first encountered error. Removed references are logged to this
// daemon's event service. An "Untagged" types.ImageDelete is added to the
// given list of records.
func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]types.ImageDelete) error {
imageRefs := daemon.Repositories().ByID()[imgID]
func (daemon *Daemon) removeAllReferencesToImageID(ctx context.Context, imgID string, records *[]types.ImageDelete) error {
imageRefs := daemon.Repositories(ctx).ByID()[imgID]
for _, imageRef := range imageRefs {
parsedRef, err := daemon.removeImageRef(imageRef)
parsedRef, err := daemon.removeImageRef(ctx, imageRef)
if err != nil {
return err
}
untaggedRecord := types.ImageDelete{Untagged: parsedRef}
daemon.EventsService.Log("untag", imgID, "")
daemon.EventsService.Log(ctx, "untag", imgID, "")
*records = append(*records, untaggedRecord)
}
@ -203,11 +204,11 @@ func (idc *imageDeleteConflict) Error() string {
// conflict is encountered, it will be returned immediately without deleting
// the image. If quiet is true, any encountered conflicts will be ignored and
// the function will return nil immediately without deleting the image.
func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
func (daemon *Daemon) imageDeleteHelper(ctx context.Context, img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error {
// First, determine if this image has any conflicts. Ignore soft conflicts
// if force is true.
if conflict := daemon.checkImageDeleteConflict(img, force); conflict != nil {
if quiet && !daemon.imageIsDangling(img) {
if conflict := daemon.checkImageDeleteConflict(ctx, img, force); conflict != nil {
if quiet && !daemon.imageIsDangling(ctx, img) {
// Ignore conflicts UNLESS the image is "dangling" in
// which case we want the user to know.
return nil
@ -219,15 +220,15 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image
}
// Delete all repository tag/digest references to this image.
if err := daemon.removeAllReferencesToImageID(img.ID, records); err != nil {
if err := daemon.removeAllReferencesToImageID(ctx, img.ID, records); err != nil {
return err
}
if err := daemon.Graph().Delete(img.ID); err != nil {
if err := daemon.Graph(ctx).Delete(img.ID); err != nil {
return err
}
daemon.EventsService.Log("delete", img.ID, "")
daemon.EventsService.Log(ctx, "delete", img.ID, "")
*records = append(*records, types.ImageDelete{Deleted: img.ID})
if !prune || img.Parent == "" {
@ -237,14 +238,14 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image
// We need to prune the parent image. This means delete it if there are
// no tags/digests referencing it and there are no containers using it (
// either running or stopped).
parentImg, err := daemon.Graph().Get(img.Parent)
parentImg, err := daemon.Graph(ctx).Get(img.Parent)
if err != nil {
return derr.ErrorCodeImgNoParent.WithArgs(err)
}
// Do not force prunings, but do so quietly (stopping on any encountered
// conflicts).
return daemon.imageDeleteHelper(parentImg, records, false, true, true)
return daemon.imageDeleteHelper(ctx, parentImg, records, false, true, true)
}
// checkImageDeleteConflict determines whether there are any conflicts
@ -253,9 +254,9 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image
// using the image. A soft conflict is any tags/digest referencing the given
// image or any stopped container using the image. If ignoreSoftConflicts is
// true, this function will not check for soft conflict conditions.
func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
func (daemon *Daemon) checkImageDeleteConflict(ctx context.Context, img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict {
// Check for hard conflicts first.
if conflict := daemon.checkImageDeleteHardConflict(img); conflict != nil {
if conflict := daemon.checkImageDeleteHardConflict(ctx, img); conflict != nil {
return conflict
}
@ -265,12 +266,12 @@ func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConfl
return nil
}
return daemon.checkImageDeleteSoftConflict(img)
return daemon.checkImageDeleteSoftConflict(ctx, img)
}
func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDeleteConflict {
func (daemon *Daemon) checkImageDeleteHardConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
// Check if the image ID is being used by a pull or build.
if daemon.Graph().IsHeld(img.ID) {
if daemon.Graph(ctx).IsHeld(img.ID) {
return &imageDeleteConflict{
hard: true,
imgID: img.ID,
@ -279,7 +280,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet
}
// Check if the image has any descendent images.
if daemon.Graph().HasChildren(img) {
if daemon.Graph(ctx).HasChildren(img) {
return &imageDeleteConflict{
hard: true,
imgID: img.ID,
@ -288,7 +289,7 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet
}
// Check if any running container is using the image.
for _, container := range daemon.List() {
for _, container := range daemon.List(ctx) {
if !container.IsRunning() {
// Skip this until we check for soft conflicts later.
continue
@ -306,9 +307,9 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet
return nil
}
func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDeleteConflict {
func (daemon *Daemon) checkImageDeleteSoftConflict(ctx context.Context, img *image.Image) *imageDeleteConflict {
// Check if any repository tags/digest reference this image.
if daemon.Repositories().HasReferences(img) {
if daemon.Repositories(ctx).HasReferences(img) {
return &imageDeleteConflict{
imgID: img.ID,
message: "image is referenced in one or more repositories",
@ -316,7 +317,7 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDelet
}
// Check if any stopped containers reference this image.
for _, container := range daemon.List() {
for _, container := range daemon.List(ctx) {
if container.IsRunning() {
// Skip this as it was checked above in hard conflict conditions.
continue
@ -336,6 +337,6 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDelet
// imageIsDangling returns whether the given image is "dangling" which means
// that there are no repository references to the given image and it has no
// child images.
func (daemon *Daemon) imageIsDangling(img *image.Image) bool {
return !(daemon.Repositories().HasReferences(img) || daemon.Graph().HasChildren(img))
func (daemon *Daemon) imageIsDangling(ctx context.Context, img *image.Image) bool {
return !(daemon.Repositories(ctx).HasReferences(img) || daemon.Graph(ctx).HasChildren(img))
}
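The dangling rule above is compact enough to restate in isolation: an image is dangling when no repository references it and no image is built on top of it. A small sketch with map-backed stand-ins for the tag store and graph (not the real TagStore/Graph types):

package main

import "fmt"

type image struct{ ID string }

// Map-backed stand-ins for the repository store and image graph.
type repoStore map[string]int  // image ID -> number of tag/digest references
type imageGraph map[string]int // image ID -> number of child images

func (r repoStore) HasReferences(img image) bool { return r[img.ID] > 0 }
func (g imageGraph) HasChildren(img image) bool  { return g[img.ID] > 0 }

// Dangling: nothing tags the image and nothing is built from it. This is the
// one case where a "quiet" delete still surfaces conflicts to the user.
func imageIsDangling(repos repoStore, g imageGraph, img image) bool {
	return !(repos.HasReferences(img) || g.HasChildren(img))
}

func main() {
	repos := repoStore{"img-a": 1}
	graph := imageGraph{"img-b": 2}
	fmt.Println(imageIsDangling(repos, graph, image{ID: "img-a"})) // false: still tagged
	fmt.Println(imageIsDangling(repos, graph, image{ID: "img-b"})) // false: has children
	fmt.Println(imageIsDangling(repos, graph, image{ID: "img-c"})) // true: nothing refers to it
}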

View file

@ -8,6 +8,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
@ -18,8 +19,8 @@ import (
)
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() (*types.Info, error) {
images := daemon.Graph().Map()
func (daemon *Daemon) SystemInfo(ctx context.Context) (*types.Info, error) {
images := daemon.Graph(ctx).Map()
var imgcount int
if images == nil {
imgcount = 0
@ -65,10 +66,10 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
v := &types.Info{
ID: daemon.ID,
Containers: len(daemon.List()),
Containers: len(daemon.List(ctx)),
Images: imgcount,
Driver: daemon.GraphDriver().String(),
DriverStatus: daemon.GraphDriver().Status(),
Driver: daemon.GraphDriver(ctx).String(),
DriverStatus: daemon.GraphDriver(ctx).Status(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNfCallIptablesDisabled,
BridgeNfIP6tables: !sysInfo.BridgeNfCallIP6tablesDisabled,
@ -76,7 +77,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
ExecutionDriver: daemon.ExecutionDriver().Name(),
ExecutionDriver: daemon.ExecutionDriver(ctx).Name(),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion,

View file

@ -5,13 +5,14 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
)
// ContainerInspect returns low-level information about a
// container. Returns an error if the container cannot be found, or if
// there is an error getting the data.
func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types.ContainerJSON, error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, err
}
@ -19,7 +20,7 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
container.Lock()
defer container.Unlock()
base, err := daemon.getInspectData(container)
base, err := daemon.getInspectData(ctx, container)
if err != nil {
return nil, err
}
@ -30,8 +31,8 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
}
// ContainerInspect120 serializes the master version of a container into a json type.
func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120, error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*types.ContainerJSON120, error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, err
}
@ -39,7 +40,7 @@ func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120,
container.Lock()
defer container.Unlock()
base, err := daemon.getInspectData(container)
base, err := daemon.getInspectData(ctx, container)
if err != nil {
return nil, err
}
@ -53,11 +54,11 @@ func (daemon *Daemon) ContainerInspect120(name string) (*types.ContainerJSON120,
return &types.ContainerJSON120{base, mountPoints, config}, nil
}
func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) {
func (daemon *Daemon) getInspectData(ctx context.Context, container *Container) (*types.ContainerJSONBase, error) {
// make a copy to play with
hostConfig := *container.hostConfig
if children, err := daemon.children(container.Name); err == nil {
if children, err := daemon.children(ctx, container.Name); err == nil {
for linkAlias, child := range children {
hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
}
@ -120,7 +121,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
// ContainerExecInspect returns low-level information about the exec
// command. An error is returned if the exec cannot be found.
func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
func (daemon *Daemon) ContainerExecInspect(ctx context.Context, id string) (*ExecConfig, error) {
eConfig, err := daemon.getExecConfig(id)
if err != nil {
return nil, err
@ -130,7 +131,7 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
// VolumeInspect looks up a volume by name. An error is returned if
// the volume cannot be found.
func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
func (daemon *Daemon) VolumeInspect(ctx context.Context, name string) (*types.Volume, error) {
v, err := daemon.volumes.Get(name)
if err != nil {
return nil, err

View file

@ -2,7 +2,10 @@
package daemon
import "github.com/docker/docker/api/types"
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
)
// This sets platform-specific fields
func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
@ -15,8 +18,8 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type
}
// ContainerInspectPre120 gets containers for pre 1.20 APIs.
func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSONPre120, error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, err
}
@ -24,7 +27,7 @@ func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONP
container.Lock()
defer container.Unlock()
base, err := daemon.getInspectData(container)
base, err := daemon.getInspectData(ctx, container)
if err != nil {
return nil, err
}

View file

@ -1,6 +1,9 @@
package daemon
import "github.com/docker/docker/api/types"
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
)
// This sets platform-specific fields
func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
@ -12,6 +15,6 @@ func addMountPoints(container *Container) []types.MountPoint {
}
// ContainerInspectPre120 get containers for pre 1.20 APIs.
func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSON, error) {
return daemon.ContainerInspect(name)
func (daemon *Daemon) ContainerInspectPre120(ctx context.Context, name string) (*types.ContainerJSON, error) {
return daemon.ContainerInspect(ctx, name)
}

View file

@ -1,25 +1,29 @@
package daemon
import "syscall"
import (
"syscall"
"github.com/docker/docker/context"
)
// ContainerKill send signal to the container
// If no signal is given (sig 0), then Kill with SIGKILL and wait
// for the container to exit.
// If a signal is given, then just send it to the container and return.
func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerKill(ctx context.Context, name string, sig uint64) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
if err := container.Kill(); err != nil {
if err := container.Kill(ctx); err != nil {
return err
}
} else {
// Otherwise, just send the requested signal
if err := container.killSig(int(sig)); err != nil {
if err := container.killSig(ctx, int(sig)); err != nil {
return err
}
}
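ContainerKill's branching is easy to lose in the diff: signal 0 and SIGKILL mean "kill and wait for exit", anything else is simply forwarded. A standalone sketch of that dispatch with placeholder kill functions rather than the real container methods:

package main

import (
	"fmt"
	"syscall"
)

// dispatchKill mirrors the rule in ContainerKill: sig 0 or SIGKILL triggers
// the full kill-and-wait path, any other signal is forwarded as-is.
func dispatchKill(sig uint64, kill func() error, killSig func(int) error) error {
	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
		return kill()
	}
	return killSig(int(sig))
}

func main() {
	kill := func() error { fmt.Println("SIGKILL + wait for exit"); return nil }
	killSig := func(s int) error { fmt.Printf("forward signal %d\n", s); return nil }

	_ = dispatchKill(0, kill, killSig)                       // kill-and-wait
	_ = dispatchKill(uint64(syscall.SIGTERM), kill, killSig) // just forward SIGTERM
}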

View file

@ -8,6 +8,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/graphdb"
@ -20,7 +21,7 @@ type iterationAction int
// containerReducer represents a reducer for a container.
// Returns the object to serialize by the api.
type containerReducer func(*Container, *listContext) (*types.Container, error)
type containerReducer func(context.Context, *Container, *listContext) (*types.Container, error)
const (
// includeContainer is the action to include a container in the reducer.
@ -35,7 +36,7 @@ const (
var errStopIteration = errors.New("container list iteration stopped")
// List returns an array of all containers registered in the daemon.
func (daemon *Daemon) List() []*Container {
func (daemon *Daemon) List(ctx context.Context) []*Container {
return daemon.containers.List()
}
@ -79,21 +80,21 @@ type listContext struct {
}
// Containers returns the list of containers to show given the user's filtering.
func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
return daemon.reduceContainers(config, daemon.transformContainer)
func (daemon *Daemon) Containers(ctx context.Context, config *ContainersConfig) ([]*types.Container, error) {
return daemon.reduceContainers(ctx, config, daemon.transformContainer)
}
// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer.
func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
func (daemon *Daemon) reduceContainers(ctx context.Context, config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) {
containers := []*types.Container{}
ctx, err := daemon.foldFilter(config)
fctx, err := daemon.foldFilter(ctx, config)
if err != nil {
return nil, err
}
for _, container := range daemon.List() {
t, err := daemon.reducePsContainer(container, ctx, reducer)
for _, container := range daemon.List(ctx) {
t, err := daemon.reducePsContainer(ctx, container, fctx, reducer)
if err != nil {
if err != errStopIteration {
return nil, err
@ -102,19 +103,19 @@ func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer contain
}
if t != nil {
containers = append(containers, t)
ctx.idx++
fctx.idx++
}
}
return containers, nil
}
// reducePsContainer builds the basic representation for a container as expected by the ps command.
func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
func (daemon *Daemon) reducePsContainer(ctx context.Context, container *Container, lctx *listContext, reducer containerReducer) (*types.Container, error) {
container.Lock()
defer container.Unlock()
// filter containers to return
action := includeContainerInList(container, ctx)
action := includeContainerInList(container, lctx)
switch action {
case excludeContainer:
return nil, nil
@ -123,11 +124,11 @@ func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext,
}
// transform internal container struct into api structs
return reducer(container, ctx)
return reducer(ctx, container, lctx)
}
// foldFilter generates the container filter based on the user's filtering options.
func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) {
func (daemon *Daemon) foldFilter(ctx context.Context, config *ContainersConfig) (*listContext, error) {
psFilters, err := filters.FromParam(config.Filters)
if err != nil {
return nil, err
@ -159,11 +160,11 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error)
var ancestorFilter bool
if ancestors, ok := psFilters["ancestor"]; ok {
ancestorFilter = true
byParents := daemon.Graph().ByParent()
byParents := daemon.Graph(ctx).ByParent()
// The idea is to walk the graph down the most "efficient" way.
for _, ancestor := range ancestors {
// First, get the imageId of the ancestor filter (yay)
image, err := daemon.Repositories().LookupImage(ancestor)
image, err := daemon.Repositories(ctx).LookupImage(ancestor)
if err != nil {
logrus.Warnf("Error while looking up for image %v", ancestor)
continue
@ -185,14 +186,14 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error)
var beforeCont, sinceCont *Container
if config.Before != "" {
beforeCont, err = daemon.Get(config.Before)
beforeCont, err = daemon.Get(ctx, config.Before)
if err != nil {
return nil, err
}
}
if config.Since != "" {
sinceCont, err = daemon.Get(config.Since)
sinceCont, err = daemon.Get(ctx, config.Since)
if err != nil {
return nil, err
}
@ -286,13 +287,13 @@ func includeContainerInList(container *Container, ctx *listContext) iterationAct
}
// transformContainer generates the container type expected by the docker ps command.
func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) {
func (daemon *Daemon) transformContainer(ctx context.Context, container *Container, lctx *listContext) (*types.Container, error) {
newC := &types.Container{
ID: container.ID,
Names: ctx.names[container.ID],
Names: lctx.names[container.ID],
}
img, err := daemon.Repositories().LookupImage(container.Config.Image)
img, err := daemon.Repositories(ctx).LookupImage(container.Config.Image)
if err != nil {
// If the image can no longer be found by its original reference,
// it makes sense to show the ID instead of a stale reference.
@ -349,8 +350,8 @@ func (daemon *Daemon) transformContainer(container *Container, ctx *listContext)
}
}
if ctx.Size {
sizeRw, sizeRootFs := container.getSize()
if lctx.Size {
sizeRw, sizeRootFs := container.getSize(ctx)
newC.SizeRw = sizeRw
newC.SizeRootFs = sizeRootFs
}
@ -361,7 +362,7 @@ func (daemon *Daemon) transformContainer(container *Container, ctx *listContext)
// Volumes lists known volumes, using the filter to restrict the range
// of volumes returned.
func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) {
func (daemon *Daemon) Volumes(ctx context.Context, filter string) ([]*types.Volume, error) {
var volumesOut []*types.Volume
volFilters, err := filters.FromParam(filter)
if err != nil {

View file

@ -6,6 +6,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/logger"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/stdcopy"
@ -30,7 +31,7 @@ type ContainerLogsConfig struct {
// ContainerLogs hooks up a container's stdout and stderr streams
// configured with the given struct.
func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
func (daemon *Daemon) ContainerLogs(ctx context.Context, container *Container, config *ContainerLogsConfig) error {
if !(config.UseStdout || config.UseStderr) {
return derr.ErrorCodeNeedStream
}

View file

@ -7,6 +7,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig"
@ -84,9 +85,9 @@ func (m *containerMonitor) ExitOnNext() {
// Close closes the container's resources such as networking allocations and
// unmounts the container's root filesystem
func (m *containerMonitor) Close() error {
func (m *containerMonitor) Close(ctx context.Context) error {
// Cleanup networking and mounts
m.container.cleanup()
m.container.cleanup(ctx)
// FIXME: there is a race condition between two RUN instructions in a Dockerfile
// because they share the same runconfig and change the image. Must be fixed
@ -101,7 +102,7 @@ func (m *containerMonitor) Close() error {
}
// Start starts the containers process and monitors it according to the restart policy
func (m *containerMonitor) Start() error {
func (m *containerMonitor) Start(ctx context.Context) error {
var (
err error
exitStatus execdriver.ExitStatus
@ -117,7 +118,7 @@ func (m *containerMonitor) Start() error {
m.container.setStopped(&exitStatus)
defer m.container.Unlock()
}
m.Close()
m.Close(ctx)
}()
// reset stopped flag
if m.container.HasBeenManuallyStopped {
@ -138,11 +139,11 @@ func (m *containerMonitor) Start() error {
pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
m.container.logEvent("start")
m.container.logEvent(ctx, "start")
m.lastStartTime = time.Now()
if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
if exitStatus, err = m.container.daemon.run(ctx, m.container, pipes, m.callback); err != nil {
// if we receive an internal error from the initial start of a container then lets
// return it instead of entering the restart loop
if m.container.RestartCount == 0 {
@ -162,7 +163,7 @@ func (m *containerMonitor) Start() error {
if m.shouldRestart(exitStatus.ExitCode) {
m.container.setRestarting(&exitStatus)
m.container.logEvent("die")
m.container.logEvent(ctx, "die")
m.resetContainer(true)
// sleep with a small time increment between each restart to help avoid issues caused by quickly
@ -177,7 +178,7 @@ func (m *containerMonitor) Start() error {
continue
}
m.container.logEvent("die")
m.container.logEvent(ctx, "die")
m.resetContainer(true)
return err
}
@ -245,11 +246,11 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
// callback ensures that the container's state is properly updated after we
// received ack from the execution drivers
func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
func (m *containerMonitor) callback(ctx context.Context, processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
go func() {
_, ok := <-chOOM
if ok {
m.container.logEvent("oom")
m.container.logEvent(ctx, "oom")
}
}()
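The callback spins up an OOM watcher before anything else. A compact sketch of that goroutine, with logEvent reduced to a print: only a genuine send on the channel is recorded, while a channel that was merely closed (the "no OOM support" case shown earlier) falls through silently.

package main

import (
	"fmt"
	"time"
)

func main() {
	logEvent := func(action string) { fmt.Println("event:", action) } // stand-in for container.logEvent

	chOOM := make(chan struct{}, 1)

	// Watcher as in the monitor callback: ok is true only for a real send,
	// so a closed channel never produces an "oom" event.
	go func() {
		if _, ok := <-chOOM; ok {
			logEvent("oom")
		}
	}()

	chOOM <- struct{}{}               // simulate the driver reporting an OOM kill
	time.Sleep(10 * time.Millisecond) // give the watcher a moment to log
}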

View file

@ -1,17 +1,18 @@
package daemon
import (
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
// ContainerPause pauses a container
func (daemon *Daemon) ContainerPause(name string) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerPause(ctx context.Context, name string) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
if err := container.pause(); err != nil {
if err := container.pause(ctx); err != nil {
return derr.ErrorCodePauseError.WithArgs(name, err)
}

View file

@ -1,18 +1,19 @@
package daemon
import (
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
// ContainerRename changes the name of a container, using the oldName
// to find the container. An error is returned if newName is already
// reserved.
func (daemon *Daemon) ContainerRename(oldName, newName string) error {
func (daemon *Daemon) ContainerRename(ctx context.Context, oldName, newName string) error {
if oldName == "" || newName == "" {
return derr.ErrorCodeEmptyRename
}
container, err := daemon.Get(oldName)
container, err := daemon.Get(ctx, oldName)
if err != nil {
return err
}
@ -21,7 +22,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
container.Lock()
defer container.Unlock()
if newName, err = daemon.reserveName(container.ID, newName); err != nil {
if newName, err = daemon.reserveName(ctx, container.ID, newName); err != nil {
return derr.ErrorCodeRenameTaken.WithArgs(err)
}
@ -29,7 +30,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
undo := func() {
container.Name = oldName
daemon.reserveName(container.ID, oldName)
daemon.reserveName(ctx, container.ID, oldName)
daemon.containerGraphDB.Delete(newName)
}
@ -43,6 +44,6 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
return err
}
container.logEvent("rename")
container.logEvent(ctx, "rename")
return nil
}
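The rename path reserves the new name before touching the graph and keeps an undo closure so any later failure can roll the reservation back. A stripped-down sketch of that rollback shape, with map-backed placeholders instead of the daemon's name registry and graph database:

package main

import (
	"errors"
	"fmt"
)

// renameWithUndo: reserve first, remember how to undo, roll back on failure.
func renameWithUndo(newName string, reserve func(string) error, release func(string), update func(string) error) error {
	if err := reserve(newName); err != nil {
		return err
	}
	undo := func() { release(newName) } // captured before the risky step
	if err := update(newName); err != nil {
		undo()
		return err
	}
	return nil
}

func main() {
	reserved := map[string]bool{"old": true}
	reserve := func(n string) error {
		if reserved[n] {
			return errors.New("name already reserved")
		}
		reserved[n] = true
		return nil
	}
	release := func(n string) { delete(reserved, n) }
	failingUpdate := func(string) error { return errors.New("graph update failed") }

	err := renameWithUndo("new", reserve, release, failingUpdate)
	fmt.Println(err, reserved) // update failed, "new" is released, "old" untouched
}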

View file

@ -1,20 +1,24 @@
package daemon
import (
"github.com/docker/docker/context"
)
// ContainerResize changes the size of the TTY of the process running
// in the container with the given name to the given height and width.
func (daemon *Daemon) ContainerResize(name string, height, width int) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerResize(ctx context.Context, name string, height, width int) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
return container.Resize(height, width)
return container.Resize(ctx, height, width)
}
// ContainerExecResize changes the size of the TTY of the process
// running in the exec with the given name to the given height and
// width.
func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
func (daemon *Daemon) ContainerExecResize(ctx context.Context, name string, height, width int) error {
ExecConfig, err := daemon.getExecConfig(name)
if err != nil {
return err

View file

@ -1,6 +1,7 @@
package daemon
import (
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
@ -10,12 +11,12 @@ import (
// timeout, ContainerRestart will wait forever until a graceful
// stop. Returns an error if the container cannot be found, or if
// there is an underlying error at any stage of the restart.
func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, seconds int) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
if err := container.Restart(seconds); err != nil {
if err := container.Restart(ctx, seconds); err != nil {
return derr.ErrorCodeCantRestart.WithArgs(name, err)
}
return nil

View file

@ -3,14 +3,15 @@ package daemon
import (
"runtime"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
)
// ContainerStart starts a container.
func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *runconfig.HostConfig) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
@ -28,7 +29,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf
// This is kept for backward compatibility - hostconfig should be passed when
// creating a container, not during start.
if hostConfig != nil {
if err := daemon.setHostConfig(container, hostConfig); err != nil {
if err := daemon.setHostConfig(ctx, container, hostConfig); err != nil {
return err
}
}
@ -40,11 +41,11 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf
// check if hostConfig is in line with the current system settings.
// It may happen cgroups are umounted or the like.
if _, err = daemon.verifyContainerSettings(container.hostConfig, nil); err != nil {
if _, err = daemon.verifyContainerSettings(ctx, container.hostConfig, nil); err != nil {
return err
}
if err := container.Start(); err != nil {
if err := container.Start(ctx); err != nil {
return derr.ErrorCodeCantStart.WithArgs(name, utils.GetErrorMessage(err))
}

View file

@ -5,6 +5,7 @@ import (
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/version"
"github.com/docker/libnetwork/osl"
@ -22,9 +23,9 @@ type ContainerStatsConfig struct {
// ContainerStats writes information about the container to the stream
// given in the config object.
func (daemon *Daemon) ContainerStats(prefixOrName string, config *ContainerStatsConfig) error {
func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *ContainerStatsConfig) error {
container, err := daemon.Get(prefixOrName)
container, err := daemon.Get(ctx, prefixOrName)
if err != nil {
return err
}

View file

@ -1,6 +1,7 @@
package daemon
import (
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
@ -10,15 +11,15 @@ import (
// will wait for a graceful termination. An error is returned if the
// container is not found, is already stopped, or if there is a
// problem stopping the container.
func (daemon *Daemon) ContainerStop(name string, seconds int) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerStop(ctx context.Context, name string, seconds int) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
if !container.IsRunning() {
return derr.ErrorCodeStopped
}
if err := container.Stop(seconds); err != nil {
if err := container.Stop(ctx, seconds); err != nil {
return derr.ErrorCodeCantStop.WithArgs(name, err)
}
return nil

View file

@ -8,6 +8,7 @@ import (
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
@ -16,12 +17,12 @@ import (
// "-ef" if no args are given. An error is returned if the container
// is not found, or is not running, or if there are any problems
// running ps, or parsing the output.
func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
if psArgs == "" {
psArgs = "-ef"
}
container, err := daemon.Get(name)
container, err := daemon.Get(ctx, name)
if err != nil {
return nil, err
}
@ -30,7 +31,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
return nil, derr.ErrorCodeNotRunning.WithArgs(name)
}
pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
pids, err := daemon.ExecutionDriver(ctx).GetPidsForContainer(container.ID)
if err != nil {
return nil, err
}
@ -76,6 +77,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
}
}
}
container.logEvent("top")
container.logEvent(ctx, "top")
return procList, nil
}

View file

@ -2,10 +2,11 @@ package daemon
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
// ContainerTop is not supported on Windows and returns an error.
func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
func (daemon *Daemon) ContainerTop(ctx context.Context, name string, psArgs string) (*types.ContainerProcessList, error) {
return nil, derr.ErrorCodeNoTop
}

View file

@ -1,17 +1,18 @@
package daemon
import (
"github.com/docker/docker/context"
derr "github.com/docker/docker/errors"
)
// ContainerUnpause unpauses a container
func (daemon *Daemon) ContainerUnpause(name string) error {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerUnpause(ctx context.Context, name string) error {
container, err := daemon.Get(ctx, name)
if err != nil {
return err
}
if err := container.unpause(); err != nil {
if err := container.unpause(ctx); err != nil {
return derr.ErrorCodeCantUnpause.WithArgs(name, err)
}

View file

@ -10,6 +10,7 @@ import (
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/system"
@ -285,7 +286,7 @@ func parseVolumesFrom(spec string) (string, string, error) {
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
binds := map[string]bool{}
mountPoints := map[string]*mountPoint{}
@ -301,7 +302,7 @@ func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runc
return err
}
c, err := daemon.Get(containerID)
c, err := daemon.Get(ctx, containerID)
if err != nil {
return err
}

View file

@ -3,6 +3,7 @@
package daemon
import (
"github.com/docker/docker/context"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/runconfig"
)
@ -31,6 +32,6 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
// registerMountPoints initializes the container mount points with the
// configured volumes and bind mounts. Windows does not support volumes or
// mount points.
func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
func (daemon *Daemon) registerMountPoints(ctx context.Context, container *Container, hostConfig *runconfig.HostConfig) error {
return nil
}

View file

@ -1,14 +1,18 @@
package daemon
import "time"
import (
"time"
"github.com/docker/docker/context"
)
// ContainerWait stops processing until the given container is
// stopped. If the container is not found, an error is returned. On a
// successful stop, the exit code of the container is returned. On a
// timeout, an error is returned. If you want to wait forever, supply
// a negative duration for the timeout.
func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
container, err := daemon.Get(name)
func (daemon *Daemon) ContainerWait(ctx context.Context, name string, timeout time.Duration) (int, error) {
container, err := daemon.Get(ctx, name)
if err != nil {
return -1, err
}
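ContainerWait's contract (a negative timeout means wait forever, otherwise error out after the deadline) is a standard select-on-two-channels shape in Go. A sketch of that contract with a plain done channel standing in for the container's wait machinery:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitWithTimeout waits for done; a negative timeout waits forever, any other
// timeout turns into an error once the deadline passes.
func waitWithTimeout(done <-chan int, timeout time.Duration) (int, error) {
	if timeout < 0 {
		return <-done, nil
	}
	select {
	case code := <-done:
		return code, nil
	case <-time.After(timeout):
		return -1, errors.New("timed out waiting for container to stop")
	}
}

func main() {
	done := make(chan int, 1)
	go func() {
		time.Sleep(20 * time.Millisecond)
		done <- 0 // container exited cleanly
	}()
	code, err := waitWithTimeout(done, 5*time.Millisecond)
	fmt.Println(code, err) // -1, timed out

	code, err = waitWithTimeout(done, -1)
	fmt.Println(code, err) // 0, <nil>
}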

View file

@ -17,6 +17,7 @@ import (
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/cli"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/context"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/opts"
@ -150,6 +151,11 @@ func getGlobalFlag() (globalFlag *flag.Flag) {
// CmdDaemon is the daemon command, called the raw arguments after `docker daemon`.
func (cli *DaemonCli) CmdDaemon(args ...string) error {
// This may need to be made even more global - it all depends
// on whether we want the CLI to have a context object too.
// For now we'll leave it as a daemon-side object only.
ctx := context.Background()
// warn from uuid package when running the daemon
uuid.Loggerf = logrus.Warnf
@ -224,7 +230,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
serverConfig.TLSConfig = tlsConfig
}
api := apiserver.New(serverConfig)
api := apiserver.New(ctx, serverConfig)
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
@ -245,7 +251,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
cli.TrustKeyPath = commonFlags.TrustKey
registryService := registry.NewService(cli.registryOptions)
d, err := daemon.NewDaemon(cli.Config, registryService)
d, err := daemon.NewDaemon(ctx, cli.Config, registryService)
if err != nil {
if pfile != nil {
if err := pfile.Remove(); err != nil {
@ -260,14 +266,14 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
logrus.WithFields(logrus.Fields{
"version": dockerversion.VERSION,
"commit": dockerversion.GITCOMMIT,
"execdriver": d.ExecutionDriver().Name(),
"graphdriver": d.GraphDriver().String(),
"execdriver": d.ExecutionDriver(ctx).Name(),
"graphdriver": d.GraphDriver(ctx).String(),
}).Info("Docker daemon")
signal.Trap(func() {
api.Close()
<-serveAPIWait
shutdownDaemon(d, 15)
shutdownDaemon(ctx, d, 15)
if pfile != nil {
if err := pfile.Remove(); err != nil {
logrus.Error(err)
@ -277,12 +283,12 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
// after the daemon is done setting up we can tell the api to start
// accepting connections with specified daemon
api.AcceptConnections(d)
api.AcceptConnections(ctx, d)
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
shutdownDaemon(d, 15)
shutdownDaemon(ctx, d, 15)
if errAPI != nil {
if pfile != nil {
if err := pfile.Remove(); err != nil {
@ -297,10 +303,10 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill a container or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
func shutdownDaemon(ctx context.Context, d *daemon.Daemon, timeout time.Duration) {
ch := make(chan struct{})
go func() {
d.Shutdown()
d.Shutdown(ctx)
close(ch)
}()
select {

View file

@ -5,6 +5,7 @@ import (
"net/http"
"net/url"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/httputils"
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/streamformatter"
@ -16,7 +17,7 @@ import (
// inConfig (if src is "-"), or from a URI specified in src. Progress output is
// written to outStream. Repository and tag names can optionally be given in
// the repo and tag arguments, respectively.
func (s *TagStore) Import(src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
func (s *TagStore) Import(ctx context.Context, src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
var (
sf = streamformatter.NewJSONStreamFormatter()
archive io.ReadCloser
@ -74,6 +75,6 @@ func (s *TagStore) Import(src string, repo string, tag string, msg string, inCon
logID = utils.ImageReference(logID, tag)
}
s.eventsService.Log("import", logID, "")
s.eventsService.Log(ctx, "import", logID, "")
return nil
}

View file

@ -6,6 +6,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
@ -62,7 +63,7 @@ func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.Re
// Pull initiates a pull operation. image is the repository name to pull, and
// tag may be either empty, or indicate a specific tag to pull.
func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error {
func (s *TagStore) Pull(ctx context.Context, image string, tag string, imagePullConfig *ImagePullConfig) error {
var sf = streamformatter.NewJSONStreamFormatter()
// Resolve the Repository name from fqn to RepositoryInfo
@ -131,7 +132,7 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
}
s.eventsService.Log("pull", logName, "")
s.eventsService.Log(ctx, "pull", logName, "")
return nil
}

View file

@ -7,6 +7,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/context"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/registry"
)
@@ -67,7 +68,7 @@ func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository
}
// Push initiates a push operation on the repository named localName.
func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
func (s *TagStore) Push(ctx context.Context, localName string, imagePushConfig *ImagePushConfig) error {
// FIXME: Allow interrupting the current push when a new push of the same image is started.
var sf = streamformatter.NewJSONStreamFormatter()
@@ -115,7 +116,7 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro
}
s.eventsService.Log("push", repoInfo.LocalName, "")
s.eventsService.Log(ctx, "push", repoInfo.LocalName, "")
return nil
}

View file

@@ -410,7 +410,7 @@ func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
}
// Check the id
parsedID := strings.TrimSuffix(e[1], ":")
parsedID := strings.TrimSuffix(e[3], ":")
if parsedID != id {
return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, parsedID)
}
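The index change from e[1] to e[3] follows from the new event line layout: with a "[reqid: xxx]" block inserted after the timestamp, a whitespace split yields two extra fields before the container ID. A small illustration, assuming the surrounding test splits the event line on single spaces; the sample lines below are invented, not copied from the test.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Hypothetical event lines; the real output may differ in detail.
        oldLine := "2015-09-24T14:16:22.000000000Z 4386fb97867d: (from busybox) create"
        newLine := "2015-09-24T14:16:22.000000000Z [reqid: abcdef123456] 4386fb97867d: (from busybox) create"

        // Before the change the container ID was the second field ...
        fmt.Println(strings.TrimSuffix(strings.Split(oldLine, " ")[1], ":")) // 4386fb97867d
        // ... and with the "[reqid: xxx]" block it becomes the fourth.
        fmt.Println(strings.TrimSuffix(strings.Split(newLine, " ")[3], ":")) // 4386fb97867d
    }
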
@@ -686,3 +686,78 @@ func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) {
c.Fatalf("Missing 'push' log event for image %s\n%s", repoName, out)
}
}
func (s *DockerSuite) TestEventsReqID(c *check.C) {
// Tests for the "[reqid: xxx]" field in Events
testRequires(c, DaemonIsLinux)
reqIDMatch := `[^ ]+ \[reqid: ([0-9a-z]{12})\] [0-9a-z]+: `
reqIDRE := regexp.MustCompile(reqIDMatch)
// Simple test just to make sure it works at all
dockerCmd(c, "create", "busybox", "true")
out, _ := dockerCmd(c, "events", "--since=0", "--until=0s")
events := strings.Split(strings.TrimSpace(out), "\n")
if len(events) == 0 {
c.Fatalf("Wrong # of events, should just be one, got:\n%v\n", events)
}
createEvent := events[len(events)-1]
matched, err := regexp.MatchString(reqIDMatch, createEvent)
if err != nil || !matched {
c.Fatalf("Error finding reqID in event: %v\n", createEvent)
}
reqID1 := reqIDRE.FindStringSubmatch(createEvent)[1]
// Now make sure another cmd doesn't get the same reqID
dockerCmd(c, "create", "busybox", "true")
out, _ = dockerCmd(c, "events", "--since=0", "--until=0s")
events = strings.Split(strings.TrimSpace(out), "\n")
createEvent = events[len(events)-1]
matched, err = regexp.MatchString(reqIDMatch, createEvent)
if err != nil || !matched {
c.Fatalf("Error finding reqID in event: %v\n", createEvent)
}
reqID2 := reqIDRE.FindStringSubmatch(createEvent)[1]
if reqID1 == reqID2 {
c.Fatalf("Should not have the same reqID(%s):\n%v\n", reqID1, createEvent)
}
// Now make sure a build **does** use the same reqID for all
// 5 events that are generated
_, err = buildImage("reqidimg", `
FROM busybox
RUN echo HI`, true)
if err != nil {
c.Fatalf("Couldn't create image: %q", err)
}
out, _ = dockerCmd(c, "events", "--since=0", "--until=0s")
events = strings.Split(strings.TrimSpace(out), "\n")
// Get last event's reqID - will use it to find other matching events
lastEvent := events[len(events)-1]
reqID := reqIDRE.FindStringSubmatch(lastEvent)[1]
// Find all events with this same reqID
eventList := []string{lastEvent}
for i := len(events) - 2; i >= 0; i-- {
tmpID := reqIDRE.FindStringSubmatch(events[i])[1]
if tmpID != reqID {
break
}
eventList = append(eventList, events[i])
}
if len(eventList) != 5 { // create, start, die, commit, destroy
c.Fatalf("Wrong # of matching events - should be 5:\n%q\n", eventList)
}
}
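The reqIDMatch pattern above is effectively the text-format contract this test enforces on the event stream. For reference, a standalone sketch of extracting the request ID from an event line with that same pattern; the sample line is invented.

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        reqIDRE := regexp.MustCompile(`[^ ]+ \[reqid: ([0-9a-z]{12})\] [0-9a-z]+: `)
        line := "2015-09-24T14:16:22.000000000Z [reqid: abcdef123456] 4386fb97867d: (from busybox) create"
        if m := reqIDRE.FindStringSubmatch(line); m != nil {
            fmt.Println("request id:", m[1]) // abcdef123456
        }
    }
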

View file

@@ -92,6 +92,7 @@ func (p *JSONProgress) String() string {
// the created time, where it is from, the status, and the ID of the
// message. It's used for docker events.
type JSONMessage struct {
RequestID string `json:"reqid,omitempty"`
Stream string `json:"stream,omitempty"`
Status string `json:"status,omitempty"`
Progress *JSONProgress `json:"progressDetail,omitempty"`
@@ -127,6 +128,9 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
} else if jm.Time != 0 {
fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
}
if jm.RequestID != "" {
fmt.Fprintf(out, "[reqid: %s] ", jm.RequestID)
}
if jm.ID != "" {
fmt.Fprintf(out, "%s: ", jm.ID)
}
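Taken together, the struct tag and the Display change mean the request ID travels on the wire as a reqid JSON field and is rendered as a "[reqid: ...]" prefix right after the timestamp. A self-contained sketch of that behavior, using a local stand-in struct rather than the real jsonmessage package, with invented values; it roughly follows the field order shown in the patched Display().

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // message mirrors only the JSONMessage fields relevant here.
    type message struct {
        RequestID string `json:"reqid,omitempty"`
        Status    string `json:"status,omitempty"`
        ID        string `json:"id,omitempty"`
    }

    func main() {
        m := message{RequestID: "abcdef123456", Status: "create", ID: "4386fb97867d"}

        wire, _ := json.Marshal(m)
        fmt.Println(string(wire)) // {"reqid":"abcdef123456","status":"create","id":"4386fb97867d"}

        // Display-style rendering of the same message.
        if m.RequestID != "" {
            fmt.Printf("[reqid: %s] ", m.RequestID)
        }
        if m.ID != "" {
            fmt.Printf("%s: ", m.ID)
        }
        fmt.Println(m.Status) // [reqid: abcdef123456] 4386fb97867d: create
    }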