
Add an Exec method to the native execdriver.
Modify the Attach() method to support docker exec.

Docker-DCO-1.1-Signed-off-by: Vishnu Kannan <vishnuk@google.com> (github: vishh)

Vishnu Kannan 10 years ago
parent
commit
f3c767d798
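
Taken together, the diffs below thread a new per-process entry point through the stack: the daemon builds an execdriver.ProcessConfig for the extra process and hands it, together with the attach pipes, to the driver's new Exec method. A minimal daemon-side sketch (illustrative only: runExec is a hypothetical helper, the pipe wiring is simplified, and it assumes the execdriver.NewPipes constructor used by the Run path; only Driver.Exec and the ProcessConfig fields come from this commit):

package docs // illustrative sketch, not part of this commit

import (
	"os"

	"github.com/docker/docker/daemon/execdriver"
)

// runExec shows how a caller would drive the new Exec entry point.
func runExec(d execdriver.Driver, c *execdriver.Command, entrypoint string, args []string) (int, error) {
	processConfig := &execdriver.ProcessConfig{
		Entrypoint: entrypoint,
		Arguments:  args,
		Tty:        false, // the exec'd process has its own TTY setting
	}
	// Connect the process's stdio to the caller, as the Run path does.
	pipes := execdriver.NewPipes(nil, os.Stdout, os.Stderr, false)
	// Exec blocks until the process exits and returns its exit code.
	return d.Exec(c, processConfig, pipes, nil)
}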

+ 1 - 1
builder/internals.go

@@ -407,7 +407,7 @@ func (b *Builder) run(c *daemon.Container) error {
 			// FIXME (LK4D4): Also, it may make sense to call the "logs" job; it is like attach
 			// but without hijacking for stdin. Also, with attach there can be a race
 			// condition because some output may already have been printed before it.
-			return <-b.Daemon.Attach(c, nil, nil, b.OutStream, b.ErrStream)
+			return <-b.Daemon.Attach(c, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream)
 		})
 	}
 

+ 20 - 12
daemon/attach.go

@@ -8,9 +8,9 @@ import (
 	"time"
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/log"
 	"github.com/docker/docker/utils"
 )
 
@@ -103,7 +103,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 			cStderr = job.Stderr
 		}
 
-		<-daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
+		<-daemon.Attach(container, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr)
 		// If we are in stdinonce mode, wait for the process to end
 		// otherwise, simply return
 		if container.Config.StdinOnce && !container.Config.Tty {
@@ -119,15 +119,17 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 // Attach and ContainerAttach.
 //
 // This method is in use by builder/builder.go.
-func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
+func (daemon *Daemon) Attach(container *Container, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
 	var (
 		cStdout, cStderr io.ReadCloser
 		nJobs            int
 		errors           = make(chan error, 3)
 	)
 
-	if stdin != nil && container.Config.OpenStdin {
-		nJobs++
+	// Connect the provided stdin stream to the container's stdin.
+	if stdin != nil && openStdin {
+		nJobs++
+		// Get a pipe connected to the container's stdin.
 		if cStdin, err := container.StdinPipe(); err != nil {
 			errors <- err
 		} else {
@@ -135,7 +137,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 				log.Debugf("attach: stdin: begin")
 				defer log.Debugf("attach: stdin: end")
 				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
-				if container.Config.StdinOnce && !container.Config.Tty {
+				if stdinOnce && !tty {
 					defer cStdin.Close()
 				} else {
 					defer func() {
@@ -147,10 +149,11 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 						}
 					}()
 				}
-				if container.Config.Tty {
+				if tty {
 					_, err = utils.CopyEscapable(cStdin, stdin)
 				} else {
 					_, err = io.Copy(cStdin, stdin)
 				}
 				if err == io.ErrClosedPipe {
 					err = nil
@@ -163,7 +166,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 		}
 	}
 	if stdout != nil {
 		nJobs++
+		// Get the reader end of a pipe attached to the container's stdout.
 		if p, err := container.StdoutPipe(); err != nil {
 			errors <- err
 		} else {
@@ -172,7 +176,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 				log.Debugf("attach: stdout: begin")
 				defer log.Debugf("attach: stdout: end")
 				// If we are in StdinOnce mode, then close stdin
-				if container.Config.StdinOnce && stdin != nil {
+				if stdinOnce && stdin != nil {
 					defer stdin.Close()
 				}
 				if stdinCloser != nil {
@@ -189,6 +193,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 			}()
 		}
 	} else {
+		// No stdout stream was requested; point the container's stdout at a no-op writer.
 		go func() {
 			if stdinCloser != nil {
 				defer stdinCloser.Close()
@@ -201,7 +206,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 		}()
 	}
 	if stderr != nil {
 		nJobs++
 		if p, err := container.StderrPipe(); err != nil {
 			errors <- err
 		} else {
@@ -210,7 +215,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 				log.Debugf("attach: stderr: begin")
 				defer log.Debugf("attach: stderr: end")
 				// If we are in StdinOnce mode, then close stdin
-				if container.Config.StdinOnce && stdin != nil {
+				// Close stdin here too: whichever output handler finishes first closes it.
+				if stdinOnce && stdin != nil {
 					defer stdin.Close()
 				}
 				if stdinCloser != nil {
@@ -223,10 +229,12 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 				if err != nil {
 					log.Errorf("attach: stderr: %s", err)
 				}
 				errors <- err
 			}()
 		}
 	} else {
+		// No stderr stream was requested; point the container's stderr at a no-op writer.
 		go func() {
 			if stdinCloser != nil {
 				defer stdinCloser.Close()
@@ -252,7 +260,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 
 		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
 		// of closing the passed stdin? Add an intermediary io.Pipe?
 		for i := 0; i < nJobs; i++ {
 			log.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
 			if err := <-errors; err != nil {
 				log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
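
The reason openStdin, stdinOnce, and tty are now explicit parameters is that an exec'd process may need stream settings different from its container's config, while existing callers keep passing container.Config values. A sketch of an exec-style caller (illustrative; the actual docker exec wiring lands in later commits, and execOpenStdin/execTty are hypothetical per-exec settings):

package daemon // illustrative sketch, not part of this commit

import "io"

// attachForExec drives the new Attach signature for an exec'd process:
// the flags come from the exec request rather than container.Config.
func attachForExec(daemon *Daemon, container *Container, execOpenStdin, execTty bool,
	stdin io.ReadCloser, stdinCloser io.Closer, stdout, stderr io.Writer) error {
	// stdinOnce is false: the exec session decides when stdin closes.
	return <-daemon.Attach(container, execOpenStdin, false, execTty,
		stdin, stdinCloser, stdout, stderr)
}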

+ 2 - 0
daemon/execdriver/driver.go

@@ -42,6 +42,8 @@ type TtyTerminal interface {
 
 type Driver interface {
 	Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
+	// Exec executes the process in an existing container, blocks until the process exits and returns the exit code
+	Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error)
 	Kill(c *Command, sig int) error
 	Pause(c *Command) error
 	Unpause(c *Command) error
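
Since Exec joins the Driver interface, every driver must now implement it; the lxc stub below is the minimal case. A compile-time assertion (an illustrative Go idiom, not part of this diff) catches a driver that misses the new method:

// Inside a driver package: compilation fails if *driver no longer
// satisfies execdriver.Driver, e.g. because Exec was added to the interface.
var _ execdriver.Driver = (*driver)(nil)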

+ 4 - 0
daemon/execdriver/lxc/driver.go

@@ -527,3 +527,7 @@ func (t *TtyConsole) Close() error {
 	t.SlavePty.Close()
 	return t.MasterPty.Close()
 }
+
+func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+	return -1, fmt.Errorf("Exec is not supported by the lxc driver")
+}

+ 68 - 0
daemon/execdriver/native/exec.go

@@ -0,0 +1,68 @@
+// +build linux
+
+package native
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/reexec"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/namespaces"
+)
+
+const commandName = "nsenter-exec"
+
+func init() {
+	reexec.Register(commandName, nsenterExec)
+}
+
+func nsenterExec() {
+	runtime.LockOSThread()
+
+	userArgs := findUserArgs()
+
+	config, err := loadConfigFromFd()
+	if err != nil {
+		log.Fatalf("docker-exec: unable to receive config from sync pipe: %s", err)
+	}
+
+	if err := namespaces.FinalizeSetns(config, userArgs); err != nil {
+		log.Fatalf("docker-exec: failed to exec: %s", err)
+	}
+}
+
+func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+	active := d.activeContainers[c.ID]
+	if active == nil {
+		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
+	}
+	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
+	if err != nil {
+		return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err)
+	}
+
+	var term execdriver.Terminal
+
+	if processConfig.Tty {
+		term, err = NewTtyConsole(processConfig, pipes)
+	} else {
+		term, err = execdriver.NewStdConsole(processConfig, pipes)
+	}
+	if err != nil {
+		return -1, err
+	}
+
+	processConfig.Terminal = term
+
+	args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...)
+
+	return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec", processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console,
+		func(cmd *exec.Cmd) {
+			if startCallback != nil {
+				startCallback(&c.ProcessConfig, cmd.Process.Pid)
+			}
+		})
+}
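
The parent side re-executes the docker binary with argv[0] set to the registered command name, and the child (nsenterExec above) joins the container's namespaces via namespaces.FinalizeSetns before exec'ing the user command. A sketch of the dispatch side, assuming the reexec package's Register/Init pair used above (illustrative, not part of this commit):

package main // illustrative sketch

import (
	"fmt"

	"github.com/docker/docker/reexec"
	// Importing the native driver registers "nsenter-exec" via its init().
	_ "github.com/docker/docker/daemon/execdriver/native"
)

func main() {
	// When re-executed with argv[0] == "nsenter-exec", Init runs the
	// registered nsenterExec and returns true, so the child exits here.
	if reexec.Init() {
		return
	}
	fmt.Println("normal docker startup continues here")
}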

+ 40 - 0
daemon/execdriver/native/utils.go

@@ -0,0 +1,40 @@
+// +build linux
+
+package native
+
+import (
+	"os"
+
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/syncpipe"
+)
+
+// findUserArgs returns the arguments that follow the "--" separator in the
+// re-exec'd command line, i.e. the user-supplied command and its arguments.
+func findUserArgs() []string {
+	for i, a := range os.Args {
+		if a == "--" {
+			return os.Args[i+1:]
+		}
+	}
+	return []string{}
+}
+
+// loadConfigFromFd loads a container's config from the sync pipe that is provided by
+// fd 3 when running a process
+func loadConfigFromFd() (*libcontainer.Config, error) {
+	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3)
+	if err != nil {
+		return nil, err
+	}
+
+	var config *libcontainer.Config
+	if err := syncPipe.ReadFromParent(&config); err != nil {
+		return nil, err
+	}
+
+	return config, nil
+}
+
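
To make the argv handling concrete, here is what findUserArgs extracts from the re-exec'd child's command line (a hypothetical test, not part of this commit):

// +build linux

package native

import (
	"os"
	"reflect"
	"testing"
)

// TestFindUserArgs is hypothetical: everything after "--" is the user command.
func TestFindUserArgs(t *testing.T) {
	oldArgs := os.Args
	defer func() { os.Args = oldArgs }()

	os.Args = []string{"nsenter-exec", "--", "ps", "aux"}
	if got, want := findUserArgs(), []string{"ps", "aux"}; !reflect.DeepEqual(got, want) {
		t.Fatalf("findUserArgs() = %v, want %v", got, want)
	}
}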