
Merge pull request #7379 from tiborvass/merge-7366

Cleanup: merge container-related jobs into daemon/ (Closes 7366)
Victor Vieux 11 years ago
parent commit 867ffca84f
21 changed files with 1057 additions and 961 deletions
  1. api/server/server.go  (+2 −2)
  2. api/server/server_unit_test.go  (+2 −2)
  3. daemon/changes.go  (+32 −0)
  4. daemon/commit.go  (+84 −0)
  5. daemon/copy.go  (+33 −0)
  6. daemon/create.go  (+86 −0)
  7. daemon/daemon.go  (+26 −134)
  8. daemon/delete.go  (+181 −0)
  9. daemon/export.go  (+30 −0)
  10. daemon/kill.go  (+59 −0)
  11. daemon/list.go  (+121 −0)
  12. daemon/logs.go  (+133 −0)
  13. daemon/resize.go  (+29 −0)
  14. daemon/restart.go  (+27 −0)
  15. daemon/start.go  (+68 −0)
  16. daemon/stop.go  (+30 −0)
  17. daemon/top.go  (+79 −0)
  18. daemon/wait.go  (+20 −0)
  19. integration/server_test.go  (+1 −1)
  20. server/container.go  (+0 −793)
  21. server/init.go  (+14 −29)

+ 2 - 2
api/server/server.go

@@ -302,7 +302,7 @@ func getContainersChanges(eng *engine.Engine, version version.Version, w http.Re
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	var job = eng.Job("changes", vars["name"])
+	var job = eng.Job("container_changes", vars["name"])
 	streamJSON(job, w, false)
 
 	return job.Run()
@@ -678,7 +678,7 @@ func deleteContainers(eng *engine.Engine, version version.Version, w http.Respon
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	job := eng.Job("container_delete", vars["name"])
+	job := eng.Job("delete", vars["name"])
 
 	if version.GreaterThanOrEqualTo("1.14") {
 		job.Setenv("stop", r.Form.Get("stop"))
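
These renames only change the key under which a handler is registered; the HTTP layer resolves jobs by name at request time, so the eng.Job calls here must move in lockstep with the registrations in daemon/daemon.go below. A minimal sketch of that name-based dispatch, using only engine calls exercised elsewhere in this diff (engine.New, Register, Job, Run) and a hypothetical stub handler:

package main

import (
	"github.com/docker/docker/engine"
)

func main() {
	eng := engine.New()
	// Register a stub handler under the new job name, as daemon.Install does.
	eng.Register("container_changes", func(job *engine.Job) engine.Status {
		job.Printf("changes for %s\n", job.Args[0])
		return engine.StatusOK
	})
	// Resolve and run the job by name, as the HTTP handler above does.
	job := eng.Job("container_changes", "mycontainer")
	if err := job.Run(); err != nil {
		panic(err)
	}
}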

+ 2 - 2
api/server/server_unit_test.go

@@ -455,7 +455,7 @@ func TestDeleteContainers(t *testing.T) {
 	eng := engine.New()
 	name := "foo"
 	var called bool
-	eng.Register("container_delete", func(job *engine.Job) engine.Status {
+	eng.Register("delete", func(job *engine.Job) engine.Status {
 		called = true
 		if len(job.Args) == 0 {
 			t.Fatalf("Job arguments is empty")
@@ -480,7 +480,7 @@ func TestDeleteContainersWithStopAndKill(t *testing.T) {
 	}
 	eng := engine.New()
 	var called bool
-	eng.Register("container_delete", func(job *engine.Job) engine.Status {
+	eng.Register("delete", func(job *engine.Job) engine.Status {
 		called = true
 		return engine.StatusOK
 	})

+ 32 - 0
daemon/changes.go

@@ -0,0 +1,32 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s CONTAINER", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		outs := engine.NewTable("", 0)
+		changes, err := container.Changes()
+		if err != nil {
+			return job.Error(err)
+		}
+		for _, change := range changes {
+			out := &engine.Env{}
+			if err := out.Import(change); err != nil {
+				return job.Error(err)
+			}
+			outs.Add(out)
+		}
+		if _, err := outs.WriteListTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}

+ 84 - 0
daemon/commit.go

@@ -0,0 +1,84 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	var (
+		config    = container.Config
+		newConfig runconfig.Config
+	)
+
+	if err := job.GetenvJson("config", &newConfig); err != nil {
+		return job.Error(err)
+	}
+
+	if err := runconfig.Merge(&newConfig, config); err != nil {
+		return job.Error(err)
+	}
+
+	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
+	if err != nil {
+		return job.Error(err)
+	}
+	job.Printf("%s\n", img.ID)
+	return engine.StatusOK
+}
+
+// Commit creates a new filesystem image from the current state of a container.
+// The image can optionally be tagged into a repository
+func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
+	if pause {
+		container.Pause()
+		defer container.Unpause()
+	}
+
+	if err := container.Mount(); err != nil {
+		return nil, err
+	}
+	defer container.Unmount()
+
+	rwTar, err := container.ExportRw()
+	if err != nil {
+		return nil, err
+	}
+	defer rwTar.Close()
+
+	// Create a new image from the container's base layers + a new layer from container changes
+	var (
+		containerID, containerImage string
+		containerConfig             *runconfig.Config
+	)
+
+	if container != nil {
+		containerID = container.ID
+		containerImage = container.Image
+		containerConfig = container.Config
+	}
+
+	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
+	if err != nil {
+		return nil, err
+	}
+
+	// Register the image if needed
+	if repository != "" {
+		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
+			return img, err
+		}
+	}
+	return img, nil
+}

+ 33 - 0
daemon/copy.go

@@ -0,0 +1,33 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
+	if len(job.Args) != 2 {
+		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
+	}
+
+	var (
+		name     = job.Args[0]
+		resource = job.Args[1]
+	)
+
+	if container := daemon.Get(name); container != nil {
+
+		data, err := container.Copy(resource)
+		if err != nil {
+			return job.Error(err)
+		}
+		defer data.Close()
+
+		if _, err := io.Copy(job.Stdout, data); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}

+ 86 - 0
daemon/create.go

@@ -0,0 +1,86 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
+	var name string
+	if len(job.Args) == 1 {
+		name = job.Args[0]
+	} else if len(job.Args) > 1 {
+		return job.Errorf("Usage: %s", job.Name)
+	}
+	config := runconfig.ContainerConfigFromJob(job)
+	if config.Memory != 0 && config.Memory < 524288 {
+		return job.Errorf("Minimum memory limit allowed is 512k")
+	}
+	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
+		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		config.Memory = 0
+	}
+	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
+		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		config.MemorySwap = -1
+	}
+	container, buildWarnings, err := daemon.Create(config, name)
+	if err != nil {
+		if daemon.Graph().IsNotExist(err) {
+			_, tag := parsers.ParseRepositoryTag(config.Image)
+			if tag == "" {
+				tag = graph.DEFAULTTAG
+			}
+			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+		}
+		return job.Error(err)
+	}
+	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
+		job.Errorf("IPv4 forwarding is disabled.\n")
+	}
+	job.Eng.Job("log", "create", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
+	// FIXME: this is necessary because daemon.Create might return a nil container
+	// with a non-nil error. This should not happen! Once it's fixed we
+	// can remove this workaround.
+	if container != nil {
+		job.Printf("%s\n", container.ID)
+	}
+	for _, warning := range buildWarnings {
+		job.Errorf("%s\n", warning)
+	}
+	return engine.StatusOK
+}
+
+// Create creates a new container from the given configuration with a given name.
+func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
+	var (
+		container *Container
+		warnings  []string
+	)
+
+	img, err := daemon.repositories.LookupImage(config.Image)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err := daemon.checkImageDepth(img); err != nil {
+		return nil, nil, err
+	}
+	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
+		return nil, nil, err
+	}
+	if container, err = daemon.newContainer(name, config, img); err != nil {
+		return nil, nil, err
+	}
+	if err := daemon.createRootfs(container, img); err != nil {
+		return nil, nil, err
+	}
+	if err := container.ToDisk(); err != nil {
+		return nil, nil, err
+	}
+	if err := daemon.Register(container); err != nil {
+		return nil, nil, err
+	}
+	return container, warnings, nil
+}
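
The 524288 floor checked in ContainerCreate above is 512 KiB expressed in bytes, which is what the "512k" in the error message refers to:

package main

import "fmt"

func main() {
	// 512 KiB in bytes — the minimum memory limit enforced by ContainerCreate.
	const minMemory = 512 * 1024
	fmt.Println(minMemory) // 524288
}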

+ 26 - 134
daemon/daemon.go

@@ -105,26 +105,36 @@ type Daemon struct {
 
 // Install installs daemon capabilities to eng.
 func (daemon *Daemon) Install(eng *engine.Engine) error {
-	if err := eng.Register("container_inspect", daemon.ContainerInspect); err != nil {
-		return err
-	}
-	if err := eng.Register("attach", daemon.ContainerAttach); err != nil {
-		return err
-	}
-	if err := eng.Register("pause", daemon.ContainerPause); err != nil {
-		return err
-	}
-	if err := eng.Register("unpause", daemon.ContainerUnpause); err != nil {
-		return err
+	// FIXME: rename "delete" to "rm" for consistency with the CLI command
+	// FIXME: rename ContainerDestroy to ContainerRm for consistency with the CLI command
+	for name, method := range map[string]engine.Handler{
+		"attach":            daemon.ContainerAttach,
+		"commit":            daemon.ContainerCommit,
+		"container_changes": daemon.ContainerChanges,
+		"container_copy":    daemon.ContainerCopy,
+		"container_inspect": daemon.ContainerInspect,
+		"containers":        daemon.Containers,
+		"create":            daemon.ContainerCreate,
+		"delete":            daemon.ContainerDestroy,
+		"export":            daemon.ContainerExport,
+		"kill":              daemon.ContainerKill,
+		"logs":              daemon.ContainerLogs,
+		"pause":             daemon.ContainerPause,
+		"resize":            daemon.ContainerResize,
+		"restart":           daemon.ContainerRestart,
+		"start":             daemon.ContainerStart,
+		"stop":              daemon.ContainerStop,
+		"top":               daemon.ContainerTop,
+		"unpause":           daemon.ContainerUnpause,
+		"wait":              daemon.ContainerWait,
+	} {
+		if err := eng.Register(name, method); err != nil {
+			return err
+		}
 	}
 	return nil
 }
 
-// List returns an array of all containers registered in the daemon.
-func (daemon *Daemon) List() []*Container {
-	return daemon.containers.List()
-}
-
 // Get looks for a container by the specified ID or name, and returns it.
 // If the container is not found, or if an error occurs, nil is returned.
 func (daemon *Daemon) Get(name string) *Container {
@@ -279,47 +289,6 @@ func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, strea
 	return nil
 }
 
-// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-func (daemon *Daemon) Destroy(container *Container) error {
-	if container == nil {
-		return fmt.Errorf("The given container is <nil>")
-	}
-
-	element := daemon.containers.Get(container.ID)
-	if element == nil {
-		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
-	}
-
-	if err := container.Stop(3); err != nil {
-		return err
-	}
-
-	// Deregister the container before removing its directory, to avoid race conditions
-	daemon.idIndex.Delete(container.ID)
-	daemon.containers.Delete(container.ID)
-
-	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
-		utils.Debugf("Unable to remove container from link graph: %s", err)
-	}
-
-	if err := daemon.driver.Remove(container.ID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
-	}
-
-	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := daemon.driver.Remove(initID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
-	}
-
-	if err := os.RemoveAll(container.root); err != nil {
-		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
-	}
-
-	selinuxFreeLxcContexts(container.ProcessLabel)
-
-	return nil
-}
-
 func (daemon *Daemon) restore() error {
 	var (
 		debug             = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
@@ -397,38 +366,6 @@ func (daemon *Daemon) restore() error {
 	return nil
 }
 
-// Create creates a new container from the given configuration with a given name.
-func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
-	var (
-		container *Container
-		warnings  []string
-	)
-
-	img, err := daemon.repositories.LookupImage(config.Image)
-	if err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.checkImageDepth(img); err != nil {
-		return nil, nil, err
-	}
-	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
-		return nil, nil, err
-	}
-	if container, err = daemon.newContainer(name, config, img); err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.createRootfs(container, img); err != nil {
-		return nil, nil, err
-	}
-	if err := container.ToDisk(); err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.Register(container); err != nil {
-		return nil, nil, err
-	}
-	return container, warnings, nil
-}
-
 func (daemon *Daemon) checkImageDepth(img *image.Image) error {
 	// We add 2 layers to the depth because the container's rw and
 	// init layer add to the restriction
@@ -634,51 +571,6 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error
 	return nil
 }
 
-// Commit creates a new filesystem image from the current state of a container.
-// The image can optionally be tagged into a repository
-func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
-	if pause {
-		container.Pause()
-		defer container.Unpause()
-	}
-
-	if err := container.Mount(); err != nil {
-		return nil, err
-	}
-	defer container.Unmount()
-
-	rwTar, err := container.ExportRw()
-	if err != nil {
-		return nil, err
-	}
-	defer rwTar.Close()
-
-	// Create a new image from the container's base layers + a new layer from container changes
-	var (
-		containerID, containerImage string
-		containerConfig             *runconfig.Config
-	)
-
-	if container != nil {
-		containerID = container.ID
-		containerImage = container.Image
-		containerConfig = container.Config
-	}
-
-	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
-	if err != nil {
-		return nil, err
-	}
-
-	// Register the image if needed
-	if repository != "" {
-		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
-			return img, err
-		}
-	}
-	return img, nil
-}
-
 func GetFullContainerName(name string) (string, error) {
 	if name == "" {
 		return "", fmt.Errorf("Container name cannot be empty")
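
Folding the repeated if err := eng.Register(...) blocks into one loop over a map[string]engine.Handler makes registration declarative: adding a container job is now a single line in the table. A minimal sketch of the same pattern in isolation, with stub handlers standing in for the daemon methods:

package main

import (
	"fmt"

	"github.com/docker/docker/engine"
)

// install registers stub handlers the way daemon.Install now does:
// one loop over a name->handler table instead of a chain of if-err blocks.
func install(eng *engine.Engine) error {
	stub := func(job *engine.Job) engine.Status { return engine.StatusOK }
	for name, handler := range map[string]engine.Handler{
		"start": stub,
		"stop":  stub,
	} {
		if err := eng.Register(name, handler); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := install(engine.New()); err != nil {
		fmt.Println(err)
	}
}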

+ 181 - 0
daemon/delete.go

@@ -0,0 +1,181 @@
+package daemon
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/utils"
+)
+
+// FIXME: rename to ContainerRemove for consistency with the CLI command.
+func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+	removeVolume := job.GetenvBool("removeVolume")
+	removeLink := job.GetenvBool("removeLink")
+	stop := job.GetenvBool("stop")
+	kill := job.GetenvBool("kill")
+
+	container := daemon.Get(name)
+
+	if removeLink {
+		if container == nil {
+			return job.Errorf("No such link: %s", name)
+		}
+		name, err := GetFullContainerName(name)
+		if err != nil {
+			job.Error(err)
+		}
+		parent, n := path.Split(name)
+		if parent == "/" {
+			return job.Errorf("Conflict, cannot remove the default name of the container")
+		}
+		pe := daemon.ContainerGraph().Get(parent)
+		if pe == nil {
+			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+		}
+		parentContainer := daemon.Get(pe.ID())
+
+		if parentContainer != nil {
+			parentContainer.DisableLink(n)
+		}
+
+		if err := daemon.ContainerGraph().Delete(name); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+
+	if container != nil {
+		if container.State.IsRunning() {
+			if stop {
+				if err := container.Stop(5); err != nil {
+					return job.Errorf("Could not stop running container, cannot remove - %v", err)
+				}
+			} else if kill {
+				if err := container.Kill(); err != nil {
+					return job.Errorf("Could not kill running container, cannot remove - %v", err)
+				}
+			} else {
+				return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -s or -k")
+			}
+		}
+		if err := daemon.Destroy(container); err != nil {
+			return job.Errorf("Cannot destroy container %s: %s", name, err)
+		}
+		job.Eng.Job("log", "destroy", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
+
+		if removeVolume {
+			var (
+				volumes     = make(map[string]struct{})
+				binds       = make(map[string]struct{})
+				usedVolumes = make(map[string]*Container)
+			)
+
+			// the volume id is always the base of the path
+			getVolumeId := func(p string) string {
+				return filepath.Base(strings.TrimSuffix(p, "/layer"))
+			}
+
+			// populate bind map so that they can be skipped and not removed
+			for _, bind := range container.HostConfig().Binds {
+				source := strings.Split(bind, ":")[0]
+				// TODO: refactor all volume stuff, all of it
+				// it is very important that we eval the link, or comparing the keys to container.Volumes will not work
+				//
+				// eval symlink can fail, ref #5244 if we receive an is not exist error we can ignore it
+				p, err := filepath.EvalSymlinks(source)
+				if err != nil && !os.IsNotExist(err) {
+					return job.Error(err)
+				}
+				if p != "" {
+					source = p
+				}
+				binds[source] = struct{}{}
+			}
+
+			// Store all the deleted containers volumes
+			for _, volumeId := range container.Volumes {
+				// Skip the volumes mounted from external
+				// bind mounts here will be evaluated for a symlink
+				if _, exists := binds[volumeId]; exists {
+					continue
+				}
+
+				volumeId = getVolumeId(volumeId)
+				volumes[volumeId] = struct{}{}
+			}
+
+			// Retrieve all volumes from all remaining containers
+			for _, container := range daemon.List() {
+				for _, containerVolumeId := range container.Volumes {
+					containerVolumeId = getVolumeId(containerVolumeId)
+					usedVolumes[containerVolumeId] = container
+				}
+			}
+
+			for volumeId := range volumes {
+				// If the requested volume is still used by another container, skip it
+				if c, exists := usedVolumes[volumeId]; exists {
+					log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
+					continue
+				}
+				if err := daemon.Volumes().Delete(volumeId); err != nil {
+					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
+				}
+			}
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}
+
+// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
+// FIXME: rename to Rm for consistency with the CLI command
+func (daemon *Daemon) Destroy(container *Container) error {
+	if container == nil {
+		return fmt.Errorf("The given container is <nil>")
+	}
+
+	element := daemon.containers.Get(container.ID)
+	if element == nil {
+		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
+	}
+
+	if err := container.Stop(3); err != nil {
+		return err
+	}
+
+	// Deregister the container before removing its directory, to avoid race conditions
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Delete(container.ID)
+
+	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
+		utils.Debugf("Unable to remove container from link graph: %s", err)
+	}
+
+	if err := daemon.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
+	}
+
+	initID := fmt.Sprintf("%s-init", container.ID)
+	if err := daemon.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
+	}
+
+	if err := os.RemoveAll(container.root); err != nil {
+		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
+	}
+
+	selinuxFreeLxcContexts(container.ProcessLabel)
+
+	return nil
+}
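
getVolumeId above derives a volume id by stripping an optional trailing "/layer" and taking the base of the path. A minimal sketch with a hypothetical on-disk path:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Hypothetical volume path: the id is the base element once "/layer" is stripped.
	p := "/var/lib/docker/vfs/dir/4e2bf3c8/layer"
	fmt.Println(filepath.Base(strings.TrimSuffix(p, "/layer"))) // 4e2bf3c8
}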

+ 30 - 0
daemon/export.go

@@ -0,0 +1,30 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s container_id", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		data, err := container.Export()
+		if err != nil {
+			return job.Errorf("%s: %s", name, err)
+		}
+		defer data.Close()
+
+		// Stream the entire contents of the container (basically a volatile snapshot)
+		if _, err := io.Copy(job.Stdout, data); err != nil {
+			return job.Errorf("%s: %s", name, err)
+		}
+		// FIXME: factor job-specific LogEvent to engine.Job.Run()
+		job.Eng.Job("log", "export", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}

+ 59 - 0
daemon/kill.go

@@ -0,0 +1,59 @@
+package daemon
+
+import (
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/signal"
+)
+
+// ContainerKill sends a signal to the container.
+// If no signal is given (sig 0), then Kill with SIGKILL and wait
+// for the container to exit.
+// If a signal is given, then just send it to the container and return.
+func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
+	if n := len(job.Args); n < 1 || n > 2 {
+		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
+	}
+	var (
+		name = job.Args[0]
+		sig  uint64
+		err  error
+	)
+
+	// If we have a signal, look at it. Otherwise, do nothing
+	if len(job.Args) == 2 && job.Args[1] != "" {
+		// Check if we passed the signal as a number:
+		// The largest legal signal is 31, so let's parse on 5 bits
+		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
+		if err != nil {
+			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
+			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
+		}
+
+		if sig == 0 {
+			return job.Errorf("Invalid signal: %s", job.Args[1])
+		}
+	}
+
+	if container := daemon.Get(name); container != nil {
+		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
+		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
+			if err := container.Kill(); err != nil {
+				return job.Errorf("Cannot kill container %s: %s", name, err)
+			}
+			job.Eng.Job("log", "kill", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
+		} else {
+			// Otherwise, just send the requested signal
+			if err := container.KillSig(int(sig)); err != nil {
+				return job.Errorf("Cannot kill container %s: %s", name, err)
+			}
+			// FIXME: Add event for signals
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}
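
Parsing the numeric form on 5 bits is what enforces the "largest legal signal is 31" rule: strconv.ParseUint rejects any value that does not fit the requested bit size, and the caller then falls back to the name lookup in signal.SignalMap. A quick sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// 31 is the largest value that fits in 5 bits, so it parses...
	sig, err := strconv.ParseUint("31", 10, 5)
	fmt.Println(sig, err) // 31 <nil>
	// ...while 32 overflows, so ParseUint errors and the caller
	// falls back to looking the argument up as a signal name.
	_, err = strconv.ParseUint("32", 10, 5)
	fmt.Println(err) // strconv.ParseUint: parsing "32": value out of range
}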

+ 121 - 0
daemon/list.go

@@ -0,0 +1,121 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/pkg/graphdb"
+
+	"github.com/docker/docker/engine"
+)
+
+// List returns an array of all containers registered in the daemon.
+func (daemon *Daemon) List() []*Container {
+	return daemon.containers.List()
+}
+
+func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
+	var (
+		foundBefore bool
+		displayed   int
+		all         = job.GetenvBool("all")
+		since       = job.Getenv("since")
+		before      = job.Getenv("before")
+		n           = job.GetenvInt("limit")
+		size        = job.GetenvBool("size")
+	)
+	outs := engine.NewTable("Created", 0)
+
+	names := map[string][]string{}
+	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
+		names[e.ID()] = append(names[e.ID()], p)
+		return nil
+	}, -1)
+
+	var beforeCont, sinceCont *Container
+	if before != "" {
+		beforeCont = daemon.Get(before)
+		if beforeCont == nil {
+			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
+		}
+	}
+
+	if since != "" {
+		sinceCont = daemon.Get(since)
+		if sinceCont == nil {
+			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
+		}
+	}
+
+	errLast := errors.New("last container")
+	writeCont := func(container *Container) error {
+		container.Lock()
+		defer container.Unlock()
+		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
+			return nil
+		}
+		if before != "" && !foundBefore {
+			if container.ID == beforeCont.ID {
+				foundBefore = true
+			}
+			return nil
+		}
+		if n > 0 && displayed == n {
+			return errLast
+		}
+		if since != "" {
+			if container.ID == sinceCont.ID {
+				return errLast
+			}
+		}
+		displayed++
+		out := &engine.Env{}
+		out.Set("Id", container.ID)
+		out.SetList("Names", names[container.ID])
+		out.Set("Image", daemon.Repositories().ImageName(container.Image))
+		if len(container.Args) > 0 {
+			args := []string{}
+			for _, arg := range container.Args {
+				if strings.Contains(arg, " ") {
+					args = append(args, fmt.Sprintf("'%s'", arg))
+				} else {
+					args = append(args, arg)
+				}
+			}
+			argsAsString := strings.Join(args, " ")
+
+			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
+		} else {
+			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
+		}
+		out.SetInt64("Created", container.Created.Unix())
+		out.Set("Status", container.State.String())
+		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
+		if err != nil {
+			return err
+		}
+		out.Set("Ports", str)
+		if size {
+			sizeRw, sizeRootFs := container.GetSize()
+			out.SetInt64("SizeRw", sizeRw)
+			out.SetInt64("SizeRootFs", sizeRootFs)
+		}
+		outs.Add(out)
+		return nil
+	}
+
+	for _, container := range daemon.List() {
+		if err := writeCont(container); err != nil {
+			if err != errLast {
+				return job.Error(err)
+			}
+			break
+		}
+	}
+	outs.ReverseSort()
+	if _, err := outs.WriteListTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
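
The Command field is rendered as a shell-like string, single-quoting any argument that contains a space so the listing stays unambiguous. A minimal sketch of that quoting, with hypothetical values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical container path and args as stored on a Container.
	path, args := "/bin/sh", []string{"-c", "echo hello"}
	quoted := []string{}
	for _, arg := range args {
		if strings.Contains(arg, " ") {
			quoted = append(quoted, fmt.Sprintf("'%s'", arg))
		} else {
			quoted = append(quoted, arg)
		}
	}
	fmt.Printf("\"%s %s\"\n", path, strings.Join(quoted, " ")) // "/bin/sh -c 'echo hello'"
}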

+ 133 - 0
daemon/logs.go

@@ -0,0 +1,133 @@
+package daemon
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/docker/docker/pkg/tailfile"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/utils"
+)
+
+func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+
+	var (
+		name   = job.Args[0]
+		stdout = job.GetenvBool("stdout")
+		stderr = job.GetenvBool("stderr")
+		tail   = job.Getenv("tail")
+		follow = job.GetenvBool("follow")
+		times  = job.GetenvBool("timestamps")
+		lines  = -1
+		format string
+	)
+	if !(stdout || stderr) {
+		return job.Errorf("You must choose at least one stream")
+	}
+	if times {
+		format = time.RFC3339Nano
+	}
+	if tail == "" {
+		tail = "all"
+	}
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+	cLog, err := container.ReadLog("json")
+	if err != nil && os.IsNotExist(err) {
+		// Legacy logs
+		utils.Debugf("Old logs format")
+		if stdout {
+			cLog, err := container.ReadLog("stdout")
+			if err != nil {
+				utils.Errorf("Error reading logs (stdout): %s", err)
+			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
+				utils.Errorf("Error streaming logs (stdout): %s", err)
+			}
+		}
+		if stderr {
+			cLog, err := container.ReadLog("stderr")
+			if err != nil {
+				utils.Errorf("Error reading logs (stderr): %s", err)
+			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
+				utils.Errorf("Error streaming logs (stderr): %s", err)
+			}
+		}
+	} else if err != nil {
+		utils.Errorf("Error reading logs (json): %s", err)
+	} else {
+		if tail != "all" {
+			var err error
+			lines, err = strconv.Atoi(tail)
+			if err != nil {
+				utils.Errorf("Failed to parse tail %s, error: %v, showing all logs", tail, err)
+				lines = -1
+			}
+		}
+		if lines != 0 {
+			if lines > 0 {
+				f := cLog.(*os.File)
+				ls, err := tailfile.TailFile(f, lines)
+				if err != nil {
+					return job.Error(err)
+				}
+				tmp := bytes.NewBuffer([]byte{})
+				for _, l := range ls {
+					fmt.Fprintf(tmp, "%s\n", l)
+				}
+				cLog = tmp
+			}
+			dec := json.NewDecoder(cLog)
+			for {
+				l := &utils.JSONLog{}
+
+				if err := dec.Decode(l); err == io.EOF {
+					break
+				} else if err != nil {
+					utils.Errorf("Error streaming logs: %s", err)
+					break
+				}
+				logLine := l.Log
+				if times {
+					logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine)
+				}
+				if l.Stream == "stdout" && stdout {
+					fmt.Fprintf(job.Stdout, "%s", logLine)
+				}
+				if l.Stream == "stderr" && stderr {
+					fmt.Fprintf(job.Stderr, "%s", logLine)
+				}
+			}
+		}
+	}
+	if follow {
+		errors := make(chan error, 2)
+		if stdout {
+			stdoutPipe := container.StdoutLogPipe()
+			go func() {
+				errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
+			}()
+		}
+		if stderr {
+			stderrPipe := container.StderrLogPipe()
+			go func() {
+				errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
+			}()
+		}
+		err := <-errors
+		if err != nil {
+			utils.Errorf("%s", err)
+		}
+	}
+	return engine.StatusOK
+}
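
The tail handling above degrades gracefully: an empty or "all" value (or an unparsable one) streams the whole file, "0" suppresses historical output, and a positive n hands just the last n lines to tailfile.TailFile. A minimal sketch of that decision table:

package main

import (
	"fmt"
	"strconv"
)

// tailLines maps the "tail" env value to a line count, mirroring the logic
// above: -1 streams everything, 0 skips historical logs, n > 0 keeps n lines.
func tailLines(tail string) int {
	if tail == "" || tail == "all" {
		return -1
	}
	n, err := strconv.Atoi(tail)
	if err != nil {
		return -1 // unparsable values fall back to showing all logs
	}
	return n
}

func main() {
	for _, t := range []string{"", "all", "0", "5", "bogus"} {
		fmt.Printf("%q -> %d\n", t, tailLines(t))
	}
}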

+ 29 - 0
daemon/resize.go

@@ -0,0 +1,29 @@
+package daemon
+
+import (
+	"strconv"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
+	if len(job.Args) != 3 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
+	}
+	name := job.Args[0]
+	height, err := strconv.Atoi(job.Args[1])
+	if err != nil {
+		return job.Error(err)
+	}
+	width, err := strconv.Atoi(job.Args[2])
+	if err != nil {
+		return job.Error(err)
+	}
+	if container := daemon.Get(name); container != nil {
+		if err := container.Resize(height, width); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}

+ 27 - 0
daemon/restart.go

@@ -0,0 +1,27 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+	var (
+		name = job.Args[0]
+		t    = 10
+	)
+	if job.EnvExists("t") {
+		t = job.GetenvInt("t")
+	}
+	if container := daemon.Get(name); container != nil {
+		if err := container.Restart(int(t)); err != nil {
+			return job.Errorf("Cannot restart container %s: %s\n", name, err)
+		}
+		job.Eng.Job("log", "restart", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
+	} else {
+		return job.Errorf("No such container: %s\n", name)
+	}
+	return engine.StatusOK
+}

+ 68 - 0
daemon/start.go

@@ -0,0 +1,68 @@
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
+	if len(job.Args) < 1 {
+		return job.Errorf("Usage: %s container_id", job.Name)
+	}
+	var (
+		name      = job.Args[0]
+		container = daemon.Get(name)
+	)
+
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	if container.State.IsRunning() {
+		return job.Errorf("Container already started")
+	}
+
+	// If no environment was set, then no hostconfig was passed.
+	if len(job.Environ()) > 0 {
+		hostConfig := runconfig.ContainerHostConfigFromJob(job)
+		if err := daemon.setHostConfig(container, hostConfig); err != nil {
+			return job.Error(err)
+		}
+	}
+	if err := container.Start(); err != nil {
+		return job.Errorf("Cannot start container %s: %s", name, err)
+	}
+	job.Eng.Job("log", "start", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
+
+	return engine.StatusOK
+}
+
+func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
+	// Validate the HostConfig binds. Make sure that:
+	// the source exists
+	for _, bind := range hostConfig.Binds {
+		splitBind := strings.Split(bind, ":")
+		source := splitBind[0]
+
+		// ensure the source exists on the host
+		_, err := os.Stat(source)
+		if err != nil && os.IsNotExist(err) {
+			err = os.MkdirAll(source, 0755)
+			if err != nil {
+				return fmt.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
+			}
+		}
+	}
+	// Register any links from the host config before starting the container
+	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
+		return err
+	}
+	container.SetHostConfig(hostConfig)
+	container.ToDisk()
+
+	return nil
+}

+ 30 - 0
daemon/stop.go

@@ -0,0 +1,30 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+	var (
+		name = job.Args[0]
+		t    = 10
+	)
+	if job.EnvExists("t") {
+		t = job.GetenvInt("t")
+	}
+	if container := daemon.Get(name); container != nil {
+		if !container.State.IsRunning() {
+			return job.Errorf("Container already stopped")
+		}
+		if err := container.Stop(int(t)); err != nil {
+			return job.Errorf("Cannot stop container %s: %s\n", name, err)
+		}
+		job.Eng.Job("log", "stop", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
+	} else {
+		return job.Errorf("No such container: %s\n", name)
+	}
+	return engine.StatusOK
+}

+ 79 - 0
daemon/top.go

@@ -0,0 +1,79 @@
+package daemon
+
+import (
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 && len(job.Args) != 2 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
+	}
+	var (
+		name   = job.Args[0]
+		psArgs = "-ef"
+	)
+
+	if len(job.Args) == 2 && job.Args[1] != "" {
+		psArgs = job.Args[1]
+	}
+
+	if container := daemon.Get(name); container != nil {
+		if !container.State.IsRunning() {
+			return job.Errorf("Container %s is not running", name)
+		}
+		pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
+		if err != nil {
+			return job.Error(err)
+		}
+		output, err := exec.Command("ps", psArgs).Output()
+		if err != nil {
+			return job.Errorf("Error running ps: %s", err)
+		}
+
+		lines := strings.Split(string(output), "\n")
+		header := strings.Fields(lines[0])
+		out := &engine.Env{}
+		out.SetList("Titles", header)
+
+		pidIndex := -1
+		for i, name := range header {
+			if name == "PID" {
+				pidIndex = i
+			}
+		}
+		if pidIndex == -1 {
+			return job.Errorf("Couldn't find PID field in ps output")
+		}
+
+		processes := [][]string{}
+		for _, line := range lines[1:] {
+			if len(line) == 0 {
+				continue
+			}
+			fields := strings.Fields(line)
+			p, err := strconv.Atoi(fields[pidIndex])
+			if err != nil {
+				return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
+			}
+
+			for _, pid := range pids {
+				if pid == p {
+					// Make sure number of fields equals number of header titles
+					// merging "overhanging" fields
+					process := fields[:len(header)-1]
+					process = append(process, strings.Join(fields[len(header)-1:], " "))
+					processes = append(processes, process)
+				}
+			}
+		}
+		out.SetJson("Processes", processes)
+		out.WriteTo(job.Stdout)
+		return engine.StatusOK
+
+	}
+	return job.Errorf("No such container: %s", name)
+}
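
Because ps output is whitespace-separated, a command line containing spaces yields more fields than header titles; the loop above folds the overhang back into the last column. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	header := strings.Fields("UID   PID   CMD")
	fields := strings.Fields("root  42    sleep 100")
	// One field per header title; the overhanging fields are merged into the last column.
	process := fields[:len(header)-1]
	process = append(process, strings.Join(fields[len(header)-1:], " "))
	fmt.Println(process) // [root 42 sleep 100]
}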

+ 20 - 0
daemon/wait.go

@@ -0,0 +1,20 @@
+package daemon
+
+import (
+	"time"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		status, _ := container.State.WaitStop(-1 * time.Second)
+		job.Printf("%d\n", status)
+		return engine.StatusOK
+	}
+	return job.Errorf("%s: no such container: %s", job.Name, name)
+}

+ 1 - 1
integration/server_test.go

@@ -221,7 +221,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 	}
 
 	// FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty")
-	job = eng.Job("container_delete", id)
+	job = eng.Job("delete", id)
 	job.SetenvBool("removeVolume", true)
 	if err := job.Run(); err != nil {
 		t.Fatal(err)

+ 0 - 793
server/container.go

@@ -1,793 +0,0 @@
-// DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE.
-//
-// For additional commments see server/server.go
-//
-package server
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/graph"
-	"github.com/docker/docker/pkg/graphdb"
-	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/signal"
-	"github.com/docker/docker/pkg/tailfile"
-	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
-)
-
-// ContainerKill send signal to the container
-// If no signal is given (sig 0), then Kill with SIGKILL and wait
-// for the container to exit.
-// If a signal is given, then just send it to the container and return.
-func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
-	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		sig  uint64
-		err  error
-	)
-
-	// If we have a signal, look at it. Otherwise, do nothing
-	if len(job.Args) == 2 && job.Args[1] != "" {
-		// Check if we passed the signal as a number:
-		// The largest legal signal is 31, so let's parse on 5 bits
-		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
-		if err != nil {
-			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
-			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
-		}
-
-		if sig == 0 {
-			return job.Errorf("Invalid signal: %s", job.Args[1])
-		}
-	}
-
-	if container := srv.daemon.Get(name); container != nil {
-		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
-		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
-			if err := container.Kill(); err != nil {
-				return job.Errorf("Cannot kill container %s: %s", name, err)
-			}
-			srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-		} else {
-			// Otherwise, just send the requested signal
-			if err := container.KillSig(int(sig)); err != nil {
-				return job.Errorf("Cannot kill container %s: %s", name, err)
-			}
-			// FIXME: Add event for signals
-		}
-	} else {
-		return job.Errorf("No such container: %s", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
-	}
-	name := job.Args[0]
-	if container := srv.daemon.Get(name); container != nil {
-		data, err := container.Export()
-		if err != nil {
-			return job.Errorf("%s: %s", name, err)
-		}
-		defer data.Close()
-
-		// Stream the entire contents of the container (basically a volatile snapshot)
-		if _, err := io.Copy(job.Stdout, data); err != nil {
-			return job.Errorf("%s: %s", name, err)
-		}
-		// FIXME: factor job-specific LogEvent to engine.Job.Run()
-		srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-		return engine.StatusOK
-	}
-	return job.Errorf("No such container: %s", name)
-}
-
-func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 && len(job.Args) != 2 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
-	}
-	var (
-		name   = job.Args[0]
-		psArgs = "-ef"
-	)
-
-	if len(job.Args) == 2 && job.Args[1] != "" {
-		psArgs = job.Args[1]
-	}
-
-	if container := srv.daemon.Get(name); container != nil {
-		if !container.State.IsRunning() {
-			return job.Errorf("Container %s is not running", name)
-		}
-		pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
-		if err != nil {
-			return job.Error(err)
-		}
-		output, err := exec.Command("ps", psArgs).Output()
-		if err != nil {
-			return job.Errorf("Error running ps: %s", err)
-		}
-
-		lines := strings.Split(string(output), "\n")
-		header := strings.Fields(lines[0])
-		out := &engine.Env{}
-		out.SetList("Titles", header)
-
-		pidIndex := -1
-		for i, name := range header {
-			if name == "PID" {
-				pidIndex = i
-			}
-		}
-		if pidIndex == -1 {
-			return job.Errorf("Couldn't find PID field in ps output")
-		}
-
-		processes := [][]string{}
-		for _, line := range lines[1:] {
-			if len(line) == 0 {
-				continue
-			}
-			fields := strings.Fields(line)
-			p, err := strconv.Atoi(fields[pidIndex])
-			if err != nil {
-				return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
-			}
-
-			for _, pid := range pids {
-				if pid == p {
-					// Make sure number of fields equals number of header titles
-					// merging "overhanging" fields
-					process := fields[:len(header)-1]
-					process = append(process, strings.Join(fields[len(header)-1:], " "))
-					processes = append(processes, process)
-				}
-			}
-		}
-		out.SetJson("Processes", processes)
-		out.WriteTo(job.Stdout)
-		return engine.StatusOK
-
-	}
-	return job.Errorf("No such container: %s", name)
-}
-
-func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
-	}
-	name := job.Args[0]
-	if container := srv.daemon.Get(name); container != nil {
-		outs := engine.NewTable("", 0)
-		changes, err := container.Changes()
-		if err != nil {
-			return job.Error(err)
-		}
-		for _, change := range changes {
-			out := &engine.Env{}
-			if err := out.Import(change); err != nil {
-				return job.Error(err)
-			}
-			outs.Add(out)
-		}
-		if _, err := outs.WriteListTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-	} else {
-		return job.Errorf("No such container: %s", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) Containers(job *engine.Job) engine.Status {
-	var (
-		foundBefore bool
-		displayed   int
-		all         = job.GetenvBool("all")
-		since       = job.Getenv("since")
-		before      = job.Getenv("before")
-		n           = job.GetenvInt("limit")
-		size        = job.GetenvBool("size")
-	)
-	outs := engine.NewTable("Created", 0)
-
-	names := map[string][]string{}
-	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
-		names[e.ID()] = append(names[e.ID()], p)
-		return nil
-	}, -1)
-
-	var beforeCont, sinceCont *daemon.Container
-	if before != "" {
-		beforeCont = srv.daemon.Get(before)
-		if beforeCont == nil {
-			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
-		}
-	}
-
-	if since != "" {
-		sinceCont = srv.daemon.Get(since)
-		if sinceCont == nil {
-			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
-		}
-	}
-
-	errLast := errors.New("last container")
-	writeCont := func(container *daemon.Container) error {
-		container.Lock()
-		defer container.Unlock()
-		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
-			return nil
-		}
-		if before != "" && !foundBefore {
-			if container.ID == beforeCont.ID {
-				foundBefore = true
-			}
-			return nil
-		}
-		if n > 0 && displayed == n {
-			return errLast
-		}
-		if since != "" {
-			if container.ID == sinceCont.ID {
-				return errLast
-			}
-		}
-		displayed++
-		out := &engine.Env{}
-		out.Set("Id", container.ID)
-		out.SetList("Names", names[container.ID])
-		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
-		if len(container.Args) > 0 {
-			args := []string{}
-			for _, arg := range container.Args {
-				if strings.Contains(arg, " ") {
-					args = append(args, fmt.Sprintf("'%s'", arg))
-				} else {
-					args = append(args, arg)
-				}
-			}
-			argsAsString := strings.Join(args, " ")
-
-			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
-		} else {
-			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
-		}
-		out.SetInt64("Created", container.Created.Unix())
-		out.Set("Status", container.State.String())
-		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
-		if err != nil {
-			return err
-		}
-		out.Set("Ports", str)
-		if size {
-			sizeRw, sizeRootFs := container.GetSize()
-			out.SetInt64("SizeRw", sizeRw)
-			out.SetInt64("SizeRootFs", sizeRootFs)
-		}
-		outs.Add(out)
-		return nil
-	}
-
-	for _, container := range srv.daemon.List() {
-		if err := writeCont(container); err != nil {
-			if err != errLast {
-				return job.Error(err)
-			}
-			break
-		}
-	}
-	outs.ReverseSort()
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
-	}
-	name := job.Args[0]
-
-	container := srv.daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-
-	var (
-		config    = container.Config
-		newConfig runconfig.Config
-	)
-
-	if err := job.GetenvJson("config", &newConfig); err != nil {
-		return job.Error(err)
-	}
-
-	if err := runconfig.Merge(&newConfig, config); err != nil {
-		return job.Error(err)
-	}
-
-	img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
-	if err != nil {
-		return job.Error(err)
-	}
-	job.Printf("%s\n", img.ID)
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
-	var name string
-	if len(job.Args) == 1 {
-		name = job.Args[0]
-	} else if len(job.Args) > 1 {
-		return job.Errorf("Usage: %s", job.Name)
-	}
-	config := runconfig.ContainerConfigFromJob(job)
-	if config.Memory != 0 && config.Memory < 524288 {
-		return job.Errorf("Minimum memory limit allowed is 512k")
-	}
-	if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
-		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
-		config.Memory = 0
-	}
-	if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
-		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
-		config.MemorySwap = -1
-	}
-	container, buildWarnings, err := srv.daemon.Create(config, name)
-	if err != nil {
-		if srv.daemon.Graph().IsNotExist(err) {
-			_, tag := parsers.ParseRepositoryTag(config.Image)
-			if tag == "" {
-				tag = graph.DEFAULTTAG
-			}
-			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
-		}
-		return job.Error(err)
-	}
-	if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
-		job.Errorf("IPv4 forwarding is disabled.\n")
-	}
-	srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-	// FIXME: this is necessary because daemon.Create might return a nil container
-	// with a non-nil error. This should not happen! Once it's fixed we
-	// can remove this workaround.
-	if container != nil {
-		job.Printf("%s\n", container.ID)
-	}
-	for _, warning := range buildWarnings {
-		job.Errorf("%s\n", warning)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		t    = 10
-	)
-	if job.EnvExists("t") {
-		t = job.GetenvInt("t")
-	}
-	if container := srv.daemon.Get(name); container != nil {
-		if err := container.Restart(int(t)); err != nil {
-			return job.Errorf("Cannot restart container %s: %s\n", name, err)
-		}
-		srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-	} else {
-		return job.Errorf("No such container: %s\n", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
-	}
-	name := job.Args[0]
-	removeVolume := job.GetenvBool("removeVolume")
-	removeLink := job.GetenvBool("removeLink")
-	stop := job.GetenvBool("stop")
-	kill := job.GetenvBool("kill")
-
-	container := srv.daemon.Get(name)
-
-	if removeLink {
-		if container == nil {
-			return job.Errorf("No such link: %s", name)
-		}
-		name, err := daemon.GetFullContainerName(name)
-		if err != nil {
-			job.Error(err)
-		}
-		parent, n := path.Split(name)
-		if parent == "/" {
-			return job.Errorf("Conflict, cannot remove the default name of the container")
-		}
-		pe := srv.daemon.ContainerGraph().Get(parent)
-		if pe == nil {
-			return job.Errorf("Cannot get parent %s for name %s", parent, name)
-		}
-		parentContainer := srv.daemon.Get(pe.ID())
-
-		if parentContainer != nil {
-			parentContainer.DisableLink(n)
-		}
-
-		if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	}
-
-	if container != nil {
-		if container.State.IsRunning() {
-			if stop {
-				if err := container.Stop(5); err != nil {
-					return job.Errorf("Could not stop running container, cannot remove - %v", err)
-				}
-			} else if kill {
-				if err := container.Kill(); err != nil {
-					return job.Errorf("Could not kill running container, cannot remove - %v", err)
-				}
-			} else {
-				return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -s or -k")
-			}
-		}
-		if err := srv.daemon.Destroy(container); err != nil {
-			return job.Errorf("Cannot destroy container %s: %s", name, err)
-		}
-		srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-
-		if removeVolume {
-			var (
-				volumes     = make(map[string]struct{})
-				binds       = make(map[string]struct{})
-				usedVolumes = make(map[string]*daemon.Container)
-			)
-
-			// the volume id is always the base of the path
-			getVolumeId := func(p string) string {
-				return filepath.Base(strings.TrimSuffix(p, "/layer"))
-			}
-
-			// populate bind map so that they can be skipped and not removed
-			for _, bind := range container.HostConfig().Binds {
-				source := strings.Split(bind, ":")[0]
-				// TODO: refactor all volume stuff, all of it
-				// it is very important that we eval the link or comparing the keys to container.Volumes will not work
-				//
-				// eval symlink can fail, ref #5244 if we receive an is not exist error we can ignore it
-				p, err := filepath.EvalSymlinks(source)
-				if err != nil && !os.IsNotExist(err) {
-					return job.Error(err)
-				}
-				if p != "" {
-					source = p
-				}
-				binds[source] = struct{}{}
-			}
-
-			// Store all the deleted containers volumes
-			for _, volumeId := range container.Volumes {
-				// Skip the volumes mounted from external
-				// bind mounts here will will be evaluated for a symlink
-				if _, exists := binds[volumeId]; exists {
-					continue
-				}
-
-				volumeId = getVolumeId(volumeId)
-				volumes[volumeId] = struct{}{}
-			}
-
-			// Retrieve all volumes from all remaining containers
-			for _, container := range srv.daemon.List() {
-				for _, containerVolumeId := range container.Volumes {
-					containerVolumeId = getVolumeId(containerVolumeId)
-					usedVolumes[containerVolumeId] = container
-				}
-			}
-
-			for volumeId := range volumes {
-				// If the requested volu
-				if c, exists := usedVolumes[volumeId]; exists {
-					log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
-					continue
-				}
-				if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
-					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
-				}
-			}
-		}
-	} else {
-		return job.Errorf("No such container: %s", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) setHostConfig(container *daemon.Container, hostConfig *runconfig.HostConfig) error {
-	// Validate the HostConfig binds. Make sure that:
-	// the source exists
-	for _, bind := range hostConfig.Binds {
-		splitBind := strings.Split(bind, ":")
-		source := splitBind[0]
-
-		// ensure the source exists on the host
-		_, err := os.Stat(source)
-		if err != nil && os.IsNotExist(err) {
-			err = os.MkdirAll(source, 0755)
-			if err != nil {
-				return fmt.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
-			}
-		}
-	}
-	// Register any links from the host config before starting the container
-	if err := srv.daemon.RegisterLinks(container, hostConfig); err != nil {
-		return err
-	}
-	container.SetHostConfig(hostConfig)
-	container.ToDisk()
-
-	return nil
-}
-
-func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
-	if len(job.Args) < 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
-	}
-	var (
-		name      = job.Args[0]
-		daemon    = srv.daemon
-		container = daemon.Get(name)
-	)
-
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-
-	if container.State.IsRunning() {
-		return job.Errorf("Container already started")
-	}
-
-	// If no environment was set, then no hostconfig was passed.
-	if len(job.Environ()) > 0 {
-		hostConfig := runconfig.ContainerHostConfigFromJob(job)
-		if err := srv.setHostConfig(container, hostConfig); err != nil {
-			return job.Error(err)
-		}
-	}
-	if err := container.Start(); err != nil {
-		return job.Errorf("Cannot start container %s: %s", name, err)
-	}
-	srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
-
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		t    = 10
-	)
-	if job.EnvExists("t") {
-		t = job.GetenvInt("t")
-	}
-	if container := srv.daemon.Get(name); container != nil {
-		if !container.State.IsRunning() {
-			return job.Errorf("Container already stopped")
-		}
-		if err := container.Stop(int(t)); err != nil {
-			return job.Errorf("Cannot stop container %s: %s\n", name, err)
-		}
-		srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-	} else {
-		return job.Errorf("No such container: %s\n", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s", job.Name)
-	}
-	name := job.Args[0]
-	if container := srv.daemon.Get(name); container != nil {
-		status, _ := container.State.WaitStop(-1 * time.Second)
-		job.Printf("%d\n", status)
-		return engine.StatusOK
-	}
-	return job.Errorf("%s: no such container: %s", job.Name, name)
-}
-
-func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
-	if len(job.Args) != 3 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
-	}
-	name := job.Args[0]
-	height, err := strconv.Atoi(job.Args[1])
-	if err != nil {
-		return job.Error(err)
-	}
-	width, err := strconv.Atoi(job.Args[2])
-	if err != nil {
-		return job.Error(err)
-	}
-	if container := srv.daemon.Get(name); container != nil {
-		if err := container.Resize(height, width); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	}
-	return job.Errorf("No such container: %s", name)
-}
-
-func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-
-	var (
-		name   = job.Args[0]
-		stdout = job.GetenvBool("stdout")
-		stderr = job.GetenvBool("stderr")
-		tail   = job.Getenv("tail")
-		follow = job.GetenvBool("follow")
-		times  = job.GetenvBool("timestamps")
-		lines  = -1
-		format string
-	)
-	if !(stdout || stderr) {
-		return job.Errorf("You must choose at least one stream")
-	}
-	if times {
-		format = time.RFC3339Nano
-	}
-	if tail == "" {
-		tail = "all"
-	}
-	container := srv.daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-	cLog, err := container.ReadLog("json")
-	if err != nil && os.IsNotExist(err) {
-		// Legacy logs
-		utils.Debugf("Old logs format")
-		if stdout {
-			cLog, err := container.ReadLog("stdout")
-			if err != nil {
-				utils.Errorf("Error reading logs (stdout): %s", err)
-			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
-				utils.Errorf("Error streaming logs (stdout): %s", err)
-			}
-		}
-		if stderr {
-			cLog, err := container.ReadLog("stderr")
-			if err != nil {
-				utils.Errorf("Error reading logs (stderr): %s", err)
-			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
-				utils.Errorf("Error streaming logs (stderr): %s", err)
-			}
-		}
-	} else if err != nil {
-		utils.Errorf("Error reading logs (json): %s", err)
-	} else {
-		if tail != "all" {
-			var err error
-			lines, err = strconv.Atoi(tail)
-			if err != nil {
-				utils.Errorf("Failed to parse tail %s, error: %v, show all logs", err)
-				lines = -1
-			}
-		}
-		if lines != 0 {
-			if lines > 0 {
-				f := cLog.(*os.File)
-				ls, err := tailfile.TailFile(f, lines)
-				if err != nil {
-					return job.Error(err)
-				}
-				tmp := bytes.NewBuffer([]byte{})
-				for _, l := range ls {
-					fmt.Fprintf(tmp, "%s\n", l)
-				}
-				cLog = tmp
-			}
-			dec := json.NewDecoder(cLog)
-			for {
-				l := &utils.JSONLog{}
-
-				if err := dec.Decode(l); err == io.EOF {
-					break
-				} else if err != nil {
-					utils.Errorf("Error streaming logs: %s", err)
-					break
-				}
-				logLine := l.Log
-				if times {
-					logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine)
-				}
-				if l.Stream == "stdout" && stdout {
-					fmt.Fprintf(job.Stdout, "%s", logLine)
-				}
-				if l.Stream == "stderr" && stderr {
-					fmt.Fprintf(job.Stderr, "%s", logLine)
-				}
-			}
-		}
-	}
-	if follow {
-		errors := make(chan error, 2)
-		if stdout {
-			stdoutPipe := container.StdoutLogPipe()
-			go func() {
-				errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
-			}()
-		}
-		if stderr {
-			stderrPipe := container.StderrLogPipe()
-			go func() {
-				errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
-			}()
-		}
-		err := <-errors
-		if err != nil {
-			utils.Errorf("%s", err)
-		}
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
-	if len(job.Args) != 2 {
-		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
-	}
-
-	var (
-		name     = job.Args[0]
-		resource = job.Args[1]
-	)
-
-	if container := srv.daemon.Get(name); container != nil {
-
-		data, err := container.Copy(resource)
-		if err != nil {
-			return job.Error(err)
-		}
-		defer data.Close()
-
-		if _, err := io.Copy(job.Stdout, data); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	}
-	return job.Errorf("No such container: %s", name)
-}

+ 14 - 29
server/init.go

@@ -86,35 +86,20 @@ func InitServer(job *engine.Job) engine.Status {
 	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
 
 	for name, handler := range map[string]engine.Handler{
-		"export":           srv.ContainerExport,
-		"create":           srv.ContainerCreate,
-		"stop":             srv.ContainerStop,
-		"restart":          srv.ContainerRestart,
-		"start":            srv.ContainerStart,
-		"kill":             srv.ContainerKill,
-		"wait":             srv.ContainerWait,
-		"tag":              srv.ImageTag, // FIXME merge with "image_tag"
-		"resize":           srv.ContainerResize,
-		"commit":           srv.ContainerCommit,
-		"info":             srv.DockerInfo,
-		"container_delete": srv.ContainerDestroy,
-		"image_export":     srv.ImageExport,
-		"images":           srv.Images,
-		"history":          srv.ImageHistory,
-		"viz":              srv.ImagesViz,
-		"container_copy":   srv.ContainerCopy,
-		"log":              srv.Log,
-		"logs":             srv.ContainerLogs,
-		"changes":          srv.ContainerChanges,
-		"top":              srv.ContainerTop,
-		"load":             srv.ImageLoad,
-		"build":            srv.Build,
-		"pull":             srv.ImagePull,
-		"import":           srv.ImageImport,
-		"image_delete":     srv.ImageDelete,
-		"events":           srv.Events,
-		"push":             srv.ImagePush,
-		"containers":       srv.Containers,
+		"tag":          srv.ImageTag, // FIXME merge with "image_tag"
+		"info":         srv.DockerInfo,
+		"image_export": srv.ImageExport,
+		"images":       srv.Images,
+		"history":      srv.ImageHistory,
+		"viz":          srv.ImagesViz,
+		"log":          srv.Log,
+		"load":         srv.ImageLoad,
+		"build":        srv.Build,
+		"pull":         srv.ImagePull,
+		"import":       srv.ImageImport,
+		"image_delete": srv.ImageDelete,
+		"events":       srv.Events,
+		"push":         srv.ImagePush,
 	} {
 		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
 			return job.Error(err)