Rename runtime/* to daemon/*

Docker-DCO-1.1-Signed-off-by: Alexander Larsson <alexl@redhat.com> (github: alexlarsson)
Alexander Larsson · 11 years ago
commit 359b7df5d2
91 changed files with 827 additions and 827 deletions
  1. builtins/builtins.go (+1 -1)
  2. contrib/docker-device-tool/device_tool.go (+1 -1)
  3. daemon/container.go (+43 -43)
  4. daemon/container_unit_test.go (+1 -1)
  5. daemon/daemon.go (+172 -172)
  6. daemon/daemon_aufs.go (+3 -3)
  7. daemon/daemon_btrfs.go (+7 -0)
  8. daemon/daemon_devicemapper.go (+7 -0)
  9. daemon/daemon_no_aufs.go (+2 -2)
  10. daemon/execdriver/MAINTAINERS (+0 -0)
  11. daemon/execdriver/driver.go (+0 -0)
  12. daemon/execdriver/execdrivers/execdrivers.go (+3 -3)
  13. daemon/execdriver/lxc/driver.go (+1 -1)
  14. daemon/execdriver/lxc/info.go (+0 -0)
  15. daemon/execdriver/lxc/info_test.go (+0 -0)
  16. daemon/execdriver/lxc/init.go (+1 -1)
  17. daemon/execdriver/lxc/lxc_init_linux.go (+0 -0)
  18. daemon/execdriver/lxc/lxc_init_unsupported.go (+0 -0)
  19. daemon/execdriver/lxc/lxc_template.go (+1 -1)
  20. daemon/execdriver/lxc/lxc_template_unit_test.go (+1 -1)
  21. daemon/execdriver/native/configuration/parse.go (+0 -0)
  22. daemon/execdriver/native/configuration/parse_test.go (+1 -1)
  23. daemon/execdriver/native/create.go (+3 -3)
  24. daemon/execdriver/native/driver.go (+1 -1)
  25. daemon/execdriver/native/info.go (+0 -0)
  26. daemon/execdriver/native/template/default_template.go (+0 -0)
  27. daemon/execdriver/native/term.go (+1 -1)
  28. daemon/execdriver/pipes.go (+0 -0)
  29. daemon/execdriver/termconsole.go (+0 -0)
  30. daemon/graphdriver/aufs/aufs.go (+1 -1)
  31. daemon/graphdriver/aufs/aufs_test.go (+1 -1)
  32. daemon/graphdriver/aufs/dirs.go (+0 -0)
  33. daemon/graphdriver/aufs/migrate.go (+0 -0)
  34. daemon/graphdriver/aufs/mount.go (+0 -0)
  35. daemon/graphdriver/aufs/mount_linux.go (+0 -0)
  36. daemon/graphdriver/aufs/mount_unsupported.go (+0 -0)
  37. daemon/graphdriver/btrfs/btrfs.go (+1 -1)
  38. daemon/graphdriver/btrfs/dummy_unsupported.go (+0 -0)
  39. daemon/graphdriver/devmapper/attach_loopback.go (+0 -0)
  40. daemon/graphdriver/devmapper/deviceset.go (+0 -0)
  41. daemon/graphdriver/devmapper/devmapper.go (+0 -0)
  42. daemon/graphdriver/devmapper/devmapper_doc.go (+0 -0)
  43. daemon/graphdriver/devmapper/devmapper_log.go (+0 -0)
  44. daemon/graphdriver/devmapper/devmapper_test.go (+0 -0)
  45. daemon/graphdriver/devmapper/devmapper_wrapper.go (+0 -0)
  46. daemon/graphdriver/devmapper/driver.go (+1 -1)
  47. daemon/graphdriver/devmapper/driver_test.go (+1 -1)
  48. daemon/graphdriver/devmapper/ioctl.go (+0 -0)
  49. daemon/graphdriver/devmapper/mount.go (+0 -0)
  50. daemon/graphdriver/devmapper/sys.go (+0 -0)
  51. daemon/graphdriver/driver.go (+0 -0)
  52. daemon/graphdriver/vfs/driver.go (+1 -1)
  53. daemon/history.go (+1 -1)
  54. daemon/network_settings.go (+1 -1)
  55. daemon/networkdriver/bridge/driver.go (+4 -4)
  56. daemon/networkdriver/ipallocator/allocator.go (+1 -1)
  57. daemon/networkdriver/ipallocator/allocator_test.go (+0 -0)
  58. daemon/networkdriver/network.go (+0 -0)
  59. daemon/networkdriver/network_test.go (+0 -0)
  60. daemon/networkdriver/portallocator/portallocator.go (+0 -0)
  61. daemon/networkdriver/portallocator/portallocator_test.go (+0 -0)
  62. daemon/networkdriver/portmapper/mapper.go (+0 -0)
  63. daemon/networkdriver/portmapper/mapper_test.go (+0 -0)
  64. daemon/networkdriver/utils.go (+0 -0)
  65. daemon/server.go (+1 -1)
  66. daemon/sorter.go (+1 -1)
  67. daemon/state.go (+1 -1)
  68. daemon/utils.go (+5 -5)
  69. daemon/utils_test.go (+1 -1)
  70. daemon/volumes.go (+6 -6)
  71. daemonconfig/config.go (+1 -1)
  72. graph/graph.go (+1 -1)
  73. graph/tags_unit_test.go (+2 -2)
  74. image/graph.go (+1 -1)
  75. image/image.go (+1 -1)
  76. integration-cli/docker_cli_nat_test.go (+2 -2)
  77. integration/api_test.go (+31 -31)
  78. integration/buildfile_test.go (+8 -8)
  79. integration/commands_test.go (+16 -16)
  80. integration/container_test.go (+180 -180)
  81. integration/graph_test.go (+1 -1)
  82. integration/runtime_test.go (+132 -132)
  83. integration/server_test.go (+15 -15)
  84. integration/sorter_test.go (+2 -2)
  85. integration/utils_test.go (+19 -19)
  86. integration/z_final_test.go (+1 -1)
  87. runtime/runtime_btrfs.go (+0 -7)
  88. runtime/runtime_devicemapper.go (+0 -7)
  89. server/buildfile.go (+18 -18)
  90. server/server.go (+115 -115)
  91. sysinit/sysinit.go (+3 -3)
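
The change is mechanical: the runtime package becomes daemon, the Runtime type becomes Daemon, and NewRuntime/NewRuntimeFromDirectory become NewDaemon/NewDaemonFromDirectory, with import paths moving from github.com/dotcloud/docker/runtime/... to github.com/dotcloud/docker/daemon/.... As a minimal sketch of what the rename means for calling code (illustrative only, using a hypothetical caller; this file is not part of the commit):

    package example

    import (
        "github.com/dotcloud/docker/daemon"
        "github.com/dotcloud/docker/daemonconfig"
        "github.com/dotcloud/docker/engine"
    )

    // newDaemon is a hypothetical helper showing the renamed API surface:
    // daemon.NewDaemon replaces runtime.NewRuntime and returns *daemon.Daemon
    // instead of *runtime.Runtime; the constructor signature is otherwise unchanged.
    func newDaemon(cfg *daemonconfig.Config, eng *engine.Engine) (*daemon.Daemon, error) {
        return daemon.NewDaemon(cfg, eng)
    }

The same substitution runs through every hunk below: receivers, fields, and imports named runtime become daemon, with no behavioral change.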

builtins/builtins.go (+1 -1)

@@ -2,8 +2,8 @@ package builtins

 import (
 	api "github.com/dotcloud/docker/api/server"
+	"github.com/dotcloud/docker/daemon/networkdriver/bridge"
 	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/runtime/networkdriver/bridge"
 	"github.com/dotcloud/docker/server"
 )


contrib/docker-device-tool/device_tool.go (+1 -1)

@@ -3,7 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker/runtime/graphdriver/devmapper"
+	"github.com/dotcloud/docker/daemon/graphdriver/devmapper"
 	"os"
 	"path"
 	"sort"

runtime/container.go → daemon/container.go (+43 -43)

@@ -1,17 +1,17 @@
-package runtime
+package daemon

 import (
 	"encoding/json"
 	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/links"
 	"github.com/dotcloud/docker/nat"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime/execdriver"
-	"github.com/dotcloud/docker/runtime/graphdriver"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -64,7 +64,7 @@ type Container struct {
 	stdin     io.ReadCloser
 	stdinPipe io.WriteCloser

-	runtime *Runtime
+	daemon *Daemon

 	waitLock chan struct{}
 	Volumes  map[string]string
@@ -325,7 +325,7 @@ func populateCommand(c *Container, env []string) {
 	)

 	en = &execdriver.Network{
-		Mtu:       c.runtime.config.Mtu,
+		Mtu:       c.daemon.config.Mtu,
 		Interface: nil,
 	}

@@ -389,7 +389,7 @@ func (container *Container) Start() (err error) {
 	if err := container.initializeNetworking(); err != nil {
 		return err
 	}
-	container.verifyRuntimeSettings()
+	container.verifyDaemonSettings()
 	if err := prepareVolumesForContainer(container); err != nil {
 		return err
 	}
@@ -397,7 +397,7 @@ func (container *Container) Start() (err error) {
 	if err != nil {
 		return err
 	}
-	env := container.createRuntimeEnvironment(linkedEnv)
+	env := container.createDaemonEnvironment(linkedEnv)
 	// TODO: This is only needed for lxc so we should look for a way to
 	// remove this dep
 	if err := container.generateEnvConfig(env); err != nil {
@@ -496,11 +496,11 @@ func (container *Container) allocateNetwork() error {
 	var (
 		env *engine.Env
 		err error
-		eng = container.runtime.eng
+		eng = container.daemon.eng
 	)

 	if container.State.IsGhost() {
-		if container.runtime.config.DisableNetwork {
+		if container.daemon.config.DisableNetwork {
 			env = &engine.Env{}
 		} else {
 			currentIP := container.NetworkSettings.IPAddress
@@ -610,7 +610,7 @@ func (container *Container) releaseNetwork() {
 	if container.Config.NetworkDisabled {
 		return
 	}
-	eng := container.runtime.eng
+	eng := container.daemon.eng

 	eng.Job("release_interface", container.ID).Run()
 	container.NetworkSettings = &NetworkSettings{}
@@ -623,12 +623,12 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
 	)

 	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
-	exitCode, err = container.runtime.Run(container, pipes, callback)
+	exitCode, err = container.daemon.Run(container, pipes, callback)
 	if err != nil {
 		utils.Errorf("Error running container: %s", err)
 	}

-	if container.runtime != nil && container.runtime.srv != nil && container.runtime.srv.IsRunning() {
+	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
 		container.State.SetStopped(exitCode)

 		// FIXME: there is a race condition here which causes this to fail during the unit tests.
@@ -651,8 +651,8 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
 		container.stdin, container.stdinPipe = io.Pipe()
 	}

-	if container.runtime != nil && container.runtime.srv != nil {
-		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
+	if container.daemon != nil && container.daemon.srv != nil {
+		container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
 	}

 	close(container.waitLock)
@@ -698,7 +698,7 @@ func (container *Container) KillSig(sig int) error {
 	if !container.State.IsRunning() {
 		return nil
 	}
-	return container.runtime.Kill(container, sig)
+	return container.daemon.Kill(container, sig)
 }

 func (container *Container) Kill() error {
@@ -775,10 +775,10 @@ func (container *Container) ExportRw() (archive.Archive, error) {
 	if err := container.Mount(); err != nil {
 		return nil, err
 	}
-	if container.runtime == nil {
+	if container.daemon == nil {
 		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
 	}
-	archive, err := container.runtime.Diff(container)
+	archive, err := container.daemon.Diff(container)
 	if err != nil {
 		container.Unmount()
 		return nil, err
@@ -825,22 +825,22 @@ func (container *Container) WaitTimeout(timeout time.Duration) error {
 }

 func (container *Container) Mount() error {
-	return container.runtime.Mount(container)
+	return container.daemon.Mount(container)
 }

 func (container *Container) Changes() ([]archive.Change, error) {
-	return container.runtime.Changes(container)
+	return container.daemon.Changes(container)
 }

 func (container *Container) GetImage() (*image.Image, error) {
-	if container.runtime == nil {
+	if container.daemon == nil {
 		return nil, fmt.Errorf("Can't get image of unregistered container")
 	}
-	return container.runtime.graph.Get(container.Image)
+	return container.daemon.graph.Get(container.Image)
 }

 func (container *Container) Unmount() error {
-	return container.runtime.Unmount(container)
+	return container.daemon.Unmount(container)
 }

 func (container *Container) logPath(name string) string {
@@ -893,7 +893,7 @@ func (container *Container) GetSize() (int64, int64) {
 	var (
 		sizeRw, sizeRootfs int64
 		err                error
-		driver             = container.runtime.driver
+		driver             = container.daemon.driver
 	)

 	if err := container.Mount(); err != nil {
@@ -902,7 +902,7 @@ func (container *Container) GetSize() (int64, int64) {
 	}
 	defer container.Unmount()

-	if differ, ok := container.runtime.driver.(graphdriver.Differ); ok {
+	if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
 		sizeRw, err = differ.DiffSize(container.ID)
 		if err != nil {
 			utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
@@ -999,28 +999,28 @@ func (container *Container) setupContainerDns() error {
 		return nil
 	}
 	var (
-		config  = container.hostConfig
-		runtime = container.runtime
+		config = container.hostConfig
+		daemon = container.daemon
 	)
 	resolvConf, err := utils.GetResolvConf()
 	if err != nil {
 		return err
 	}
 	// If custom dns exists, then create a resolv.conf for the container
-	if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 {
+	if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
 		var (
 			dns       = utils.GetNameservers(resolvConf)
 			dnsSearch = utils.GetSearchDomains(resolvConf)
 		)
 		if len(config.Dns) > 0 {
 			dns = config.Dns
-		} else if len(runtime.config.Dns) > 0 {
-			dns = runtime.config.Dns
+		} else if len(daemon.config.Dns) > 0 {
+			dns = daemon.config.Dns
 		}
 		if len(config.DnsSearch) > 0 {
 			dnsSearch = config.DnsSearch
-		} else if len(runtime.config.DnsSearch) > 0 {
-			dnsSearch = runtime.config.DnsSearch
+		} else if len(daemon.config.DnsSearch) > 0 {
+			dnsSearch = daemon.config.DnsSearch
 		}
 		container.ResolvConfPath = path.Join(container.root, "resolv.conf")
 		f, err := os.Create(container.ResolvConfPath)
@@ -1045,7 +1045,7 @@ func (container *Container) setupContainerDns() error {
 }

 func (container *Container) initializeNetworking() error {
-	if container.runtime.config.DisableNetwork {
+	if container.daemon.config.DisableNetwork {
 		container.Config.NetworkDisabled = true
 		container.buildHostnameAndHostsFiles("127.0.1.1")
 	} else {
@@ -1058,26 +1058,26 @@ func (container *Container) initializeNetworking() error {
 }

 // Make sure the config is compatible with the current kernel
-func (container *Container) verifyRuntimeSettings() {
-	if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit {
+func (container *Container) verifyDaemonSettings() {
+	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
 		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		container.Config.Memory = 0
 	}
-	if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit {
+	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
 		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		container.Config.MemorySwap = -1
 	}
-	if container.runtime.sysInfo.IPv4ForwardingDisabled {
+	if container.daemon.sysInfo.IPv4ForwardingDisabled {
 		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
 	}
 }

 func (container *Container) setupLinkedContainers() ([]string, error) {
 	var (
-		env     []string
-		runtime = container.runtime
+		env    []string
+		daemon = container.daemon
 	)
-	children, err := runtime.Children(container.Name)
+	children, err := daemon.Children(container.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -1105,7 +1105,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 				linkAlias,
 				child.Config.Env,
 				child.Config.ExposedPorts,
-				runtime.eng)
+				daemon.eng)

 			if err != nil {
 				rollback()
@@ -1126,7 +1126,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 	return env, nil
 }

-func (container *Container) createRuntimeEnvironment(linkedEnv []string) []string {
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
 	// Setup environment
 	env := []string{
 		"HOME=/",
@@ -1167,10 +1167,10 @@ func (container *Container) setupWorkingDirectory() error {

 func (container *Container) startLoggingToDisk() error {
 	// Setup logging of stdout and stderr to disk
-	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
+	if err := container.daemon.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
 		return err
 	}
-	if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
+	if err := container.daemon.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
 		return err
 	}
 	return nil

runtime/container_unit_test.go → daemon/container_unit_test.go (+1 -1)

@@ -1,4 +1,4 @@
-package runtime
+package daemon

 import (
 	"github.com/dotcloud/docker/nat"

runtime/runtime.go → daemon/daemon.go (+172 -172)

@@ -1,9 +1,16 @@
-package runtime
+package daemon

 import (
 	"container/list"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/daemon/execdriver/execdrivers"
+	"github.com/dotcloud/docker/daemon/execdriver/lxc"
+	"github.com/dotcloud/docker/daemon/graphdriver"
+	_ "github.com/dotcloud/docker/daemon/graphdriver/vfs"
+	_ "github.com/dotcloud/docker/daemon/networkdriver/bridge"
+	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
 	"github.com/dotcloud/docker/daemonconfig"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/engine"
@@ -14,13 +21,6 @@ import (
 	"github.com/dotcloud/docker/pkg/selinux"
 	"github.com/dotcloud/docker/pkg/sysinfo"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime/execdriver"
-	"github.com/dotcloud/docker/runtime/execdriver/execdrivers"
-	"github.com/dotcloud/docker/runtime/execdriver/lxc"
-	"github.com/dotcloud/docker/runtime/graphdriver"
-	_ "github.com/dotcloud/docker/runtime/graphdriver/vfs"
-	_ "github.com/dotcloud/docker/runtime/networkdriver/bridge"
-	"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -44,7 +44,7 @@ var (
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
 )

-type Runtime struct {
+type Daemon struct {
 	repository     string
 	sysInitPath    string
 	containers     *list.List
@@ -76,17 +76,17 @@ func remountPrivate(mountPoint string) error {
 	return mount.ForceMount("", mountPoint, "none", "private")
 }

-// List returns an array of all containers registered in the runtime.
-func (runtime *Runtime) List() []*Container {
+// List returns an array of all containers registered in the daemon.
+func (daemon *Daemon) List() []*Container {
 	containers := new(History)
-	for e := runtime.containers.Front(); e != nil; e = e.Next() {
+	for e := daemon.containers.Front(); e != nil; e = e.Next() {
 		containers.Add(e.Value.(*Container))
 	}
 	return *containers
 }

-func (runtime *Runtime) getContainerElement(id string) *list.Element {
-	for e := runtime.containers.Front(); e != nil; e = e.Next() {
+func (daemon *Daemon) getContainerElement(id string) *list.Element {
+	for e := daemon.containers.Front(); e != nil; e = e.Next() {
 		container := e.Value.(*Container)
 		if container.ID == id {
 			return e
@@ -97,17 +97,17 @@ func (runtime *Runtime) getContainerElement(id string) *list.Element {

 // Get looks for a container by the specified ID or name, and returns it.
 // If the container is not found, or if an error occurs, nil is returned.
-func (runtime *Runtime) Get(name string) *Container {
-	if c, _ := runtime.GetByName(name); c != nil {
+func (daemon *Daemon) Get(name string) *Container {
+	if c, _ := daemon.GetByName(name); c != nil {
 		return c
 	}

-	id, err := runtime.idIndex.Get(name)
+	id, err := daemon.idIndex.Get(name)
 	if err != nil {
 		return nil
 	}

-	e := runtime.getContainerElement(id)
+	e := daemon.getContainerElement(id)
 	if e == nil {
 		return nil
 	}
@@ -116,18 +116,18 @@ func (runtime *Runtime) Get(name string) *Container {

 // Exists returns a true if a container of the specified ID or name exists,
 // false otherwise.
-func (runtime *Runtime) Exists(id string) bool {
-	return runtime.Get(id) != nil
+func (daemon *Daemon) Exists(id string) bool {
+	return daemon.Get(id) != nil
 }

-func (runtime *Runtime) containerRoot(id string) string {
-	return path.Join(runtime.repository, id)
+func (daemon *Daemon) containerRoot(id string) string {
+	return path.Join(daemon.repository, id)
 }

 // Load reads the contents of a container from disk
 // This is typically done at startup.
-func (runtime *Runtime) load(id string) (*Container, error) {
-	container := &Container{root: runtime.containerRoot(id)}
+func (daemon *Daemon) load(id string) (*Container, error) {
+	container := &Container{root: daemon.containerRoot(id)}
 	if err := container.FromDisk(); err != nil {
 		return nil, err
 	}
@@ -140,19 +140,19 @@ func (runtime *Runtime) load(id string) (*Container, error) {
 	return container, nil
 }

-// Register makes a container object usable by the runtime as <container.ID>
-func (runtime *Runtime) Register(container *Container) error {
-	if container.runtime != nil || runtime.Exists(container.ID) {
+// Register makes a container object usable by the daemon as <container.ID>
+func (daemon *Daemon) Register(container *Container) error {
+	if container.daemon != nil || daemon.Exists(container.ID) {
 		return fmt.Errorf("Container is already loaded")
 	}
 	if err := validateID(container.ID); err != nil {
 		return err
 	}
-	if err := runtime.ensureName(container); err != nil {
+	if err := daemon.ensureName(container); err != nil {
 		return err
 	}

-	container.runtime = runtime
+	container.daemon = daemon

 	// Attach to stdout and stderr
 	container.stderr = utils.NewWriteBroadcaster()
@@ -164,8 +164,8 @@ func (runtime *Runtime) Register(container *Container) error {
 		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 	// done
-	runtime.containers.PushBack(container)
-	runtime.idIndex.Add(container.ID)
+	daemon.containers.PushBack(container)
+	daemon.idIndex.Add(container.ID)

 	// FIXME: if the container is supposed to be running but is not, auto restart it?
 	//        if so, then we need to restart monitor and init a new lock
@@ -192,7 +192,7 @@ func (runtime *Runtime) Register(container *Container) error {
 				if err != nil {
 					utils.Debugf("cannot find existing process for %d", existingPid)
 				}
-				runtime.execDriver.Terminate(cmd)
+				daemon.execDriver.Terminate(cmd)
 			}
 			if err := container.Unmount(); err != nil {
 				utils.Debugf("ghost unmount error %s", err)
@@ -202,10 +202,10 @@ func (runtime *Runtime) Register(container *Container) error {
 			}
 		}

-		info := runtime.execDriver.Info(container.ID)
+		info := daemon.execDriver.Info(container.ID)
 		if !info.IsRunning() {
 			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
-			if runtime.config.AutoRestart {
+			if daemon.config.AutoRestart {
 				utils.Debugf("Restarting")
 				if err := container.Unmount(); err != nil {
 					utils.Debugf("restart unmount error %s", err)
@@ -234,9 +234,9 @@ func (runtime *Runtime) Register(container *Container) error {
 	return nil
 }

-func (runtime *Runtime) ensureName(container *Container) error {
+func (daemon *Daemon) ensureName(container *Container) error {
 	if container.Name == "" {
-		name, err := generateRandomName(runtime)
+		name, err := generateRandomName(daemon)
 		if err != nil {
 			name = utils.TruncateID(container.ID)
 		}
@@ -245,8 +245,8 @@ func (runtime *Runtime) ensureName(container *Container) error {
 		if err := container.ToDisk(); err != nil {
 			utils.Debugf("Error saving container name %s", err)
 		}
-		if !runtime.containerGraph.Exists(name) {
-			if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
+		if !daemon.containerGraph.Exists(name) {
+			if _, err := daemon.containerGraph.Set(name, container.ID); err != nil {
 				utils.Debugf("Setting default id - %s", err)
 			}
 		}
@@ -254,7 +254,7 @@ func (runtime *Runtime) ensureName(container *Container) error {
 	return nil
 }

-func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
+func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
 	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
 	if err != nil {
 		return err
@@ -263,13 +263,13 @@ func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream strin
 	return nil
 }

-// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem.
-func (runtime *Runtime) Destroy(container *Container) error {
+// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
+func (daemon *Daemon) Destroy(container *Container) error {
 	if container == nil {
 		return fmt.Errorf("The given container is <nil>")
 	}

-	element := runtime.getContainerElement(container.ID)
+	element := daemon.getContainerElement(container.ID)
 	if element == nil {
 		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
 	}
@@ -278,42 +278,42 @@ func (runtime *Runtime) Destroy(container *Container) error {
 		return err
 	}

-	if err := runtime.driver.Remove(container.ID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err)
+	if err := daemon.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
 	}

 	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := runtime.driver.Remove(initID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err)
+	if err := daemon.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
 	}

-	if _, err := runtime.containerGraph.Purge(container.ID); err != nil {
+	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
 		utils.Debugf("Unable to remove container from link graph: %s", err)
 	}

 	// Deregister the container before removing its directory, to avoid race conditions
-	runtime.idIndex.Delete(container.ID)
-	runtime.containers.Remove(element)
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Remove(element)
 	if err := os.RemoveAll(container.root); err != nil {
 		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
 	}
 	return nil
 }

-func (runtime *Runtime) restore() error {
+func (daemon *Daemon) restore() error {
 	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
 		fmt.Printf("Loading containers: ")
 	}
-	dir, err := ioutil.ReadDir(runtime.repository)
+	dir, err := ioutil.ReadDir(daemon.repository)
 	if err != nil {
 		return err
 	}
 	containers := make(map[string]*Container)
-	currentDriver := runtime.driver.String()
+	currentDriver := daemon.driver.String()

 	for _, v := range dir {
 		id := v.Name()
-		container, err := runtime.load(id)
+		container, err := daemon.load(id)
 		if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
 			fmt.Print(".")
 		}
@@ -332,12 +332,12 @@ func (runtime *Runtime) restore() error {
 	}

 	register := func(container *Container) {
-		if err := runtime.Register(container); err != nil {
+		if err := daemon.Register(container); err != nil {
 			utils.Debugf("Failed to register container %s: %s", container.ID, err)
 		}
 	}

-	if entities := runtime.containerGraph.List("/", -1); entities != nil {
+	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 			if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
 				fmt.Print(".")
@@ -353,12 +353,12 @@ func (runtime *Runtime) restore() error {
 	// Any containers that are left over do not exist in the graph
 	for _, container := range containers {
 		// Try to set the default name for a container if it exists prior to links
-		container.Name, err = generateRandomName(runtime)
+		container.Name, err = generateRandomName(daemon)
 		if err != nil {
 			container.Name = utils.TruncateID(container.ID)
 		}

-		if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
+		if _, err := daemon.containerGraph.Set(container.Name, container.ID); err != nil {
 			utils.Debugf("Setting default id - %s", err)
 		}
 		register(container)
@@ -372,38 +372,38 @@ func (runtime *Runtime) restore() error {
 }

 // Create creates a new container from the given configuration with a given name.
-func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) {
+func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
 	var (
 		container *Container
 		warnings  []string
 	)

-	img, err := runtime.repositories.LookupImage(config.Image)
+	img, err := daemon.repositories.LookupImage(config.Image)
 	if err != nil {
 		return nil, nil, err
 	}
-	if err := runtime.checkImageDepth(img); err != nil {
+	if err := daemon.checkImageDepth(img); err != nil {
 		return nil, nil, err
 	}
-	if warnings, err = runtime.mergeAndVerifyConfig(config, img); err != nil {
+	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
 		return nil, nil, err
 	}
-	if container, err = runtime.newContainer(name, config, img); err != nil {
+	if container, err = daemon.newContainer(name, config, img); err != nil {
 		return nil, nil, err
 	}
-	if err := runtime.createRootfs(container, img); err != nil {
+	if err := daemon.createRootfs(container, img); err != nil {
 		return nil, nil, err
 	}
 	if err := container.ToDisk(); err != nil {
 		return nil, nil, err
 	}
-	if err := runtime.Register(container); err != nil {
+	if err := daemon.Register(container); err != nil {
 		return nil, nil, err
 	}
 	return container, warnings, nil
 }

-func (runtime *Runtime) checkImageDepth(img *image.Image) error {
+func (daemon *Daemon) checkImageDepth(img *image.Image) error {
 	// We add 2 layers to the depth because the container's rw and
 	// init layer add to the restriction
 	depth, err := img.Depth()
@@ -416,7 +416,7 @@ func (runtime *Runtime) checkImageDepth(img *image.Image) error {
 	return nil
 }

-func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool {
+func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 	if config != nil {
 		if config.PortSpecs != nil {
 			for _, p := range config.PortSpecs {
@@ -429,9 +429,9 @@ func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool {
 	return false
 }

-func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
+func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
 	warnings := []string{}
-	if runtime.checkDeprecatedExpose(img.Config) || runtime.checkDeprecatedExpose(config) {
+	if daemon.checkDeprecatedExpose(img.Config) || daemon.checkDeprecatedExpose(config) {
 		warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.")
 	}
 	if img.Config != nil {
@@ -445,14 +445,14 @@ func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *imag
 	return warnings, nil
 }

-func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
+func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
 	var (
 		err error
 		id  = utils.GenerateRandomID()
 	)

 	if name == "" {
-		name, err = generateRandomName(runtime)
+		name, err = generateRandomName(daemon)
 		if err != nil {
 			name = utils.TruncateID(id)
 		}
@@ -465,19 +465,19 @@ func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
 		name = "/" + name
 	}
 	// Set the enitity in the graph using the default name specified
-	if _, err := runtime.containerGraph.Set(name, id); err != nil {
+	if _, err := daemon.containerGraph.Set(name, id); err != nil {
 		if !graphdb.IsNonUniqueNameError(err) {
 			return "", "", err
 		}

-		conflictingContainer, err := runtime.GetByName(name)
+		conflictingContainer, err := daemon.GetByName(name)
 		if err != nil {
 			if strings.Contains(err.Error(), "Could not find entity") {
 				return "", "", err
 			}

 			// Remove name and continue starting the container
-			if err := runtime.containerGraph.Delete(name); err != nil {
+			if err := daemon.containerGraph.Delete(name); err != nil {
 				return "", "", err
 			}
 		} else {
@@ -490,7 +490,7 @@ func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
 	return id, name, nil
 }

-func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) {
+func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) {
 	// Generate default hostname
 	// FIXME: the lxc template no longer needs to set a default hostname
 	if config.Hostname == "" {
@@ -498,7 +498,7 @@ func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) {
 	}
 }

-func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
+func (daemon *Daemon) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
 	var (
 		entrypoint string
 		args       []string
@@ -513,18 +513,18 @@ func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string,
 	return entrypoint, args
 }

-func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
+func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
 	var (
 		id  string
 		err error
 	)
-	id, name, err = runtime.generateIdAndName(name)
+	id, name, err = daemon.generateIdAndName(name)
 	if err != nil {
 		return nil, err
 	}

-	runtime.generateHostname(id, config)
-	entrypoint, args := runtime.getEntrypointAndArgs(config)
+	daemon.generateHostname(id, config)
+	entrypoint, args := daemon.getEntrypointAndArgs(config)

 	container := &Container{
 		// FIXME: we should generate the ID here instead of receiving it as an argument
@@ -537,34 +537,34 @@ func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img
 		Image:           img.ID, // Always use the resolved image id
 		NetworkSettings: &NetworkSettings{},
 		Name:            name,
-		Driver:          runtime.driver.String(),
-		ExecDriver:      runtime.execDriver.Name(),
+		Driver:          daemon.driver.String(),
+		ExecDriver:      daemon.execDriver.Name(),
 	}
-	container.root = runtime.containerRoot(container.ID)
+	container.root = daemon.containerRoot(container.ID)
 	return container, nil
 }

-func (runtime *Runtime) createRootfs(container *Container, img *image.Image) error {
+func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error {
 	// Step 1: create the container directory.
 	// This doubles as a barrier to avoid race conditions.
 	if err := os.Mkdir(container.root, 0700); err != nil {
 		return err
 	}
 	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := runtime.driver.Create(initID, img.ID, ""); err != nil {
+	if err := daemon.driver.Create(initID, img.ID, ""); err != nil {
 		return err
 	}
-	initPath, err := runtime.driver.Get(initID)
+	initPath, err := daemon.driver.Get(initID)
 	if err != nil {
 		return err
 	}
-	defer runtime.driver.Put(initID)
+	defer daemon.driver.Put(initID)

 	if err := graph.SetupInitLayer(initPath); err != nil {
 		return err
 	}

-	if err := runtime.driver.Create(container.ID, initID, ""); err != nil {
+	if err := daemon.driver.Create(container.ID, initID, ""); err != nil {
 		return err
 	}
 	return nil
@@ -572,7 +572,7 @@ func (runtime *Runtime) createRootfs(container *Container, img *image.Image) err

 // Commit creates a new filesystem image from the current state of a container.
 // The image can optionally be tagged into a repository
-func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
+func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
 	// FIXME: freeze the container before copying it to avoid data corruption?
 	if err := container.Mount(); err != nil {
 		return nil, err
@@ -595,13 +595,13 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
 		containerImage = container.Image
 		containerConfig = container.Config
 	}
-	img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
+	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
 	if err != nil {
 		return nil, err
 	}
 	// Register the image if needed
 	if repository != "" {
-		if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil {
+		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
 			return img, err
 		}
 	}
@@ -618,31 +618,31 @@ func GetFullContainerName(name string) (string, error) {
 	return name, nil
 }

-func (runtime *Runtime) GetByName(name string) (*Container, error) {
+func (daemon *Daemon) GetByName(name string) (*Container, error) {
 	fullName, err := GetFullContainerName(name)
 	if err != nil {
 		return nil, err
 	}
-	entity := runtime.containerGraph.Get(fullName)
+	entity := daemon.containerGraph.Get(fullName)
 	if entity == nil {
 		return nil, fmt.Errorf("Could not find entity for %s", name)
 	}
-	e := runtime.getContainerElement(entity.ID())
+	e := daemon.getContainerElement(entity.ID())
 	if e == nil {
 		return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID())
 	}
 	return e.Value.(*Container), nil
 }

-func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
+func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
 	name, err := GetFullContainerName(name)
 	if err != nil {
 		return nil, err
 	}
 	children := make(map[string]*Container)

-	err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
-		c := runtime.Get(e.ID())
+	err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
+		c := daemon.Get(e.ID())
 		if c == nil {
 			return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p)
 		}
@@ -656,25 +656,25 @@ func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
 	return children, nil
 }

-func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error {
+func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error {
 	fullName := path.Join(parent.Name, alias)
-	if !runtime.containerGraph.Exists(fullName) {
-		_, err := runtime.containerGraph.Set(fullName, child.ID)
+	if !daemon.containerGraph.Exists(fullName) {
+		_, err := daemon.containerGraph.Set(fullName, child.ID)
 		return err
 	}
 	return nil
 }

 // FIXME: harmonize with NewGraph()
-func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
-	runtime, err := NewRuntimeFromDirectory(config, eng)
+func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
+	daemon, err := NewDaemonFromDirectory(config, eng)
 	if err != nil {
 		return nil, err
 	}
-	return runtime, nil
+	return daemon, nil
 }

-func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
+func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
 	if !config.EnableSelinuxSupport {
 		selinux.SetDisabled()
 	}
@@ -693,9 +693,9 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
 		return nil, err
 	}

-	runtimeRepo := path.Join(config.Root, "containers")
+	daemonRepo := path.Join(config.Root, "containers")

-	if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
+	if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) {
 		return nil, err
 	}

@@ -774,8 +774,8 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
 		return nil, err
 	}

-	runtime := &Runtime{
-		repository:     runtimeRepo,
+	daemon := &Daemon{
+		repository:     daemonRepo,
 		containers:     list.New(),
 		graph:          g,
 		repositories:   repositories,
@@ -790,19 +790,19 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
 		eng:            eng,
 	}

-	if err := runtime.checkLocaldns(); err != nil {
+	if err := daemon.checkLocaldns(); err != nil {
 		return nil, err
 	}
-	if err := runtime.restore(); err != nil {
+	if err := daemon.restore(); err != nil {
 		return nil, err
 	}
-	return runtime, nil
+	return daemon, nil
 }

-func (runtime *Runtime) shutdown() error {
+func (daemon *Daemon) shutdown() error {
 	group := sync.WaitGroup{}
 	utils.Debugf("starting clean shutdown of all containers...")
-	for _, container := range runtime.List() {
+	for _, container := range daemon.List() {
 		c := container
 		if c.State.IsRunning() {
 			utils.Debugf("stopping %s", c.ID)
@@ -823,22 +823,22 @@ func (runtime *Runtime) shutdown() error {
 	return nil
 }

-func (runtime *Runtime) Close() error {
+func (daemon *Daemon) Close() error {
 	errorsStrings := []string{}
-	if err := runtime.shutdown(); err != nil {
-		utils.Errorf("runtime.shutdown(): %s", err)
+	if err := daemon.shutdown(); err != nil {
+		utils.Errorf("daemon.shutdown(): %s", err)
 		errorsStrings = append(errorsStrings, err.Error())
 	}
 	if err := portallocator.ReleaseAll(); err != nil {
 		utils.Errorf("portallocator.ReleaseAll(): %s", err)
 		errorsStrings = append(errorsStrings, err.Error())
 	}
-	if err := runtime.driver.Cleanup(); err != nil {
-		utils.Errorf("runtime.driver.Cleanup(): %s", err.Error())
+	if err := daemon.driver.Cleanup(); err != nil {
+		utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
 		errorsStrings = append(errorsStrings, err.Error())
 	}
-	if err := runtime.containerGraph.Close(); err != nil {
-		utils.Errorf("runtime.containerGraph.Close(): %s", err.Error())
+	if err := daemon.containerGraph.Close(); err != nil {
+		utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
 		errorsStrings = append(errorsStrings, err.Error())
 	}
 	if len(errorsStrings) > 0 {
@@ -847,55 +847,55 @@ func (runtime *Runtime) Close() error {
 	return nil
 }

-func (runtime *Runtime) Mount(container *Container) error {
-	dir, err := runtime.driver.Get(container.ID)
+func (daemon *Daemon) Mount(container *Container) error {
+	dir, err := daemon.driver.Get(container.ID)
 	if err != nil {
-		return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err)
+		return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
 	}
 	if container.basefs == "" {
 		container.basefs = dir
 	} else if container.basefs != dir {
 		return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
-			runtime.driver, container.ID, container.basefs, dir)
+			daemon.driver, container.ID, container.basefs, dir)
 	}
 	return nil
 }

-func (runtime *Runtime) Unmount(container *Container) error {
-	runtime.driver.Put(container.ID)
+func (daemon *Daemon) Unmount(container *Container) error {
+	daemon.driver.Put(container.ID)
 	return nil
 }

-func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) {
-	if differ, ok := runtime.driver.(graphdriver.Differ); ok {
+func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) {
+	if differ, ok := daemon.driver.(graphdriver.Differ); ok {
 		return differ.Changes(container.ID)
 	}
-	cDir, err := runtime.driver.Get(container.ID)
+	cDir, err := daemon.driver.Get(container.ID)
 	if err != nil {
-		return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
+		return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 	}
-	defer runtime.driver.Put(container.ID)
-	initDir, err := runtime.driver.Get(container.ID + "-init")
+	defer daemon.driver.Put(container.ID)
+	initDir, err := daemon.driver.Get(container.ID + "-init")
 	if err != nil {
-		return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
+		return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 	}
-	defer runtime.driver.Put(container.ID + "-init")
+	defer daemon.driver.Put(container.ID + "-init")
 	return archive.ChangesDirs(cDir, initDir)
 }

-func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
-	if differ, ok := runtime.driver.(graphdriver.Differ); ok {
+func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) {
+	if differ, ok := daemon.driver.(graphdriver.Differ); ok {
 		return differ.Diff(container.ID)
 	}

-	changes, err := runtime.Changes(container)
+	changes, err := daemon.Changes(container)
 	if err != nil {
 		return nil, err
 	}

-	cDir, err := runtime.driver.Get(container.ID)
+	cDir, err := daemon.driver.Get(container.ID)
 	if err != nil {
-		return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
+		return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 	}

 	archive, err := archive.ExportChanges(cDir, changes)
@@ -904,26 +904,26 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
 	}
 	}
 	return utils.NewReadCloserWrapper(archive, func() error {
 	return utils.NewReadCloserWrapper(archive, func() error {
 		err := archive.Close()
 		err := archive.Close()
-		runtime.driver.Put(container.ID)
+		daemon.driver.Put(container.ID)
 		return err
 		return err
 	}), nil
 	}), nil
 }
 }
 
 
-func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
-	return runtime.execDriver.Run(c.command, pipes, startCallback)
+func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+	return daemon.execDriver.Run(c.command, pipes, startCallback)
 }
 }
 
 
-func (runtime *Runtime) Kill(c *Container, sig int) error {
-	return runtime.execDriver.Kill(c.command, sig)
+func (daemon *Daemon) Kill(c *Container, sig int) error {
+	return daemon.execDriver.Kill(c.command, sig)
 }
 }
 
 
 // Nuke kills all containers then removes all content
 // Nuke kills all containers then removes all content
 // from the content root, including images, volumes and
 // from the content root, including images, volumes and
 // container filesystems.
 // container filesystems.
-// Again: this will remove your entire docker runtime!
-func (runtime *Runtime) Nuke() error {
+// Again: this will remove your entire docker daemon!
+func (daemon *Daemon) Nuke() error {
 	var wg sync.WaitGroup
 	var wg sync.WaitGroup
-	for _, container := range runtime.List() {
+	for _, container := range daemon.List() {
 		wg.Add(1)
 		wg.Add(1)
 		go func(c *Container) {
 		go func(c *Container) {
 			c.Kill()
 			c.Kill()
@@ -931,63 +931,63 @@ func (runtime *Runtime) Nuke() error {
 		}(container)
 		}(container)
 	}
 	}
 	wg.Wait()
 	wg.Wait()
-	runtime.Close()
+	daemon.Close()
 
 
-	return os.RemoveAll(runtime.config.Root)
+	return os.RemoveAll(daemon.config.Root)
 }
 }
 
 
 // FIXME: this is a convenience function for integration tests
 // FIXME: this is a convenience function for integration tests
-// which need direct access to runtime.graph.
+// which need direct access to daemon.graph.
 // Once the tests switch to using engine and jobs, this method
 // Once the tests switch to using engine and jobs, this method
 // can go away.
 // can go away.
-func (runtime *Runtime) Graph() *graph.Graph {
-	return runtime.graph
+func (daemon *Daemon) Graph() *graph.Graph {
+	return daemon.graph
 }
 }
 
 
-func (runtime *Runtime) Repositories() *graph.TagStore {
-	return runtime.repositories
+func (daemon *Daemon) Repositories() *graph.TagStore {
+	return daemon.repositories
 }
 }
 
 
-func (runtime *Runtime) Config() *daemonconfig.Config {
-	return runtime.config
+func (daemon *Daemon) Config() *daemonconfig.Config {
+	return daemon.config
 }
 }
 
 
-func (runtime *Runtime) SystemConfig() *sysinfo.SysInfo {
-	return runtime.sysInfo
+func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo {
+	return daemon.sysInfo
 }
 }
 
 
-func (runtime *Runtime) SystemInitPath() string {
-	return runtime.sysInitPath
+func (daemon *Daemon) SystemInitPath() string {
+	return daemon.sysInitPath
 }
 }
 
 
-func (runtime *Runtime) GraphDriver() graphdriver.Driver {
-	return runtime.driver
+func (daemon *Daemon) GraphDriver() graphdriver.Driver {
+	return daemon.driver
 }
 }
 
 
-func (runtime *Runtime) ExecutionDriver() execdriver.Driver {
-	return runtime.execDriver
+func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
+	return daemon.execDriver
 }
 }
 
 
-func (runtime *Runtime) Volumes() *graph.Graph {
-	return runtime.volumes
+func (daemon *Daemon) Volumes() *graph.Graph {
+	return daemon.volumes
 }
 }
 
 
-func (runtime *Runtime) ContainerGraph() *graphdb.Database {
-	return runtime.containerGraph
+func (daemon *Daemon) ContainerGraph() *graphdb.Database {
+	return daemon.containerGraph
 }
 }
 
 
-func (runtime *Runtime) SetServer(server Server) {
-	runtime.srv = server
+func (daemon *Daemon) SetServer(server Server) {
+	daemon.srv = server
 }
 }
 
 
-func (runtime *Runtime) checkLocaldns() error {
+func (daemon *Daemon) checkLocaldns() error {
 	resolvConf, err := utils.GetResolvConf()
 	resolvConf, err := utils.GetResolvConf()
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	if len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
+	if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
 		log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
 		log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
-		runtime.config.Dns = DefaultDns
+		daemon.config.Dns = DefaultDns
 	}
 	}
 	return nil
 	return nil
 }
 }
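Editor's note: for readers following the rename, here is a minimal caller-side sketch (not part of this commit) showing that only the receiver type changes. Code that previously held a *Runtime now holds a *Daemon; the method set shown above (Close, Mount, Diff, Kill and the accessors) is untouched. The helper name shutdownAndLog is hypothetical.

package main

import (
	"log"

	"github.com/dotcloud/docker/daemon"
)

// shutdownAndLog is a hypothetical helper: it takes the renamed *Daemon
// (formerly *Runtime) and drives the same Close() path shown in the hunk above.
func shutdownAndLog(d *daemon.Daemon) {
	if err := d.Close(); err != nil {
		log.Printf("daemon shutdown reported errors: %s", err)
	}
}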

+ 3 - 3
runtime/runtime_aufs.go → daemon/daemon_aufs.go

@@ -1,11 +1,11 @@
 // +build !exclude_graphdriver_aufs
 
-package runtime
+package daemon
 
 import (
+	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver/aufs"
 	"github.com/dotcloud/docker/graph"
-	"github.com/dotcloud/docker/runtime/graphdriver"
-	"github.com/dotcloud/docker/runtime/graphdriver/aufs"
 	"github.com/dotcloud/docker/utils"
 )
 

+ 7 - 0
daemon/daemon_btrfs.go

@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_btrfs
+
+package daemon
+
+import (
+	_ "github.com/dotcloud/docker/daemon/graphdriver/btrfs"
+)

+ 7 - 0
daemon/daemon_devicemapper.go

@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_devicemapper
+
+package daemon
+
+import (
+	_ "github.com/dotcloud/docker/daemon/graphdriver/devmapper"
+)
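Editor's note: daemon_btrfs.go and daemon_devicemapper.go exist only for their blank imports: pulling a graphdriver package in lets it register itself as a side effect, and the build tag lets packagers compile a driver out. A hedged sketch of the driver side of that handshake follows (the package name mydriver is hypothetical, and the Register call and Init signature are assumptions based on how the in-tree drivers hook in):

package mydriver // hypothetical graph driver, for illustration only

import (
	"fmt"

	"github.com/dotcloud/docker/daemon/graphdriver"
)

// init runs as a side effect of the daemon's blank import, which is the whole
// point of the one-line files above.
func init() {
	// Register is assumed to exist on the graphdriver package; the in-tree
	// drivers register themselves through the same hook.
	graphdriver.Register("mydriver", Init)
}

// Init is the assumed constructor signature for a graph driver.
func Init(root string) (graphdriver.Driver, error) {
	return nil, fmt.Errorf("mydriver: not implemented in this sketch")
}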

+ 2 - 2
runtime/runtime_no_aufs.go → daemon/daemon_no_aufs.go

@@ -1,9 +1,9 @@
 // +build exclude_graphdriver_aufs
 
-package runtime
+package daemon
 
 import (
-	"github.com/dotcloud/docker/runtime/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 )
 
 func migrateIfAufs(driver graphdriver.Driver, root string) error {
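Editor's note: daemon_aufs.go and daemon_no_aufs.go carry opposite build tags, so exactly one definition of migrateIfAufs is compiled in. The body of the excluded variant is cut off by this hunk; the sketch below shows what such a stub conventionally looks like and is an assumption, not the file's actual content.

// +build exclude_graphdriver_aufs

package daemon

import "github.com/dotcloud/docker/daemon/graphdriver"

// With aufs compiled out there is nothing to migrate, so the stub
// presumably just reports success.
func migrateIfAufs(driver graphdriver.Driver, root string) error {
	return nil
}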

+ 0 - 0
runtime/execdriver/MAINTAINERS → daemon/execdriver/MAINTAINERS


+ 0 - 0
runtime/execdriver/driver.go → daemon/execdriver/driver.go


+ 3 - 3
runtime/execdriver/execdrivers/execdrivers.go → daemon/execdriver/execdrivers/execdrivers.go

@@ -2,10 +2,10 @@ package execdrivers
 
 import (
 	"fmt"
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/daemon/execdriver/lxc"
+	"github.com/dotcloud/docker/daemon/execdriver/native"
 	"github.com/dotcloud/docker/pkg/sysinfo"
-	"github.com/dotcloud/docker/runtime/execdriver"
-	"github.com/dotcloud/docker/runtime/execdriver/lxc"
-	"github.com/dotcloud/docker/runtime/execdriver/native"
 	"path"
 )
 

+ 1 - 1
runtime/execdriver/lxc/driver.go → daemon/execdriver/lxc/driver.go

@@ -2,9 +2,9 @@ package lxc
 
 import (
 	"fmt"
+	"github.com/dotcloud/docker/daemon/execdriver"
 	"github.com/dotcloud/docker/pkg/cgroups"
 	"github.com/dotcloud/docker/pkg/label"
-	"github.com/dotcloud/docker/runtime/execdriver"
 	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"log"

+ 0 - 0
runtime/execdriver/lxc/info.go → daemon/execdriver/lxc/info.go


+ 0 - 0
runtime/execdriver/lxc/info_test.go → daemon/execdriver/lxc/info_test.go


+ 1 - 1
runtime/execdriver/lxc/init.go → daemon/execdriver/lxc/init.go

@@ -3,9 +3,9 @@ package lxc
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/dotcloud/docker/daemon/execdriver"
 	"github.com/dotcloud/docker/pkg/netlink"
 	"github.com/dotcloud/docker/pkg/user"
-	"github.com/dotcloud/docker/runtime/execdriver"
 	"github.com/syndtr/gocapability/capability"
 	"io/ioutil"
 	"net"

+ 0 - 0
runtime/execdriver/lxc/lxc_init_linux.go → daemon/execdriver/lxc/lxc_init_linux.go


+ 0 - 0
runtime/execdriver/lxc/lxc_init_unsupported.go → daemon/execdriver/lxc/lxc_init_unsupported.go


+ 1 - 1
runtime/execdriver/lxc/lxc_template.go → daemon/execdriver/lxc/lxc_template.go

@@ -1,8 +1,8 @@
 package lxc
 
 import (
+	"github.com/dotcloud/docker/daemon/execdriver"
 	"github.com/dotcloud/docker/pkg/label"
-	"github.com/dotcloud/docker/runtime/execdriver"
 	"strings"
 	"text/template"
 )

+ 1 - 1
runtime/execdriver/lxc/lxc_template_unit_test.go → daemon/execdriver/lxc/lxc_template_unit_test.go

@@ -3,7 +3,7 @@ package lxc
 import (
 	"bufio"
 	"fmt"
-	"github.com/dotcloud/docker/runtime/execdriver"
+	"github.com/dotcloud/docker/daemon/execdriver"
 	"io/ioutil"
 	"math/rand"
 	"os"

+ 0 - 0
runtime/execdriver/native/configuration/parse.go → daemon/execdriver/native/configuration/parse.go


+ 1 - 1
runtime/execdriver/native/configuration/parse_test.go → daemon/execdriver/native/configuration/parse_test.go

@@ -1,7 +1,7 @@
 package configuration
 
 import (
-	"github.com/dotcloud/docker/runtime/execdriver/native/template"
+	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 	"testing"
 )
 

+ 3 - 3
runtime/execdriver/native/create.go → daemon/execdriver/native/create.go

@@ -4,12 +4,12 @@ import (
 	"fmt"
 	"fmt"
 	"os"
 	"os"
 
 
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
+	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 	"github.com/dotcloud/docker/pkg/label"
 	"github.com/dotcloud/docker/pkg/label"
 	"github.com/dotcloud/docker/pkg/libcontainer"
 	"github.com/dotcloud/docker/pkg/libcontainer"
 	"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
 	"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
-	"github.com/dotcloud/docker/runtime/execdriver"
-	"github.com/dotcloud/docker/runtime/execdriver/native/configuration"
-	"github.com/dotcloud/docker/runtime/execdriver/native/template"
 )
 )
 
 
 // createContainer populates and configures the container type with the
 // createContainer populates and configures the container type with the

+ 1 - 1
runtime/execdriver/native/driver.go → daemon/execdriver/native/driver.go

@@ -3,12 +3,12 @@ package native
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/dotcloud/docker/daemon/execdriver"
 	"github.com/dotcloud/docker/pkg/cgroups"
 	"github.com/dotcloud/docker/pkg/libcontainer"
 	"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
 	"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
 	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/runtime/execdriver"
 	"io"
 	"io/ioutil"
 	"log"

+ 0 - 0
runtime/execdriver/native/info.go → daemon/execdriver/native/info.go


+ 0 - 0
runtime/execdriver/native/template/default_template.go → daemon/execdriver/native/template/default_template.go


+ 1 - 1
runtime/execdriver/native/term.go → daemon/execdriver/native/term.go

@@ -5,7 +5,7 @@
 package native
 
 import (
-	"github.com/dotcloud/docker/runtime/execdriver"
+	"github.com/dotcloud/docker/daemon/execdriver"
 	"io"
 	"os"
 	"os/exec"

+ 0 - 0
runtime/execdriver/pipes.go → daemon/execdriver/pipes.go


+ 0 - 0
runtime/execdriver/termconsole.go → daemon/execdriver/termconsole.go


+ 1 - 1
runtime/graphdriver/aufs/aufs.go → daemon/graphdriver/aufs/aufs.go

@@ -24,8 +24,8 @@ import (
 	"bufio"
 	"bufio"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	mountpk "github.com/dotcloud/docker/pkg/mount"
 	mountpk "github.com/dotcloud/docker/pkg/mount"
-	"github.com/dotcloud/docker/runtime/graphdriver"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"

+ 1 - 1
runtime/graphdriver/aufs/aufs_test.go → daemon/graphdriver/aufs/aufs_test.go

@@ -5,7 +5,7 @@ import (
 	"encoding/hex"
 	"encoding/hex"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/runtime/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"path"
 	"path"

+ 0 - 0
runtime/graphdriver/aufs/dirs.go → daemon/graphdriver/aufs/dirs.go


+ 0 - 0
runtime/graphdriver/aufs/migrate.go → daemon/graphdriver/aufs/migrate.go


+ 0 - 0
runtime/graphdriver/aufs/mount.go → daemon/graphdriver/aufs/mount.go


+ 0 - 0
runtime/graphdriver/aufs/mount_linux.go → daemon/graphdriver/aufs/mount_linux.go


+ 0 - 0
runtime/graphdriver/aufs/mount_unsupported.go → daemon/graphdriver/aufs/mount_unsupported.go


+ 1 - 1
runtime/graphdriver/btrfs/btrfs.go → daemon/graphdriver/btrfs/btrfs.go

@@ -11,7 +11,7 @@ import "C"
 
 
 import (
 import (
 	"fmt"
 	"fmt"
-	"github.com/dotcloud/docker/runtime/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"os"
 	"os"
 	"path"
 	"path"
 	"syscall"
 	"syscall"

+ 0 - 0
runtime/graphdriver/btrfs/dummy_unsupported.go → daemon/graphdriver/btrfs/dummy_unsupported.go


+ 0 - 0
runtime/graphdriver/devmapper/attach_loopback.go → daemon/graphdriver/devmapper/attach_loopback.go


+ 0 - 0
runtime/graphdriver/devmapper/deviceset.go → daemon/graphdriver/devmapper/deviceset.go


+ 0 - 0
runtime/graphdriver/devmapper/devmapper.go → daemon/graphdriver/devmapper/devmapper.go


+ 0 - 0
runtime/graphdriver/devmapper/devmapper_doc.go → daemon/graphdriver/devmapper/devmapper_doc.go


+ 0 - 0
runtime/graphdriver/devmapper/devmapper_log.go → daemon/graphdriver/devmapper/devmapper_log.go


+ 0 - 0
runtime/graphdriver/devmapper/devmapper_test.go → daemon/graphdriver/devmapper/devmapper_test.go


+ 0 - 0
runtime/graphdriver/devmapper/devmapper_wrapper.go → daemon/graphdriver/devmapper/devmapper_wrapper.go


+ 1 - 1
runtime/graphdriver/devmapper/driver.go → daemon/graphdriver/devmapper/driver.go

@@ -4,7 +4,7 @@ package devmapper
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/runtime/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"os"

+ 1 - 1
runtime/graphdriver/devmapper/driver_test.go → daemon/graphdriver/devmapper/driver_test.go

@@ -4,7 +4,7 @@ package devmapper
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/runtime/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"io/ioutil"
 	"path"
 	"runtime"

+ 0 - 0
runtime/graphdriver/devmapper/ioctl.go → daemon/graphdriver/devmapper/ioctl.go


+ 0 - 0
runtime/graphdriver/devmapper/mount.go → daemon/graphdriver/devmapper/mount.go


+ 0 - 0
runtime/graphdriver/devmapper/sys.go → daemon/graphdriver/devmapper/sys.go


+ 0 - 0
runtime/graphdriver/driver.go → daemon/graphdriver/driver.go


+ 1 - 1
runtime/graphdriver/vfs/driver.go → daemon/graphdriver/vfs/driver.go

@@ -2,7 +2,7 @@ package vfs
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/runtime/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"os"
 	"os/exec"
 	"path"

+ 1 - 1
runtime/history.go → daemon/history.go

@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 	"sort"

+ 1 - 1
runtime/network_settings.go → daemon/network_settings.go

@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 	"github.com/dotcloud/docker/engine"

+ 4 - 4
runtime/networkdriver/bridge/driver.go → daemon/networkdriver/bridge/driver.go

@@ -2,13 +2,13 @@ package bridge
 
 import (
 	"fmt"
+	"github.com/dotcloud/docker/daemon/networkdriver"
+	"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
+	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
+	"github.com/dotcloud/docker/daemon/networkdriver/portmapper"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/pkg/iptables"
 	"github.com/dotcloud/docker/pkg/netlink"
-	"github.com/dotcloud/docker/runtime/networkdriver"
-	"github.com/dotcloud/docker/runtime/networkdriver/ipallocator"
-	"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
-	"github.com/dotcloud/docker/runtime/networkdriver/portmapper"
 	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"log"

+ 1 - 1
runtime/networkdriver/ipallocator/allocator.go → daemon/networkdriver/ipallocator/allocator.go

@@ -3,8 +3,8 @@ package ipallocator
 import (
 	"encoding/binary"
 	"errors"
+	"github.com/dotcloud/docker/daemon/networkdriver"
 	"github.com/dotcloud/docker/pkg/collections"
-	"github.com/dotcloud/docker/runtime/networkdriver"
 	"net"
 	"sync"
 )

+ 0 - 0
runtime/networkdriver/ipallocator/allocator_test.go → daemon/networkdriver/ipallocator/allocator_test.go


+ 0 - 0
runtime/networkdriver/network.go → daemon/networkdriver/network.go


+ 0 - 0
runtime/networkdriver/network_test.go → daemon/networkdriver/network_test.go


+ 0 - 0
runtime/networkdriver/portallocator/portallocator.go → daemon/networkdriver/portallocator/portallocator.go


+ 0 - 0
runtime/networkdriver/portallocator/portallocator_test.go → daemon/networkdriver/portallocator/portallocator_test.go


+ 0 - 0
runtime/networkdriver/portmapper/mapper.go → daemon/networkdriver/portmapper/mapper.go


+ 0 - 0
runtime/networkdriver/portmapper/mapper_test.go → daemon/networkdriver/portmapper/mapper_test.go


+ 0 - 0
runtime/networkdriver/utils.go → daemon/networkdriver/utils.go


+ 1 - 1
runtime/server.go → daemon/server.go

@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 	"github.com/dotcloud/docker/utils"

+ 1 - 1
runtime/sorter.go → daemon/sorter.go

@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import "sort"
 

+ 1 - 1
runtime/state.go → daemon/state.go

@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 	"fmt"

+ 5 - 5
runtime/utils.go → daemon/utils.go

@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 	"fmt"
@@ -51,14 +51,14 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[
 }
 
 type checker struct {
-	runtime *Runtime
+	daemon *Daemon
 }
 
 func (c *checker) Exists(name string) bool {
-	return c.runtime.containerGraph.Exists("/" + name)
+	return c.daemon.containerGraph.Exists("/" + name)
 }
 
 // Generate a random and unique name
-func generateRandomName(runtime *Runtime) (string, error) {
-	return namesgenerator.GenerateRandomName(&checker{runtime})
+func generateRandomName(daemon *Daemon) (string, error) {
+	return namesgenerator.GenerateRandomName(&checker{daemon})
 }
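Editor's note: the hunk above shows the name-generation pattern: GenerateRandomName keeps drawing names until the supplied checker says the name is free, and the daemon-backed checker answers by querying the container graph. A hedged, self-contained sketch of the same handshake with a map-backed checker follows (the namesgenerator import path is assumed from the usage above; mapChecker is invented for illustration):

package main

import (
	"fmt"

	// Import path assumed from the namesgenerator usage in the hunk above.
	"github.com/dotcloud/docker/pkg/namesgenerator"
)

// mapChecker is a stand-in for the daemon-backed checker: anything that can
// answer Exists(name) can drive the generator.
type mapChecker struct {
	taken map[string]bool
}

func (c *mapChecker) Exists(name string) bool {
	return c.taken["/"+name]
}

func main() {
	chk := &mapChecker{taken: map[string]bool{"/boring_pike": true}}
	name, err := namesgenerator.GenerateRandomName(chk)
	if err != nil {
		fmt.Println("name generation failed:", err)
		return
	}
	fmt.Println("picked container name:", name)
}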

+ 1 - 1
runtime/utils_test.go → daemon/utils_test.go

@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 	"testing"

+ 6 - 6
runtime/volumes.go → daemon/volumes.go

@@ -1,9 +1,9 @@
-package runtime
+package daemon
 
 import (
 	"fmt"
 	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/runtime/execdriver"
+	"github.com/dotcloud/docker/daemon/execdriver"
 	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"os"
@@ -40,7 +40,7 @@ func setupMountsForContainer(container *Container) error {
 	}
 
 	mounts := []execdriver.Mount{
-		{container.runtime.sysInitPath, "/.dockerinit", false, true},
+		{container.daemon.sysInitPath, "/.dockerinit", false, true},
 		{envPath, "/.dockerenv", false, true},
 		{container.ResolvConfPath, "/etc/resolv.conf", false, true},
 	}
@@ -85,7 +85,7 @@ func applyVolumesFrom(container *Container) error {
 				}
 			}
 
-			c := container.runtime.Get(specParts[0])
+			c := container.daemon.Get(specParts[0])
 			if c == nil {
 				return fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
 			}
@@ -167,7 +167,7 @@ func createVolumes(container *Container) error {
 		return err
 	}
 
-	volumesDriver := container.runtime.volumes.Driver()
+	volumesDriver := container.daemon.volumes.Driver()
 	// Create the requested volumes if they don't exist
 	for volPath := range container.Config.Volumes {
 		volPath = filepath.Clean(volPath)
@@ -200,7 +200,7 @@ func createVolumes(container *Container) error {
 			// Do not pass a container as the parameter for the volume creation.
 			// The graph driver using the container's information ( Image ) to
 			// create the parent.
-			c, err := container.runtime.volumes.Create(nil, "", "", "", "", nil, nil)
+			c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
 			if err != nil {
 				return err
 			}

+ 1 - 1
daemonconfig/config.go

@@ -1,8 +1,8 @@
 package daemonconfig
 
 import (
+	"github.com/dotcloud/docker/daemon/networkdriver"
 	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/runtime/networkdriver"
 	"net"
 )
 

+ 1 - 1
graph/graph.go

@@ -3,10 +3,10 @@ package graph
 import (
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime/graphdriver"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"

+ 2 - 2
graph/tags_unit_test.go

@@ -2,9 +2,9 @@ package graph
 
 import (
 	"bytes"
+	"github.com/dotcloud/docker/daemon/graphdriver"
+	_ "github.com/dotcloud/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
 	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/runtime/graphdriver"
-	_ "github.com/dotcloud/docker/runtime/graphdriver/vfs" // import the vfs driver so it is used in the tests
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"io"

+ 1 - 1
image/graph.go

@@ -1,7 +1,7 @@
 package image
 
 import (
-	"github.com/dotcloud/docker/runtime/graphdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 )
 
 type Graph interface {

+ 1 - 1
image/image.go

@@ -4,8 +4,8 @@ import (
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"github.com/dotcloud/docker/runconfig"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime/graphdriver"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"

+ 2 - 2
integration-cli/docker_cli_nat_test.go

@@ -3,7 +3,7 @@ package main
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/dotcloud/docker/runtime"
+	"github.com/dotcloud/docker/daemon"
 	"net"
 	"os/exec"
 	"path/filepath"
@@ -47,7 +47,7 @@ func TestNetworkNat(t *testing.T) {
 	inspectOut, _, err := runCommandWithOutput(inspectCmd)
 	errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err))
 
-	containers := []*runtime.Container{}
+	containers := []*daemon.Container{}
 	if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil {
 		t.Fatalf("Error inspecting the container: %s", err)
 	}
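Editor's note: the CLI test above now decodes docker inspect output straight into the daemon package's Container type instead of runtime.Container. A minimal hedged sketch of that decode step in isolation follows (the JSON literal is invented and trimmed for illustration; real inspect output carries many more fields, which json.Unmarshal simply leaves unmatched):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/dotcloud/docker/daemon"
)

func main() {
	// A hypothetical stand-in for `docker inspect` output.
	inspectOut := `[{"ID":"0123456789ab"}]`

	containers := []*daemon.Container{}
	if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil {
		fmt.Println("error inspecting the container:", err)
		return
	}
	fmt.Println("first container:", containers[0].ID)
}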

+ 31 - 31
integration/api_test.go

@@ -16,10 +16,10 @@ import (
 
 
 	"github.com/dotcloud/docker/api"
 	"github.com/dotcloud/docker/api"
 	"github.com/dotcloud/docker/api/server"
 	"github.com/dotcloud/docker/api/server"
+	"github.com/dotcloud/docker/daemon"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/runconfig"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 )
@@ -27,10 +27,10 @@ import (
 func TestGetEvents(t *testing.T) {
 func TestGetEvents(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
 	srv := mkServerFromEngine(eng, t)
 	srv := mkServerFromEngine(eng, t)
-	// FIXME: we might not need runtime, why not simply nuke
+	// FIXME: we might not need daemon, why not simply nuke
 	// the engine?
 	// the engine?
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)
 
 
 	var events []*utils.JSONMessage
 	var events []*utils.JSONMessage
 	for _, parts := range [][3]string{
 	for _, parts := range [][3]string{
@@ -72,7 +72,7 @@ func TestGetEvents(t *testing.T) {
 
 
 func TestGetImagesJSON(t *testing.T) {
 func TestGetImagesJSON(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	job := eng.Job("images")
 	job := eng.Job("images")
 	initialImages, err := job.Stdout.AddListTable()
 	initialImages, err := job.Stdout.AddListTable()
@@ -175,7 +175,7 @@ func TestGetImagesJSON(t *testing.T) {
 
 
 func TestGetImagesHistory(t *testing.T) {
 func TestGetImagesHistory(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 
 
@@ -199,7 +199,7 @@ func TestGetImagesHistory(t *testing.T) {
 
 
 func TestGetImagesByName(t *testing.T) {
 func TestGetImagesByName(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil)
 	req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil)
 	if err != nil {
 	if err != nil {
@@ -223,7 +223,7 @@ func TestGetImagesByName(t *testing.T) {
 
 
 func TestGetContainersJSON(t *testing.T) {
 func TestGetContainersJSON(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	job := eng.Job("containers")
 	job := eng.Job("containers")
 	job.SetenvBool("all", true)
 	job.SetenvBool("all", true)
@@ -269,7 +269,7 @@ func TestGetContainersJSON(t *testing.T) {
 
 
 func TestGetContainersExport(t *testing.T) {
 func TestGetContainersExport(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
@@ -317,7 +317,7 @@ func TestGetContainersExport(t *testing.T) {
 
 
 func TestSaveImageAndThenLoad(t *testing.T) {
 func TestSaveImageAndThenLoad(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	// save image
 	// save image
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
@@ -388,7 +388,7 @@ func TestSaveImageAndThenLoad(t *testing.T) {
 
 
 func TestGetContainersChanges(t *testing.T) {
 func TestGetContainersChanges(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
@@ -428,7 +428,7 @@ func TestGetContainersChanges(t *testing.T) {
 
 
 func TestGetContainersTop(t *testing.T) {
 func TestGetContainersTop(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -439,7 +439,7 @@ func TestGetContainersTop(t *testing.T) {
 		t,
 		t,
 	)
 	)
 	defer func() {
 	defer func() {
-		// Make sure the process dies before destroying runtime
+		// Make sure the process dies before destroying daemon
 		containerKill(eng, containerID, t)
 		containerKill(eng, containerID, t)
 		containerWait(eng, containerID, t)
 		containerWait(eng, containerID, t)
 	}()
 	}()
@@ -504,7 +504,7 @@ func TestGetContainersTop(t *testing.T) {
 
 
 func TestGetContainersByName(t *testing.T) {
 func TestGetContainersByName(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
@@ -524,7 +524,7 @@ func TestGetContainersByName(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	assertHttpNotError(r, t)
 	assertHttpNotError(r, t)
-	outContainer := &runtime.Container{}
+	outContainer := &daemon.Container{}
 	if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil {
 	if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -535,7 +535,7 @@ func TestGetContainersByName(t *testing.T) {
 
 
 func TestPostCommit(t *testing.T) {
 func TestPostCommit(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 	srv := mkServerFromEngine(eng, t)
 	srv := mkServerFromEngine(eng, t)
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
@@ -574,7 +574,7 @@ func TestPostCommit(t *testing.T) {
 
 
 func TestPostContainersCreate(t *testing.T) {
 func TestPostContainersCreate(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	configJSON, err := json.Marshal(&runconfig.Config{
 	configJSON, err := json.Marshal(&runconfig.Config{
 		Image:  unitTestImageID,
 		Image:  unitTestImageID,
@@ -615,7 +615,7 @@ func TestPostContainersCreate(t *testing.T) {
 
 
 func TestPostContainersKill(t *testing.T) {
 func TestPostContainersKill(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -654,7 +654,7 @@ func TestPostContainersKill(t *testing.T) {
 
 
 func TestPostContainersRestart(t *testing.T) {
 func TestPostContainersRestart(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -699,7 +699,7 @@ func TestPostContainersRestart(t *testing.T) {
 
 
 func TestPostContainersStart(t *testing.T) {
 func TestPostContainersStart(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(
 	containerID := createTestContainer(
 		eng,
 		eng,
@@ -752,7 +752,7 @@ func TestPostContainersStart(t *testing.T) {
 // Expected behaviour: using / as a bind mount source should throw an error
 // Expected behaviour: using / as a bind mount source should throw an error
 func TestRunErrorBindMountRootSource(t *testing.T) {
 func TestRunErrorBindMountRootSource(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(
 	containerID := createTestContainer(
 		eng,
 		eng,
@@ -787,7 +787,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) {
 
 
 func TestPostContainersStop(t *testing.T) {
 func TestPostContainersStop(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -827,7 +827,7 @@ func TestPostContainersStop(t *testing.T) {
 
 
 func TestPostContainersWait(t *testing.T) {
 func TestPostContainersWait(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -865,7 +865,7 @@ func TestPostContainersWait(t *testing.T) {
 
 
 func TestPostContainersAttach(t *testing.T) {
 func TestPostContainersAttach(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -943,7 +943,7 @@ func TestPostContainersAttach(t *testing.T) {
 
 
 func TestPostContainersAttachStderr(t *testing.T) {
 func TestPostContainersAttachStderr(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -1024,7 +1024,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
 // FIXME: Test deleting volume in use by other container
 // FIXME: Test deleting volume in use by other container
 func TestDeleteContainers(t *testing.T) {
 func TestDeleteContainers(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
 		&runconfig.Config{
 		&runconfig.Config{
@@ -1050,7 +1050,7 @@ func TestDeleteContainers(t *testing.T) {
 
 
 func TestOptionsRoute(t *testing.T) {
 func TestOptionsRoute(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 	req, err := http.NewRequest("OPTIONS", "/", nil)
 	req, err := http.NewRequest("OPTIONS", "/", nil)
@@ -1068,7 +1068,7 @@ func TestOptionsRoute(t *testing.T) {
 
 
 func TestGetEnabledCors(t *testing.T) {
 func TestGetEnabledCors(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
 
 
@@ -1103,7 +1103,7 @@ func TestDeleteImages(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
 	//we expect errors, so we disable stderr
 	//we expect errors, so we disable stderr
 	eng.Stderr = ioutil.Discard
 	eng.Stderr = ioutil.Discard
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	initialImages := getImages(eng, t, true, "")
 	initialImages := getImages(eng, t, true, "")
 
 
@@ -1160,7 +1160,7 @@ func TestDeleteImages(t *testing.T) {
 
 
 func TestPostContainersCopy(t *testing.T) {
 func TestPostContainersCopy(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	// Create a container and remove a file
 	// Create a container and remove a file
 	containerID := createTestContainer(eng,
 	containerID := createTestContainer(eng,
@@ -1218,7 +1218,7 @@ func TestPostContainersCopy(t *testing.T) {
 
 
 func TestPostContainersCopyWhenContainerNotFound(t *testing.T) {
 func TestPostContainersCopyWhenContainerNotFound(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 
 
 	r := httptest.NewRecorder()
 	r := httptest.NewRecorder()
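Editor's note: the api_test.go changes above are mechanical renames of the integration test helpers (mkRuntimeFromEngine becomes mkDaemonFromEngine, and the daemon is nuked instead of the runtime). A hedged sketch of the resulting test skeleton follows; TestSomething is a hypothetical test name and the package name is assumed from the integration suite:

package docker // package name assumed from the integration test files above

import "testing"

// TestSomething illustrates the post-rename skeleton: the helpers keep their
// behaviour, only the daemon-flavoured names are used.
func TestSomething(t *testing.T) {
	eng := NewTestEngine(t)
	// mkDaemonFromEngine and Nuke come from the helpers touched by this commit;
	// deferring the call tears the test daemon down when the test ends.
	defer mkDaemonFromEngine(eng, t).Nuke()

	// ... exercise the API through eng, as the tests above do ...
}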
 
 

+ 8 - 8
integration/buildfile_test.go

@@ -365,7 +365,7 @@ func TestBuild(t *testing.T) {
 func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*image.Image, error) {
 func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*image.Image, error) {
 	if eng == nil {
 	if eng == nil {
 		eng = NewTestEngine(t)
 		eng = NewTestEngine(t)
-		runtime := mkRuntimeFromEngine(eng, t)
+		runtime := mkDaemonFromEngine(eng, t)
 		// FIXME: we might not need runtime, why not simply nuke
 		// FIXME: we might not need runtime, why not simply nuke
 		// the engine?
 		// the engine?
 		defer nuke(runtime)
 		defer nuke(runtime)
@@ -547,7 +547,7 @@ func TestBuildEntrypoint(t *testing.T) {
 // utilizing cache
 // utilizing cache
 func TestBuildEntrypointRunCleanup(t *testing.T) {
 func TestBuildEntrypointRunCleanup(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))
 
 
 	img, err := buildImage(testContextTemplate{`
 	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
@@ -576,7 +576,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
 
 
 func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) {
 func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))
 
 
 	img, err := buildImage(template, t, eng, true)
 	img, err := buildImage(template, t, eng, true)
 	if err != nil {
 	if err != nil {
@@ -660,7 +660,7 @@ func TestBuildADDLocalFileWithCache(t *testing.T) {
 		},
 		},
 		nil}
 		nil}
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))
 
 
 	id1 := checkCacheBehaviorFromEngime(t, template, true, eng)
 	id1 := checkCacheBehaviorFromEngime(t, template, true, eng)
 	template.files = append(template.files, [2]string{"bar", "hello2"})
 	template.files = append(template.files, [2]string{"bar", "hello2"})
@@ -796,7 +796,7 @@ func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) {
 
 
 func TestForbiddenContextPath(t *testing.T) {
 func TestForbiddenContextPath(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))
 	srv := mkServerFromEngine(eng, t)
 	srv := mkServerFromEngine(eng, t)
 
 
 	context := testContextTemplate{`
 	context := testContextTemplate{`
@@ -844,7 +844,7 @@ func TestForbiddenContextPath(t *testing.T) {
 
 
 func TestBuildADDFileNotFound(t *testing.T) {
 func TestBuildADDFileNotFound(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))
 
 
 	context := testContextTemplate{`
 	context := testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
@@ -890,7 +890,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
 
 
 func TestBuildInheritance(t *testing.T) {
 func TestBuildInheritance(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))
 
 
 	img, err := buildImage(testContextTemplate{`
 	img, err := buildImage(testContextTemplate{`
             from {IMAGE}
             from {IMAGE}
@@ -1012,7 +1012,7 @@ func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) {
 // gh #2446
 // gh #2446
 func TestBuildAddToSymlinkDest(t *testing.T) {
 func TestBuildAddToSymlinkDest(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))
 
 
 	_, err := buildImage(testContextTemplate{`
 	_, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}

+ 16 - 16
integration/commands_test.go

@@ -4,10 +4,10 @@ import (
 	"bufio"
 	"bufio"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/api/client"
 	"github.com/dotcloud/docker/api/client"
+	"github.com/dotcloud/docker/daemon"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/pkg/term"
 	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/runtime"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
@@ -36,7 +36,7 @@ func closeWrap(args ...io.Closer) error {
 	return nil
 	return nil
 }
 }
 
 
-func setRaw(t *testing.T, c *runtime.Container) *term.State {
+func setRaw(t *testing.T, c *daemon.Container) *term.State {
 	pty, err := c.GetPtyMaster()
 	pty, err := c.GetPtyMaster()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -48,7 +48,7 @@ func setRaw(t *testing.T, c *runtime.Container) *term.State {
 	return state
 	return state
 }
 }
 
 
-func unsetRaw(t *testing.T, c *runtime.Container, state *term.State) {
+func unsetRaw(t *testing.T, c *daemon.Container, state *term.State) {
 	pty, err := c.GetPtyMaster()
 	pty, err := c.GetPtyMaster()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -56,12 +56,12 @@ func unsetRaw(t *testing.T, c *runtime.Container, state *term.State) {
 	term.RestoreTerminal(pty.Fd(), state)
 	term.RestoreTerminal(pty.Fd(), state)
 }
 }
 
 
-func waitContainerStart(t *testing.T, timeout time.Duration) *runtime.Container {
-	var container *runtime.Container
+func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container {
+	var container *daemon.Container
 
 
 	setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
 	setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
 		for {
 		for {
-			l := globalRuntime.List()
+			l := globalDaemon.List()
 			if len(l) == 1 && l[0].State.IsRunning() {
 			if len(l) == 1 && l[0].State.IsRunning() {
 				container = l[0]
 				container = l[0]
 				break
 				break
@@ -142,7 +142,7 @@ func TestRunHostname(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
+	container := globalDaemon.List()[0]
 
 
 	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
 	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
 		<-c
 		<-c
@@ -187,7 +187,7 @@ func TestRunWorkdir(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
+	container := globalDaemon.List()[0]
 
 
 	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
 	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
 		<-c
 		<-c
@@ -232,7 +232,7 @@ func TestRunWorkdirExists(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
+	container := globalDaemon.List()[0]
 
 
 	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
 	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
 		<-c
 		<-c
@@ -290,7 +290,7 @@ func TestRunExit(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
+	container := globalDaemon.List()[0]
 
 
 	// Closing /bin/cat stdin, expect it to exit
 	// Closing /bin/cat stdin, expect it to exit
 	if err := stdin.Close(); err != nil {
 	if err := stdin.Close(); err != nil {
@@ -359,7 +359,7 @@ func TestRunDisconnect(t *testing.T) {
 	// Client disconnect after run -i should cause stdin to be closed, which should
 	// Client disconnect after run -i should cause stdin to be closed, which should
 	// cause /bin/cat to exit.
 	// cause /bin/cat to exit.
 	setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
 	setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
-		container := globalRuntime.List()[0]
+		container := globalDaemon.List()[0]
 		container.Wait()
 		container.Wait()
 		if container.State.IsRunning() {
 		if container.State.IsRunning() {
 			t.Fatalf("/bin/cat is still running after closing stdin")
 			t.Fatalf("/bin/cat is still running after closing stdin")
@@ -445,7 +445,7 @@ func TestRunAttachStdin(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
+	container := globalDaemon.List()[0]
 
 
 	// Check output
 	// Check output
 	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
 	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
@@ -701,7 +701,7 @@ func TestAttachDisconnect(t *testing.T) {
 
 
 	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
 	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
 		for {
 		for {
-			l := globalRuntime.List()
+			l := globalDaemon.List()
 			if len(l) == 1 && l[0].State.IsRunning() {
 			if len(l) == 1 && l[0].State.IsRunning() {
 				break
 				break
 			}
 			}
@@ -709,7 +709,7 @@ func TestAttachDisconnect(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
+	container := globalDaemon.List()[0]
 
 
 	// Attach to it
 	// Attach to it
 	c1 := make(chan struct{})
 	c1 := make(chan struct{})
@@ -781,7 +781,7 @@ func TestRunAutoRemove(t *testing.T) {
 
 
 	time.Sleep(500 * time.Millisecond)
 	time.Sleep(500 * time.Millisecond)
 
 
-	if len(globalRuntime.List()) > 0 {
+	if len(globalDaemon.List()) > 0 {
 		t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID)
 		t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID)
 	}
 	}
 }
 }
@@ -798,7 +798,7 @@ func TestCmdLogs(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	if err := cli.CmdLogs(globalRuntime.List()[0].ID); err != nil {
+	if err := cli.CmdLogs(globalDaemon.List()[0].ID); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 }
 }

+ 180 - 180
integration/container_test.go

@@ -17,11 +17,11 @@ import (
 )
 )
 
 
 func TestIDFormat(t *testing.T) {
 func TestIDFormat(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container1, _, err := runtime.Create(
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container1, _, err := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image: GetTestImage(runtime).ID,
+			Image: GetTestImage(daemon).ID,
 			Cmd:   []string{"/bin/sh", "-c", "echo hello world"},
 			Cmd:   []string{"/bin/sh", "-c", "echo hello world"},
 		},
 		},
 		"",
 		"",
@@ -39,14 +39,14 @@ func TestIDFormat(t *testing.T) {
 }
 }
 
 
 func TestMultipleAttachRestart(t *testing.T) {
 func TestMultipleAttachRestart(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 	container, _, _ := mkContainer(
 	container, _, _ := mkContainer(
-		runtime,
+		daemon,
 		[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`;  echo hello; done"},
 		[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`;  echo hello; done"},
 		t,
 		t,
 	)
 	)
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 
 
 	// Simulate 3 client attaching to the container and stop/restart
 	// Simulate 3 client attaching to the container and stop/restart
 
 
@@ -135,11 +135,11 @@ func TestMultipleAttachRestart(t *testing.T) {
 
 
 func TestDiff(t *testing.T) {
 func TestDiff(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)
 	// Create a container and remove a file
 	// Create a container and remove a file
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
-	defer runtime.Destroy(container1)
+	container1, _, _ := mkContainer(daemon, []string{"_", "/bin/rm", "/etc/passwd"}, t)
+	defer daemon.Destroy(container1)
 
 
 	// The changelog should be empty and not fail before run. See #1705
 	// The changelog should be empty and not fail before run. See #1705
 	c, err := container1.Changes()
 	c, err := container1.Changes()
@@ -170,14 +170,14 @@ func TestDiff(t *testing.T) {
 	}
 	}
 
 
 	// Commit the container
 	// Commit the container
-	img, err := runtime.Commit(container1, "", "", "unit test commited image - diff", "", nil)
+	img, err := daemon.Commit(container1, "", "", "unit test commited image - diff", "", nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
 	// Create a new container from the commited image
 	// Create a new container from the commited image
-	container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
-	defer runtime.Destroy(container2)
+	container2, _, _ := mkContainer(daemon, []string{img.ID, "cat", "/etc/passwd"}, t)
+	defer daemon.Destroy(container2)
 
 
 	if err := container2.Run(); err != nil {
 	if err := container2.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -195,8 +195,8 @@ func TestDiff(t *testing.T) {
 	}
 	}
 
 
 	// Create a new container
 	// Create a new container
-	container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
-	defer runtime.Destroy(container3)
+	container3, _, _ := mkContainer(daemon, []string{"_", "rm", "/bin/httpd"}, t)
+	defer daemon.Destroy(container3)
 
 
 	if err := container3.Run(); err != nil {
 	if err := container3.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -219,10 +219,10 @@ func TestDiff(t *testing.T) {
 }
 }
 
 
 func TestCommitAutoRun(t *testing.T) {
 func TestCommitAutoRun(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
-	defer runtime.Destroy(container1)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container1, _, _ := mkContainer(daemon, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
+	defer daemon.Destroy(container1)
 
 
 	if container1.State.IsRunning() {
 	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 		t.Errorf("Container shouldn't be running")
@@ -234,14 +234,14 @@ func TestCommitAutoRun(t *testing.T) {
 		t.Errorf("Container shouldn't be running")
 		t.Errorf("Container shouldn't be running")
 	}
 	}
 
 
-	img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}})
+	img, err := daemon.Commit(container1, "", "", "unit test commited image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}})
 	if err != nil {
 	if err != nil {
 		t.Error(err)
 		t.Error(err)
 	}
 	}
 
 
 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
-	container2, _, _ := mkContainer(runtime, []string{img.ID}, t)
-	defer runtime.Destroy(container2)
+	container2, _, _ := mkContainer(daemon, []string{img.ID}, t)
+	defer daemon.Destroy(container2)
 	stdout, err := container2.StdoutPipe()
 	stdout, err := container2.StdoutPipe()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -274,11 +274,11 @@ func TestCommitAutoRun(t *testing.T) {
 }
 }
 
 
 func TestCommitRun(t *testing.T) {
 func TestCommitRun(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
-	defer runtime.Destroy(container1)
+	container1, _, _ := mkContainer(daemon, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
+	defer daemon.Destroy(container1)
 
 
 	if container1.State.IsRunning() {
 	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 		t.Errorf("Container shouldn't be running")
@@ -290,14 +290,14 @@ func TestCommitRun(t *testing.T) {
 		t.Errorf("Container shouldn't be running")
 		t.Errorf("Container shouldn't be running")
 	}
 	}
 
 
-	img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil)
+	img, err := daemon.Commit(container1, "", "", "unit test commited image", "", nil)
 	if err != nil {
 	if err != nil {
 		t.Error(err)
 		t.Error(err)
 	}
 	}
 
 
 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
-	container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
-	defer runtime.Destroy(container2)
+	container2, _, _ := mkContainer(daemon, []string{img.ID, "cat", "/world"}, t)
+	defer daemon.Destroy(container2)
 	stdout, err := container2.StdoutPipe()
 	stdout, err := container2.StdoutPipe()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -330,10 +330,10 @@ func TestCommitRun(t *testing.T) {
 }
 }
 
 
 func TestStart(t *testing.T) {
 func TestStart(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, _ := mkContainer(runtime, []string{"-i", "_", "/bin/cat"}, t)
-	defer runtime.Destroy(container)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, _ := mkContainer(daemon, []string{"-i", "_", "/bin/cat"}, t)
+	defer daemon.Destroy(container)
 
 
 	cStdin, err := container.StdinPipe()
 	cStdin, err := container.StdinPipe()
 	if err != nil {
 	if err != nil {
@@ -365,10 +365,10 @@ func TestCpuShares(t *testing.T) {
 	if err1 == nil || err2 == nil {
 	if err1 == nil || err2 == nil {
 		t.Skip("Fixme. Setting cpu cgroup shares doesn't work in dind on a Fedora host.  The lxc utils are confused by the cpu,cpuacct mount.")
 		t.Skip("Fixme. Setting cpu cgroup shares doesn't work in dind on a Fedora host.  The lxc utils are confused by the cpu,cpuacct mount.")
 	}
 	}
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
-	defer runtime.Destroy(container)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, _ := mkContainer(daemon, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
+	defer daemon.Destroy(container)
 
 
 	cStdin, err := container.StdinPipe()
 	cStdin, err := container.StdinPipe()
 	if err != nil {
 	if err != nil {
@@ -395,10 +395,10 @@ func TestCpuShares(t *testing.T) {
 }
 }
 
 
 func TestRun(t *testing.T) {
 func TestRun(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
-	defer runtime.Destroy(container)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
+	defer daemon.Destroy(container)
 
 
 	if container.State.IsRunning() {
 	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 		t.Errorf("Container shouldn't be running")
@@ -412,11 +412,11 @@ func TestRun(t *testing.T) {
 }
 }
 
 
 func TestOutput(t *testing.T) {
 func TestOutput(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image: GetTestImage(runtime).ID,
+			Image: GetTestImage(daemon).ID,
 			Cmd:   []string{"echo", "-n", "foobar"},
 			Cmd:   []string{"echo", "-n", "foobar"},
 		},
 		},
 		"",
 		"",
@@ -424,7 +424,7 @@ func TestOutput(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err := container.Output()
 	output, err := container.Output()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -435,11 +435,11 @@ func TestOutput(t *testing.T) {
 }
 }
 
 
 func TestKillDifferentUser(t *testing.T) {
 func TestKillDifferentUser(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image:     GetTestImage(runtime).ID,
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image:     GetTestImage(daemon).ID,
 		Cmd:       []string{"cat"},
 		Cmd:       []string{"cat"},
 		OpenStdin: true,
 		OpenStdin: true,
 		User:      "daemon",
 		User:      "daemon",
@@ -449,7 +449,7 @@ func TestKillDifferentUser(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	// FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case
 	// FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case
 	// there is a side effect I'm not seeing.
 	// there is a side effect I'm not seeing.
 	// defer container.stdin.Close()
 	// defer container.stdin.Close()
@@ -495,8 +495,8 @@ func TestKillDifferentUser(t *testing.T) {
 // Test that creating a container with a volume doesn't crash. Regression test for #995.
 // Test that creating a container with a volume doesn't crash. Regression test for #995.
 func TestCreateVolume(t *testing.T) {
 func TestCreateVolume(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)
 
 
 	config, hc, _, err := runconfig.Parse([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil)
 	config, hc, _, err := runconfig.Parse([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil)
 	if err != nil {
 	if err != nil {
@@ -519,19 +519,19 @@ func TestCreateVolume(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	// FIXME: this hack can be removed once Wait is a job
 	// FIXME: this hack can be removed once Wait is a job
-	c := runtime.Get(id)
+	c := daemon.Get(id)
 	if c == nil {
 	if c == nil {
-		t.Fatalf("Couldn't retrieve container %s from runtime", id)
+		t.Fatalf("Couldn't retrieve container %s from daemon", id)
 	}
 	}
 	c.WaitTimeout(500 * time.Millisecond)
 	c.WaitTimeout(500 * time.Millisecond)
 	c.Wait()
 	c.Wait()
 }
 }
 
 
 func TestKill(t *testing.T) {
 func TestKill(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"sleep", "2"},
 		Cmd:   []string{"sleep", "2"},
 	},
 	},
 		"",
 		"",
@@ -539,7 +539,7 @@ func TestKill(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 
 
 	if container.State.IsRunning() {
 	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 		t.Errorf("Container shouldn't be running")
@@ -571,17 +571,17 @@ func TestKill(t *testing.T) {
 }
 }
 
 
 func TestExitCode(t *testing.T) {
 func TestExitCode(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
-	trueContainer, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	trueContainer, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"/bin/true"},
 		Cmd:   []string{"/bin/true"},
 	}, "")
 	}, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(trueContainer)
+	defer daemon.Destroy(trueContainer)
 	if err := trueContainer.Run(); err != nil {
 	if err := trueContainer.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -589,14 +589,14 @@ func TestExitCode(t *testing.T) {
 		t.Fatalf("Unexpected exit code %d (expected 0)", code)
 		t.Fatalf("Unexpected exit code %d (expected 0)", code)
 	}
 	}
 
 
-	falseContainer, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	falseContainer, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"/bin/false"},
 		Cmd:   []string{"/bin/false"},
 	}, "")
 	}, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(falseContainer)
+	defer daemon.Destroy(falseContainer)
 	if err := falseContainer.Run(); err != nil {
 	if err := falseContainer.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -606,10 +606,10 @@ func TestExitCode(t *testing.T) {
 }
 }
 
 
 func TestRestart(t *testing.T) {
 func TestRestart(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"echo", "-n", "foobar"},
 		Cmd:   []string{"echo", "-n", "foobar"},
 	},
 	},
 		"",
 		"",
@@ -617,7 +617,7 @@ func TestRestart(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err := container.Output()
 	output, err := container.Output()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -637,10 +637,10 @@ func TestRestart(t *testing.T) {
 }
 }
 
 
 func TestRestartStdin(t *testing.T) {
 func TestRestartStdin(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"cat"},
 		Cmd:   []string{"cat"},
 
 
 		OpenStdin: true,
 		OpenStdin: true,
@@ -650,7 +650,7 @@ func TestRestartStdin(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 
 
 	stdin, err := container.StdinPipe()
 	stdin, err := container.StdinPipe()
 	if err != nil {
 	if err != nil {
@@ -713,12 +713,12 @@ func TestRestartStdin(t *testing.T) {
 }
 }
 
 
 func TestUser(t *testing.T) {
 func TestUser(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
 	// Default user must be root
 	// Default user must be root
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 	},
 	},
 		"",
 		"",
@@ -726,7 +726,7 @@ func TestUser(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err := container.Output()
 	output, err := container.Output()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -736,8 +736,8 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a username
 	// Set a username
-	container, _, err = runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err = daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "root",
 		User: "root",
@@ -747,7 +747,7 @@ func TestUser(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err = container.Output()
 	output, err = container.Output()
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -757,8 +757,8 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a UID
 	// Set a UID
-	container, _, err = runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err = daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "0",
 		User: "0",
@@ -768,7 +768,7 @@ func TestUser(t *testing.T) {
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err = container.Output()
 	output, err = container.Output()
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -778,8 +778,8 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a different user by uid
 	// Set a different user by uid
-	container, _, err = runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err = daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "1",
 		User: "1",
@@ -789,7 +789,7 @@ func TestUser(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err = container.Output()
 	output, err = container.Output()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -801,8 +801,8 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Set a different user by username
 	// Set a different user by username
-	container, _, err = runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err = daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "daemon",
 		User: "daemon",
@@ -812,7 +812,7 @@ func TestUser(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err = container.Output()
 	output, err = container.Output()
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -822,8 +822,8 @@ func TestUser(t *testing.T) {
 	}
 	}
 
 
 	// Test an wrong username
 	// Test an wrong username
-	container, _, err = runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err = daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
 		User: "unknownuser",
 		User: "unknownuser",
@@ -833,7 +833,7 @@ func TestUser(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err = container.Output()
 	output, err = container.Output()
 	if container.State.GetExitCode() == 0 {
 	if container.State.GetExitCode() == 0 {
 		t.Fatal("Starting container with wrong uid should fail but it passed.")
 		t.Fatal("Starting container with wrong uid should fail but it passed.")
@@ -841,11 +841,11 @@ func TestUser(t *testing.T) {
 }
 }
 
 
 func TestMultipleContainers(t *testing.T) {
 func TestMultipleContainers(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
-	container1, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container1, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"sleep", "2"},
 		Cmd:   []string{"sleep", "2"},
 	},
 	},
 		"",
 		"",
@@ -853,10 +853,10 @@ func TestMultipleContainers(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container1)
+	defer daemon.Destroy(container1)
 
 
-	container2, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container2, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"sleep", "2"},
 		Cmd:   []string{"sleep", "2"},
 	},
 	},
 		"",
 		"",
@@ -864,7 +864,7 @@ func TestMultipleContainers(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container2)
+	defer daemon.Destroy(container2)
 
 
 	// Start both containers
 	// Start both containers
 	if err := container1.Start(); err != nil {
 	if err := container1.Start(); err != nil {
@@ -897,10 +897,10 @@ func TestMultipleContainers(t *testing.T) {
 }
 }
 
 
 func TestStdin(t *testing.T) {
 func TestStdin(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"cat"},
 		Cmd:   []string{"cat"},
 
 
 		OpenStdin: true,
 		OpenStdin: true,
@@ -910,7 +910,7 @@ func TestStdin(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 
 
 	stdin, err := container.StdinPipe()
 	stdin, err := container.StdinPipe()
 	if err != nil {
 	if err != nil {
@@ -942,10 +942,10 @@ func TestStdin(t *testing.T) {
 }
 }
 
 
 func TestTty(t *testing.T) {
 func TestTty(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"cat"},
 		Cmd:   []string{"cat"},
 
 
 		OpenStdin: true,
 		OpenStdin: true,
@@ -955,7 +955,7 @@ func TestTty(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 
 
 	stdin, err := container.StdinPipe()
 	stdin, err := container.StdinPipe()
 	if err != nil {
 	if err != nil {
@@ -989,17 +989,17 @@ func TestTty(t *testing.T) {
 func TestEnv(t *testing.T) {
 func TestEnv(t *testing.T) {
 	os.Setenv("TRUE", "false")
 	os.Setenv("TRUE", "false")
 	os.Setenv("TRICKY", "tri\ncky\n")
 	os.Setenv("TRICKY", "tri\ncky\n")
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(daemon).ID, "env"}, nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	container, _, err := runtime.Create(config, "")
+	container, _, err := daemon.Create(config, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 
 
 	stdout, err := container.StdoutPipe()
 	stdout, err := container.StdoutPipe()
 	if err != nil {
 	if err != nil {
@@ -1041,11 +1041,11 @@ func TestEnv(t *testing.T) {
 }
 }
 
 
 func TestEntrypoint(t *testing.T) {
 func TestEntrypoint(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image:      GetTestImage(runtime).ID,
+			Image:      GetTestImage(daemon).ID,
 			Entrypoint: []string{"/bin/echo"},
 			Entrypoint: []string{"/bin/echo"},
 			Cmd:        []string{"-n", "foobar"},
 			Cmd:        []string{"-n", "foobar"},
 		},
 		},
@@ -1054,7 +1054,7 @@ func TestEntrypoint(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err := container.Output()
 	output, err := container.Output()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1065,11 +1065,11 @@ func TestEntrypoint(t *testing.T) {
 }
 }
 
 
 func TestEntrypointNoCmd(t *testing.T) {
 func TestEntrypointNoCmd(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image:      GetTestImage(runtime).ID,
+			Image:      GetTestImage(daemon).ID,
 			Entrypoint: []string{"/bin/echo", "foobar"},
 			Entrypoint: []string{"/bin/echo", "foobar"},
 		},
 		},
 		"",
 		"",
@@ -1077,7 +1077,7 @@ func TestEntrypointNoCmd(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	output, err := container.Output()
 	output, err := container.Output()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -1088,11 +1088,11 @@ func TestEntrypointNoCmd(t *testing.T) {
 }
 }
 
 
 func BenchmarkRunSequential(b *testing.B) {
 func BenchmarkRunSequential(b *testing.B) {
-	runtime := mkRuntime(b)
-	defer nuke(runtime)
+	daemon := mkDaemon(b)
+	defer nuke(daemon)
 	for i := 0; i < b.N; i++ {
 	for i := 0; i < b.N; i++ {
-		container, _, err := runtime.Create(&runconfig.Config{
-			Image: GetTestImage(runtime).ID,
+		container, _, err := daemon.Create(&runconfig.Config{
+			Image: GetTestImage(daemon).ID,
 			Cmd:   []string{"echo", "-n", "foo"},
 			Cmd:   []string{"echo", "-n", "foo"},
 		},
 		},
 			"",
 			"",
@@ -1100,7 +1100,7 @@ func BenchmarkRunSequential(b *testing.B) {
 		if err != nil {
 		if err != nil {
 			b.Fatal(err)
 			b.Fatal(err)
 		}
 		}
-		defer runtime.Destroy(container)
+		defer daemon.Destroy(container)
 		output, err := container.Output()
 		output, err := container.Output()
 		if err != nil {
 		if err != nil {
 			b.Fatal(err)
 			b.Fatal(err)
@@ -1108,15 +1108,15 @@ func BenchmarkRunSequential(b *testing.B) {
 		if string(output) != "foo" {
 		if string(output) != "foo" {
 			b.Fatalf("Unexpected output: %s", output)
 			b.Fatalf("Unexpected output: %s", output)
 		}
 		}
-		if err := runtime.Destroy(container); err != nil {
+		if err := daemon.Destroy(container); err != nil {
 			b.Fatal(err)
 			b.Fatal(err)
 		}
 		}
 	}
 	}
 }
 }
 
 
 func BenchmarkRunParallel(b *testing.B) {
 func BenchmarkRunParallel(b *testing.B) {
-	runtime := mkRuntime(b)
-	defer nuke(runtime)
+	daemon := mkDaemon(b)
+	defer nuke(daemon)
 
 
 	var tasks []chan error
 	var tasks []chan error
 
 
@@ -1124,8 +1124,8 @@ func BenchmarkRunParallel(b *testing.B) {
 		complete := make(chan error)
 		complete := make(chan error)
 		tasks = append(tasks, complete)
 		tasks = append(tasks, complete)
 		go func(i int, complete chan error) {
 		go func(i int, complete chan error) {
-			container, _, err := runtime.Create(&runconfig.Config{
-				Image: GetTestImage(runtime).ID,
+			container, _, err := daemon.Create(&runconfig.Config{
+				Image: GetTestImage(daemon).ID,
 				Cmd:   []string{"echo", "-n", "foo"},
 				Cmd:   []string{"echo", "-n", "foo"},
 			},
 			},
 				"",
 				"",
@@ -1134,7 +1134,7 @@ func BenchmarkRunParallel(b *testing.B) {
 				complete <- err
 				complete <- err
 				return
 				return
 			}
 			}
-			defer runtime.Destroy(container)
+			defer daemon.Destroy(container)
 			if err := container.Start(); err != nil {
 			if err := container.Start(); err != nil {
 				complete <- err
 				complete <- err
 				return
 				return
@@ -1146,7 +1146,7 @@ func BenchmarkRunParallel(b *testing.B) {
 			// if string(output) != "foo" {
 			// if string(output) != "foo" {
 			// 	complete <- fmt.Errorf("Unexecpted output: %v", string(output))
 			// 	complete <- fmt.Errorf("Unexecpted output: %v", string(output))
 			// }
 			// }
-			if err := runtime.Destroy(container); err != nil {
+			if err := daemon.Destroy(container); err != nil {
 				complete <- err
 				complete <- err
 				return
 				return
 			}
 			}
@@ -1176,7 +1176,7 @@ func tempDir(t *testing.T) string {
 // Test for #1737
 // Test for #1737
 func TestCopyVolumeUidGid(t *testing.T) {
 func TestCopyVolumeUidGid(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	r := mkRuntimeFromEngine(eng, t)
+	r := mkDaemonFromEngine(eng, t)
 	defer r.Nuke()
 	defer r.Nuke()
 
 
 	// Add directory not owned by root
 	// Add directory not owned by root
@@ -1210,7 +1210,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
 // Test for #1582
 // Test for #1582
 func TestCopyVolumeContent(t *testing.T) {
 func TestCopyVolumeContent(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	r := mkRuntimeFromEngine(eng, t)
+	r := mkDaemonFromEngine(eng, t)
 	defer r.Nuke()
 	defer r.Nuke()
 
 
 	// Put some content in a directory of a container and commit it
 	// Put some content in a directory of a container and commit it
@@ -1243,7 +1243,7 @@ func TestCopyVolumeContent(t *testing.T) {
 
 
 func TestBindMounts(t *testing.T) {
 func TestBindMounts(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	r := mkRuntimeFromEngine(eng, t)
+	r := mkDaemonFromEngine(eng, t)
 	defer r.Nuke()
 	defer r.Nuke()
 
 
 	tmpDir := tempDir(t)
 	tmpDir := tempDir(t)
@@ -1275,11 +1275,11 @@ func TestBindMounts(t *testing.T) {
 
 
 // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
 // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
 func TestRestartWithVolumes(t *testing.T) {
 func TestRestartWithVolumes(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image:   GetTestImage(runtime).ID,
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image:   GetTestImage(daemon).ID,
 		Cmd:     []string{"echo", "-n", "foobar"},
 		Cmd:     []string{"echo", "-n", "foobar"},
 		Volumes: map[string]struct{}{"/test": {}},
 		Volumes: map[string]struct{}{"/test": {}},
 	},
 	},
@@ -1288,7 +1288,7 @@ func TestRestartWithVolumes(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 
 
 	for key := range container.Config.Volumes {
 	for key := range container.Config.Volumes {
 		if key != "/test" {
 		if key != "/test" {
@@ -1318,11 +1318,11 @@ func TestRestartWithVolumes(t *testing.T) {
 }
 }
 
 
 func TestContainerNetwork(t *testing.T) {
 func TestContainerNetwork(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image: GetTestImage(runtime).ID,
+			Image: GetTestImage(daemon).ID,
 			// If I change this to ping 8.8.8.8 it fails.  Any idea why? - timthelion
 			// If I change this to ping 8.8.8.8 it fails.  Any idea why? - timthelion
 			Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
 			Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
 		},
 		},
@@ -1331,7 +1331,7 @@ func TestContainerNetwork(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	if err := container.Run(); err != nil {
 	if err := container.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -1342,11 +1342,11 @@ func TestContainerNetwork(t *testing.T) {
 
 
 // Issue #4681
 // Issue #4681
 func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
 func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container, _, err := runtime.Create(
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
+	container, _, err := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image:           GetTestImage(runtime).ID,
+			Image:           GetTestImage(daemon).ID,
 			Cmd:             []string{"ping", "-c", "1", "127.0.0.1"},
 			Cmd:             []string{"ping", "-c", "1", "127.0.0.1"},
 			NetworkDisabled: true,
 			NetworkDisabled: true,
 		},
 		},
@@ -1355,7 +1355,7 @@ func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer runtime.Destroy(container)
+	defer daemon.Destroy(container)
 	if err := container.Run(); err != nil {
 	if err := container.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -1366,10 +1366,10 @@ func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
 
 
 func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)
 
 
-	config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show", "up"}, nil)
+	config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(daemon).ID, "ip", "addr", "show", "up"}, nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -1384,9 +1384,9 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	// FIXME: this hack can be removed once Wait is a job
 	// FIXME: this hack can be removed once Wait is a job
-	c := runtime.Get(id)
+	c := daemon.Get(id)
 	if c == nil {
 	if c == nil {
-		t.Fatalf("Couldn't retrieve container %s from runtime", id)
+		t.Fatalf("Couldn't retrieve container %s from daemon", id)
 	}
 	}
 	stdout, err := c.StdoutPipe()
 	stdout, err := c.StdoutPipe()
 	if err != nil {
 	if err != nil {
@@ -1419,36 +1419,36 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 
 
 func TestPrivilegedCanMknod(t *testing.T) {
 func TestPrivilegedCanMknod(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer runtime.Nuke()
-	if output, err := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
+	daemon := mkDaemonFromEngine(eng, t)
+	defer daemon.Nuke()
+	if output, err := runContainer(eng, daemon, []string{"--privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
 		t.Fatalf("Could not mknod into privileged container %s %v", output, err)
 		t.Fatalf("Could not mknod into privileged container %s %v", output, err)
 	}
 	}
 }
 }
 
 
 func TestPrivilegedCanMount(t *testing.T) {
 func TestPrivilegedCanMount(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer runtime.Nuke()
-	if output, _ := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
+	daemon := mkDaemonFromEngine(eng, t)
+	defer daemon.Nuke()
+	if output, _ := runContainer(eng, daemon, []string{"--privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
 		t.Fatal("Could not mount into privileged container")
 		t.Fatal("Could not mount into privileged container")
 	}
 	}
 }
 }
 
 
 func TestUnprivilegedCanMknod(t *testing.T) {
 func TestUnprivilegedCanMknod(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer runtime.Nuke()
-	if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
+	daemon := mkDaemonFromEngine(eng, t)
+	defer daemon.Nuke()
+	if output, _ := runContainer(eng, daemon, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
 		t.Fatal("Couldn't mknod into secure container")
 		t.Fatal("Couldn't mknod into secure container")
 	}
 	}
 }
 }
 
 
 func TestUnprivilegedCannotMount(t *testing.T) {
 func TestUnprivilegedCannotMount(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer runtime.Nuke()
-	if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
+	daemon := mkDaemonFromEngine(eng, t)
+	defer daemon.Nuke()
+	if output, _ := runContainer(eng, daemon, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
 		t.Fatal("Could mount into secure container")
 		t.Fatal("Could mount into secure container")
 	}
 	}
 }
 }

+ 1 - 1
integration/graph_test.go

@@ -3,10 +3,10 @@ package docker
 import (
 import (
 	"errors"
 	"errors"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/graph"
 	"github.com/dotcloud/docker/graph"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/runtime/graphdriver"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"

+ 132 - 132
integration/runtime_test.go

@@ -3,11 +3,11 @@ package docker
 import (
 import (
 	"bytes"
 	"bytes"
 	"fmt"
 	"fmt"
+	"github.com/dotcloud/docker/daemon"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/nat"
 	"github.com/dotcloud/docker/nat"
 	"github.com/dotcloud/docker/runconfig"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime"
 	"github.com/dotcloud/docker/sysinit"
 	"github.com/dotcloud/docker/sysinit"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
@@ -38,8 +38,8 @@ const (
 )
 )
 
 
 var (
 var (
-	// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
-	globalRuntime          *runtime.Runtime
+	// FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted.
+	globalDaemon           *daemon.Daemon
 	globalEngine           *engine.Engine
 	globalEngine           *engine.Engine
 	globalHttpsEngine      *engine.Engine
 	globalHttpsEngine      *engine.Engine
 	globalRogueHttpsEngine *engine.Engine
 	globalRogueHttpsEngine *engine.Engine
@@ -47,17 +47,17 @@ var (
 	startGoroutines        int
 	startGoroutines        int
 )
 )
 
 
-// FIXME: nuke() is deprecated by Runtime.Nuke()
-func nuke(runtime *runtime.Runtime) error {
-	return runtime.Nuke()
+// FIXME: nuke() is deprecated by Daemon.Nuke()
+func nuke(daemon *daemon.Daemon) error {
+	return daemon.Nuke()
 }
 }
 
 
 // FIXME: cleanup and nuke are redundant.
 // FIXME: cleanup and nuke are redundant.
 func cleanup(eng *engine.Engine, t *testing.T) error {
 func cleanup(eng *engine.Engine, t *testing.T) error {
-	runtime := mkRuntimeFromEngine(eng, t)
-	for _, container := range runtime.List() {
+	daemon := mkDaemonFromEngine(eng, t)
+	for _, container := range daemon.List() {
 		container.Kill()
 		container.Kill()
-		runtime.Destroy(container)
+		daemon.Destroy(container)
 	}
 	}
 	job := eng.Job("images")
 	job := eng.Job("images")
 	images, err := job.Stdout.AddTable()
 	images, err := job.Stdout.AddTable()
@@ -119,11 +119,11 @@ func init() {
 		src.Close()
 		src.Close()
 	}
 	}
 
 
-	// Setup the base runtime, which will be duplicated for each test.
+	// Setup the base daemon, which will be duplicated for each test.
 	// (no tests are run directly in the base)
 	// (no tests are run directly in the base)
 	setupBaseImage()
 	setupBaseImage()
 
 
-	// Create the "global runtime" with a long-running daemons for integration tests
+	// Create the "global daemon" with a long-running daemons for integration tests
 	spawnGlobalDaemon()
 	spawnGlobalDaemon()
 	spawnLegitHttpsDaemon()
 	spawnLegitHttpsDaemon()
 	spawnRogueHttpsDaemon()
 	spawnRogueHttpsDaemon()
@@ -146,14 +146,14 @@ func setupBaseImage() {
 }
 }
 
 
 func spawnGlobalDaemon() {
 func spawnGlobalDaemon() {
-	if globalRuntime != nil {
-		utils.Debugf("Global runtime already exists. Skipping.")
+	if globalDaemon != nil {
+		utils.Debugf("Global daemon already exists. Skipping.")
 		return
 		return
 	}
 	}
 	t := log.New(os.Stderr, "", 0)
 	t := log.New(os.Stderr, "", 0)
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
 	globalEngine = eng
 	globalEngine = eng
-	globalRuntime = mkRuntimeFromEngine(eng, t)
+	globalDaemon = mkDaemonFromEngine(eng, t)
 
 
 	// Spawn a Daemon
 	// Spawn a Daemon
 	go func() {
 	go func() {
@@ -235,8 +235,8 @@ func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
 
 
 // FIXME: test that ImagePull(json=true) send correct json output
 // FIXME: test that ImagePull(json=true) send correct json output
 
 
-func GetTestImage(runtime *runtime.Runtime) *image.Image {
-	imgs, err := runtime.Graph().Map()
+func GetTestImage(daemon *daemon.Daemon) *image.Image {
+	imgs, err := daemon.Graph().Map()
 	if err != nil {
 	if err != nil {
 		log.Fatalf("Unable to get the test image: %s", err)
 		log.Fatalf("Unable to get the test image: %s", err)
 	}
 	}
@@ -245,21 +245,21 @@ func GetTestImage(runtime *runtime.Runtime) *image.Image {
 			return image
 			return image
 		}
 		}
 	}
 	}
-	log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs)
+	log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs)
 	return nil
 	return nil
 }
 }
 
 
-func TestRuntimeCreate(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+func TestDaemonCreate(t *testing.T) {
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
 	// Make sure we start we 0 containers
 	// Make sure we start we 0 containers
-	if len(runtime.List()) != 0 {
-		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
+	if len(daemon.List()) != 0 {
+		t.Errorf("Expected 0 containers, %v found", len(daemon.List()))
 	}
 	}
 
 
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"ls", "-al"},
 		Cmd:   []string{"ls", "-al"},
 	},
 	},
 		"",
 		"",
@@ -269,56 +269,56 @@ func TestRuntimeCreate(t *testing.T) {
 	}
 	}
 
 
 	defer func() {
 	defer func() {
-		if err := runtime.Destroy(container); err != nil {
+		if err := daemon.Destroy(container); err != nil {
 			t.Error(err)
 			t.Error(err)
 		}
 		}
 	}()
 	}()
 
 
 	// Make sure we can find the newly created container with List()
 	// Make sure we can find the newly created container with List()
-	if len(runtime.List()) != 1 {
-		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
+	if len(daemon.List()) != 1 {
+		t.Errorf("Expected 1 container, %v found", len(daemon.List()))
 	}
 	}
 
 
 	// Make sure the container List() returns is the right one
 	// Make sure the container List() returns is the right one
-	if runtime.List()[0].ID != container.ID {
-		t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
+	if daemon.List()[0].ID != container.ID {
+		t.Errorf("Unexpected container %v returned by List", daemon.List()[0])
 	}
 	}
 
 
 	// Make sure we can get the container with Get()
 	// Make sure we can get the container with Get()
-	if runtime.Get(container.ID) == nil {
+	if daemon.Get(container.ID) == nil {
 		t.Errorf("Unable to get newly created container")
 		t.Errorf("Unable to get newly created container")
 	}
 	}
 
 
 	// Make sure it is the right container
 	// Make sure it is the right container
-	if runtime.Get(container.ID) != container {
+	if daemon.Get(container.ID) != container {
 		t.Errorf("Get() returned the wrong container")
 		t.Errorf("Get() returned the wrong container")
 	}
 	}
 
 
 	// Make sure Exists returns it as existing
 	// Make sure Exists returns it as existing
-	if !runtime.Exists(container.ID) {
+	if !daemon.Exists(container.ID) {
 		t.Errorf("Exists() returned false for a newly created container")
 		t.Errorf("Exists() returned false for a newly created container")
 	}
 	}
 
 
 	// Test that conflict error displays correct details
 	// Test that conflict error displays correct details
-	testContainer, _, _ := runtime.Create(
+	testContainer, _, _ := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image: GetTestImage(runtime).ID,
+			Image: GetTestImage(daemon).ID,
 			Cmd:   []string{"ls", "-al"},
 			Cmd:   []string{"ls", "-al"},
 		},
 		},
 		"conflictname",
 		"conflictname",
 	)
 	)
-	if _, _, err := runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
+	if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
 		t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
 		t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
 	}
 	}
 
 
 	// Make sure create with bad parameters returns an error
 	// Make sure create with bad parameters returns an error
-	if _, _, err = runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
+	if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, ""); err == nil {
 		t.Fatal("Builder.Create should throw an error when Cmd is missing")
 		t.Fatal("Builder.Create should throw an error when Cmd is missing")
 	}
 	}
 
 
-	if _, _, err := runtime.Create(
+	if _, _, err := daemon.Create(
 		&runconfig.Config{
 		&runconfig.Config{
-			Image: GetTestImage(runtime).ID,
+			Image: GetTestImage(daemon).ID,
 			Cmd:   []string{},
 			Cmd:   []string{},
 		},
 		},
 		"",
 		"",
@@ -327,20 +327,20 @@ func TestRuntimeCreate(t *testing.T) {
 	}
 	}
 
 
 	config := &runconfig.Config{
 	config := &runconfig.Config{
-		Image:     GetTestImage(runtime).ID,
+		Image:     GetTestImage(daemon).ID,
 		Cmd:       []string{"/bin/ls"},
 		Cmd:       []string{"/bin/ls"},
 		PortSpecs: []string{"80"},
 		PortSpecs: []string{"80"},
 	}
 	}
-	container, _, err = runtime.Create(config, "")
+	container, _, err = daemon.Create(config, "")
 
 
-	_, err = runtime.Commit(container, "testrepo", "testtag", "", "", config)
+	_, err = daemon.Commit(container, "testrepo", "testtag", "", "", config)
 	if err != nil {
 	if err != nil {
 		t.Error(err)
 		t.Error(err)
 	}
 	}
 
 
 	// test expose 80:8000
 	// test expose 80:8000
-	container, warnings, err := runtime.Create(&runconfig.Config{
-		Image:     GetTestImage(runtime).ID,
+	container, warnings, err := daemon.Create(&runconfig.Config{
+		Image:     GetTestImage(daemon).ID,
 		Cmd:       []string{"ls", "-al"},
 		Cmd:       []string{"ls", "-al"},
 		PortSpecs: []string{"80:8000"},
 		PortSpecs: []string{"80:8000"},
 	},
 	},
@@ -355,83 +355,83 @@ func TestRuntimeCreate(t *testing.T) {
 }
 }
 
 
 func TestDestroy(t *testing.T) {
 func TestDestroy(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"ls", "-al"},
 		Cmd:   []string{"ls", "-al"},
 	}, "")
 	}, "")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	// Destroy
 	// Destroy
-	if err := runtime.Destroy(container); err != nil {
+	if err := daemon.Destroy(container); err != nil {
 		t.Error(err)
 		t.Error(err)
 	}
 	}
 
 
-	// Make sure runtime.Exists() behaves correctly
-	if runtime.Exists("test_destroy") {
+	// Make sure daemon.Exists() behaves correctly
+	if daemon.Exists("test_destroy") {
 		t.Errorf("Exists() returned true")
 		t.Errorf("Exists() returned true")
 	}
 	}
 
 
-	// Make sure runtime.List() doesn't list the destroyed container
-	if len(runtime.List()) != 0 {
-		t.Errorf("Expected 0 container, %v found", len(runtime.List()))
+	// Make sure daemon.List() doesn't list the destroyed container
+	if len(daemon.List()) != 0 {
+		t.Errorf("Expected 0 container, %v found", len(daemon.List()))
 	}
 	}
 
 
-	// Make sure runtime.Get() refuses to return the unexisting container
-	if runtime.Get(container.ID) != nil {
+	// Make sure daemon.Get() refuses to return the unexisting container
+	if daemon.Get(container.ID) != nil {
 		t.Errorf("Unable to get newly created container")
 		t.Errorf("Unable to get newly created container")
 	}
 	}
 
 
 	// Test double destroy
 	// Test double destroy
-	if err := runtime.Destroy(container); err == nil {
+	if err := daemon.Destroy(container); err == nil {
 		// It should have failed
 		// It should have failed
 		t.Errorf("Double destroy did not fail")
 		t.Errorf("Double destroy did not fail")
 	}
 	}
 }
 }
 
 
 func TestGet(t *testing.T) {
 func TestGet(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)
 
 
-	container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
-	defer runtime.Destroy(container1)
+	container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
+	defer daemon.Destroy(container1)
 
 
-	container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
-	defer runtime.Destroy(container2)
+	container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
+	defer daemon.Destroy(container2)
 
 
-	container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
-	defer runtime.Destroy(container3)
+	container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
+	defer daemon.Destroy(container3)
 
 
-	if runtime.Get(container1.ID) != container1 {
-		t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
+	if daemon.Get(container1.ID) != container1 {
+		t.Errorf("Get(test1) returned %v while expecting %v", daemon.Get(container1.ID), container1)
 	}
 	}
 
 
-	if runtime.Get(container2.ID) != container2 {
-		t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
+	if daemon.Get(container2.ID) != container2 {
+		t.Errorf("Get(test2) returned %v while expecting %v", daemon.Get(container2.ID), container2)
 	}
 	}
 
 
-	if runtime.Get(container3.ID) != container3 {
-		t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
+	if daemon.Get(container3.ID) != container3 {
+		t.Errorf("Get(test3) returned %v while expecting %v", daemon.Get(container3.ID), container3)
 	}
 	}
 
 
 }
 }
 
 
-func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *runtime.Container, string) {
+func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) {
 	var (
 	var (
 		err     error
 		err     error
 		id      string
 		id      string
 		strPort string
 		strPort string
 		eng     = NewTestEngine(t)
 		eng     = NewTestEngine(t)
-		runtime = mkRuntimeFromEngine(eng, t)
+		daemon  = mkDaemonFromEngine(eng, t)
 		port    = 5554
 		port    = 5554
 		p       nat.Port
 		p       nat.Port
 	)
 	)
 	defer func() {
 	defer func() {
 		if err != nil {
 		if err != nil {
-			runtime.Nuke()
+			daemon.Nuke()
 		}
 		}
 	}()
 	}()
 
 
@@ -459,7 +459,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *ru
 		if err := jobCreate.Run(); err != nil {
 		if err := jobCreate.Run(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
-		// FIXME: this relies on the undocumented behavior of runtime.Create
+		// FIXME: this relies on the undocumented behavior of daemon.Create
 		// which will return a nil error AND container if the exposed ports
 		// which will return a nil error AND container if the exposed ports
 		// are invalid. That behavior should be fixed!
 		// are invalid. That behavior should be fixed!
 		if id != "" {
 		if id != "" {
@@ -481,7 +481,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *ru
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	container := runtime.Get(id)
+	container := daemon.Get(id)
 	if container == nil {
 	if container == nil {
 		t.Fatalf("Couldn't fetch test container %s", id)
 		t.Fatalf("Couldn't fetch test container %s", id)
 	}
 	}
@@ -496,13 +496,13 @@ func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *ru
 	container.WaitTimeout(500 * time.Millisecond)
 	container.WaitTimeout(500 * time.Millisecond)
 
 
 	strPort = container.NetworkSettings.Ports[p][0].HostPort
 	strPort = container.NetworkSettings.Ports[p][0].HostPort
-	return runtime, container, strPort
+	return daemon, container, strPort
 }
 }
 
 
 // Run a container with a TCP port allocated, and test that it can receive connections on localhost
 // Run a container with a TCP port allocated, and test that it can receive connections on localhost
 func TestAllocateTCPPortLocalhost(t *testing.T) {
 func TestAllocateTCPPortLocalhost(t *testing.T) {
-	runtime, container, port := startEchoServerContainer(t, "tcp")
-	defer nuke(runtime)
+	daemon, container, port := startEchoServerContainer(t, "tcp")
+	defer nuke(daemon)
 	defer container.Kill()
 	defer container.Kill()
 
 
 	for i := 0; i != 10; i++ {
 	for i := 0; i != 10; i++ {
@@ -550,8 +550,8 @@ func TestAllocateTCPPortLocalhost(t *testing.T) {
 
 
 // Run a container with an UDP port allocated, and test that it can receive connections on localhost
 // Run a container with an UDP port allocated, and test that it can receive connections on localhost
 func TestAllocateUDPPortLocalhost(t *testing.T) {
 func TestAllocateUDPPortLocalhost(t *testing.T) {
-	runtime, container, port := startEchoServerContainer(t, "udp")
-	defer nuke(runtime)
+	daemon, container, port := startEchoServerContainer(t, "udp")
+	defer nuke(daemon)
 	defer container.Kill()
 	defer container.Kill()
 
 
 	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
 	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
@@ -586,15 +586,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
 
 
 func TestRestore(t *testing.T) {
 func TestRestore(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
-	runtime1 := mkRuntimeFromEngine(eng, t)
-	defer runtime1.Nuke()
+	daemon1 := mkDaemonFromEngine(eng, t)
+	defer daemon1.Nuke()
 	// Create a container with one instance of docker
 	// Create a container with one instance of docker
-	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
-	defer runtime1.Destroy(container1)
+	container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t)
+	defer daemon1.Destroy(container1)
 
 
 	// Create a second container meant to be killed
 	// Create a second container meant to be killed
-	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
-	defer runtime1.Destroy(container2)
+	container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t)
+	defer daemon1.Destroy(container2)
 
 
 	// Start the container non blocking
 	// Start the container non blocking
 	if err := container2.Start(); err != nil {
 	if err := container2.Start(); err != nil {
@@ -614,8 +614,8 @@ func TestRestore(t *testing.T) {
 	container2.State.SetRunning(42)
 	container2.State.SetRunning(42)
 	container2.ToDisk()
 	container2.ToDisk()
 
 
-	if len(runtime1.List()) != 2 {
-		t.Errorf("Expected 2 container, %v found", len(runtime1.List()))
+	if len(daemon1.List()) != 2 {
+		t.Errorf("Expected 2 container, %v found", len(daemon1.List()))
 	}
 	}
 	if err := container1.Run(); err != nil {
 	if err := container1.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -628,12 +628,12 @@ func TestRestore(t *testing.T) {
 	// Here are are simulating a docker restart - that is, reloading all containers
 	// Here are are simulating a docker restart - that is, reloading all containers
 	// from scratch
 	// from scratch
 	eng = newTestEngine(t, false, eng.Root())
-	runtime2 := mkRuntimeFromEngine(eng, t)
-	if len(runtime2.List()) != 2 {
-		t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
+	daemon2 := mkDaemonFromEngine(eng, t)
+	if len(daemon2.List()) != 2 {
+		t.Errorf("Expected 2 container, %v found", len(daemon2.List()))
 	}
 	runningCount := 0
-	for _, c := range runtime2.List() {
+	for _, c := range daemon2.List() {
 		if c.State.IsRunning() {
 			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
 			runningCount++
@@ -642,7 +642,7 @@ func TestRestore(t *testing.T) {
 	if runningCount != 0 {
 		t.Fatalf("Expected 0 container alive, %d found", runningCount)
 	}
-	container3 := runtime2.Get(container1.ID)
+	container3 := daemon2.Get(container1.ID)
 	if container3 == nil {
 		t.Fatal("Unable to Get container")
 	}
@@ -654,22 +654,22 @@ func TestRestore(t *testing.T) {

 func TestDefaultContainerName(t *testing.T) {
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)

 	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
+	container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name"))
 	containerID := container.ID

 	if container.Name != "/some_name" {
 		t.Fatalf("Expect /some_name got %s", container.Name)
 	}

-	if c := runtime.Get("/some_name"); c == nil {
+	if c := daemon.Get("/some_name"); c == nil {
 		t.Fatalf("Couldn't retrieve test container as /some_name")
 	} else if c.ID != containerID {
 		t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
@@ -678,22 +678,22 @@ func TestDefaultContainerName(t *testing.T) {

 func TestRandomContainerName(t *testing.T) {
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)

-	config, _, _, err := runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil)
+	config, _, _, err := runconfig.Parse([]string{GetTestImage(daemon).ID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	container := runtime.Get(createTestContainer(eng, config, t))
+	container := daemon.Get(createTestContainer(eng, config, t))
 	containerID := container.ID

 	if container.Name == "" {
 		t.Fatalf("Expected not empty container name")
 	}

-	if c := runtime.Get(container.Name); c == nil {
+	if c := daemon.Get(container.Name); c == nil {
 		log.Fatalf("Could not lookup container %s by its name", container.Name)
 	} else if c.ID != containerID {
 		log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
@@ -702,8 +702,8 @@ func TestRandomContainerName(t *testing.T) {

 func TestContainerNameValidation(t *testing.T) {
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)

 	for _, test := range []struct {
 		Name  string
@@ -733,13 +733,13 @@ func TestContainerNameValidation(t *testing.T) {
 			t.Fatal(err)
 		}

-		container := runtime.Get(shortID)
+		container := daemon.Get(shortID)

 		if container.Name != "/"+test.Name {
 			t.Fatalf("Expect /%s got %s", test.Name, container.Name)
 		}

-		if c := runtime.Get("/" + test.Name); c == nil {
+		if c := daemon.Get("/" + test.Name); c == nil {
 			t.Fatalf("Couldn't retrieve test container as /%s", test.Name)
 		} else if c.ID != container.ID {
 			t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID)
@@ -750,17 +750,17 @@ func TestContainerNameValidation(t *testing.T) {

 func TestLinkChildContainer(t *testing.T) {
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)

 	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
+	container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))

-	webapp, err := runtime.GetByName("/webapp")
+	webapp, err := daemon.GetByName("/webapp")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -769,19 +769,19 @@ func TestLinkChildContainer(t *testing.T) {
 		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
 	}

-	config, _, _, err = runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil)
+	config, _, _, err = runconfig.Parse([]string{GetTestImage(daemon).ID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	childContainer := runtime.Get(createTestContainer(eng, config, t))
+	childContainer := daemon.Get(createTestContainer(eng, config, t))

-	if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
+	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
 		t.Fatal(err)
 	}

 	// Get the child by it's new name
-	db, err := runtime.GetByName("/webapp/db")
+	db, err := daemon.GetByName("/webapp/db")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -792,17 +792,17 @@ func TestLinkChildContainer(t *testing.T) {

 func TestGetAllChildren(t *testing.T) {
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime)
+	daemon := mkDaemonFromEngine(eng, t)
+	defer nuke(daemon)

 	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
+	container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))

-	webapp, err := runtime.GetByName("/webapp")
+	webapp, err := daemon.GetByName("/webapp")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -816,13 +816,13 @@ func TestGetAllChildren(t *testing.T) {
 		t.Fatal(err)
 	}

-	childContainer := runtime.Get(createTestContainer(eng, config, t))
+	childContainer := daemon.Get(createTestContainer(eng, config, t))

-	if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
+	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
 		t.Fatal(err)
 	}

-	children, err := runtime.Children("/webapp")
+	children, err := daemon.Children("/webapp")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -845,11 +845,11 @@ func TestGetAllChildren(t *testing.T) {
 }

 func TestDestroyWithInitLayer(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	daemon := mkDaemon(t)
+	defer nuke(daemon)

-	container, _, err := runtime.Create(&runconfig.Config{
-		Image: GetTestImage(runtime).ID,
+	container, _, err := daemon.Create(&runconfig.Config{
+		Image: GetTestImage(daemon).ID,
 		Cmd:   []string{"ls", "-al"},
 	}, "")

@@ -857,21 +857,21 @@ func TestDestroyWithInitLayer(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Destroy
-	if err := runtime.Destroy(container); err != nil {
+	if err := daemon.Destroy(container); err != nil {
 		t.Fatal(err)
 	}

-	// Make sure runtime.Exists() behaves correctly
-	if runtime.Exists("test_destroy") {
+	// Make sure daemon.Exists() behaves correctly
+	if daemon.Exists("test_destroy") {
 		t.Fatalf("Exists() returned true")
 	}

-	// Make sure runtime.List() doesn't list the destroyed container
-	if len(runtime.List()) != 0 {
-		t.Fatalf("Expected 0 container, %v found", len(runtime.List()))
+	// Make sure daemon.List() doesn't list the destroyed container
+	if len(daemon.List()) != 0 {
+		t.Fatalf("Expected 0 container, %v found", len(daemon.List()))
 	}

-	driver := runtime.Graph().Driver()
+	driver := daemon.Graph().Driver()

 	// Make sure that the container does not exist in the driver
 	if _, err := driver.Get(container.ID); err == nil {

+ 15 - 15
integration/server_test.go

@@ -11,7 +11,7 @@ import (

 func TestCreateRm(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
@@ -58,7 +58,7 @@ func TestCreateRm(t *testing.T) {

 func TestCreateNumberHostname(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config, _, _, err := runconfig.Parse([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil)
 	if err != nil {
@@ -70,7 +70,7 @@ func TestCreateNumberHostname(t *testing.T) {

 func TestCreateNumberUsername(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config, _, _, err := runconfig.Parse([]string{"-u", "1002", unitTestImageID, "echo test"}, nil)
 	if err != nil {
@@ -82,7 +82,7 @@ func TestCreateNumberUsername(t *testing.T) {

 func TestCreateRmVolumes(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config, hostConfig, _, err := runconfig.Parse([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil)
 	if err != nil {
@@ -142,7 +142,7 @@ func TestCreateRmVolumes(t *testing.T) {

 func TestCreateRmRunning(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config, hostConfig, _, err := runconfig.Parse([]string{"--name", "foo", unitTestImageID, "sleep 300"}, nil)
 	if err != nil {
@@ -216,7 +216,7 @@ func TestCreateRmRunning(t *testing.T) {

 func TestCommit(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "/bin/cat"}, nil)
 	if err != nil {
@@ -236,7 +236,7 @@ func TestCommit(t *testing.T) {

 func TestMergeConfigOnCommit(t *testing.T) {
 	eng := NewTestEngine(t)
-	runtime := mkRuntimeFromEngine(eng, t)
+	runtime := mkDaemonFromEngine(eng, t)
 	defer runtime.Nuke()

 	container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t)
@@ -294,7 +294,7 @@ func TestMergeConfigOnCommit(t *testing.T) {
 func TestRestartKillWait(t *testing.T) {
 	eng := NewTestEngine(t)
 	srv := mkServerFromEngine(eng, t)
-	runtime := mkRuntimeFromEngine(eng, t)
+	runtime := mkDaemonFromEngine(eng, t)
 	defer runtime.Nuke()

 	config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
@@ -360,7 +360,7 @@ func TestRestartKillWait(t *testing.T) {
 func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 	eng := NewTestEngine(t)
 	srv := mkServerFromEngine(eng, t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
 	if err != nil {
@@ -439,7 +439,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {

 func TestRunWithTooLowMemoryLimit(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
 	job := eng.Job("create")
@@ -457,7 +457,7 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) {
 func TestRmi(t *testing.T) {
 	eng := NewTestEngine(t)
 	srv := mkServerFromEngine(eng, t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	initialImages := getAllImages(eng, t)

@@ -542,7 +542,7 @@ func TestRmi(t *testing.T) {

 func TestImagesFilter(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))

 	if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil {
 		t.Fatal(err)
@@ -584,7 +584,7 @@ func TestImagesFilter(t *testing.T) {
 // FIXE: 'insert' is deprecated and should be removed in a future version.
 func TestImageInsert(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()
 	srv := mkServerFromEngine(eng, t)

 	// bad image name fails
@@ -606,7 +606,7 @@ func TestImageInsert(t *testing.T) {
 func TestListContainers(t *testing.T) {
 	eng := NewTestEngine(t)
 	srv := mkServerFromEngine(eng, t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	config := runconfig.Config{
 		Image:     unitTestImageID,
@@ -721,7 +721,7 @@ func assertContainerList(srv *server.Server, all bool, limit int, since, before
 // container
 func TestDeleteTagWithExistingContainers(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer nuke(mkRuntimeFromEngine(eng, t))
+	defer nuke(mkDaemonFromEngine(eng, t))

 	srv := mkServerFromEngine(eng, t)


+ 2 - 2
integration/sorter_test.go

@@ -8,7 +8,7 @@ import (

 func TestServerListOrderedImagesByCreationDate(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	if err := generateImage("", eng); err != nil {
 		t.Fatal(err)
@@ -23,7 +23,7 @@ func TestServerListOrderedImagesByCreationDate(t *testing.T) {

 func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
 	eng := NewTestEngine(t)
-	defer mkRuntimeFromEngine(eng, t).Nuke()
+	defer mkDaemonFromEngine(eng, t).Nuke()

 	err := generateImage("bar", eng)
 	if err != nil {

+ 19 - 19
integration/utils_test.go

@@ -15,9 +15,9 @@ import (
 	"time"

 	"github.com/dotcloud/docker/builtins"
+	"github.com/dotcloud/docker/daemon"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime"
 	"github.com/dotcloud/docker/server"
 	"github.com/dotcloud/docker/utils"
 )
@@ -26,11 +26,11 @@ import (
 // It has to be named XXX_test.go, apparently, in other to access private functions
 // from other XXX_test.go functions.

-// Create a temporary runtime suitable for unit testing.
+// Create a temporary daemon suitable for unit testing.
 // Call t.Fatal() at the first error.
-func mkRuntime(f utils.Fataler) *runtime.Runtime {
+func mkDaemon(f utils.Fataler) *daemon.Daemon {
 	eng := newTestEngine(f, false, "")
-	return mkRuntimeFromEngine(eng, f)
+	return mkDaemonFromEngine(eng, f)
 	// FIXME:
 	// [...]
 	// Mtu:         docker.GetDefaultNetworkMtu(),
@@ -116,8 +116,8 @@ func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) {
 }

 func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) {
-	runtime := mkRuntimeFromEngine(eng, t)
-	if c := runtime.Get(id); c != nil {
+	daemon := mkDaemonFromEngine(eng, t)
+	if c := daemon.Get(id); c != nil {
 		t.Fatal(fmt.Errorf("Container %s should not exist", id))
 	}
 }
@@ -140,9 +140,9 @@ func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
 	}
 }

-func getContainer(eng *engine.Engine, id string, t utils.Fataler) *runtime.Container {
-	runtime := mkRuntimeFromEngine(eng, t)
-	c := runtime.Get(id)
+func getContainer(eng *engine.Engine, id string, t utils.Fataler) *daemon.Container {
+	daemon := mkDaemonFromEngine(eng, t)
+	c := daemon.Get(id)
 	if c == nil {
 		t.Fatal(fmt.Errorf("No such container: %s", id))
 	}
@@ -161,16 +161,16 @@ func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *server.Server {
 	return srv
 }

-func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *runtime.Runtime {
-	iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
-	if iRuntime == nil {
-		panic("Legacy runtime field not set in engine")
+func mkDaemonFromEngine(eng *engine.Engine, t utils.Fataler) *daemon.Daemon {
+	iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon")
+	if iDaemon == nil {
+		panic("Legacy daemon field not set in engine")
 	}
-	runtime, ok := iRuntime.(*runtime.Runtime)
+	daemon, ok := iDaemon.(*daemon.Daemon)
 	if !ok {
-		panic("Legacy runtime field in engine does not cast to *runtime.Runtime")
+		panic("Legacy daemon field in engine does not cast to *daemon.Daemon")
 	}
-	return runtime
+	return daemon
 }

 func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engine {
@@ -245,12 +245,12 @@ func readFile(src string, t *testing.T) (content string) {
 	return string(data)
 }

-// Create a test container from the given runtime `r` and run arguments `args`.
+// Create a test container from the given daemon `r` and run arguments `args`.
 // If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
 // dynamically replaced by the current test image.
 // The caller is responsible for destroying the container.
 // Call t.Fatal() at the first error.
-func mkContainer(r *runtime.Runtime, args []string, t *testing.T) (*runtime.Container, *runconfig.HostConfig, error) {
+func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) {
 	config, hc, _, err := runconfig.Parse(args, nil)
 	defer func() {
 		if err != nil && t != nil {
@@ -281,7 +281,7 @@ func mkContainer(r *runtime.Runtime, args []string, t *testing.T) (*runtime.Cont
 // and return its standard output as a string.
 // The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
 // If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
-func runContainer(eng *engine.Engine, r *runtime.Runtime, args []string, t *testing.T) (output string, err error) {
+func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) {
 	defer func() {
 		if err != nil && t != nil {
 			t.Fatal(err)

+ 1 - 1
integration/z_final_test.go

@@ -11,7 +11,7 @@ func displayFdGoroutines(t *testing.T) {
 }

 func TestFinal(t *testing.T) {
-	nuke(globalRuntime)
+	nuke(globalDaemon)
 	t.Logf("Start Fds: %d, Start Goroutines: %d", startFds, startGoroutines)
 	displayFdGoroutines(t)
 }

+ 0 - 7
runtime/runtime_btrfs.go

@@ -1,7 +0,0 @@
-// +build !exclude_graphdriver_btrfs
-
-package runtime
-
-import (
-	_ "github.com/dotcloud/docker/runtime/graphdriver/btrfs"
-)

+ 0 - 7
runtime/runtime_devicemapper.go

@@ -1,7 +0,0 @@
-// +build !exclude_graphdriver_devicemapper
-
-package runtime
-
-import (
-	_ "github.com/dotcloud/docker/runtime/graphdriver/devmapper"
-)

+ 18 - 18
server/buildfile.go

@@ -7,10 +7,10 @@ import (
 	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon"
 	"github.com/dotcloud/docker/nat"
 	"github.com/dotcloud/docker/registry"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -35,8 +35,8 @@ type BuildFile interface {
 }

 type buildFile struct {
-	runtime *runtime.Runtime
-	srv     *Server
+	daemon *daemon.Daemon
+	srv    *Server

 	image      string
 	maintainer string
@@ -64,8 +64,8 @@ type buildFile struct {

 func (b *buildFile) clearTmp(containers map[string]struct{}) {
 	for c := range containers {
-		tmp := b.runtime.Get(c)
-		if err := b.runtime.Destroy(tmp); err != nil {
+		tmp := b.daemon.Get(c)
+		if err := b.daemon.Destroy(tmp); err != nil {
 			fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
 		} else {
 			delete(containers, c)
@@ -75,9 +75,9 @@ func (b *buildFile) clearTmp(containers map[string]struct{}) {
 }

 func (b *buildFile) CmdFrom(name string) error {
-	image, err := b.runtime.Repositories().LookupImage(name)
+	image, err := b.daemon.Repositories().LookupImage(name)
 	if err != nil {
-		if b.runtime.Graph().IsNotExist(err) {
+		if b.daemon.Graph().IsNotExist(err) {
 			remote, tag := utils.ParseRepositoryTag(name)
 			job := b.srv.Eng.Job("pull", remote, tag)
 			job.SetenvBool("json", b.sf.Json())
@@ -87,7 +87,7 @@ func (b *buildFile) CmdFrom(name string) error {
 			if err := job.Run(); err != nil {
 				return err
 			}
-			image, err = b.runtime.Repositories().LookupImage(name)
+			image, err = b.daemon.Repositories().LookupImage(name)
 			if err != nil {
 				return err
 			}
@@ -101,7 +101,7 @@ func (b *buildFile) CmdFrom(name string) error {
 		b.config = image.Config
 	}
 	if b.config.Env == nil || len(b.config.Env) == 0 {
-		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv)
+		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+daemon.DefaultPathEnv)
 	}
 	// Process ONBUILD triggers if they exist
 	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
@@ -383,7 +383,7 @@ func (b *buildFile) checkPathForAddition(orig string) error {
 	return nil
 }

-func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
+func (b *buildFile) addContext(container *daemon.Container, orig, dest string, remote bool) error {
 	var (
 		err      error
 		origPath = path.Join(b.contextPath, orig)
@@ -599,7 +599,7 @@ func (b *buildFile) CmdAdd(args string) error {
 	}

 	// Create the container and start it
-	container, _, err := b.runtime.Create(b.config, "")
+	container, _, err := b.daemon.Create(b.config, "")
 	if err != nil {
 		return err
 	}
@@ -621,14 +621,14 @@ func (b *buildFile) CmdAdd(args string) error {
 	return nil
 }

-func (b *buildFile) create() (*runtime.Container, error) {
+func (b *buildFile) create() (*daemon.Container, error) {
 	if b.image == "" {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
 	b.config.Image = b.image

 	// Create the container and start it
-	c, _, err := b.runtime.Create(b.config, "")
+	c, _, err := b.daemon.Create(b.config, "")
 	if err != nil {
 		return nil, err
 	}
@@ -642,7 +642,7 @@ func (b *buildFile) create() (*runtime.Container, error) {
 	return c, nil
 }

-func (b *buildFile) run(c *runtime.Container) error {
+func (b *buildFile) run(c *daemon.Container) error {
 	var errCh chan error

 	if b.verbose {
@@ -693,7 +693,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 			return nil
 		}

-		container, warnings, err := b.runtime.Create(b.config, "")
+		container, warnings, err := b.daemon.Create(b.config, "")
 		if err != nil {
 			return err
 		}
@@ -709,7 +709,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 		}
 		defer container.Unmount()
 	}
-	container := b.runtime.Get(id)
+	container := b.daemon.Get(id)
 	if container == nil {
 		return fmt.Errorf("An error occured while creating the container")
 	}
@@ -718,7 +718,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 	autoConfig := *b.config
 	autoConfig.Cmd = autoCmd
 	// Commit the container
-	image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig)
+	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, &autoConfig)
 	if err != nil {
 		return err
 	}
@@ -823,7 +823,7 @@ func stripComments(raw []byte) string {

 func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, configFile *registry.ConfigFile) BuildFile {
 	return &buildFile{
-		runtime:       srv.runtime,
+		daemon:        srv.daemon,
 		srv:           srv,
 		config:        &runconfig.Config{},
 		outStream:     outStream,

+ 115 - 115
server/server.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/api"
 	"github.com/dotcloud/docker/api"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon"
 	"github.com/dotcloud/docker/daemonconfig"
 	"github.com/dotcloud/docker/daemonconfig"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/engine"
@@ -14,7 +15,6 @@ import (
 	"github.com/dotcloud/docker/pkg/signal"
 	"github.com/dotcloud/docker/pkg/signal"
 	"github.com/dotcloud/docker/registry"
 	"github.com/dotcloud/docker/registry"
 	"github.com/dotcloud/docker/runconfig"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
@@ -43,9 +43,9 @@ func InitServer(job *engine.Job) engine.Status {
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
-	if srv.runtime.Config().Pidfile != "" {
+	if srv.daemon.Config().Pidfile != "" {
 		job.Logf("Creating pidfile")
 		job.Logf("Creating pidfile")
-		if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil {
+		if err := utils.CreatePidFile(srv.daemon.Config().Pidfile); err != nil {
 			// FIXME: do we need fatal here instead of returning a job error?
 			// FIXME: do we need fatal here instead of returning a job error?
 			log.Fatal(err)
 			log.Fatal(err)
 		}
 		}
@@ -65,7 +65,7 @@ func InitServer(job *engine.Job) engine.Status {
 						interruptCount++
 						interruptCount++
 						// Initiate the cleanup only once
 						// Initiate the cleanup only once
 						if interruptCount == 1 {
 						if interruptCount == 1 {
-							utils.RemovePidFile(srv.runtime.Config().Pidfile)
+							utils.RemovePidFile(srv.daemon.Config().Pidfile)
 							srv.Close()
 							srv.Close()
 						} else {
 						} else {
 							return
 							return
@@ -80,7 +80,7 @@ func InitServer(job *engine.Job) engine.Status {
 		}
 		}
 	}()
 	}()
 	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
 	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
-	job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
+	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
 
 
 	// FIXME: 'insert' is deprecated and should be removed in a future version.
 	// FIXME: 'insert' is deprecated and should be removed in a future version.
 	for name, handler := range map[string]engine.Handler{
 	for name, handler := range map[string]engine.Handler{
@@ -172,13 +172,13 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
 		}
 		}
 	}
 	}
 
 
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
 		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
 		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
 		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
 			if err := container.Kill(); err != nil {
 			if err := container.Kill(); err != nil {
 				return job.Errorf("Cannot kill container %s: %s", name, err)
 				return job.Errorf("Cannot kill container %s: %s", name, err)
 			}
 			}
-			srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+			srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 		} else {
 		} else {
 			// Otherwise, just send the requested signal
 			// Otherwise, just send the requested signal
 			if err := container.KillSig(int(sig)); err != nil {
 			if err := container.KillSig(int(sig)); err != nil {
@@ -294,7 +294,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s container_id", job.Name)
 		return job.Errorf("Usage: %s container_id", job.Name)
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		data, err := container.Export()
 		data, err := container.Export()
 		if err != nil {
 		if err != nil {
 			return job.Errorf("%s: %s", name, err)
 			return job.Errorf("%s: %s", name, err)
@@ -306,7 +306,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
 			return job.Errorf("%s: %s", name, err)
 			return job.Errorf("%s: %s", name, err)
 		}
 		}
 		// FIXME: factor job-specific LogEvent to engine.Job.Run()
 		// FIXME: factor job-specific LogEvent to engine.Job.Run()
-		srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 		return engine.StatusOK
 		return engine.StatusOK
 	}
 	}
 	return job.Errorf("No such container: %s", name)
 	return job.Errorf("No such container: %s", name)
@@ -331,7 +331,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {
 
 
 	utils.Debugf("Serializing %s", name)
 	utils.Debugf("Serializing %s", name)
 
 
-	rootRepo, err := srv.runtime.Repositories().Get(name)
+	rootRepo, err := srv.daemon.Repositories().Get(name)
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
@@ -510,7 +510,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 	if repoName != "" {
 	if repoName != "" {
-		srv.runtime.Repositories().Set(repoName, tag, id, false)
+		srv.daemon.Repositories().Set(repoName, tag, id, false)
 	}
 	}
 	return engine.StatusOK
 	return engine.StatusOK
 }
 }
@@ -571,7 +571,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
 
 
 		for imageName, tagMap := range repositories {
 		for imageName, tagMap := range repositories {
 			for tag, address := range tagMap {
 			for tag, address := range tagMap {
-				if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil {
+				if err := srv.daemon.Repositories().Set(imageName, tag, address, true); err != nil {
 					return job.Error(err)
 					return job.Error(err)
 				}
 				}
 			}
 			}
@@ -604,13 +604,13 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
 			return err
 			return err
 		}
 		}
 		if img.Parent != "" {
 		if img.Parent != "" {
-			if !srv.runtime.Graph().Exists(img.Parent) {
+			if !srv.daemon.Graph().Exists(img.Parent) {
 				if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
 				if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
 					return err
 					return err
 				}
 				}
 			}
 			}
 		}
 		}
-		if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil {
+		if err := srv.daemon.Graph().Register(imageJson, layer, img); err != nil {
 			return err
 			return err
 		}
 		}
 	}
 	}
@@ -668,7 +668,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
 	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
 
 
 	out := utils.NewWriteFlusher(job.Stdout)
 	out := utils.NewWriteFlusher(job.Stdout)
-	img, err := srv.runtime.Repositories().LookupImage(name)
+	img, err := srv.daemon.Repositories().LookupImage(name)
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
@@ -679,12 +679,12 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 	}
 	}
 	defer file.Body.Close()
 	defer file.Body.Close()
 
 
-	config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig())
+	config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.daemon.SystemConfig())
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 
 
-	c, _, err := srv.runtime.Create(config, "")
+	c, _, err := srv.daemon.Create(config, "")
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
@@ -693,7 +693,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 	// FIXME: Handle custom repo, tag comment, author
 	// FIXME: Handle custom repo, tag comment, author
-	img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
+	img, err = srv.daemon.Commit(c, "", "", img.Comment, img.Author, nil)
 	if err != nil {
 	if err != nil {
 		out.Write(sf.FormatError(err))
 		out.Write(sf.FormatError(err))
 		return engine.StatusErr
 		return engine.StatusErr
@@ -703,7 +703,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 }
 }
 
 
 func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
 func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
-	images, _ := srv.runtime.Graph().Map()
+	images, _ := srv.daemon.Graph().Map()
 	if images == nil {
 	if images == nil {
 		return engine.StatusOK
 		return engine.StatusOK
 	}
 	}
@@ -727,7 +727,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
 
 
 	reporefs := make(map[string][]string)
 	reporefs := make(map[string][]string)
 
 
-	for name, repository := range srv.runtime.Repositories().Repositories {
+	for name, repository := range srv.daemon.Repositories().Repositories {
 		for tag, id := range repository {
 		for tag, id := range repository {
 			reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
 			reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
 		}
 		}
@@ -746,22 +746,22 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
 		err       error
 		err       error
 	)
 	)
 	if job.GetenvBool("all") {
 	if job.GetenvBool("all") {
-		allImages, err = srv.runtime.Graph().Map()
+		allImages, err = srv.daemon.Graph().Map()
 	} else {
 	} else {
-		allImages, err = srv.runtime.Graph().Heads()
+		allImages, err = srv.daemon.Graph().Heads()
 	}
 	}
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 	lookup := make(map[string]*engine.Env)
 	lookup := make(map[string]*engine.Env)
-	for name, repository := range srv.runtime.Repositories().Repositories {
+	for name, repository := range srv.daemon.Repositories().Repositories {
 		if job.Getenv("filter") != "" {
 		if job.Getenv("filter") != "" {
 			if match, _ := path.Match(job.Getenv("filter"), name); !match {
 			if match, _ := path.Match(job.Getenv("filter"), name); !match {
 				continue
 				continue
 			}
 			}
 		}
 		}
 		for tag, id := range repository {
 		for tag, id := range repository {
-			image, err := srv.runtime.Graph().Get(id)
+			image, err := srv.daemon.Graph().Get(id)
 			if err != nil {
 			if err != nil {
 				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
 				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
 				continue
 				continue
@@ -811,7 +811,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
 }
 }
 
 
 func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
 func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
-	images, _ := srv.runtime.Graph().Map()
+	images, _ := srv.daemon.Graph().Map()
 	var imgcount int
 	var imgcount int
 	if images == nil {
 	if images == nil {
 		imgcount = 0
 		imgcount = 0
@@ -826,22 +826,22 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
 	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
 	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
 	initPath := utils.DockerInitPath("")
 	initPath := utils.DockerInitPath("")
 	if initPath == "" {
 	if initPath == "" {
-		// if that fails, we'll just return the path from the runtime
-		initPath = srv.runtime.SystemInitPath()
+		// if that fails, we'll just return the path from the daemon
+		initPath = srv.daemon.SystemInitPath()
 	}
 	}
 
 
 	v := &engine.Env{}
 	v := &engine.Env{}
-	v.SetInt("Containers", len(srv.runtime.List()))
+	v.SetInt("Containers", len(srv.daemon.List()))
 	v.SetInt("Images", imgcount)
 	v.SetInt("Images", imgcount)
-	v.Set("Driver", srv.runtime.GraphDriver().String())
-	v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status())
-	v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit)
-	v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit)
-	v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled)
+	v.Set("Driver", srv.daemon.GraphDriver().String())
+	v.SetJson("DriverStatus", srv.daemon.GraphDriver().Status())
+	v.SetBool("MemoryLimit", srv.daemon.SystemConfig().MemoryLimit)
+	v.SetBool("SwapLimit", srv.daemon.SystemConfig().SwapLimit)
+	v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled)
 	v.SetBool("Debug", os.Getenv("DEBUG") != "")
 	v.SetBool("Debug", os.Getenv("DEBUG") != "")
 	v.SetInt("NFd", utils.GetTotalUsedFds())
 	v.SetInt("NFd", utils.GetTotalUsedFds())
 	v.SetInt("NGoroutines", goruntime.NumGoroutine())
 	v.SetInt("NGoroutines", goruntime.NumGoroutine())
-	v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name())
+	v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
 	v.SetInt("NEventsListener", len(srv.listeners))
 	v.SetInt("NEventsListener", len(srv.listeners))
 	v.Set("KernelVersion", kernelVersion)
 	v.Set("KernelVersion", kernelVersion)
 	v.Set("IndexServerAddress", registry.IndexServerAddress())
 	v.Set("IndexServerAddress", registry.IndexServerAddress())
@@ -875,13 +875,13 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s IMAGE", job.Name)
 		return job.Errorf("Usage: %s IMAGE", job.Name)
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
-	foundImage, err := srv.runtime.Repositories().LookupImage(name)
+	foundImage, err := srv.daemon.Repositories().LookupImage(name)
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 
 
 	lookupMap := make(map[string][]string)
 	lookupMap := make(map[string][]string)
-	for name, repository := range srv.runtime.Repositories().Repositories {
+	for name, repository := range srv.daemon.Repositories().Repositories {
 		for tag, id := range repository {
 		for tag, id := range repository {
 			// If the ID already has a reverse lookup, do not update it unless for "latest"
 			// If the ID already has a reverse lookup, do not update it unless for "latest"
 			if _, exists := lookupMap[id]; !exists {
 			if _, exists := lookupMap[id]; !exists {
@@ -922,11 +922,11 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
 		psArgs = job.Args[1]
 		psArgs = job.Args[1]
 	}
 	}
 
 
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if !container.State.IsRunning() {
 		if !container.State.IsRunning() {
 			return job.Errorf("Container %s is not running", name)
 			return job.Errorf("Container %s is not running", name)
 		}
 		}
-		pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID)
+		pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
 		if err != nil {
 		if err != nil {
 			return job.Error(err)
 			return job.Error(err)
 		}
 		}
@@ -984,7 +984,7 @@ func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s CONTAINER", job.Name)
 		return job.Errorf("Usage: %s CONTAINER", job.Name)
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		outs := engine.NewTable("", 0)
 		outs := engine.NewTable("", 0)
 		changes, err := container.Changes()
 		changes, err := container.Changes()
 		if err != nil {
 		if err != nil {
@@ -1019,27 +1019,27 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
 	outs := engine.NewTable("Created", 0)
 	outs := engine.NewTable("Created", 0)
 
 
 	names := map[string][]string{}
 	names := map[string][]string{}
-	srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
+	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
 		names[e.ID()] = append(names[e.ID()], p)
 		names[e.ID()] = append(names[e.ID()], p)
 		return nil
 		return nil
 	}, -1)
 	}, -1)
 
 
-	var beforeCont, sinceCont *runtime.Container
+	var beforeCont, sinceCont *daemon.Container
 	if before != "" {
 	if before != "" {
-		beforeCont = srv.runtime.Get(before)
+		beforeCont = srv.daemon.Get(before)
 		if beforeCont == nil {
 		if beforeCont == nil {
 			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
 			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
 		}
 		}
 	}
 	}
 
 
 	if since != "" {
 	if since != "" {
-		sinceCont = srv.runtime.Get(since)
+		sinceCont = srv.daemon.Get(since)
 		if sinceCont == nil {
 		if sinceCont == nil {
 			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
 			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
 		}
 		}
 	}
 	}
 
 
-	for _, container := range srv.runtime.List() {
+	for _, container := range srv.daemon.List() {
 		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
 		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
 			continue
 			continue
 		}
 		}
@@ -1061,7 +1061,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
 		out := &engine.Env{}
 		out := &engine.Env{}
 		out.Set("Id", container.ID)
 		out.Set("Id", container.ID)
 		out.SetList("Names", names[container.ID])
 		out.SetList("Names", names[container.ID])
-		out.Set("Image", srv.runtime.Repositories().ImageName(container.Image))
+		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
 		if len(container.Args) > 0 {
 		if len(container.Args) > 0 {
 			args := []string{}
 			args := []string{}
 			for _, arg := range container.Args {
 			for _, arg := range container.Args {
@@ -1104,7 +1104,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
 
 
-	container := srv.runtime.Get(name)
+	container := srv.daemon.Get(name)
 	if container == nil {
 	if container == nil {
 		return job.Errorf("No such container: %s", name)
 		return job.Errorf("No such container: %s", name)
 	}
 	}
@@ -1118,7 +1118,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 
 
-	img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
+	img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
@@ -1134,7 +1134,7 @@ func (srv *Server) ImageTag(job *engine.Job) engine.Status {
 	if len(job.Args) == 3 {
 	if len(job.Args) == 3 {
 		tag = job.Args[2]
 		tag = job.Args[2]
 	}
 	}
-	if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
+	if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 	return engine.StatusOK
 	return engine.StatusOK
@@ -1159,7 +1159,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 		}
 		}
 		defer srv.poolRemove("pull", "layer:"+id)
 		defer srv.poolRemove("pull", "layer:"+id)
 
 
-		if !srv.runtime.Graph().Exists(id) {
+		if !srv.daemon.Graph().Exists(id) {
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
 			var (
 			var (
 				imgJSON []byte
 				imgJSON []byte
@@ -1197,7 +1197,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 				return err
 				return err
 			}
 			}
 			defer layer.Close()
 			defer layer.Close()
-			if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
+			if err := srv.daemon.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
 				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
 				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
 				return err
 				return err
 			}
 			}
@@ -1332,11 +1332,11 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
 		if askedTag != "" && tag != askedTag {
 		if askedTag != "" && tag != askedTag {
 			continue
 			continue
 		}
 		}
-		if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil {
+		if err := srv.daemon.Repositories().Set(localName, tag, id, true); err != nil {
 			return err
 			return err
 		}
 		}
 	}
 	}
-	if err := srv.runtime.Repositories().Save(); err != nil {
+	if err := srv.daemon.Repositories().Save(); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -1467,7 +1467,7 @@ func (srv *Server) getImageList(localRepo map[string]string, requestedTag string
 
 
 		tagsByImage[id] = append(tagsByImage[id], tag)
 		tagsByImage[id] = append(tagsByImage[id], tag)
 
 
-		for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
+		for img, err := srv.daemon.Graph().Get(id); img != nil; img, err = img.GetParent() {
 			if err != nil {
 			if err != nil {
 				return nil, nil, err
 				return nil, nil, err
 			}
 			}
@@ -1582,7 +1582,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
 
 
 func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
 func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
 	out = utils.NewWriteFlusher(out)
 	out = utils.NewWriteFlusher(out)
-	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json"))
+	jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
 	if err != nil {
 	if err != nil {
 		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 	}
 	}
@@ -1601,7 +1601,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 		return "", err
 		return "", err
 	}
 	}
 
 
-	layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
+	layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
 	if err != nil {
 	if err != nil {
 		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
 		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
 	}
 	}
@@ -1656,7 +1656,7 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 
 
-	img, err := srv.runtime.Graph().Get(localName)
+	img, err := srv.daemon.Graph().Get(localName)
 	r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
 	r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
 	if err2 != nil {
 	if err2 != nil {
 		return job.Error(err2)
 		return job.Error(err2)
@@ -1665,11 +1665,11 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
 	if err != nil {
 	if err != nil {
 		reposLen := 1
 		reposLen := 1
 		if tag == "" {
 		if tag == "" {
-			reposLen = len(srv.runtime.Repositories().Repositories[localName])
+			reposLen = len(srv.daemon.Repositories().Repositories[localName])
 		}
 		}
 		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
 		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
 		// If it fails, try to get the repository
 		// If it fails, try to get the repository
-		if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
+		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
 			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
 			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
 				return job.Error(err)
 				return job.Error(err)
 			}
 			}
@@ -1725,13 +1725,13 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 		defer progressReader.Close()
 		defer progressReader.Close()
 		archive = progressReader
 		archive = progressReader
 	}
 	}
-	img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
+	img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
 	if err != nil {
 	if err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 	// Optionally register the image at REPO/TAG
 	// Optionally register the image at REPO/TAG
 	if repo != "" {
 	if repo != "" {
-		if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
+		if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
 			return job.Error(err)
 			return job.Error(err)
 		}
 		}
 	}
 	}
@@ -1750,17 +1750,17 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 	if config.Memory != 0 && config.Memory < 524288 {
 		return job.Errorf("Minimum memory limit allowed is 512k")
 	}
-	if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit {
+	if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
 		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		config.Memory = 0
 	}
-	if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit {
+	if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
 		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		config.MemorySwap = -1
 	}
-	container, buildWarnings, err := srv.runtime.Create(config, name)
+	container, buildWarnings, err := srv.daemon.Create(config, name)
 	if err != nil {
-		if srv.runtime.Graph().IsNotExist(err) {
+		if srv.daemon.Graph().IsNotExist(err) {
 			_, tag := utils.ParseRepositoryTag(config.Image)
 			if tag == "" {
 				tag = graph.DEFAULTTAG
@@ -1769,11 +1769,11 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 		}
 		return job.Error(err)
 	}
-	if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled {
+	if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
 		job.Errorf("IPv4 forwarding is disabled.\n")
 	}
-	srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image))
-	// FIXME: this is necessary because runtime.Create might return a nil container
+	srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
+	// FIXME: this is necessary because daemon.Create might return a nil container
 	// with a non-nil error. This should not happen! Once it's fixed we
 	// can remove this workaround.
 	if container != nil {
@@ -1796,11 +1796,11 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
 	if job.EnvExists("t") {
 		t = job.GetenvInt("t")
 	}
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if err := container.Restart(int(t)); err != nil {
 			return job.Errorf("Cannot restart container %s: %s\n", name, err)
 		}
-		srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 	} else {
 		return job.Errorf("No such container: %s\n", name)
 	}
@@ -1816,13 +1816,13 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 	removeLink := job.GetenvBool("removeLink")
 	forceRemove := job.GetenvBool("forceRemove")
 
-	container := srv.runtime.Get(name)
+	container := srv.daemon.Get(name)
 
 	if removeLink {
 		if container == nil {
 			return job.Errorf("No such link: %s", name)
 		}
-		name, err := runtime.GetFullContainerName(name)
+		name, err := daemon.GetFullContainerName(name)
 		if err != nil {
 			job.Error(err)
 		}
@@ -1830,17 +1830,17 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 		if parent == "/" {
 			return job.Errorf("Conflict, cannot remove the default name of the container")
 		}
-		pe := srv.runtime.ContainerGraph().Get(parent)
+		pe := srv.daemon.ContainerGraph().Get(parent)
 		if pe == nil {
 			return job.Errorf("Cannot get parent %s for name %s", parent, name)
 		}
-		parentContainer := srv.runtime.Get(pe.ID())
+		parentContainer := srv.daemon.Get(pe.ID())
 
 		if parentContainer != nil {
 			parentContainer.DisableLink(n)
 		}
 
-		if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
+		if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
 			return job.Error(err)
 		}
 		return engine.StatusOK
@@ -1856,16 +1856,16 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 				return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
 				return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
 			}
 			}
 		}
 		}
-		if err := srv.runtime.Destroy(container); err != nil {
+		if err := srv.daemon.Destroy(container); err != nil {
 			return job.Errorf("Cannot destroy container %s: %s", name, err)
 			return job.Errorf("Cannot destroy container %s: %s", name, err)
 		}
 		}
-		srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 
 
 		if removeVolume {
 		if removeVolume {
 			var (
 			var (
 				volumes     = make(map[string]struct{})
 				volumes     = make(map[string]struct{})
 				binds       = make(map[string]struct{})
 				binds       = make(map[string]struct{})
-				usedVolumes = make(map[string]*runtime.Container)
+				usedVolumes = make(map[string]*daemon.Container)
 			)
 			)
 
 
 			// the volume id is always the base of the path
 			// the volume id is always the base of the path
@@ -1903,7 +1903,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 			}
 
 			// Retrieve all volumes from all remaining containers
-			for _, container := range srv.runtime.List() {
+			for _, container := range srv.daemon.List() {
 				for _, containerVolumeId := range container.Volumes {
 					containerVolumeId = getVolumeId(containerVolumeId)
 					usedVolumes[containerVolumeId] = container
@@ -1916,7 +1916,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 					log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
 					log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
 					continue
 					continue
 				}
 				}
-				if err := srv.runtime.Volumes().Delete(volumeId); err != nil {
+				if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
 					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
 					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
 				}
 				}
 			}
 			}
@@ -1938,9 +1938,9 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 		tag = graph.DEFAULTTAG
 	}
 
-	img, err := srv.runtime.Repositories().LookupImage(name)
+	img, err := srv.daemon.Repositories().LookupImage(name)
 	if err != nil {
-		if r, _ := srv.runtime.Repositories().Get(repoName); r != nil {
+		if r, _ := srv.daemon.Repositories().Get(repoName); r != nil {
 			return fmt.Errorf("No such image: %s:%s", repoName, tag)
 		}
 		return fmt.Errorf("No such image: %s", name)
@@ -1951,14 +1951,14 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 		tag = ""
 		tag = ""
 	}
 	}
 
 
-	byParents, err := srv.runtime.Graph().ByParent()
+	byParents, err := srv.daemon.Graph().ByParent()
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	//If delete by id, see if the id belong only to one repository
 	//If delete by id, see if the id belong only to one repository
 	if repoName == "" {
 	if repoName == "" {
-		for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] {
+		for _, repoAndTag := range srv.daemon.Repositories().ByID()[img.ID] {
 			parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
 			parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
 			if repoName == "" || repoName == parsedRepo {
 			if repoName == "" || repoName == parsedRepo {
 				repoName = parsedRepo
 				repoName = parsedRepo
@@ -1981,7 +1981,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 
 	//Untag the current image
 	for _, tag := range tags {
-		tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag)
+		tagDeleted, err := srv.daemon.Repositories().Delete(repoName, tag)
 		if err != nil {
 			return err
 		}
@@ -1992,16 +1992,16 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 			srv.LogEvent("untag", img.ID, "")
 			srv.LogEvent("untag", img.ID, "")
 		}
 		}
 	}
 	}
-	tags = srv.runtime.Repositories().ByID()[img.ID]
+	tags = srv.daemon.Repositories().ByID()[img.ID]
 	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
 	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
 		if len(byParents[img.ID]) == 0 {
 		if len(byParents[img.ID]) == 0 {
 			if err := srv.canDeleteImage(img.ID); err != nil {
 			if err := srv.canDeleteImage(img.ID); err != nil {
 				return err
 				return err
 			}
 			}
-			if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil {
+			if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil {
 				return err
 				return err
 			}
 			}
-			if err := srv.runtime.Graph().Delete(img.ID); err != nil {
+			if err := srv.daemon.Graph().Delete(img.ID); err != nil {
 				return err
 				return err
 			}
 			}
 			out := &engine.Env{}
 			out := &engine.Env{}
@@ -2039,8 +2039,8 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
 }
 
 func (srv *Server) canDeleteImage(imgID string) error {
-	for _, container := range srv.runtime.List() {
-		parent, err := srv.runtime.Repositories().LookupImage(container.Image)
+	for _, container := range srv.daemon.List() {
+		parent, err := srv.daemon.Repositories().LookupImage(container.Image)
 		if err != nil {
 			return err
 		}
@@ -2059,7 +2059,7 @@ func (srv *Server) canDeleteImage(imgID string) error {
 
 func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
 	// Retrieve all images
-	images, err := srv.runtime.Graph().Map()
+	images, err := srv.daemon.Graph().Map()
 	if err != nil {
 		return nil, err
 	}
@@ -2076,7 +2076,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
 	// Loop on the children of the given image and check the config
 	var match *image.Image
 	for elem := range imageMap[imgID] {
-		img, err := srv.runtime.Graph().Get(elem)
+		img, err := srv.daemon.Graph().Get(elem)
 		if err != nil {
 			return nil, err
 		}
@@ -2089,8 +2089,8 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
 	return match, nil
 }
 
-func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error {
-	runtime := srv.runtime
+func (srv *Server) RegisterLinks(container *daemon.Container, hostConfig *runconfig.HostConfig) error {
+	daemon := srv.daemon
 
 	if hostConfig != nil && hostConfig.Links != nil {
 		for _, l := range hostConfig.Links {
@@ -2098,19 +2098,19 @@ func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runco
 			if err != nil {
 				return err
 			}
-			child, err := srv.runtime.GetByName(parts["name"])
+			child, err := srv.daemon.GetByName(parts["name"])
 			if err != nil {
 				return err
 			}
 			if child == nil {
 				return fmt.Errorf("Could not get container for %s", parts["name"])
 			}
-			if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
+			if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
 				return err
 			}
 		}
 
-		// After we load all the links into the runtime
+		// After we load all the links into the daemon
 		// set them to nil on the hostconfig
 		hostConfig.Links = nil
 		if err := container.WriteHostConfig(); err != nil {
@@ -2126,8 +2126,8 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
 	}
 	var (
 		name      = job.Args[0]
-		runtime   = srv.runtime
-		container = runtime.Get(name)
+		daemon    = srv.daemon
+		container = daemon.Get(name)
 	)
 
 	if container == nil {
@@ -2169,7 +2169,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
 	if err := container.Start(); err != nil {
 		return job.Errorf("Cannot start container %s: %s", name, err)
 	}
-	srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image))
+	srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
 
 	return engine.StatusOK
 }
@@ -2185,11 +2185,11 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
 	if job.EnvExists("t") {
 		t = job.GetenvInt("t")
 	}
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if err := container.Stop(int(t)); err != nil {
 			return job.Errorf("Cannot stop container %s: %s\n", name, err)
 		}
-		srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 	} else {
 		return job.Errorf("No such container: %s\n", name)
 	}
@@ -2201,7 +2201,7 @@ func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s", job.Name)
 		return job.Errorf("Usage: %s", job.Name)
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		status := container.Wait()
 		status := container.Wait()
 		job.Printf("%d\n", status)
 		job.Printf("%d\n", status)
 		return engine.StatusOK
 		return engine.StatusOK
@@ -2222,7 +2222,7 @@ func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
 	if err != nil {
 		return job.Error(err)
 	}
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if err := container.Resize(height, width); err != nil {
 			return job.Error(err)
 		}
@@ -2245,7 +2245,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
 		stderr = job.GetenvBool("stderr")
 	)
 
-	container := srv.runtime.Get(name)
+	container := srv.daemon.Get(name)
 	if container == nil {
 		return job.Errorf("No such container: %s", name)
 	}
@@ -2335,15 +2335,15 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
 	return engine.StatusOK
 }
 
-func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) {
-	if container := srv.runtime.Get(name); container != nil {
+func (srv *Server) ContainerInspect(name string) (*daemon.Container, error) {
+	if container := srv.daemon.Get(name); container != nil {
 		return container, nil
 	}
 	return nil, fmt.Errorf("No such container: %s", name)
 }
 
 func (srv *Server) ImageInspect(name string) (*image.Image, error) {
-	if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil {
+	if image, err := srv.daemon.Repositories().LookupImage(name); err == nil && image != nil {
 		return image, nil
 	}
 	return nil, fmt.Errorf("No such image: %s", name)
@@ -2378,7 +2378,7 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status {
 			return job.Error(errContainer)
 		}
 		object = &struct {
-			*runtime.Container
+			*daemon.Container
 			HostConfig *runconfig.HostConfig
 		}{container, container.HostConfig()}
 	default:
@@ -2403,7 +2403,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
 		resource = job.Args[1]
 	)
 
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 
 		data, err := container.Copy(resource)
 		if err != nil {
@@ -2420,20 +2420,20 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
 }
 
 func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
-	runtime, err := runtime.NewRuntime(config, eng)
+	daemon, err := daemon.NewDaemon(config, eng)
 	if err != nil {
 		return nil, err
 	}
 	srv := &Server{
 		Eng:         eng,
-		runtime:     runtime,
+		daemon:      daemon,
 		pullingPool: make(map[string]chan struct{}),
 		pushingPool: make(map[string]chan struct{}),
 		events:      make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
 		listeners:   make(map[string]chan utils.JSONMessage),
 		running:     true,
 	}
-	runtime.SetServer(srv)
+	daemon.SetServer(srv)
 	return srv, nil
 }
 
@@ -2498,15 +2498,15 @@ func (srv *Server) Close() error {
 		return nil
 	}
 	srv.SetRunning(false)
-	if srv.runtime == nil {
+	if srv.daemon == nil {
 		return nil
 	}
-	return srv.runtime.Close()
+	return srv.daemon.Close()
 }
 
 type Server struct {
 	sync.RWMutex
-	runtime     *runtime.Runtime
+	daemon      *daemon.Daemon
 	pullingPool map[string]chan struct{}
 	pushingPool map[string]chan struct{}
 	events      []utils.JSONMessage

+ 3 - 3
sysinit/sysinit.go

@@ -3,9 +3,9 @@ package sysinit
 import (
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker/runtime/execdriver"
-	_ "github.com/dotcloud/docker/runtime/execdriver/lxc"
-	_ "github.com/dotcloud/docker/runtime/execdriver/native"
+	"github.com/dotcloud/docker/daemon/execdriver"
+	_ "github.com/dotcloud/docker/daemon/execdriver/lxc"
+	_ "github.com/dotcloud/docker/daemon/execdriver/native"
 	"log"
 	"os"
 )
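
Note: for code that imports these packages, this commit only moves them; the import paths change from runtime/... to daemon/..., as in the sysinit hunk above. A minimal sketch of a consumer file after the rename (the main package below is illustrative only and is not part of this commit):

package main

import (
	// Previously imported as github.com/dotcloud/docker/runtime/execdriver/...;
	// after this commit the same packages live under daemon/execdriver/....
	// Blank imports are kept for their side effects, exactly as sysinit/sysinit.go does.
	_ "github.com/dotcloud/docker/daemon/execdriver/lxc"
	_ "github.com/dotcloud/docker/daemon/execdriver/native"
)

func main() {}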