Rename runtime/* to daemon/*

Docker-DCO-1.1-Signed-off-by: Alexander Larsson <alexl@redhat.com> (github: alexlarsson)
Alexander Larsson 2014-04-17 14:43:01 -07:00
parent 57cbe8b106
commit 359b7df5d2
91 changed files with 827 additions and 827 deletions
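The rename is mechanical: package `runtime` becomes package `daemon`, import paths move from `github.com/dotcloud/docker/runtime/...` to `github.com/dotcloud/docker/daemon/...`, and the `Runtime` type with its `runtime` receivers becomes `Daemon`/`daemon`. As a rough sketch of what the change looks like from calling code (a hypothetical caller, not part of this commit; only the import path and type names are taken from the hunks below):

    package main

    import (
        "fmt"

        "github.com/dotcloud/docker/daemon" // was: "github.com/dotcloud/docker/runtime"
    )

    // printContainers walks every registered container. Before this
    // commit the parameter would have been *runtime.Runtime.
    func printContainers(d *daemon.Daemon) {
        for _, c := range d.List() {
            fmt.Println(c.ID)
        }
    }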

View file

@ -2,8 +2,8 @@ package builtins
import (
api "github.com/dotcloud/docker/api/server"
"github.com/dotcloud/docker/daemon/networkdriver/bridge"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runtime/networkdriver/bridge"
"github.com/dotcloud/docker/server"
)

View file

@ -3,7 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/dotcloud/docker/runtime/graphdriver/devmapper"
"github.com/dotcloud/docker/daemon/graphdriver/devmapper"
"os"
"path"
"sort"

View file

@ -1,17 +1,17 @@
package runtime
package daemon
import (
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/links"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@ -64,7 +64,7 @@ type Container struct {
stdin io.ReadCloser
stdinPipe io.WriteCloser
runtime *Runtime
daemon *Daemon
waitLock chan struct{}
Volumes map[string]string
@ -325,7 +325,7 @@ func populateCommand(c *Container, env []string) {
)
en = &execdriver.Network{
Mtu: c.runtime.config.Mtu,
Mtu: c.daemon.config.Mtu,
Interface: nil,
}
@ -389,7 +389,7 @@ func (container *Container) Start() (err error) {
if err := container.initializeNetworking(); err != nil {
return err
}
container.verifyRuntimeSettings()
container.verifyDaemonSettings()
if err := prepareVolumesForContainer(container); err != nil {
return err
}
@ -397,7 +397,7 @@ func (container *Container) Start() (err error) {
if err != nil {
return err
}
env := container.createRuntimeEnvironment(linkedEnv)
env := container.createDaemonEnvironment(linkedEnv)
// TODO: This is only needed for lxc so we should look for a way to
// remove this dep
if err := container.generateEnvConfig(env); err != nil {
@ -496,11 +496,11 @@ func (container *Container) allocateNetwork() error {
var (
env *engine.Env
err error
eng = container.runtime.eng
eng = container.daemon.eng
)
if container.State.IsGhost() {
if container.runtime.config.DisableNetwork {
if container.daemon.config.DisableNetwork {
env = &engine.Env{}
} else {
currentIP := container.NetworkSettings.IPAddress
@ -610,7 +610,7 @@ func (container *Container) releaseNetwork() {
if container.Config.NetworkDisabled {
return
}
eng := container.runtime.eng
eng := container.daemon.eng
eng.Job("release_interface", container.ID).Run()
container.NetworkSettings = &NetworkSettings{}
@ -623,12 +623,12 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
)
pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
exitCode, err = container.runtime.Run(container, pipes, callback)
exitCode, err = container.daemon.Run(container, pipes, callback)
if err != nil {
utils.Errorf("Error running container: %s", err)
}
if container.runtime != nil && container.runtime.srv != nil && container.runtime.srv.IsRunning() {
if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
container.State.SetStopped(exitCode)
// FIXME: there is a race condition here which causes this to fail during the unit tests.
@ -651,8 +651,8 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
container.stdin, container.stdinPipe = io.Pipe()
}
if container.runtime != nil && container.runtime.srv != nil {
container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
if container.daemon != nil && container.daemon.srv != nil {
container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
}
close(container.waitLock)
@ -698,7 +698,7 @@ func (container *Container) KillSig(sig int) error {
if !container.State.IsRunning() {
return nil
}
return container.runtime.Kill(container, sig)
return container.daemon.Kill(container, sig)
}
func (container *Container) Kill() error {
@ -775,10 +775,10 @@ func (container *Container) ExportRw() (archive.Archive, error) {
if err := container.Mount(); err != nil {
return nil, err
}
if container.runtime == nil {
if container.daemon == nil {
return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
}
archive, err := container.runtime.Diff(container)
archive, err := container.daemon.Diff(container)
if err != nil {
container.Unmount()
return nil, err
@ -825,22 +825,22 @@ func (container *Container) WaitTimeout(timeout time.Duration) error {
}
func (container *Container) Mount() error {
return container.runtime.Mount(container)
return container.daemon.Mount(container)
}
func (container *Container) Changes() ([]archive.Change, error) {
return container.runtime.Changes(container)
return container.daemon.Changes(container)
}
func (container *Container) GetImage() (*image.Image, error) {
if container.runtime == nil {
if container.daemon == nil {
return nil, fmt.Errorf("Can't get image of unregistered container")
}
return container.runtime.graph.Get(container.Image)
return container.daemon.graph.Get(container.Image)
}
func (container *Container) Unmount() error {
return container.runtime.Unmount(container)
return container.daemon.Unmount(container)
}
func (container *Container) logPath(name string) string {
@ -893,7 +893,7 @@ func (container *Container) GetSize() (int64, int64) {
var (
sizeRw, sizeRootfs int64
err error
driver = container.runtime.driver
driver = container.daemon.driver
)
if err := container.Mount(); err != nil {
@ -902,7 +902,7 @@ func (container *Container) GetSize() (int64, int64) {
}
defer container.Unmount()
if differ, ok := container.runtime.driver.(graphdriver.Differ); ok {
if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
sizeRw, err = differ.DiffSize(container.ID)
if err != nil {
utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
@ -999,28 +999,28 @@ func (container *Container) setupContainerDns() error {
return nil
}
var (
config = container.hostConfig
runtime = container.runtime
config = container.hostConfig
daemon = container.daemon
)
resolvConf, err := utils.GetResolvConf()
if err != nil {
return err
}
// If custom dns exists, then create a resolv.conf for the container
if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 {
if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
var (
dns = utils.GetNameservers(resolvConf)
dnsSearch = utils.GetSearchDomains(resolvConf)
)
if len(config.Dns) > 0 {
dns = config.Dns
} else if len(runtime.config.Dns) > 0 {
dns = runtime.config.Dns
} else if len(daemon.config.Dns) > 0 {
dns = daemon.config.Dns
}
if len(config.DnsSearch) > 0 {
dnsSearch = config.DnsSearch
} else if len(runtime.config.DnsSearch) > 0 {
dnsSearch = runtime.config.DnsSearch
} else if len(daemon.config.DnsSearch) > 0 {
dnsSearch = daemon.config.DnsSearch
}
container.ResolvConfPath = path.Join(container.root, "resolv.conf")
f, err := os.Create(container.ResolvConfPath)
@ -1045,7 +1045,7 @@ func (container *Container) setupContainerDns() error {
}
func (container *Container) initializeNetworking() error {
if container.runtime.config.DisableNetwork {
if container.daemon.config.DisableNetwork {
container.Config.NetworkDisabled = true
container.buildHostnameAndHostsFiles("127.0.1.1")
} else {
@ -1058,26 +1058,26 @@ func (container *Container) initializeNetworking() error {
}
// Make sure the config is compatible with the current kernel
func (container *Container) verifyRuntimeSettings() {
if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit {
func (container *Container) verifyDaemonSettings() {
if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit {
if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
if container.runtime.sysInfo.IPv4ForwardingDisabled {
if container.daemon.sysInfo.IPv4ForwardingDisabled {
log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
}
}
func (container *Container) setupLinkedContainers() ([]string, error) {
var (
env []string
runtime = container.runtime
env []string
daemon = container.daemon
)
children, err := runtime.Children(container.Name)
children, err := daemon.Children(container.Name)
if err != nil {
return nil, err
}
@ -1105,7 +1105,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
linkAlias,
child.Config.Env,
child.Config.ExposedPorts,
runtime.eng)
daemon.eng)
if err != nil {
rollback()
@ -1126,7 +1126,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
return env, nil
}
func (container *Container) createRuntimeEnvironment(linkedEnv []string) []string {
func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
// Setup environment
env := []string{
"HOME=/",
@ -1167,10 +1167,10 @@ func (container *Container) setupWorkingDirectory() error {
func (container *Container) startLoggingToDisk() error {
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
if err := container.daemon.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
return err
}
if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
if err := container.daemon.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
return err
}
return nil

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import (
"github.com/dotcloud/docker/nat"

View file

@ -1,9 +1,16 @@
package runtime
package daemon
import (
"container/list"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/execdriver/execdrivers"
"github.com/dotcloud/docker/daemon/execdriver/lxc"
"github.com/dotcloud/docker/daemon/graphdriver"
_ "github.com/dotcloud/docker/daemon/graphdriver/vfs"
_ "github.com/dotcloud/docker/daemon/networkdriver/bridge"
"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
"github.com/dotcloud/docker/daemonconfig"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
@ -14,13 +21,6 @@ import (
"github.com/dotcloud/docker/pkg/selinux"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/runtime/execdriver/execdrivers"
"github.com/dotcloud/docker/runtime/execdriver/lxc"
"github.com/dotcloud/docker/runtime/graphdriver"
_ "github.com/dotcloud/docker/runtime/graphdriver/vfs"
_ "github.com/dotcloud/docker/runtime/networkdriver/bridge"
"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@ -44,7 +44,7 @@ var (
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
)
type Runtime struct {
type Daemon struct {
repository string
sysInitPath string
containers *list.List
@ -76,17 +76,17 @@ func remountPrivate(mountPoint string) error {
return mount.ForceMount("", mountPoint, "none", "private")
}
// List returns an array of all containers registered in the runtime.
func (runtime *Runtime) List() []*Container {
// List returns an array of all containers registered in the daemon.
func (daemon *Daemon) List() []*Container {
containers := new(History)
for e := runtime.containers.Front(); e != nil; e = e.Next() {
for e := daemon.containers.Front(); e != nil; e = e.Next() {
containers.Add(e.Value.(*Container))
}
return *containers
}
func (runtime *Runtime) getContainerElement(id string) *list.Element {
for e := runtime.containers.Front(); e != nil; e = e.Next() {
func (daemon *Daemon) getContainerElement(id string) *list.Element {
for e := daemon.containers.Front(); e != nil; e = e.Next() {
container := e.Value.(*Container)
if container.ID == id {
return e
@ -97,17 +97,17 @@ func (runtime *Runtime) getContainerElement(id string) *list.Element {
// Get looks for a container by the specified ID or name, and returns it.
// If the container is not found, or if an error occurs, nil is returned.
func (runtime *Runtime) Get(name string) *Container {
if c, _ := runtime.GetByName(name); c != nil {
func (daemon *Daemon) Get(name string) *Container {
if c, _ := daemon.GetByName(name); c != nil {
return c
}
id, err := runtime.idIndex.Get(name)
id, err := daemon.idIndex.Get(name)
if err != nil {
return nil
}
e := runtime.getContainerElement(id)
e := daemon.getContainerElement(id)
if e == nil {
return nil
}
@ -116,18 +116,18 @@ func (runtime *Runtime) Get(name string) *Container {
// Exists returns a true if a container of the specified ID or name exists,
// false otherwise.
func (runtime *Runtime) Exists(id string) bool {
return runtime.Get(id) != nil
func (daemon *Daemon) Exists(id string) bool {
return daemon.Get(id) != nil
}
func (runtime *Runtime) containerRoot(id string) string {
return path.Join(runtime.repository, id)
func (daemon *Daemon) containerRoot(id string) string {
return path.Join(daemon.repository, id)
}
// Load reads the contents of a container from disk
// This is typically done at startup.
func (runtime *Runtime) load(id string) (*Container, error) {
container := &Container{root: runtime.containerRoot(id)}
func (daemon *Daemon) load(id string) (*Container, error) {
container := &Container{root: daemon.containerRoot(id)}
if err := container.FromDisk(); err != nil {
return nil, err
}
@ -140,19 +140,19 @@ func (runtime *Runtime) load(id string) (*Container, error) {
return container, nil
}
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
if container.runtime != nil || runtime.Exists(container.ID) {
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(container *Container) error {
if container.daemon != nil || daemon.Exists(container.ID) {
return fmt.Errorf("Container is already loaded")
}
if err := validateID(container.ID); err != nil {
return err
}
if err := runtime.ensureName(container); err != nil {
if err := daemon.ensureName(container); err != nil {
return err
}
container.runtime = runtime
container.daemon = daemon
// Attach to stdout and stderr
container.stderr = utils.NewWriteBroadcaster()
@ -164,8 +164,8 @@ func (runtime *Runtime) Register(container *Container) error {
container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
}
// done
runtime.containers.PushBack(container)
runtime.idIndex.Add(container.ID)
daemon.containers.PushBack(container)
daemon.idIndex.Add(container.ID)
// FIXME: if the container is supposed to be running but is not, auto restart it?
// if so, then we need to restart monitor and init a new lock
@ -192,7 +192,7 @@ func (runtime *Runtime) Register(container *Container) error {
if err != nil {
utils.Debugf("cannot find existing process for %d", existingPid)
}
runtime.execDriver.Terminate(cmd)
daemon.execDriver.Terminate(cmd)
}
if err := container.Unmount(); err != nil {
utils.Debugf("ghost unmount error %s", err)
@ -202,10 +202,10 @@ func (runtime *Runtime) Register(container *Container) error {
}
}
info := runtime.execDriver.Info(container.ID)
info := daemon.execDriver.Info(container.ID)
if !info.IsRunning() {
utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
if runtime.config.AutoRestart {
if daemon.config.AutoRestart {
utils.Debugf("Restarting")
if err := container.Unmount(); err != nil {
utils.Debugf("restart unmount error %s", err)
@ -234,9 +234,9 @@ func (runtime *Runtime) Register(container *Container) error {
return nil
}
func (runtime *Runtime) ensureName(container *Container) error {
func (daemon *Daemon) ensureName(container *Container) error {
if container.Name == "" {
name, err := generateRandomName(runtime)
name, err := generateRandomName(daemon)
if err != nil {
name = utils.TruncateID(container.ID)
}
@ -245,8 +245,8 @@ func (runtime *Runtime) ensureName(container *Container) error {
if err := container.ToDisk(); err != nil {
utils.Debugf("Error saving container name %s", err)
}
if !runtime.containerGraph.Exists(name) {
if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
if !daemon.containerGraph.Exists(name) {
if _, err := daemon.containerGraph.Set(name, container.ID); err != nil {
utils.Debugf("Setting default id - %s", err)
}
}
@ -254,7 +254,7 @@ func (runtime *Runtime) ensureName(container *Container) error {
return nil
}
func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return err
@ -263,13 +263,13 @@ func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream strin
return nil
}
// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem.
func (runtime *Runtime) Destroy(container *Container) error {
// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
func (daemon *Daemon) Destroy(container *Container) error {
if container == nil {
return fmt.Errorf("The given container is <nil>")
}
element := runtime.getContainerElement(container.ID)
element := daemon.getContainerElement(container.ID)
if element == nil {
return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
}
@ -278,42 +278,42 @@ func (runtime *Runtime) Destroy(container *Container) error {
return err
}
if err := runtime.driver.Remove(container.ID); err != nil {
return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err)
if err := daemon.driver.Remove(container.ID); err != nil {
return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
}
initID := fmt.Sprintf("%s-init", container.ID)
if err := runtime.driver.Remove(initID); err != nil {
return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err)
if err := daemon.driver.Remove(initID); err != nil {
return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
}
if _, err := runtime.containerGraph.Purge(container.ID); err != nil {
if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
utils.Debugf("Unable to remove container from link graph: %s", err)
}
// Deregister the container before removing its directory, to avoid race conditions
runtime.idIndex.Delete(container.ID)
runtime.containers.Remove(element)
daemon.idIndex.Delete(container.ID)
daemon.containers.Remove(element)
if err := os.RemoveAll(container.root); err != nil {
return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
}
return nil
}
func (runtime *Runtime) restore() error {
func (daemon *Daemon) restore() error {
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
fmt.Printf("Loading containers: ")
}
dir, err := ioutil.ReadDir(runtime.repository)
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
containers := make(map[string]*Container)
currentDriver := runtime.driver.String()
currentDriver := daemon.driver.String()
for _, v := range dir {
id := v.Name()
container, err := runtime.load(id)
container, err := daemon.load(id)
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
fmt.Print(".")
}
@ -332,12 +332,12 @@ func (runtime *Runtime) restore() error {
}
register := func(container *Container) {
if err := runtime.Register(container); err != nil {
if err := daemon.Register(container); err != nil {
utils.Debugf("Failed to register container %s: %s", container.ID, err)
}
}
if entities := runtime.containerGraph.List("/", -1); entities != nil {
if entities := daemon.containerGraph.List("/", -1); entities != nil {
for _, p := range entities.Paths() {
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
fmt.Print(".")
@ -353,12 +353,12 @@ func (runtime *Runtime) restore() error {
// Any containers that are left over do not exist in the graph
for _, container := range containers {
// Try to set the default name for a container if it exists prior to links
container.Name, err = generateRandomName(runtime)
container.Name, err = generateRandomName(daemon)
if err != nil {
container.Name = utils.TruncateID(container.ID)
}
if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
if _, err := daemon.containerGraph.Set(container.Name, container.ID); err != nil {
utils.Debugf("Setting default id - %s", err)
}
register(container)
@ -372,38 +372,38 @@ func (runtime *Runtime) restore() error {
}
// Create creates a new container from the given configuration with a given name.
func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) {
func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
var (
container *Container
warnings []string
)
img, err := runtime.repositories.LookupImage(config.Image)
img, err := daemon.repositories.LookupImage(config.Image)
if err != nil {
return nil, nil, err
}
if err := runtime.checkImageDepth(img); err != nil {
if err := daemon.checkImageDepth(img); err != nil {
return nil, nil, err
}
if warnings, err = runtime.mergeAndVerifyConfig(config, img); err != nil {
if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
return nil, nil, err
}
if container, err = runtime.newContainer(name, config, img); err != nil {
if container, err = daemon.newContainer(name, config, img); err != nil {
return nil, nil, err
}
if err := runtime.createRootfs(container, img); err != nil {
if err := daemon.createRootfs(container, img); err != nil {
return nil, nil, err
}
if err := container.ToDisk(); err != nil {
return nil, nil, err
}
if err := runtime.Register(container); err != nil {
if err := daemon.Register(container); err != nil {
return nil, nil, err
}
return container, warnings, nil
}
func (runtime *Runtime) checkImageDepth(img *image.Image) error {
func (daemon *Daemon) checkImageDepth(img *image.Image) error {
// We add 2 layers to the depth because the container's rw and
// init layer add to the restriction
depth, err := img.Depth()
@ -416,7 +416,7 @@ func (runtime *Runtime) checkImageDepth(img *image.Image) error {
return nil
}
func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool {
func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
if config != nil {
if config.PortSpecs != nil {
for _, p := range config.PortSpecs {
@ -429,9 +429,9 @@ func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool {
return false
}
func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
warnings := []string{}
if runtime.checkDeprecatedExpose(img.Config) || runtime.checkDeprecatedExpose(config) {
if daemon.checkDeprecatedExpose(img.Config) || daemon.checkDeprecatedExpose(config) {
warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.")
}
if img.Config != nil {
@ -445,14 +445,14 @@ func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *imag
return warnings, nil
}
func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
var (
err error
id = utils.GenerateRandomID()
)
if name == "" {
name, err = generateRandomName(runtime)
name, err = generateRandomName(daemon)
if err != nil {
name = utils.TruncateID(id)
}
@ -465,19 +465,19 @@ func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
name = "/" + name
}
// Set the entity in the graph using the default name specified
if _, err := runtime.containerGraph.Set(name, id); err != nil {
if _, err := daemon.containerGraph.Set(name, id); err != nil {
if !graphdb.IsNonUniqueNameError(err) {
return "", "", err
}
conflictingContainer, err := runtime.GetByName(name)
conflictingContainer, err := daemon.GetByName(name)
if err != nil {
if strings.Contains(err.Error(), "Could not find entity") {
return "", "", err
}
// Remove name and continue starting the container
if err := runtime.containerGraph.Delete(name); err != nil {
if err := daemon.containerGraph.Delete(name); err != nil {
return "", "", err
}
} else {
@ -490,7 +490,7 @@ func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
return id, name, nil
}
func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) {
func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) {
// Generate default hostname
// FIXME: the lxc template no longer needs to set a default hostname
if config.Hostname == "" {
@ -498,7 +498,7 @@ func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) {
}
}
func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
func (daemon *Daemon) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
var (
entrypoint string
args []string
@ -513,18 +513,18 @@ func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string,
return entrypoint, args
}
func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
var (
id string
err error
)
id, name, err = runtime.generateIdAndName(name)
id, name, err = daemon.generateIdAndName(name)
if err != nil {
return nil, err
}
runtime.generateHostname(id, config)
entrypoint, args := runtime.getEntrypointAndArgs(config)
daemon.generateHostname(id, config)
entrypoint, args := daemon.getEntrypointAndArgs(config)
container := &Container{
// FIXME: we should generate the ID here instead of receiving it as an argument
@ -537,34 +537,34 @@ func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img
Image: img.ID, // Always use the resolved image id
NetworkSettings: &NetworkSettings{},
Name: name,
Driver: runtime.driver.String(),
ExecDriver: runtime.execDriver.Name(),
Driver: daemon.driver.String(),
ExecDriver: daemon.execDriver.Name(),
}
container.root = runtime.containerRoot(container.ID)
container.root = daemon.containerRoot(container.ID)
return container, nil
}
func (runtime *Runtime) createRootfs(container *Container, img *image.Image) error {
func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error {
// Step 1: create the container directory.
// This doubles as a barrier to avoid race conditions.
if err := os.Mkdir(container.root, 0700); err != nil {
return err
}
initID := fmt.Sprintf("%s-init", container.ID)
if err := runtime.driver.Create(initID, img.ID, ""); err != nil {
if err := daemon.driver.Create(initID, img.ID, ""); err != nil {
return err
}
initPath, err := runtime.driver.Get(initID)
initPath, err := daemon.driver.Get(initID)
if err != nil {
return err
}
defer runtime.driver.Put(initID)
defer daemon.driver.Put(initID)
if err := graph.SetupInitLayer(initPath); err != nil {
return err
}
if err := runtime.driver.Create(container.ID, initID, ""); err != nil {
if err := daemon.driver.Create(container.ID, initID, ""); err != nil {
return err
}
return nil
@ -572,7 +572,7 @@ func (runtime *Runtime) createRootfs(container *Container, img *image.Image) err
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
// FIXME: freeze the container before copying it to avoid data corruption?
if err := container.Mount(); err != nil {
return nil, err
@ -595,13 +595,13 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
containerImage = container.Image
containerConfig = container.Config
}
img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
if err != nil {
return nil, err
}
// Register the image if needed
if repository != "" {
if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil {
if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
return img, err
}
}
@ -618,31 +618,31 @@ func GetFullContainerName(name string) (string, error) {
return name, nil
}
func (runtime *Runtime) GetByName(name string) (*Container, error) {
func (daemon *Daemon) GetByName(name string) (*Container, error) {
fullName, err := GetFullContainerName(name)
if err != nil {
return nil, err
}
entity := runtime.containerGraph.Get(fullName)
entity := daemon.containerGraph.Get(fullName)
if entity == nil {
return nil, fmt.Errorf("Could not find entity for %s", name)
}
e := runtime.getContainerElement(entity.ID())
e := daemon.getContainerElement(entity.ID())
if e == nil {
return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID())
}
return e.Value.(*Container), nil
}
func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
name, err := GetFullContainerName(name)
if err != nil {
return nil, err
}
children := make(map[string]*Container)
err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
c := runtime.Get(e.ID())
err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
c := daemon.Get(e.ID())
if c == nil {
return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p)
}
@ -656,25 +656,25 @@ func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
return children, nil
}
func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error {
func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if !runtime.containerGraph.Exists(fullName) {
_, err := runtime.containerGraph.Set(fullName, child.ID)
if !daemon.containerGraph.Exists(fullName) {
_, err := daemon.containerGraph.Set(fullName, child.ID)
return err
}
return nil
}
// FIXME: harmonize with NewGraph()
func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
runtime, err := NewRuntimeFromDirectory(config, eng)
func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
daemon, err := NewDaemonFromDirectory(config, eng)
if err != nil {
return nil, err
}
return runtime, nil
return daemon, nil
}
func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
if !config.EnableSelinuxSupport {
selinux.SetDisabled()
}
@ -693,9 +693,9 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
return nil, err
}
runtimeRepo := path.Join(config.Root, "containers")
daemonRepo := path.Join(config.Root, "containers")
if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
@ -774,8 +774,8 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
return nil, err
}
runtime := &Runtime{
repository: runtimeRepo,
daemon := &Daemon{
repository: daemonRepo,
containers: list.New(),
graph: g,
repositories: repositories,
@ -790,19 +790,19 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
eng: eng,
}
if err := runtime.checkLocaldns(); err != nil {
if err := daemon.checkLocaldns(); err != nil {
return nil, err
}
if err := runtime.restore(); err != nil {
if err := daemon.restore(); err != nil {
return nil, err
}
return runtime, nil
return daemon, nil
}
func (runtime *Runtime) shutdown() error {
func (daemon *Daemon) shutdown() error {
group := sync.WaitGroup{}
utils.Debugf("starting clean shutdown of all containers...")
for _, container := range runtime.List() {
for _, container := range daemon.List() {
c := container
if c.State.IsRunning() {
utils.Debugf("stopping %s", c.ID)
@ -823,22 +823,22 @@ func (runtime *Runtime) shutdown() error {
return nil
}
func (runtime *Runtime) Close() error {
func (daemon *Daemon) Close() error {
errorsStrings := []string{}
if err := runtime.shutdown(); err != nil {
utils.Errorf("runtime.shutdown(): %s", err)
if err := daemon.shutdown(); err != nil {
utils.Errorf("daemon.shutdown(): %s", err)
errorsStrings = append(errorsStrings, err.Error())
}
if err := portallocator.ReleaseAll(); err != nil {
utils.Errorf("portallocator.ReleaseAll(): %s", err)
errorsStrings = append(errorsStrings, err.Error())
}
if err := runtime.driver.Cleanup(); err != nil {
utils.Errorf("runtime.driver.Cleanup(): %s", err.Error())
if err := daemon.driver.Cleanup(); err != nil {
utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
errorsStrings = append(errorsStrings, err.Error())
}
if err := runtime.containerGraph.Close(); err != nil {
utils.Errorf("runtime.containerGraph.Close(): %s", err.Error())
if err := daemon.containerGraph.Close(); err != nil {
utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
errorsStrings = append(errorsStrings, err.Error())
}
if len(errorsStrings) > 0 {
@ -847,55 +847,55 @@ func (runtime *Runtime) Close() error {
return nil
}
func (runtime *Runtime) Mount(container *Container) error {
dir, err := runtime.driver.Get(container.ID)
func (daemon *Daemon) Mount(container *Container) error {
dir, err := daemon.driver.Get(container.ID)
if err != nil {
return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err)
return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
}
if container.basefs == "" {
container.basefs = dir
} else if container.basefs != dir {
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
runtime.driver, container.ID, container.basefs, dir)
daemon.driver, container.ID, container.basefs, dir)
}
return nil
}
func (runtime *Runtime) Unmount(container *Container) error {
runtime.driver.Put(container.ID)
func (daemon *Daemon) Unmount(container *Container) error {
daemon.driver.Put(container.ID)
return nil
}
func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) {
if differ, ok := runtime.driver.(graphdriver.Differ); ok {
func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) {
if differ, ok := daemon.driver.(graphdriver.Differ); ok {
return differ.Changes(container.ID)
}
cDir, err := runtime.driver.Get(container.ID)
cDir, err := daemon.driver.Get(container.ID)
if err != nil {
return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
}
defer runtime.driver.Put(container.ID)
initDir, err := runtime.driver.Get(container.ID + "-init")
defer daemon.driver.Put(container.ID)
initDir, err := daemon.driver.Get(container.ID + "-init")
if err != nil {
return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
}
defer runtime.driver.Put(container.ID + "-init")
defer daemon.driver.Put(container.ID + "-init")
return archive.ChangesDirs(cDir, initDir)
}
func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
if differ, ok := runtime.driver.(graphdriver.Differ); ok {
func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) {
if differ, ok := daemon.driver.(graphdriver.Differ); ok {
return differ.Diff(container.ID)
}
changes, err := runtime.Changes(container)
changes, err := daemon.Changes(container)
if err != nil {
return nil, err
}
cDir, err := runtime.driver.Get(container.ID)
cDir, err := daemon.driver.Get(container.ID)
if err != nil {
return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
}
archive, err := archive.ExportChanges(cDir, changes)
@ -904,26 +904,26 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
}
return utils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
runtime.driver.Put(container.ID)
daemon.driver.Put(container.ID)
return err
}), nil
}
func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
return runtime.execDriver.Run(c.command, pipes, startCallback)
func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
return daemon.execDriver.Run(c.command, pipes, startCallback)
}
func (runtime *Runtime) Kill(c *Container, sig int) error {
return runtime.execDriver.Kill(c.command, sig)
func (daemon *Daemon) Kill(c *Container, sig int) error {
return daemon.execDriver.Kill(c.command, sig)
}
// Nuke kills all containers then removes all content
// from the content root, including images, volumes and
// container filesystems.
// Again: this will remove your entire docker runtime!
func (runtime *Runtime) Nuke() error {
// Again: this will remove your entire docker daemon!
func (daemon *Daemon) Nuke() error {
var wg sync.WaitGroup
for _, container := range runtime.List() {
for _, container := range daemon.List() {
wg.Add(1)
go func(c *Container) {
c.Kill()
@ -931,63 +931,63 @@ func (runtime *Runtime) Nuke() error {
}(container)
}
wg.Wait()
runtime.Close()
daemon.Close()
return os.RemoveAll(runtime.config.Root)
return os.RemoveAll(daemon.config.Root)
}
// FIXME: this is a convenience function for integration tests
// which need direct access to runtime.graph.
// which need direct access to daemon.graph.
// Once the tests switch to using engine and jobs, this method
// can go away.
func (runtime *Runtime) Graph() *graph.Graph {
return runtime.graph
func (daemon *Daemon) Graph() *graph.Graph {
return daemon.graph
}
func (runtime *Runtime) Repositories() *graph.TagStore {
return runtime.repositories
func (daemon *Daemon) Repositories() *graph.TagStore {
return daemon.repositories
}
func (runtime *Runtime) Config() *daemonconfig.Config {
return runtime.config
func (daemon *Daemon) Config() *daemonconfig.Config {
return daemon.config
}
func (runtime *Runtime) SystemConfig() *sysinfo.SysInfo {
return runtime.sysInfo
func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo {
return daemon.sysInfo
}
func (runtime *Runtime) SystemInitPath() string {
return runtime.sysInitPath
func (daemon *Daemon) SystemInitPath() string {
return daemon.sysInitPath
}
func (runtime *Runtime) GraphDriver() graphdriver.Driver {
return runtime.driver
func (daemon *Daemon) GraphDriver() graphdriver.Driver {
return daemon.driver
}
func (runtime *Runtime) ExecutionDriver() execdriver.Driver {
return runtime.execDriver
func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
return daemon.execDriver
}
func (runtime *Runtime) Volumes() *graph.Graph {
return runtime.volumes
func (daemon *Daemon) Volumes() *graph.Graph {
return daemon.volumes
}
func (runtime *Runtime) ContainerGraph() *graphdb.Database {
return runtime.containerGraph
func (daemon *Daemon) ContainerGraph() *graphdb.Database {
return daemon.containerGraph
}
func (runtime *Runtime) SetServer(server Server) {
runtime.srv = server
func (daemon *Daemon) SetServer(server Server) {
daemon.srv = server
}
func (runtime *Runtime) checkLocaldns() error {
func (daemon *Daemon) checkLocaldns() error {
resolvConf, err := utils.GetResolvConf()
if err != nil {
return err
}
if len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
runtime.config.Dns = DefaultDns
daemon.config.Dns = DefaultDns
}
return nil
}
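
Construction keeps the old shape under the new names: `NewDaemon` delegates to `NewDaemonFromDirectory` exactly as `NewRuntime` delegated to `NewRuntimeFromDirectory`, with unchanged signatures. A minimal wiring sketch (hypothetical helper; the `cfg`/`eng` setup is assumed and not shown in this commit):

    // startDaemon shows the renamed constructor and shutdown path; cfg
    // and eng come from the usual daemonconfig.Config / engine.Engine
    // setup elsewhere in the tree.
    func startDaemon(cfg *daemonconfig.Config, eng *engine.Engine) error {
        d, err := daemon.NewDaemon(cfg, eng) // was: runtime.NewRuntime(cfg, eng)
        if err != nil {
            return err
        }
        // Close stops containers, releases ports, cleans up the driver,
        // and closes the container graph, as in Close() above.
        defer d.Close()
        // ... serve API jobs against d ...
        return nil
    }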

View file

@ -1,11 +1,11 @@
// +build !exclude_graphdriver_aufs
package runtime
package daemon
import (
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver/aufs"
"github.com/dotcloud/docker/graph"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/runtime/graphdriver/aufs"
"github.com/dotcloud/docker/utils"
)

daemon/daemon_btrfs.go Normal file
View file

@ -0,0 +1,7 @@
// +build !exclude_graphdriver_btrfs
package daemon
import (
_ "github.com/dotcloud/docker/daemon/graphdriver/btrfs"
)

View file

@ -0,0 +1,7 @@
// +build !exclude_graphdriver_devicemapper
package daemon
import (
_ "github.com/dotcloud/docker/daemon/graphdriver/devmapper"
)

View file

@ -1,9 +1,9 @@
// +build exclude_graphdriver_aufs
package runtime
package daemon
import (
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
)
func migrateIfAufs(driver graphdriver.Driver, root string) error {

View file

@ -2,10 +2,10 @@ package execdrivers
import (
"fmt"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/execdriver/lxc"
"github.com/dotcloud/docker/daemon/execdriver/native"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/runtime/execdriver/lxc"
"github.com/dotcloud/docker/runtime/execdriver/native"
"path"
)

View file

@ -2,9 +2,9 @@ package lxc
import (
"fmt"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/cgroups"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"log"

View file

@ -3,9 +3,9 @@ package lxc
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/netlink"
"github.com/dotcloud/docker/pkg/user"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/syndtr/gocapability/capability"
"io/ioutil"
"net"

View file

@ -1,8 +1,8 @@
package lxc
import (
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/runtime/execdriver"
"strings"
"text/template"
)

View file

@ -3,7 +3,7 @@ package lxc
import (
"bufio"
"fmt"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/daemon/execdriver"
"io/ioutil"
"math/rand"
"os"

View file

@ -1,7 +1,7 @@
package configuration
import (
"github.com/dotcloud/docker/runtime/execdriver/native/template"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
"testing"
)

View file

@ -4,12 +4,12 @@ import (
"fmt"
"os"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/runtime/execdriver/native/configuration"
"github.com/dotcloud/docker/runtime/execdriver/native/template"
)
// createContainer populates and configures the container type with the

View file

@ -3,12 +3,12 @@ package native
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/cgroups"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/runtime/execdriver"
"io"
"io/ioutil"
"log"

View file

@ -5,7 +5,7 @@
package native
import (
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/daemon/execdriver"
"io"
"os"
"os/exec"

View file

@ -24,8 +24,8 @@ import (
"bufio"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/graphdriver"
mountpk "github.com/dotcloud/docker/pkg/mount"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"os"
"os/exec"

View file

@ -5,7 +5,7 @@ import (
"encoding/hex"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
"io/ioutil"
"os"
"path"

View file

@ -11,7 +11,7 @@ import "C"
import (
"fmt"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
"os"
"path"
"syscall"

View file

@ -4,7 +4,7 @@ package devmapper
import (
"fmt"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"os"

View file

@ -4,7 +4,7 @@ package devmapper
import (
"fmt"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
"io/ioutil"
"path"
"runtime"

View file

@ -2,7 +2,7 @@ package vfs
import (
"fmt"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
"os"
"os/exec"
"path"

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import (
"sort"

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import (
"github.com/dotcloud/docker/engine"

View file

@ -2,13 +2,13 @@ package bridge
import (
"fmt"
"github.com/dotcloud/docker/daemon/networkdriver"
"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
"github.com/dotcloud/docker/daemon/networkdriver/portmapper"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/iptables"
"github.com/dotcloud/docker/pkg/netlink"
"github.com/dotcloud/docker/runtime/networkdriver"
"github.com/dotcloud/docker/runtime/networkdriver/ipallocator"
"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
"github.com/dotcloud/docker/runtime/networkdriver/portmapper"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"log"

View file

@ -3,8 +3,8 @@ package ipallocator
import (
"encoding/binary"
"errors"
"github.com/dotcloud/docker/daemon/networkdriver"
"github.com/dotcloud/docker/pkg/collections"
"github.com/dotcloud/docker/runtime/networkdriver"
"net"
"sync"
)

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import (
"github.com/dotcloud/docker/utils"

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import "sort"

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import (
"fmt"

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import (
"fmt"
@ -51,14 +51,14 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[
}
type checker struct {
runtime *Runtime
daemon *Daemon
}
func (c *checker) Exists(name string) bool {
return c.runtime.containerGraph.Exists("/" + name)
return c.daemon.containerGraph.Exists("/" + name)
}
// Generate a random and unique name
func generateRandomName(runtime *Runtime) (string, error) {
return namesgenerator.GenerateRandomName(&checker{runtime})
func generateRandomName(daemon *Daemon) (string, error) {
return namesgenerator.GenerateRandomName(&checker{daemon})
}

View file

@ -1,4 +1,4 @@
package runtime
package daemon
import (
"testing"

View file

@ -1,9 +1,9 @@
package runtime
package daemon
import (
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"os"
@ -40,7 +40,7 @@ func setupMountsForContainer(container *Container) error {
}
mounts := []execdriver.Mount{
{container.runtime.sysInitPath, "/.dockerinit", false, true},
{container.daemon.sysInitPath, "/.dockerinit", false, true},
{envPath, "/.dockerenv", false, true},
{container.ResolvConfPath, "/etc/resolv.conf", false, true},
}
@ -85,7 +85,7 @@ func applyVolumesFrom(container *Container) error {
}
}
c := container.runtime.Get(specParts[0])
c := container.daemon.Get(specParts[0])
if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
}
@ -167,7 +167,7 @@ func createVolumes(container *Container) error {
return err
}
volumesDriver := container.runtime.volumes.Driver()
volumesDriver := container.daemon.volumes.Driver()
// Create the requested volumes if they don't exist
for volPath := range container.Config.Volumes {
volPath = filepath.Clean(volPath)
@ -200,7 +200,7 @@ func createVolumes(container *Container) error {
// Do not pass a container as the parameter for the volume creation.
// The graph driver using the container's information ( Image ) to
// create the parent.
c, err := container.runtime.volumes.Create(nil, "", "", "", "", nil, nil)
c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
if err != nil {
return err
}

View file

@ -1,8 +1,8 @@
package daemonconfig
import (
"github.com/dotcloud/docker/daemon/networkdriver"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runtime/networkdriver"
"net"
)

View file

@ -3,10 +3,10 @@ package graph
import (
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"

View file

@ -2,9 +2,9 @@ package graph
import (
"bytes"
"github.com/dotcloud/docker/daemon/graphdriver"
_ "github.com/dotcloud/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/runtime/graphdriver"
_ "github.com/dotcloud/docker/runtime/graphdriver/vfs" // import the vfs driver so it is used in the tests
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"

View file

@ -1,7 +1,7 @@
package image
import (
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
)
type Graph interface {

View file

@ -4,8 +4,8 @@ import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"os"

View file

@ -3,7 +3,7 @@ package main
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/daemon"
"net"
"os/exec"
"path/filepath"
@ -47,7 +47,7 @@ func TestNetworkNat(t *testing.T) {
inspectOut, _, err := runCommandWithOutput(inspectCmd)
errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err))
containers := []*runtime.Container{}
containers := []*daemon.Container{}
if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil {
t.Fatalf("Error inspecting the container: %s", err)
}

View file

@ -16,10 +16,10 @@ import (
"github.com/dotcloud/docker/api"
"github.com/dotcloud/docker/api/server"
"github.com/dotcloud/docker/daemon"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
@ -27,10 +27,10 @@ import (
func TestGetEvents(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
// FIXME: we might not need runtime, why not simply nuke
// FIXME: we might not need daemon, why not simply nuke
// the engine?
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
var events []*utils.JSONMessage
for _, parts := range [][3]string{
@ -72,7 +72,7 @@ func TestGetEvents(t *testing.T) {
func TestGetImagesJSON(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
job := eng.Job("images")
initialImages, err := job.Stdout.AddListTable()
@ -175,7 +175,7 @@ func TestGetImagesJSON(t *testing.T) {
func TestGetImagesHistory(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
r := httptest.NewRecorder()
@ -199,7 +199,7 @@ func TestGetImagesHistory(t *testing.T) {
func TestGetImagesByName(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil)
if err != nil {
@ -223,7 +223,7 @@ func TestGetImagesByName(t *testing.T) {
func TestGetContainersJSON(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
job := eng.Job("containers")
job.SetenvBool("all", true)
@ -269,7 +269,7 @@ func TestGetContainersJSON(t *testing.T) {
func TestGetContainersExport(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
// Create a container and remove a file
containerID := createTestContainer(eng,
@ -317,7 +317,7 @@ func TestGetContainersExport(t *testing.T) {
func TestSaveImageAndThenLoad(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
// save image
r := httptest.NewRecorder()
@ -388,7 +388,7 @@ func TestSaveImageAndThenLoad(t *testing.T) {
func TestGetContainersChanges(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
// Create a container and remove a file
containerID := createTestContainer(eng,
@ -428,7 +428,7 @@ func TestGetContainersChanges(t *testing.T) {
func TestGetContainersTop(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -439,7 +439,7 @@ func TestGetContainersTop(t *testing.T) {
t,
)
defer func() {
// Make sure the process dies before destroying runtime
// Make sure the process dies before destroying daemon
containerKill(eng, containerID, t)
containerWait(eng, containerID, t)
}()
@ -504,7 +504,7 @@ func TestGetContainersTop(t *testing.T) {
func TestGetContainersByName(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
// Create a container and remove a file
containerID := createTestContainer(eng,
@ -524,7 +524,7 @@ func TestGetContainersByName(t *testing.T) {
t.Fatal(err)
}
assertHttpNotError(r, t)
outContainer := &runtime.Container{}
outContainer := &daemon.Container{}
if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil {
t.Fatal(err)
}
@ -535,7 +535,7 @@ func TestGetContainersByName(t *testing.T) {
func TestPostCommit(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
// Create a container and remove a file
@ -574,7 +574,7 @@ func TestPostCommit(t *testing.T) {
func TestPostContainersCreate(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
configJSON, err := json.Marshal(&runconfig.Config{
Image: unitTestImageID,
@ -615,7 +615,7 @@ func TestPostContainersCreate(t *testing.T) {
func TestPostContainersKill(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -654,7 +654,7 @@ func TestPostContainersKill(t *testing.T) {
func TestPostContainersRestart(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -699,7 +699,7 @@ func TestPostContainersRestart(t *testing.T) {
func TestPostContainersStart(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(
eng,
@ -752,7 +752,7 @@ func TestPostContainersStart(t *testing.T) {
// Expected behaviour: using / as a bind mount source should throw an error
func TestRunErrorBindMountRootSource(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(
eng,
@ -787,7 +787,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) {
func TestPostContainersStop(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -827,7 +827,7 @@ func TestPostContainersStop(t *testing.T) {
func TestPostContainersWait(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -865,7 +865,7 @@ func TestPostContainersWait(t *testing.T) {
func TestPostContainersAttach(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -943,7 +943,7 @@ func TestPostContainersAttach(t *testing.T) {
func TestPostContainersAttachStderr(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -1024,7 +1024,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
// FIXME: Test deleting volume in use by other container
func TestDeleteContainers(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
containerID := createTestContainer(eng,
&runconfig.Config{
@ -1050,7 +1050,7 @@ func TestDeleteContainers(t *testing.T) {
func TestOptionsRoute(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
r := httptest.NewRecorder()
req, err := http.NewRequest("OPTIONS", "/", nil)
@ -1068,7 +1068,7 @@ func TestOptionsRoute(t *testing.T) {
func TestGetEnabledCors(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
r := httptest.NewRecorder()
@ -1103,7 +1103,7 @@ func TestDeleteImages(t *testing.T) {
eng := NewTestEngine(t)
// we expect errors, so we disable stderr
eng.Stderr = ioutil.Discard
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
initialImages := getImages(eng, t, true, "")
@ -1160,7 +1160,7 @@ func TestDeleteImages(t *testing.T) {
func TestPostContainersCopy(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
// Create a container and remove a file
containerID := createTestContainer(eng,
@ -1218,7 +1218,7 @@ func TestPostContainersCopy(t *testing.T) {
func TestPostContainersCopyWhenContainerNotFound(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
r := httptest.NewRecorder()
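Before moving on, a minimal sketch of the setup/teardown idiom these API tests now share; it assumes the same test package, so NewTestEngine, mkDaemonFromEngine and the engine's "images" job come from the surrounding suite, and the test name itself is hypothetical:

func TestImagesJobSketch(t *testing.T) {
	eng := NewTestEngine(t)
	// Nuke disposes of the per-test daemon and its on-disk state.
	defer mkDaemonFromEngine(eng, t).Nuke()

	job := eng.Job("images")
	// Capture the job's tabular output instead of printing it.
	if _, err := job.Stdout.AddListTable(); err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
}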

View file

@ -365,7 +365,7 @@ func TestBuild(t *testing.T) {
func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*image.Image, error) {
if eng == nil {
eng = NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
runtime := mkDaemonFromEngine(eng, t)
// FIXME: we might not need runtime, why not simply nuke
// the engine?
defer nuke(runtime)
@ -547,7 +547,7 @@ func TestBuildEntrypoint(t *testing.T) {
// utilizing cache
func TestBuildEntrypointRunCleanup(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
img, err := buildImage(testContextTemplate{`
from {IMAGE}
@ -576,7 +576,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
img, err := buildImage(template, t, eng, true)
if err != nil {
@ -660,7 +660,7 @@ func TestBuildADDLocalFileWithCache(t *testing.T) {
},
nil}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
id1 := checkCacheBehaviorFromEngime(t, template, true, eng)
template.files = append(template.files, [2]string{"bar", "hello2"})
@ -796,7 +796,7 @@ func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) {
func TestForbiddenContextPath(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
srv := mkServerFromEngine(eng, t)
context := testContextTemplate{`
@ -844,7 +844,7 @@ func TestForbiddenContextPath(t *testing.T) {
func TestBuildADDFileNotFound(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
context := testContextTemplate{`
from {IMAGE}
@ -890,7 +890,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
func TestBuildInheritance(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
img, err := buildImage(testContextTemplate{`
from {IMAGE}
@ -1012,7 +1012,7 @@ func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) {
// gh #2446
func TestBuildAddToSymlinkDest(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
_, err := buildImage(testContextTemplate{`
from {IMAGE}
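For reference, a hedged sketch of driving buildImage with the signature shown above; the three positional testContextTemplate fields (Dockerfile source, local files, remote files) are an assumption read off this file's literals, and the test name is hypothetical:

func TestBuildImageSketch(t *testing.T) {
	eng := NewTestEngine(t)
	defer nuke(mkDaemonFromEngine(eng, t))

	// {IMAGE} is substituted with the unit-test base image by the harness.
	img, err := buildImage(testContextTemplate{`
from {IMAGE}
run echo hello
`, nil, nil}, t, eng, true) // final argument: use the build cache
	if err != nil {
		t.Fatal(err)
	}
	if img.ID == "" {
		t.Fatal("expected a built image ID")
	}
}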

View file

@ -4,10 +4,10 @@ import (
"bufio"
"fmt"
"github.com/dotcloud/docker/api/client"
"github.com/dotcloud/docker/daemon"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@ -36,7 +36,7 @@ func closeWrap(args ...io.Closer) error {
return nil
}
func setRaw(t *testing.T, c *runtime.Container) *term.State {
func setRaw(t *testing.T, c *daemon.Container) *term.State {
pty, err := c.GetPtyMaster()
if err != nil {
t.Fatal(err)
@ -48,7 +48,7 @@ func setRaw(t *testing.T, c *runtime.Container) *term.State {
return state
}
func unsetRaw(t *testing.T, c *runtime.Container, state *term.State) {
func unsetRaw(t *testing.T, c *daemon.Container, state *term.State) {
pty, err := c.GetPtyMaster()
if err != nil {
t.Fatal(err)
@ -56,12 +56,12 @@ func unsetRaw(t *testing.T, c *runtime.Container, state *term.State) {
term.RestoreTerminal(pty.Fd(), state)
}
func waitContainerStart(t *testing.T, timeout time.Duration) *runtime.Container {
var container *runtime.Container
func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container {
var container *daemon.Container
setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
for {
l := globalRuntime.List()
l := globalDaemon.List()
if len(l) == 1 && l[0].State.IsRunning() {
container = l[0]
break
@ -142,7 +142,7 @@ func TestRunHostname(t *testing.T) {
}
})
container := globalRuntime.List()[0]
container := globalDaemon.List()[0]
setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
<-c
@ -187,7 +187,7 @@ func TestRunWorkdir(t *testing.T) {
}
})
container := globalRuntime.List()[0]
container := globalDaemon.List()[0]
setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
<-c
@ -232,7 +232,7 @@ func TestRunWorkdirExists(t *testing.T) {
}
})
container := globalRuntime.List()[0]
container := globalDaemon.List()[0]
setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
<-c
@ -290,7 +290,7 @@ func TestRunExit(t *testing.T) {
}
})
container := globalRuntime.List()[0]
container := globalDaemon.List()[0]
// Closing /bin/cat stdin, expect it to exit
if err := stdin.Close(); err != nil {
@ -359,7 +359,7 @@ func TestRunDisconnect(t *testing.T) {
// Client disconnect after run -i should cause stdin to be closed, which should
// cause /bin/cat to exit.
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
container := globalRuntime.List()[0]
container := globalDaemon.List()[0]
container.Wait()
if container.State.IsRunning() {
t.Fatalf("/bin/cat is still running after closing stdin")
@ -445,7 +445,7 @@ func TestRunAttachStdin(t *testing.T) {
}
})
container := globalRuntime.List()[0]
container := globalDaemon.List()[0]
// Check output
setTimeout(t, "Reading command output time out", 10*time.Second, func() {
@ -701,7 +701,7 @@ func TestAttachDisconnect(t *testing.T) {
setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
for {
l := globalRuntime.List()
l := globalDaemon.List()
if len(l) == 1 && l[0].State.IsRunning() {
break
}
@ -709,7 +709,7 @@ func TestAttachDisconnect(t *testing.T) {
}
})
container := globalRuntime.List()[0]
container := globalDaemon.List()[0]
// Attach to it
c1 := make(chan struct{})
@ -781,7 +781,7 @@ func TestRunAutoRemove(t *testing.T) {
time.Sleep(500 * time.Millisecond)
if len(globalRuntime.List()) > 0 {
if len(globalDaemon.List()) > 0 {
t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID)
}
}
@ -798,7 +798,7 @@ func TestCmdLogs(t *testing.T) {
t.Fatal(err)
}
if err := cli.CmdLogs(globalRuntime.List()[0].ID); err != nil {
if err := cli.CmdLogs(globalDaemon.List()[0].ID); err != nil {
t.Fatal(err)
}
}
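The start-polling loop above recurs in several of these tests; here is a compact variant of the suite's waitContainerStart helper, assuming globalDaemon (*daemon.Daemon) has already been spawned — the helper name is hypothetical:

func firstRunningContainer(t *testing.T, timeout time.Duration) *daemon.Container {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		// List() returns every container the daemon knows about.
		if l := globalDaemon.List(); len(l) == 1 && l[0].State.IsRunning() {
			return l[0]
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatal("container did not start in time")
	return nil
}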

View file

@ -17,11 +17,11 @@ import (
)
func TestIDFormat(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _, err := runtime.Create(
daemon := mkDaemon(t)
defer nuke(daemon)
container1, _, err := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
},
"",
@ -39,14 +39,14 @@ func TestIDFormat(t *testing.T) {
}
func TestMultipleAttachRestart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, _ := mkContainer(
runtime,
daemon,
[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
t,
)
defer runtime.Destroy(container)
defer daemon.Destroy(container)
// Simulate 3 client attaching to the container and stop/restart
@ -135,11 +135,11 @@ func TestMultipleAttachRestart(t *testing.T) {
func TestDiff(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
// Create a container and remove a file
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
defer runtime.Destroy(container1)
container1, _, _ := mkContainer(daemon, []string{"_", "/bin/rm", "/etc/passwd"}, t)
defer daemon.Destroy(container1)
// The changelog should be empty and not fail before run. See #1705
c, err := container1.Changes()
@ -170,14 +170,14 @@ func TestDiff(t *testing.T) {
}
// Commit the container
img, err := runtime.Commit(container1, "", "", "unit test committed image - diff", "", nil)
img, err := daemon.Commit(container1, "", "", "unit test committed image - diff", "", nil)
if err != nil {
t.Fatal(err)
}
// Create a new container from the commited image
container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
defer runtime.Destroy(container2)
container2, _, _ := mkContainer(daemon, []string{img.ID, "cat", "/etc/passwd"}, t)
defer daemon.Destroy(container2)
if err := container2.Run(); err != nil {
t.Fatal(err)
@ -195,8 +195,8 @@ func TestDiff(t *testing.T) {
}
// Create a new container
container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
defer runtime.Destroy(container3)
container3, _, _ := mkContainer(daemon, []string{"_", "rm", "/bin/httpd"}, t)
defer daemon.Destroy(container3)
if err := container3.Run(); err != nil {
t.Fatal(err)
@ -219,10 +219,10 @@ func TestDiff(t *testing.T) {
}
func TestCommitAutoRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1)
daemon := mkDaemon(t)
defer nuke(daemon)
container1, _, _ := mkContainer(daemon, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer daemon.Destroy(container1)
if container1.State.IsRunning() {
t.Errorf("Container shouldn't be running")
@ -234,14 +234,14 @@ func TestCommitAutoRun(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
img, err := runtime.Commit(container1, "", "", "unit test committed image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}})
img, err := daemon.Commit(container1, "", "", "unit test committed image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}})
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and checks docker.root/layers/img.id/world
container2, _, _ := mkContainer(runtime, []string{img.ID}, t)
defer runtime.Destroy(container2)
container2, _, _ := mkContainer(daemon, []string{img.ID}, t)
defer daemon.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
t.Fatal(err)
@ -274,11 +274,11 @@ func TestCommitAutoRun(t *testing.T) {
}
func TestCommitRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1)
container1, _, _ := mkContainer(daemon, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer daemon.Destroy(container1)
if container1.State.IsRunning() {
t.Errorf("Container shouldn't be running")
@ -290,14 +290,14 @@ func TestCommitRun(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
img, err := runtime.Commit(container1, "", "", "unit test committed image", "", nil)
img, err := daemon.Commit(container1, "", "", "unit test committed image", "", nil)
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and checks docker.root/layers/img.id/world
container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
defer runtime.Destroy(container2)
container2, _, _ := mkContainer(daemon, []string{img.ID, "cat", "/world"}, t)
defer daemon.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
t.Fatal(err)
@ -330,10 +330,10 @@ func TestCommitRun(t *testing.T) {
}
func TestStart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, _ := mkContainer(runtime, []string{"-i", "_", "/bin/cat"}, t)
defer runtime.Destroy(container)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, _ := mkContainer(daemon, []string{"-i", "_", "/bin/cat"}, t)
defer daemon.Destroy(container)
cStdin, err := container.StdinPipe()
if err != nil {
@ -365,10 +365,10 @@ func TestCpuShares(t *testing.T) {
if err1 == nil || err2 == nil {
t.Skip("Fixme. Setting cpu cgroup shares doesn't work in dind on a Fedora host. The lxc utils are confused by the cpu,cpuacct mount.")
}
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
defer runtime.Destroy(container)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, _ := mkContainer(daemon, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
defer daemon.Destroy(container)
cStdin, err := container.StdinPipe()
if err != nil {
@ -395,10 +395,10 @@ func TestCpuShares(t *testing.T) {
}
func TestRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
defer daemon.Destroy(container)
if container.State.IsRunning() {
t.Errorf("Container shouldn't be running")
@ -412,11 +412,11 @@ func TestRun(t *testing.T) {
}
func TestOutput(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Cmd: []string{"echo", "-n", "foobar"},
},
"",
@ -424,7 +424,7 @@ func TestOutput(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@ -435,11 +435,11 @@ func TestOutput(t *testing.T) {
}
func TestKillDifferentUser(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"cat"},
OpenStdin: true,
User: "daemon",
@ -449,7 +449,7 @@ func TestKillDifferentUser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
// FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case
// there is a side effect I'm not seeing.
// defer container.stdin.Close()
@ -495,8 +495,8 @@ func TestKillDifferentUser(t *testing.T) {
// Test that creating a container with a volume doesn't crash. Regression test for #995.
func TestCreateVolume(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
config, hc, _, err := runconfig.Parse([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil)
if err != nil {
@ -519,19 +519,19 @@ func TestCreateVolume(t *testing.T) {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
c := daemon.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
t.Fatalf("Couldn't retrieve container %s from daemon", id)
}
c.WaitTimeout(500 * time.Millisecond)
c.Wait()
}
func TestKill(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"sleep", "2"},
},
"",
@ -539,7 +539,7 @@ func TestKill(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
if container.State.IsRunning() {
t.Errorf("Container shouldn't be running")
@ -571,17 +571,17 @@ func TestKill(t *testing.T) {
}
func TestExitCode(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
trueContainer, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
trueContainer, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"/bin/true"},
}, "")
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(trueContainer)
defer daemon.Destroy(trueContainer)
if err := trueContainer.Run(); err != nil {
t.Fatal(err)
}
@ -589,14 +589,14 @@ func TestExitCode(t *testing.T) {
t.Fatalf("Unexpected exit code %d (expected 0)", code)
}
falseContainer, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
falseContainer, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"/bin/false"},
}, "")
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(falseContainer)
defer daemon.Destroy(falseContainer)
if err := falseContainer.Run(); err != nil {
t.Fatal(err)
}
@ -606,10 +606,10 @@ func TestExitCode(t *testing.T) {
}
func TestRestart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"echo", "-n", "foobar"},
},
"",
@ -617,7 +617,7 @@ func TestRestart(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@ -637,10 +637,10 @@ func TestRestart(t *testing.T) {
}
func TestRestartStdin(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"cat"},
OpenStdin: true,
@ -650,7 +650,7 @@ func TestRestartStdin(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
@ -713,12 +713,12 @@ func TestRestartStdin(t *testing.T) {
}
func TestUser(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
// Default user must be root
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"id"},
},
"",
@ -726,7 +726,7 @@ func TestUser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@ -736,8 +736,8 @@ func TestUser(t *testing.T) {
}
// Set a username
container, _, err = runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err = daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"id"},
User: "root",
@ -747,7 +747,7 @@ func TestUser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err = container.Output()
if code := container.State.GetExitCode(); err != nil || code != 0 {
t.Fatal(err)
@ -757,8 +757,8 @@ func TestUser(t *testing.T) {
}
// Set a UID
container, _, err = runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err = daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"id"},
User: "0",
@ -768,7 +768,7 @@ func TestUser(t *testing.T) {
if code := container.State.GetExitCode(); err != nil || code != 0 {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err = container.Output()
if code := container.State.GetExitCode(); err != nil || code != 0 {
t.Fatal(err)
@ -778,8 +778,8 @@ func TestUser(t *testing.T) {
}
// Set a different user by uid
container, _, err = runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err = daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"id"},
User: "1",
@ -789,7 +789,7 @@ func TestUser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err = container.Output()
if err != nil {
t.Fatal(err)
@ -801,8 +801,8 @@ func TestUser(t *testing.T) {
}
// Set a different user by username
container, _, err = runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err = daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"id"},
User: "daemon",
@ -812,7 +812,7 @@ func TestUser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err = container.Output()
if code := container.State.GetExitCode(); err != nil || code != 0 {
t.Fatal(err)
@ -822,8 +822,8 @@ func TestUser(t *testing.T) {
}
// Test a wrong username
container, _, err = runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err = daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"id"},
User: "unknownuser",
@ -833,7 +833,7 @@ func TestUser(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err = container.Output()
if container.State.GetExitCode() == 0 {
t.Fatal("Starting container with wrong uid should fail but it passed.")
@ -841,11 +841,11 @@ func TestUser(t *testing.T) {
}
func TestMultipleContainers(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container1, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container1, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"sleep", "2"},
},
"",
@ -853,10 +853,10 @@ func TestMultipleContainers(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
defer daemon.Destroy(container1)
container2, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container2, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"sleep", "2"},
},
"",
@ -864,7 +864,7 @@ func TestMultipleContainers(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
defer daemon.Destroy(container2)
// Start both containers
if err := container1.Start(); err != nil {
@ -897,10 +897,10 @@ func TestMultipleContainers(t *testing.T) {
}
func TestStdin(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"cat"},
OpenStdin: true,
@ -910,7 +910,7 @@ func TestStdin(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
@ -942,10 +942,10 @@ func TestStdin(t *testing.T) {
}
func TestTty(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"cat"},
OpenStdin: true,
@ -955,7 +955,7 @@ func TestTty(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
stdin, err := container.StdinPipe()
if err != nil {
@ -989,17 +989,17 @@ func TestTty(t *testing.T) {
func TestEnv(t *testing.T) {
os.Setenv("TRUE", "false")
os.Setenv("TRICKY", "tri\ncky\n")
runtime := mkRuntime(t)
defer nuke(runtime)
config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
daemon := mkDaemon(t)
defer nuke(daemon)
config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(daemon).ID, "env"}, nil)
if err != nil {
t.Fatal(err)
}
container, _, err := runtime.Create(config, "")
container, _, err := daemon.Create(config, "")
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
@ -1041,11 +1041,11 @@ func TestEnv(t *testing.T) {
}
func TestEntrypoint(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Entrypoint: []string{"/bin/echo"},
Cmd: []string{"-n", "foobar"},
},
@ -1054,7 +1054,7 @@ func TestEntrypoint(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@ -1065,11 +1065,11 @@ func TestEntrypoint(t *testing.T) {
}
func TestEntrypointNoCmd(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Entrypoint: []string{"/bin/echo", "foobar"},
},
"",
@ -1077,7 +1077,7 @@ func TestEntrypointNoCmd(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@ -1088,11 +1088,11 @@ func TestEntrypointNoCmd(t *testing.T) {
}
func BenchmarkRunSequential(b *testing.B) {
runtime := mkRuntime(b)
defer nuke(runtime)
daemon := mkDaemon(b)
defer nuke(daemon)
for i := 0; i < b.N; i++ {
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"echo", "-n", "foo"},
},
"",
@ -1100,7 +1100,7 @@ func BenchmarkRunSequential(b *testing.B) {
if err != nil {
b.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
output, err := container.Output()
if err != nil {
b.Fatal(err)
@ -1108,15 +1108,15 @@ func BenchmarkRunSequential(b *testing.B) {
if string(output) != "foo" {
b.Fatalf("Unexpected output: %s", output)
}
if err := runtime.Destroy(container); err != nil {
if err := daemon.Destroy(container); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRunParallel(b *testing.B) {
runtime := mkRuntime(b)
defer nuke(runtime)
daemon := mkDaemon(b)
defer nuke(daemon)
var tasks []chan error
@ -1124,8 +1124,8 @@ func BenchmarkRunParallel(b *testing.B) {
complete := make(chan error)
tasks = append(tasks, complete)
go func(i int, complete chan error) {
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"echo", "-n", "foo"},
},
"",
@ -1134,7 +1134,7 @@ func BenchmarkRunParallel(b *testing.B) {
complete <- err
return
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
if err := container.Start(); err != nil {
complete <- err
return
@ -1146,7 +1146,7 @@ func BenchmarkRunParallel(b *testing.B) {
// if string(output) != "foo" {
// complete <- fmt.Errorf("Unexpected output: %v", string(output))
// }
if err := runtime.Destroy(container); err != nil {
if err := daemon.Destroy(container); err != nil {
complete <- err
return
}
@ -1176,7 +1176,7 @@ func tempDir(t *testing.T) string {
// Test for #1737
func TestCopyVolumeUidGid(t *testing.T) {
eng := NewTestEngine(t)
r := mkRuntimeFromEngine(eng, t)
r := mkDaemonFromEngine(eng, t)
defer r.Nuke()
// Add directory not owned by root
@ -1210,7 +1210,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
// Test for #1582
func TestCopyVolumeContent(t *testing.T) {
eng := NewTestEngine(t)
r := mkRuntimeFromEngine(eng, t)
r := mkDaemonFromEngine(eng, t)
defer r.Nuke()
// Put some content in a directory of a container and commit it
@ -1243,7 +1243,7 @@ func TestCopyVolumeContent(t *testing.T) {
func TestBindMounts(t *testing.T) {
eng := NewTestEngine(t)
r := mkRuntimeFromEngine(eng, t)
r := mkDaemonFromEngine(eng, t)
defer r.Nuke()
tmpDir := tempDir(t)
@ -1275,11 +1275,11 @@ func TestBindMounts(t *testing.T) {
// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
func TestRestartWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
},
@ -1288,7 +1288,7 @@ func TestRestartWithVolumes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
for key := range container.Config.Volumes {
if key != "/test" {
@ -1318,11 +1318,11 @@ func TestRestartWithVolumes(t *testing.T) {
}
func TestContainerNetwork(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
// If I change this to ping 8.8.8.8 it fails. Any idea why? - timthelion
Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
},
@ -1331,7 +1331,7 @@ func TestContainerNetwork(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
if err := container.Run(); err != nil {
t.Fatal(err)
}
@ -1342,11 +1342,11 @@ func TestContainerNetwork(t *testing.T) {
// Issue #4681
func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
NetworkDisabled: true,
},
@ -1355,7 +1355,7 @@ func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer daemon.Destroy(container)
if err := container.Run(); err != nil {
t.Fatal(err)
}
@ -1366,10 +1366,10 @@ func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show", "up"}, nil)
config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(daemon).ID, "ip", "addr", "show", "up"}, nil)
if err != nil {
t.Fatal(err)
}
@ -1384,9 +1384,9 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
c := daemon.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
t.Fatalf("Couldn't retrieve container %s from daemon", id)
}
stdout, err := c.StdoutPipe()
if err != nil {
@ -1419,36 +1419,36 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
func TestPrivilegedCanMknod(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, err := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
daemon := mkDaemonFromEngine(eng, t)
defer daemon.Nuke()
if output, err := runContainer(eng, daemon, []string{"--privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
t.Fatalf("Could not mknod into privileged container %s %v", output, err)
}
}
func TestPrivilegedCanMount(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
daemon := mkDaemonFromEngine(eng, t)
defer daemon.Nuke()
if output, _ := runContainer(eng, daemon, []string{"--privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
t.Fatal("Could not mount into privileged container")
}
}
func TestUnprivilegedCanMknod(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
daemon := mkDaemonFromEngine(eng, t)
defer daemon.Nuke()
if output, _ := runContainer(eng, daemon, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
t.Fatal("Couldn't mknod into secure container")
}
}
func TestUnprivilegedCannotMount(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
daemon := mkDaemonFromEngine(eng, t)
defer daemon.Nuke()
if output, _ := runContainer(eng, daemon, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
t.Fatal("Could mount into secure container")
}
}
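The container tests above all walk one lifecycle; a minimal sketch of it under the same assumptions (mkDaemon, nuke and GetTestImage are this suite's harness helpers; the test name is hypothetical):

func TestLifecycleSketch(t *testing.T) {
	daemon := mkDaemon(t)
	defer nuke(daemon)

	// Create never starts the container; it only registers it.
	container, _, err := daemon.Create(&runconfig.Config{
		Image: GetTestImage(daemon).ID,
		Cmd:   []string{"echo", "-n", "hello"},
	}, "")
	if err != nil {
		t.Fatal(err)
	}
	defer daemon.Destroy(container)

	// Output runs the container and returns its stdout once it exits.
	output, err := container.Output()
	if err != nil {
		t.Fatal(err)
	}
	if string(output) != "hello" {
		t.Fatalf("unexpected output: %s", output)
	}
}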

View file

@ -3,10 +3,10 @@ package docker
import (
"errors"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/graph"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"

View file

@ -3,11 +3,11 @@ package docker
import (
"bytes"
"fmt"
"github.com/dotcloud/docker/daemon"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
"io"
@ -38,8 +38,8 @@ const (
)
var (
// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
globalRuntime *runtime.Runtime
// FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted.
globalDaemon *daemon.Daemon
globalEngine *engine.Engine
globalHttpsEngine *engine.Engine
globalRogueHttpsEngine *engine.Engine
@ -47,17 +47,17 @@ var (
startGoroutines int
)
// FIXME: nuke() is deprecated by Runtime.Nuke()
func nuke(runtime *runtime.Runtime) error {
return runtime.Nuke()
// FIXME: nuke() is deprecated by Daemon.Nuke()
func nuke(daemon *daemon.Daemon) error {
return daemon.Nuke()
}
// FIXME: cleanup and nuke are redundant.
func cleanup(eng *engine.Engine, t *testing.T) error {
runtime := mkRuntimeFromEngine(eng, t)
for _, container := range runtime.List() {
daemon := mkDaemonFromEngine(eng, t)
for _, container := range daemon.List() {
container.Kill()
runtime.Destroy(container)
daemon.Destroy(container)
}
job := eng.Job("images")
images, err := job.Stdout.AddTable()
@ -119,11 +119,11 @@ func init() {
src.Close()
}
// Set up the base runtime, which will be duplicated for each test.
// Set up the base daemon, which will be duplicated for each test.
// (no tests are run directly in the base)
setupBaseImage()
// Create the "global runtime" with a long-running daemons for integration tests
// Create the "global daemon" with a long-running daemons for integration tests
spawnGlobalDaemon()
spawnLegitHttpsDaemon()
spawnRogueHttpsDaemon()
@ -146,14 +146,14 @@ func setupBaseImage() {
}
func spawnGlobalDaemon() {
if globalRuntime != nil {
utils.Debugf("Global runtime already exists. Skipping.")
if globalDaemon != nil {
utils.Debugf("Global daemon already exists. Skipping.")
return
}
t := log.New(os.Stderr, "", 0)
eng := NewTestEngine(t)
globalEngine = eng
globalRuntime = mkRuntimeFromEngine(eng, t)
globalDaemon = mkDaemonFromEngine(eng, t)
// Spawn a Daemon
go func() {
@ -235,8 +235,8 @@ func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
// FIXME: test that ImagePull(json=true) sends correct json output
func GetTestImage(runtime *runtime.Runtime) *image.Image {
imgs, err := runtime.Graph().Map()
func GetTestImage(daemon *daemon.Daemon) *image.Image {
imgs, err := daemon.Graph().Map()
if err != nil {
log.Fatalf("Unable to get the test image: %s", err)
}
@ -245,21 +245,21 @@ func GetTestImage(runtime *runtime.Runtime) *image.Image {
return image
}
}
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs)
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs)
return nil
}
func TestRuntimeCreate(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
func TestDaemonCreate(t *testing.T) {
daemon := mkDaemon(t)
defer nuke(daemon)
// Make sure we start with 0 containers
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
if len(daemon.List()) != 0 {
t.Errorf("Expected 0 containers, %v found", len(daemon.List()))
}
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"ls", "-al"},
},
"",
@ -269,56 +269,56 @@ func TestRuntimeCreate(t *testing.T) {
}
defer func() {
if err := runtime.Destroy(container); err != nil {
if err := daemon.Destroy(container); err != nil {
t.Error(err)
}
}()
// Make sure we can find the newly created container with List()
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
if len(daemon.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(daemon.List()))
}
// Make sure the container returned by List() is the right one
if runtime.List()[0].ID != container.ID {
t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
if daemon.List()[0].ID != container.ID {
t.Errorf("Unexpected container %v returned by List", daemon.List()[0])
}
// Make sure we can get the container with Get()
if runtime.Get(container.ID) == nil {
if daemon.Get(container.ID) == nil {
t.Errorf("Unable to get newly created container")
}
// Make sure it is the right container
if runtime.Get(container.ID) != container {
if daemon.Get(container.ID) != container {
t.Errorf("Get() returned the wrong container")
}
// Make sure Exists returns it as existing
if !runtime.Exists(container.ID) {
if !daemon.Exists(container.ID) {
t.Errorf("Exists() returned false for a newly created container")
}
// Test that conflict error displays correct details
testContainer, _, _ := runtime.Create(
testContainer, _, _ := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Cmd: []string{"ls", "-al"},
},
"conflictname",
)
if _, _, err := runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
}
// Make sure create with bad parameters returns an error
if _, _, err = runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, ""); err == nil {
t.Fatal("Builder.Create should throw an error when Cmd is missing")
}
if _, _, err := runtime.Create(
if _, _, err := daemon.Create(
&runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Cmd: []string{},
},
"",
@ -327,20 +327,20 @@ func TestRuntimeCreate(t *testing.T) {
}
config := &runconfig.Config{
Image: GetTestImage(runtime).ID,
Image: GetTestImage(daemon).ID,
Cmd: []string{"/bin/ls"},
PortSpecs: []string{"80"},
}
container, _, err = runtime.Create(config, "")
container, _, err = daemon.Create(config, "")
_, err = runtime.Commit(container, "testrepo", "testtag", "", "", config)
_, err = daemon.Commit(container, "testrepo", "testtag", "", "", config)
if err != nil {
t.Error(err)
}
// test expose 80:8000
container, warnings, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, warnings, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"ls", "-al"},
PortSpecs: []string{"80:8000"},
},
@ -355,83 +355,83 @@ func TestRuntimeCreate(t *testing.T) {
}
func TestDestroy(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"ls", "-al"},
}, "")
if err != nil {
t.Fatal(err)
}
// Destroy
if err := runtime.Destroy(container); err != nil {
if err := daemon.Destroy(container); err != nil {
t.Error(err)
}
// Make sure runtime.Exists() behaves correctly
if runtime.Exists("test_destroy") {
// Make sure daemon.Exists() behaves correctly
if daemon.Exists("test_destroy") {
t.Errorf("Exists() returned true")
}
// Make sure runtime.List() doesn't list the destroyed container
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
// Make sure daemon.List() doesn't list the destroyed container
if len(daemon.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(daemon.List()))
}
// Make sure runtime.Get() refuses to return the nonexistent container
if runtime.Get(container.ID) != nil {
// Make sure daemon.Get() refuses to return the nonexistent container
if daemon.Get(container.ID) != nil {
t.Errorf("Got a destroyed container")
}
// Test double destroy
if err := runtime.Destroy(container); err == nil {
if err := daemon.Destroy(container); err == nil {
// It should have failed
t.Errorf("Double destroy did not fail")
}
}
func TestGet(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container1)
container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
defer daemon.Destroy(container1)
container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container2)
container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
defer daemon.Destroy(container2)
container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container3)
container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
defer daemon.Destroy(container3)
if runtime.Get(container1.ID) != container1 {
t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
if daemon.Get(container1.ID) != container1 {
t.Errorf("Get(test1) returned %v while expecting %v", daemon.Get(container1.ID), container1)
}
if runtime.Get(container2.ID) != container2 {
t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
if daemon.Get(container2.ID) != container2 {
t.Errorf("Get(test2) returned %v while expecting %v", daemon.Get(container2.ID), container2)
}
if runtime.Get(container3.ID) != container3 {
t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
if daemon.Get(container3.ID) != container3 {
t.Errorf("Get(test3) returned %v while expecting %v", daemon.Get(container3.ID), container3)
}
}
func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *runtime.Container, string) {
func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) {
var (
err error
id string
strPort string
eng = NewTestEngine(t)
runtime = mkRuntimeFromEngine(eng, t)
daemon = mkDaemonFromEngine(eng, t)
port = 5554
p nat.Port
)
defer func() {
if err != nil {
runtime.Nuke()
daemon.Nuke()
}
}()
@ -459,7 +459,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *ru
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this relies on the undocumented behavior of runtime.Create
// FIXME: this relies on the undocumented behavior of daemon.Create
// which will return a nil error AND container if the exposed ports
// are invalid. That behavior should be fixed!
if id != "" {
@ -481,7 +481,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *ru
t.Fatal(err)
}
container := runtime.Get(id)
container := daemon.Get(id)
if container == nil {
t.Fatalf("Couldn't fetch test container %s", id)
}
@ -496,13 +496,13 @@ func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *ru
container.WaitTimeout(500 * time.Millisecond)
strPort = container.NetworkSettings.Ports[p][0].HostPort
return runtime, container, strPort
return daemon, container, strPort
}
// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocateTCPPortLocalhost(t *testing.T) {
runtime, container, port := startEchoServerContainer(t, "tcp")
defer nuke(runtime)
daemon, container, port := startEchoServerContainer(t, "tcp")
defer nuke(daemon)
defer container.Kill()
for i := 0; i != 10; i++ {
@ -550,8 +550,8 @@ func TestAllocateTCPPortLocalhost(t *testing.T) {
// Run a container with a UDP port allocated, and test that it can receive connections on localhost
func TestAllocateUDPPortLocalhost(t *testing.T) {
runtime, container, port := startEchoServerContainer(t, "udp")
defer nuke(runtime)
daemon, container, port := startEchoServerContainer(t, "udp")
defer nuke(daemon)
defer container.Kill()
conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
@ -586,15 +586,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
func TestRestore(t *testing.T) {
eng := NewTestEngine(t)
runtime1 := mkRuntimeFromEngine(eng, t)
defer runtime1.Nuke()
daemon1 := mkDaemonFromEngine(eng, t)
defer daemon1.Nuke()
// Create a container with one instance of docker
container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
defer runtime1.Destroy(container1)
container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t)
defer daemon1.Destroy(container1)
// Create a second container meant to be killed
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t)
defer daemon1.Destroy(container2)
// Start the container without blocking
if err := container2.Start(); err != nil {
@ -614,8 +614,8 @@ func TestRestore(t *testing.T) {
container2.State.SetRunning(42)
container2.ToDisk()
if len(runtime1.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime1.List()))
if len(daemon1.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(daemon1.List()))
}
if err := container1.Run(); err != nil {
t.Fatal(err)
@ -628,12 +628,12 @@ func TestRestore(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
eng = newTestEngine(t, false, eng.Root())
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
daemon2 := mkDaemonFromEngine(eng, t)
if len(daemon2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(daemon2.List()))
}
runningCount := 0
for _, c := range runtime2.List() {
for _, c := range daemon2.List() {
if c.State.IsRunning() {
t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
runningCount++
@ -642,7 +642,7 @@ func TestRestore(t *testing.T) {
if runningCount != 0 {
t.Fatalf("Expected 0 container alive, %d found", runningCount)
}
container3 := runtime2.Get(container1.ID)
container3 := daemon2.Get(container1.ID)
if container3 == nil {
t.Fatal("Unable to Get container")
}
@ -654,22 +654,22 @@ func TestRestore(t *testing.T) {
func TestDefaultContainerName(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name"))
containerID := container.ID
if container.Name != "/some_name" {
t.Fatalf("Expect /some_name got %s", container.Name)
}
if c := runtime.Get("/some_name"); c == nil {
if c := daemon.Get("/some_name"); c == nil {
t.Fatalf("Couldn't retrieve test container as /some_name")
} else if c.ID != containerID {
t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
@ -678,22 +678,22 @@ func TestDefaultContainerName(t *testing.T) {
func TestRandomContainerName(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
config, _, _, err := runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := runconfig.Parse([]string{GetTestImage(daemon).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createTestContainer(eng, config, t))
container := daemon.Get(createTestContainer(eng, config, t))
containerID := container.ID
if container.Name == "" {
t.Fatalf("Expected not empty container name")
}
if c := runtime.Get(container.Name); c == nil {
if c := daemon.Get(container.Name); c == nil {
log.Fatalf("Could not lookup container %s by its name", container.Name)
} else if c.ID != containerID {
log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
@ -702,8 +702,8 @@ func TestRandomContainerName(t *testing.T) {
func TestContainerNameValidation(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
for _, test := range []struct {
Name string
@ -733,13 +733,13 @@ func TestContainerNameValidation(t *testing.T) {
t.Fatal(err)
}
container := runtime.Get(shortID)
container := daemon.Get(shortID)
if container.Name != "/"+test.Name {
t.Fatalf("Expect /%s got %s", test.Name, container.Name)
}
if c := runtime.Get("/" + test.Name); c == nil {
if c := daemon.Get("/" + test.Name); c == nil {
t.Fatalf("Couldn't retrieve test container as /%s", test.Name)
} else if c.ID != container.ID {
t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID)
@ -750,17 +750,17 @@ func TestContainerNameValidation(t *testing.T) {
func TestLinkChildContainer(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
webapp, err := runtime.GetByName("/webapp")
webapp, err := daemon.GetByName("/webapp")
if err != nil {
t.Fatal(err)
}
@ -769,19 +769,19 @@ func TestLinkChildContainer(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err = runconfig.Parse([]string{GetTestImage(daemon).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
childContainer := runtime.Get(createTestContainer(eng, config, t))
childContainer := daemon.Get(createTestContainer(eng, config, t))
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)
}
// Get the child by its new name
db, err := runtime.GetByName("/webapp/db")
db, err := daemon.GetByName("/webapp/db")
if err != nil {
t.Fatal(err)
}
@ -792,17 +792,17 @@ func TestLinkChildContainer(t *testing.T) {
func TestGetAllChildren(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
daemon := mkDaemonFromEngine(eng, t)
defer nuke(daemon)
config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
webapp, err := runtime.GetByName("/webapp")
webapp, err := daemon.GetByName("/webapp")
if err != nil {
t.Fatal(err)
}
@ -816,13 +816,13 @@ func TestGetAllChildren(t *testing.T) {
t.Fatal(err)
}
childContainer := runtime.Get(createTestContainer(eng, config, t))
childContainer := daemon.Get(createTestContainer(eng, config, t))
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)
}
children, err := runtime.Children("/webapp")
children, err := daemon.Children("/webapp")
if err != nil {
t.Fatal(err)
}
@ -845,11 +845,11 @@ func TestGetAllChildren(t *testing.T) {
}
func TestDestroyWithInitLayer(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
daemon := mkDaemon(t)
defer nuke(daemon)
container, _, err := runtime.Create(&runconfig.Config{
Image: GetTestImage(runtime).ID,
container, _, err := daemon.Create(&runconfig.Config{
Image: GetTestImage(daemon).ID,
Cmd: []string{"ls", "-al"},
}, "")
@ -857,21 +857,21 @@ func TestDestroyWithInitLayer(t *testing.T) {
t.Fatal(err)
}
// Destroy
if err := runtime.Destroy(container); err != nil {
if err := daemon.Destroy(container); err != nil {
t.Fatal(err)
}
// Make sure runtime.Exists() behaves correctly
if runtime.Exists("test_destroy") {
// Make sure daemon.Exists() behaves correctly
if daemon.Exists("test_destroy") {
t.Fatalf("Exists() returned true")
}
// Make sure runtime.List() doesn't list the destroyed container
if len(runtime.List()) != 0 {
t.Fatalf("Expected 0 container, %v found", len(runtime.List()))
// Make sure daemon.List() doesn't list the destroyed container
if len(daemon.List()) != 0 {
t.Fatalf("Expected 0 container, %v found", len(daemon.List()))
}
driver := runtime.Graph().Driver()
driver := daemon.Graph().Driver()
// Make sure that the container does not exist in the driver
if _, err := driver.Get(container.ID); err == nil {

View file

@ -11,7 +11,7 @@ import (
func TestCreateRm(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
@ -58,7 +58,7 @@ func TestCreateRm(t *testing.T) {
func TestCreateNumberHostname(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config, _, _, err := runconfig.Parse([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil)
if err != nil {
@ -70,7 +70,7 @@ func TestCreateNumberHostname(t *testing.T) {
func TestCreateNumberUsername(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config, _, _, err := runconfig.Parse([]string{"-u", "1002", unitTestImageID, "echo test"}, nil)
if err != nil {
@ -82,7 +82,7 @@ func TestCreateNumberUsername(t *testing.T) {
func TestCreateRmVolumes(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config, hostConfig, _, err := runconfig.Parse([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil)
if err != nil {
@ -142,7 +142,7 @@ func TestCreateRmVolumes(t *testing.T) {
func TestCreateRmRunning(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config, hostConfig, _, err := runconfig.Parse([]string{"--name", "foo", unitTestImageID, "sleep 300"}, nil)
if err != nil {
@ -216,7 +216,7 @@ func TestCreateRmRunning(t *testing.T) {
func TestCommit(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config, _, _, err := runconfig.Parse([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil {
@ -236,7 +236,7 @@ func TestCommit(t *testing.T) {
func TestMergeConfigOnCommit(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
runtime := mkDaemonFromEngine(eng, t)
defer runtime.Nuke()
container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t)
@ -294,7 +294,7 @@ func TestMergeConfigOnCommit(t *testing.T) {
func TestRestartKillWait(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := mkRuntimeFromEngine(eng, t)
runtime := mkDaemonFromEngine(eng, t)
defer runtime.Nuke()
config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
@ -360,7 +360,7 @@ func TestRestartKillWait(t *testing.T) {
func TestCreateStartRestartStopStartKillRm(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
if err != nil {
@ -439,7 +439,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
func TestRunWithTooLowMemoryLimit(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
job := eng.Job("create")
@ -457,7 +457,7 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) {
func TestRmi(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
initialImages := getAllImages(eng, t)
@ -542,7 +542,7 @@ func TestRmi(t *testing.T) {
func TestImagesFilter(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil {
t.Fatal(err)
@ -584,7 +584,7 @@ func TestImagesFilter(t *testing.T) {
// FIXME: 'insert' is deprecated and should be removed in a future version.
func TestImageInsert(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
// bad image name fails
@ -606,7 +606,7 @@ func TestImageInsert(t *testing.T) {
func TestListContainers(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
config := runconfig.Config{
Image: unitTestImageID,
@ -721,7 +721,7 @@ func assertContainerList(srv *server.Server, all bool, limit int, since, before
// container
func TestDeleteTagWithExistingContainers(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
defer nuke(mkDaemonFromEngine(eng, t))
srv := mkServerFromEngine(eng, t)

View file

@ -8,7 +8,7 @@ import (
func TestServerListOrderedImagesByCreationDate(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
if err := generateImage("", eng); err != nil {
t.Fatal(err)
@ -23,7 +23,7 @@ func TestServerListOrderedImagesByCreationDate(t *testing.T) {
func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
defer mkDaemonFromEngine(eng, t).Nuke()
err := generateImage("bar", eng)
if err != nil {

View file

@ -15,9 +15,9 @@ import (
"time"
"github.com/dotcloud/docker/builtins"
"github.com/dotcloud/docker/daemon"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/server"
"github.com/dotcloud/docker/utils"
)
@ -26,11 +26,11 @@ import (
// It has to be named XXX_test.go, apparently, in order to access private functions
// from other XXX_test.go functions.
// Create a temporary runtime suitable for unit testing.
// Create a temporary daemon suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f utils.Fataler) *runtime.Runtime {
func mkDaemon(f utils.Fataler) *daemon.Daemon {
eng := newTestEngine(f, false, "")
return mkRuntimeFromEngine(eng, f)
return mkDaemonFromEngine(eng, f)
// FIXME:
// [...]
// Mtu: docker.GetDefaultNetworkMtu(),
@ -116,8 +116,8 @@ func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) {
}
func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) {
runtime := mkRuntimeFromEngine(eng, t)
if c := runtime.Get(id); c != nil {
daemon := mkDaemonFromEngine(eng, t)
if c := daemon.Get(id); c != nil {
t.Fatal(fmt.Errorf("Container %s should not exist", id))
}
}
@ -140,9 +140,9 @@ func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
}
}
func getContainer(eng *engine.Engine, id string, t utils.Fataler) *runtime.Container {
runtime := mkRuntimeFromEngine(eng, t)
c := runtime.Get(id)
func getContainer(eng *engine.Engine, id string, t utils.Fataler) *daemon.Container {
daemon := mkDaemonFromEngine(eng, t)
c := daemon.Get(id)
if c == nil {
t.Fatal(fmt.Errorf("No such container: %s", id))
}
@ -161,16 +161,16 @@ func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *server.Server {
return srv
}
func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *runtime.Runtime {
iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
if iRuntime == nil {
panic("Legacy runtime field not set in engine")
func mkDaemonFromEngine(eng *engine.Engine, t utils.Fataler) *daemon.Daemon {
iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon")
if iDaemon == nil {
panic("Legacy daemon field not set in engine")
}
runtime, ok := iRuntime.(*runtime.Runtime)
daemon, ok := iDaemon.(*daemon.Daemon)
if !ok {
panic("Legacy runtime field in engine does not cast to *runtime.Runtime")
panic("Legacy daemon field in engine does not cast to *daemon.Daemon")
}
return runtime
return daemon
}
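To illustrate the renamed helpers in use, a minimal test sketch (the test name and container ID are hypothetical; NewTestEngine, mkDaemonFromEngine, nuke, and daemon.Get are the identifiers shown above):

func TestDaemonLookupSketch(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t) // recovers *daemon.Daemon via "httpapi.daemon"
	defer nuke(daemon)
	// Lookups that used to go through *runtime.Runtime now go through
	// *daemon.Daemon; the method set is unchanged by the rename.
	if c := daemon.Get("no-such-id"); c != nil { // hypothetical ID
		t.Fatalf("did not expect to find container %s", c.ID)
	}
}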
func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engine {
@ -245,12 +245,12 @@ func readFile(src string, t *testing.T) (content string) {
return string(data)
}
// Create a test container from the given runtime `r` and run arguments `args`.
// Create a test container from the given daemon `r` and run arguments `args`.
// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *runtime.Runtime, args []string, t *testing.T) (*runtime.Container, *runconfig.HostConfig, error) {
func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) {
config, hc, _, err := runconfig.Parse(args, nil)
defer func() {
if err != nil && t != nil {
@ -281,7 +281,7 @@ func mkContainer(r *runtime.Runtime, args []string, t *testing.T) (*runtime.Cont
// and return its standard output as a string.
// The image name (e.g. the XXX in []string{"-i", "-t", "XXX", "bash"}) is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(eng *engine.Engine, r *runtime.Runtime, args []string, t *testing.T) (output string, err error) {
func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)

View file

@ -11,7 +11,7 @@ func displayFdGoroutines(t *testing.T) {
}
func TestFinal(t *testing.T) {
nuke(globalRuntime)
nuke(globalDaemon)
t.Logf("Start Fds: %d, Start Goroutines: %d", startFds, startGoroutines)
displayFdGoroutines(t)
}

View file

@ -1,7 +0,0 @@
// +build !exclude_graphdriver_btrfs
package runtime
import (
_ "github.com/dotcloud/docker/runtime/graphdriver/btrfs"
)

View file

@ -1,7 +0,0 @@
// +build !exclude_graphdriver_devicemapper
package runtime
import (
_ "github.com/dotcloud/docker/runtime/graphdriver/devmapper"
)
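Both build-tag files are deleted here only because they move: the rename presumably recreates them under the daemon package with updated import paths. A reconstructed sketch of the devicemapper counterpart (exact file placement assumed):

// +build !exclude_graphdriver_devicemapper

package daemon

import (
	_ "github.com/dotcloud/docker/daemon/graphdriver/devmapper"
)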

View file

@ -7,10 +7,10 @@ import (
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@ -35,8 +35,8 @@ type BuildFile interface {
}
type buildFile struct {
runtime *runtime.Runtime
srv *Server
daemon *daemon.Daemon
srv *Server
image string
maintainer string
@ -64,8 +64,8 @@ type buildFile struct {
func (b *buildFile) clearTmp(containers map[string]struct{}) {
for c := range containers {
tmp := b.runtime.Get(c)
if err := b.runtime.Destroy(tmp); err != nil {
tmp := b.daemon.Get(c)
if err := b.daemon.Destroy(tmp); err != nil {
fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
} else {
delete(containers, c)
@ -75,9 +75,9 @@ func (b *buildFile) clearTmp(containers map[string]struct{}) {
}
func (b *buildFile) CmdFrom(name string) error {
image, err := b.runtime.Repositories().LookupImage(name)
image, err := b.daemon.Repositories().LookupImage(name)
if err != nil {
if b.runtime.Graph().IsNotExist(err) {
if b.daemon.Graph().IsNotExist(err) {
remote, tag := utils.ParseRepositoryTag(name)
job := b.srv.Eng.Job("pull", remote, tag)
job.SetenvBool("json", b.sf.Json())
@ -87,7 +87,7 @@ func (b *buildFile) CmdFrom(name string) error {
if err := job.Run(); err != nil {
return err
}
image, err = b.runtime.Repositories().LookupImage(name)
image, err = b.daemon.Repositories().LookupImage(name)
if err != nil {
return err
}
@ -101,7 +101,7 @@ func (b *buildFile) CmdFrom(name string) error {
b.config = image.Config
}
if b.config.Env == nil || len(b.config.Env) == 0 {
b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv)
b.config.Env = append(b.config.Env, "HOME=/", "PATH="+daemon.DefaultPathEnv)
}
// Process ONBUILD triggers if they exist
if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
@ -383,7 +383,7 @@ func (b *buildFile) checkPathForAddition(orig string) error {
return nil
}
func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
func (b *buildFile) addContext(container *daemon.Container, orig, dest string, remote bool) error {
var (
err error
origPath = path.Join(b.contextPath, orig)
@ -599,7 +599,7 @@ func (b *buildFile) CmdAdd(args string) error {
}
// Create the container and start it
container, _, err := b.runtime.Create(b.config, "")
container, _, err := b.daemon.Create(b.config, "")
if err != nil {
return err
}
@ -621,14 +621,14 @@ func (b *buildFile) CmdAdd(args string) error {
return nil
}
func (b *buildFile) create() (*runtime.Container, error) {
func (b *buildFile) create() (*daemon.Container, error) {
if b.image == "" {
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
}
b.config.Image = b.image
// Create the container and start it
c, _, err := b.runtime.Create(b.config, "")
c, _, err := b.daemon.Create(b.config, "")
if err != nil {
return nil, err
}
@ -642,7 +642,7 @@ func (b *buildFile) create() (*runtime.Container, error) {
return c, nil
}
func (b *buildFile) run(c *runtime.Container) error {
func (b *buildFile) run(c *daemon.Container) error {
var errCh chan error
if b.verbose {
@ -693,7 +693,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
return nil
}
container, warnings, err := b.runtime.Create(b.config, "")
container, warnings, err := b.daemon.Create(b.config, "")
if err != nil {
return err
}
@ -709,7 +709,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
}
defer container.Unmount()
}
container := b.runtime.Get(id)
container := b.daemon.Get(id)
if container == nil {
return fmt.Errorf("An error occured while creating the container")
}
@ -718,7 +718,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
autoConfig := *b.config
autoConfig.Cmd = autoCmd
// Commit the container
image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig)
image, err := b.daemon.Commit(container, "", "", "", b.maintainer, &autoConfig)
if err != nil {
return err
}
@ -823,7 +823,7 @@ func stripComments(raw []byte) string {
func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, configFile *registry.ConfigFile) BuildFile {
return &buildFile{
runtime: srv.runtime,
daemon: srv.daemon,
srv: srv,
config: &runconfig.Config{},
outStream: outStream,
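A hedged usage sketch of the renamed builder: srv, sf, and configFile are assumed to exist, "base-image" is a placeholder, and os/log are assumed imported; only the NewBuildFile signature and CmdFrom come from this diff.

// Sketch only: wire a buildFile against the daemon-backed server.
b := NewBuildFile(srv,
	os.Stdout, os.Stderr, // outStream, errStream
	true,                 // verbose
	true,                 // utilizeCache
	false,                // rm
	os.Stdout,            // outOld
	sf, configFile)
if err := b.CmdFrom("base-image"); err != nil { // resolves via b.daemon.Repositories()
	log.Fatal(err)
}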

View file

@ -5,6 +5,7 @@ import (
"fmt"
"github.com/dotcloud/docker/api"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon"
"github.com/dotcloud/docker/daemonconfig"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
@ -14,7 +15,6 @@ import (
"github.com/dotcloud/docker/pkg/signal"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@ -43,9 +43,9 @@ func InitServer(job *engine.Job) engine.Status {
if err != nil {
return job.Error(err)
}
if srv.runtime.Config().Pidfile != "" {
if srv.daemon.Config().Pidfile != "" {
job.Logf("Creating pidfile")
if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil {
if err := utils.CreatePidFile(srv.daemon.Config().Pidfile); err != nil {
// FIXME: do we need fatal here instead of returning a job error?
log.Fatal(err)
}
@ -65,7 +65,7 @@ func InitServer(job *engine.Job) engine.Status {
interruptCount++
// Initiate the cleanup only once
if interruptCount == 1 {
utils.RemovePidFile(srv.runtime.Config().Pidfile)
utils.RemovePidFile(srv.daemon.Config().Pidfile)
srv.Close()
} else {
return
@ -80,7 +80,7 @@ func InitServer(job *engine.Job) engine.Status {
}
}()
job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
// FIXME: 'insert' is deprecated and should be removed in a future version.
for name, handler := range map[string]engine.Handler{
@ -172,13 +172,13 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
}
}
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
if err := container.Kill(); err != nil {
return job.Errorf("Cannot kill container %s: %s", name, err)
}
srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image))
srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
} else {
// Otherwise, just send the requested signal
if err := container.KillSig(int(sig)); err != nil {
@ -294,7 +294,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
return job.Errorf("Usage: %s container_id", job.Name)
}
name := job.Args[0]
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
data, err := container.Export()
if err != nil {
return job.Errorf("%s: %s", name, err)
@ -306,7 +306,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
return job.Errorf("%s: %s", name, err)
}
// FIXME: factor job-specific LogEvent to engine.Job.Run()
srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image))
srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
return engine.StatusOK
}
return job.Errorf("No such container: %s", name)
@ -331,7 +331,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {
utils.Debugf("Serializing %s", name)
rootRepo, err := srv.runtime.Repositories().Get(name)
rootRepo, err := srv.daemon.Repositories().Get(name)
if err != nil {
return job.Error(err)
}
@ -510,7 +510,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
return job.Error(err)
}
if repoName != "" {
srv.runtime.Repositories().Set(repoName, tag, id, false)
srv.daemon.Repositories().Set(repoName, tag, id, false)
}
return engine.StatusOK
}
@ -571,7 +571,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
for imageName, tagMap := range repositories {
for tag, address := range tagMap {
if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil {
if err := srv.daemon.Repositories().Set(imageName, tag, address, true); err != nil {
return job.Error(err)
}
}
@ -604,13 +604,13 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
return err
}
if img.Parent != "" {
if !srv.runtime.Graph().Exists(img.Parent) {
if !srv.daemon.Graph().Exists(img.Parent) {
if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
return err
}
}
}
if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil {
if err := srv.daemon.Graph().Register(imageJson, layer, img); err != nil {
return err
}
}
@ -668,7 +668,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
sf := utils.NewStreamFormatter(job.GetenvBool("json"))
out := utils.NewWriteFlusher(job.Stdout)
img, err := srv.runtime.Repositories().LookupImage(name)
img, err := srv.daemon.Repositories().LookupImage(name)
if err != nil {
return job.Error(err)
}
@ -679,12 +679,12 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
}
defer file.Body.Close()
config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig())
config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.daemon.SystemConfig())
if err != nil {
return job.Error(err)
}
c, _, err := srv.runtime.Create(config, "")
c, _, err := srv.daemon.Create(config, "")
if err != nil {
return job.Error(err)
}
@ -693,7 +693,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
return job.Error(err)
}
// FIXME: Handle custom repo, tag comment, author
img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
img, err = srv.daemon.Commit(c, "", "", img.Comment, img.Author, nil)
if err != nil {
out.Write(sf.FormatError(err))
return engine.StatusErr
@ -703,7 +703,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
}
func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
images, _ := srv.runtime.Graph().Map()
images, _ := srv.daemon.Graph().Map()
if images == nil {
return engine.StatusOK
}
@ -727,7 +727,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
reporefs := make(map[string][]string)
for name, repository := range srv.runtime.Repositories().Repositories {
for name, repository := range srv.daemon.Repositories().Repositories {
for tag, id := range repository {
reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
}
@ -746,22 +746,22 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
err error
)
if job.GetenvBool("all") {
allImages, err = srv.runtime.Graph().Map()
allImages, err = srv.daemon.Graph().Map()
} else {
allImages, err = srv.runtime.Graph().Heads()
allImages, err = srv.daemon.Graph().Heads()
}
if err != nil {
return job.Error(err)
}
lookup := make(map[string]*engine.Env)
for name, repository := range srv.runtime.Repositories().Repositories {
for name, repository := range srv.daemon.Repositories().Repositories {
if job.Getenv("filter") != "" {
if match, _ := path.Match(job.Getenv("filter"), name); !match {
continue
}
}
for tag, id := range repository {
image, err := srv.runtime.Graph().Get(id)
image, err := srv.daemon.Graph().Get(id)
if err != nil {
log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
continue
@ -811,7 +811,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
}
func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
images, _ := srv.runtime.Graph().Map()
images, _ := srv.daemon.Graph().Map()
var imgcount int
if images == nil {
imgcount = 0
@ -826,22 +826,22 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
initPath := utils.DockerInitPath("")
if initPath == "" {
// if that fails, we'll just return the path from the runtime
initPath = srv.runtime.SystemInitPath()
// if that fails, we'll just return the path from the daemon
initPath = srv.daemon.SystemInitPath()
}
v := &engine.Env{}
v.SetInt("Containers", len(srv.runtime.List()))
v.SetInt("Containers", len(srv.daemon.List()))
v.SetInt("Images", imgcount)
v.Set("Driver", srv.runtime.GraphDriver().String())
v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status())
v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit)
v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit)
v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled)
v.Set("Driver", srv.daemon.GraphDriver().String())
v.SetJson("DriverStatus", srv.daemon.GraphDriver().Status())
v.SetBool("MemoryLimit", srv.daemon.SystemConfig().MemoryLimit)
v.SetBool("SwapLimit", srv.daemon.SystemConfig().SwapLimit)
v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled)
v.SetBool("Debug", os.Getenv("DEBUG") != "")
v.SetInt("NFd", utils.GetTotalUsedFds())
v.SetInt("NGoroutines", goruntime.NumGoroutine())
v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name())
v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
v.SetInt("NEventsListener", len(srv.listeners))
v.Set("KernelVersion", kernelVersion)
v.Set("IndexServerAddress", registry.IndexServerAddress())
@ -875,13 +875,13 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
return job.Errorf("Usage: %s IMAGE", job.Name)
}
name := job.Args[0]
foundImage, err := srv.runtime.Repositories().LookupImage(name)
foundImage, err := srv.daemon.Repositories().LookupImage(name)
if err != nil {
return job.Error(err)
}
lookupMap := make(map[string][]string)
for name, repository := range srv.runtime.Repositories().Repositories {
for name, repository := range srv.daemon.Repositories().Repositories {
for tag, id := range repository {
// If the ID already has a reverse lookup, do not update it unless for "latest"
if _, exists := lookupMap[id]; !exists {
@ -922,11 +922,11 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
psArgs = job.Args[1]
}
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
if !container.State.IsRunning() {
return job.Errorf("Container %s is not running", name)
}
pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID)
pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
if err != nil {
return job.Error(err)
}
@ -984,7 +984,7 @@ func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
return job.Errorf("Usage: %s CONTAINER", job.Name)
}
name := job.Args[0]
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
outs := engine.NewTable("", 0)
changes, err := container.Changes()
if err != nil {
@ -1019,27 +1019,27 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
outs := engine.NewTable("Created", 0)
names := map[string][]string{}
srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
names[e.ID()] = append(names[e.ID()], p)
return nil
}, -1)
var beforeCont, sinceCont *runtime.Container
var beforeCont, sinceCont *daemon.Container
if before != "" {
beforeCont = srv.runtime.Get(before)
beforeCont = srv.daemon.Get(before)
if beforeCont == nil {
return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
}
}
if since != "" {
sinceCont = srv.runtime.Get(since)
sinceCont = srv.daemon.Get(since)
if sinceCont == nil {
return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
}
}
for _, container := range srv.runtime.List() {
for _, container := range srv.daemon.List() {
if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
continue
}
@ -1061,7 +1061,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
out := &engine.Env{}
out.Set("Id", container.ID)
out.SetList("Names", names[container.ID])
out.Set("Image", srv.runtime.Repositories().ImageName(container.Image))
out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
if len(container.Args) > 0 {
args := []string{}
for _, arg := range container.Args {
@ -1104,7 +1104,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
}
name := job.Args[0]
container := srv.runtime.Get(name)
container := srv.daemon.Get(name)
if container == nil {
return job.Errorf("No such container: %s", name)
}
@ -1118,7 +1118,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
return job.Error(err)
}
img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
if err != nil {
return job.Error(err)
}
@ -1134,7 +1134,7 @@ func (srv *Server) ImageTag(job *engine.Job) engine.Status {
if len(job.Args) == 3 {
tag = job.Args[2]
}
if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
return job.Error(err)
}
return engine.StatusOK
@ -1159,7 +1159,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
}
defer srv.poolRemove("pull", "layer:"+id)
if !srv.runtime.Graph().Exists(id) {
if !srv.daemon.Graph().Exists(id) {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
var (
imgJSON []byte
@ -1197,7 +1197,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
return err
}
defer layer.Close()
if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
if err := srv.daemon.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
return err
}
@ -1332,11 +1332,11 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
if askedTag != "" && tag != askedTag {
continue
}
if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil {
if err := srv.daemon.Repositories().Set(localName, tag, id, true); err != nil {
return err
}
}
if err := srv.runtime.Repositories().Save(); err != nil {
if err := srv.daemon.Repositories().Save(); err != nil {
return err
}
@ -1467,7 +1467,7 @@ func (srv *Server) getImageList(localRepo map[string]string, requestedTag string
tagsByImage[id] = append(tagsByImage[id], tag)
for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
for img, err := srv.daemon.Graph().Get(id); img != nil; img, err = img.GetParent() {
if err != nil {
return nil, nil, err
}
@ -1582,7 +1582,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
out = utils.NewWriteFlusher(out)
jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json"))
jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
if err != nil {
return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
}
@ -1601,7 +1601,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
return "", err
}
layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
if err != nil {
return "", fmt.Errorf("Failed to generate layer archive: %s", err)
}
@ -1656,7 +1656,7 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
return job.Error(err)
}
img, err := srv.runtime.Graph().Get(localName)
img, err := srv.daemon.Graph().Get(localName)
img, err := srv.daemon.Graph().Get(localName)
r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
if err2 != nil {
return job.Error(err2)
@ -1665,11 +1665,11 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
if err != nil {
reposLen := 1
if tag == "" {
reposLen = len(srv.runtime.Repositories().Repositories[localName])
reposLen = len(srv.daemon.Repositories().Repositories[localName])
}
job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
// If it fails, try to get the repository
if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
return job.Error(err)
}
@ -1725,13 +1725,13 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
defer progressReader.Close()
archive = progressReader
}
img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
if err != nil {
return job.Error(err)
}
// Optionally register the image at REPO/TAG
if repo != "" {
if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
return job.Error(err)
}
}
@ -1750,17 +1750,17 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
if config.Memory != 0 && config.Memory < 524288 {
return job.Errorf("Minimum memory limit allowed is 512k")
}
if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit {
if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
config.Memory = 0
}
if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit {
if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
container, buildWarnings, err := srv.runtime.Create(config, name)
container, buildWarnings, err := srv.daemon.Create(config, name)
if err != nil {
if srv.runtime.Graph().IsNotExist(err) {
if srv.daemon.Graph().IsNotExist(err) {
_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
tag = graph.DEFAULTTAG
@ -1769,11 +1769,11 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
}
return job.Error(err)
}
if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled {
if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
job.Errorf("IPv4 forwarding is disabled.\n")
}
srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image))
// FIXME: this is necessary because runtime.Create might return a nil container
srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
// FIXME: this is necessary because daemon.Create might return a nil container
// with a non-nil error. This should not happen! Once it's fixed we
// can remove this workaround.
if container != nil {
@ -1796,11 +1796,11 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
if job.EnvExists("t") {
t = job.GetenvInt("t")
}
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
if err := container.Restart(int(t)); err != nil {
return job.Errorf("Cannot restart container %s: %s\n", name, err)
}
srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image))
srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
} else {
return job.Errorf("No such container: %s\n", name)
}
@ -1816,13 +1816,13 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
removeLink := job.GetenvBool("removeLink")
forceRemove := job.GetenvBool("forceRemove")
container := srv.runtime.Get(name)
container := srv.daemon.Get(name)
if removeLink {
if container == nil {
return job.Errorf("No such link: %s", name)
}
name, err := runtime.GetFullContainerName(name)
name, err := daemon.GetFullContainerName(name)
if err != nil {
job.Error(err)
}
@ -1830,17 +1830,17 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
if parent == "/" {
return job.Errorf("Conflict, cannot remove the default name of the container")
}
pe := srv.runtime.ContainerGraph().Get(parent)
pe := srv.daemon.ContainerGraph().Get(parent)
if pe == nil {
return job.Errorf("Cannot get parent %s for name %s", parent, name)
}
parentContainer := srv.runtime.Get(pe.ID())
parentContainer := srv.daemon.Get(pe.ID())
if parentContainer != nil {
parentContainer.DisableLink(n)
}
if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
return job.Error(err)
}
return engine.StatusOK
@ -1856,16 +1856,16 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
}
}
if err := srv.runtime.Destroy(container); err != nil {
if err := srv.daemon.Destroy(container); err != nil {
return job.Errorf("Cannot destroy container %s: %s", name, err)
}
srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))
srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
if removeVolume {
var (
volumes = make(map[string]struct{})
binds = make(map[string]struct{})
usedVolumes = make(map[string]*runtime.Container)
usedVolumes = make(map[string]*daemon.Container)
)
// the volume id is always the base of the path
@ -1903,7 +1903,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
}
// Retrieve all volumes from all remaining containers
for _, container := range srv.runtime.List() {
for _, container := range srv.daemon.List() {
for _, containerVolumeId := range container.Volumes {
containerVolumeId = getVolumeId(containerVolumeId)
usedVolumes[containerVolumeId] = container
@ -1916,7 +1916,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
continue
}
if err := srv.runtime.Volumes().Delete(volumeId); err != nil {
if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
}
}
@ -1938,9 +1938,9 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
tag = graph.DEFAULTTAG
}
img, err := srv.runtime.Repositories().LookupImage(name)
img, err := srv.daemon.Repositories().LookupImage(name)
if err != nil {
if r, _ := srv.runtime.Repositories().Get(repoName); r != nil {
if r, _ := srv.daemon.Repositories().Get(repoName); r != nil {
return fmt.Errorf("No such image: %s:%s", repoName, tag)
}
return fmt.Errorf("No such image: %s", name)
@ -1951,14 +1951,14 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
tag = ""
}
byParents, err := srv.runtime.Graph().ByParent()
byParents, err := srv.daemon.Graph().ByParent()
if err != nil {
return err
}
// If deleting by ID, check whether the ID belongs to only one repository
if repoName == "" {
for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] {
for _, repoAndTag := range srv.daemon.Repositories().ByID()[img.ID] {
parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
if repoName == "" || repoName == parsedRepo {
repoName = parsedRepo
@ -1981,7 +1981,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
// Untag the current image
for _, tag := range tags {
tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag)
tagDeleted, err := srv.daemon.Repositories().Delete(repoName, tag)
if err != nil {
return err
}
@ -1992,16 +1992,16 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
srv.LogEvent("untag", img.ID, "")
}
}
tags = srv.runtime.Repositories().ByID()[img.ID]
tags = srv.daemon.Repositories().ByID()[img.ID]
if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
if len(byParents[img.ID]) == 0 {
if err := srv.canDeleteImage(img.ID); err != nil {
return err
}
if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil {
if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil {
return err
}
if err := srv.runtime.Graph().Delete(img.ID); err != nil {
if err := srv.daemon.Graph().Delete(img.ID); err != nil {
return err
}
out := &engine.Env{}
@ -2039,8 +2039,8 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
}
func (srv *Server) canDeleteImage(imgID string) error {
for _, container := range srv.runtime.List() {
parent, err := srv.runtime.Repositories().LookupImage(container.Image)
for _, container := range srv.daemon.List() {
parent, err := srv.daemon.Repositories().LookupImage(container.Image)
if err != nil {
return err
}
@ -2059,7 +2059,7 @@ func (srv *Server) canDeleteImage(imgID string) error {
func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
// Retrieve all images
images, err := srv.runtime.Graph().Map()
images, err := srv.daemon.Graph().Map()
if err != nil {
return nil, err
}
@ -2076,7 +2076,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
// Loop on the children of the given image and check the config
var match *image.Image
for elem := range imageMap[imgID] {
img, err := srv.runtime.Graph().Get(elem)
img, err := srv.daemon.Graph().Get(elem)
if err != nil {
return nil, err
}
@ -2089,8 +2089,8 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
return match, nil
}
func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error {
runtime := srv.runtime
func (srv *Server) RegisterLinks(container *daemon.Container, hostConfig *runconfig.HostConfig) error {
daemon := srv.daemon
if hostConfig != nil && hostConfig.Links != nil {
for _, l := range hostConfig.Links {
@ -2098,19 +2098,19 @@ func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runco
if err != nil {
return err
}
child, err := srv.runtime.GetByName(parts["name"])
child, err := srv.daemon.GetByName(parts["name"])
if err != nil {
return err
}
if child == nil {
return fmt.Errorf("Could not get container for %s", parts["name"])
}
if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
return err
}
}
// After we load all the links into the runtime
// After we load all the links into the daemon
// set them to nil on the hostconfig
hostConfig.Links = nil
if err := container.WriteHostConfig(); err != nil {
@ -2126,8 +2126,8 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
}
var (
name = job.Args[0]
runtime = srv.runtime
container = runtime.Get(name)
daemon = srv.daemon
container = daemon.Get(name)
)
if container == nil {
@ -2169,7 +2169,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
if err := container.Start(); err != nil {
return job.Errorf("Cannot start container %s: %s", name, err)
}
srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image))
srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
return engine.StatusOK
}
@ -2185,11 +2185,11 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
if job.EnvExists("t") {
t = job.GetenvInt("t")
}
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
if err := container.Stop(int(t)); err != nil {
return job.Errorf("Cannot stop container %s: %s\n", name, err)
}
srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image))
srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
} else {
return job.Errorf("No such container: %s\n", name)
}
@ -2201,7 +2201,7 @@ func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
return job.Errorf("Usage: %s", job.Name)
}
name := job.Args[0]
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
status := container.Wait()
job.Printf("%d\n", status)
return engine.StatusOK
@ -2222,7 +2222,7 @@ func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
if err != nil {
return job.Error(err)
}
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
if err := container.Resize(height, width); err != nil {
return job.Error(err)
}
@ -2245,7 +2245,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
stderr = job.GetenvBool("stderr")
)
container := srv.runtime.Get(name)
container := srv.daemon.Get(name)
if container == nil {
return job.Errorf("No such container: %s", name)
}
@ -2335,15 +2335,15 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
return engine.StatusOK
}
func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) {
if container := srv.runtime.Get(name); container != nil {
func (srv *Server) ContainerInspect(name string) (*daemon.Container, error) {
if container := srv.daemon.Get(name); container != nil {
return container, nil
}
return nil, fmt.Errorf("No such container: %s", name)
}
func (srv *Server) ImageInspect(name string) (*image.Image, error) {
if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil {
if image, err := srv.daemon.Repositories().LookupImage(name); err == nil && image != nil {
return image, nil
}
return nil, fmt.Errorf("No such image: %s", name)
@ -2378,7 +2378,7 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status {
return job.Error(errContainer)
}
object = &struct {
*runtime.Container
*daemon.Container
HostConfig *runconfig.HostConfig
}{container, container.HostConfig()}
default:
@ -2403,7 +2403,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
resource = job.Args[1]
)
if container := srv.runtime.Get(name); container != nil {
if container := srv.daemon.Get(name); container != nil {
data, err := container.Copy(resource)
if err != nil {
@ -2420,20 +2420,20 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
}
func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
runtime, err := runtime.NewRuntime(config, eng)
daemon, err := daemon.NewDaemon(config, eng)
if err != nil {
return nil, err
}
srv := &Server{
Eng: eng,
runtime: runtime,
daemon: daemon,
pullingPool: make(map[string]chan struct{}),
pushingPool: make(map[string]chan struct{}),
events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
listeners: make(map[string]chan utils.JSONMessage),
running: true,
}
runtime.SetServer(srv)
daemon.SetServer(srv)
return srv, nil
}
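A minimal caller sketch for the renamed constructor (the function name is hypothetical; NewServer, Hack_SetGlobalVar, and the srv.daemon field are all shown in this diff):

func startServerSketch(eng *engine.Engine, cfg *daemonconfig.Config) (*Server, error) {
	srv, err := NewServer(eng, cfg) // internally: daemon.NewDaemon(cfg, eng)
	if err != nil {
		return nil, err
	}
	// As InitServer does above, publish the daemon for the HTTP API layer.
	eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
	return srv, nil
}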
@ -2498,15 +2498,15 @@ func (srv *Server) Close() error {
return nil
}
srv.SetRunning(false)
if srv.runtime == nil {
if srv.daemon == nil {
return nil
}
return srv.runtime.Close()
return srv.daemon.Close()
}
type Server struct {
sync.RWMutex
runtime *runtime.Runtime
daemon *daemon.Daemon
pullingPool map[string]chan struct{}
pushingPool map[string]chan struct{}
events []utils.JSONMessage

View file

@ -3,9 +3,9 @@ package sysinit
import (
"flag"
"fmt"
"github.com/dotcloud/docker/runtime/execdriver"
_ "github.com/dotcloud/docker/runtime/execdriver/lxc"
_ "github.com/dotcloud/docker/runtime/execdriver/native"
"github.com/dotcloud/docker/daemon/execdriver"
_ "github.com/dotcloud/docker/daemon/execdriver/lxc"
_ "github.com/dotcloud/docker/daemon/execdriver/native"
"log"
"os"
)
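With the exec drivers now registered from under daemon/execdriver via the blank imports above, a dockerinit-style entrypoint still needs only the sysinit call; a sketch, assuming SysInit keeps its pre-rename no-argument signature:

package main

import "github.com/dotcloud/docker/sysinit"

func main() {
	// The lxc and native exec drivers were registered by sysinit's
	// own blank imports; SysInit selects one inside the container.
	sysinit.SysInit()
}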