Refactor HostConfig and escape apparmor confinement

commit 31638ab2ad (parent 65ba2868d7)
16 changed files with 197 additions and 184 deletions
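The heart of the refactor is a signature change: Container.Start() no longer takes a *HostConfig; the host-specific settings are stored on the container and persisted alongside its config. A minimal sketch of how a call site changes, assuming it lives in the same package as container.go (startPrivileged is an illustrative helper, not code from the diff):

package docker

// Hypothetical helper (not in the diff) showing the call-site change.
func startPrivileged(container *Container) error {
	// Before: hostConfig := &HostConfig{Privileged: true}
	//         err := container.Start(hostConfig)
	// After: the host-specific settings live on the container and are
	// persisted by writeHostConfig(), so "docker start"/"docker restart"
	// can reuse them without the caller passing anything.
	container.hostConfig = &HostConfig{Privileged: true}
	return container.Start()
}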
Dockerfile (12 changed lines)

@@ -4,24 +4,24 @@
 #
 # # Assemble the full dev environment. This is slow the first time.
 # docker build -t docker .
-# # Apparmor messes with privileged mode: disable it
-# /etc/init.d/apparmor stop ; /etc/init.d/apparmor teardown
-#
 # # Mount your source in an interactive container for quick testing:
-# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -lxc-conf=lxc.aa_profile=unconfined -i -t docker bash
+# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash
 #
 # # Run the test suite:
-# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh test
+# docker run -privileged docker hack/make.sh test
 #
 # # Publish a release:
-# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined \
+# docker run -privileged \
 #    -e AWS_S3_BUCKET=baz \
 #    -e AWS_ACCESS_KEY=foo \
 #    -e AWS_SECRET_KEY=bar \
 #    -e GPG_PASSPHRASE=gloubiboulga \
 #    docker hack/release.sh
 #
+# Note: Apparmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#

 docker-version 0.6.1
 from ubuntu:12.04
api_test.go (21 changed lines)

@@ -499,8 +499,7 @@ func TestGetContainersTop(t *testing.T) {
 		container.WaitTimeout(2 * time.Second)
 	}()

-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -704,8 +703,7 @@ func TestPostContainersKill(t *testing.T) {
 	}
 	defer runtime.Destroy(container)

-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -747,8 +745,7 @@ func TestPostContainersRestart(t *testing.T) {
 	}
 	defer runtime.Destroy(container)

-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -855,8 +852,7 @@ func TestPostContainersStop(t *testing.T) {
 	}
 	defer runtime.Destroy(container)

-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -903,8 +899,7 @@ func TestPostContainersWait(t *testing.T) {
 	}
 	defer runtime.Destroy(container)

-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -947,8 +942,7 @@ func TestPostContainersAttach(t *testing.T) {
 	defer runtime.Destroy(container)

 	// Start the process
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -1037,8 +1031,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
 	defer runtime.Destroy(container)

 	// Start the process
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

buildfile.go

@@ -388,8 +388,7 @@ func (b *buildFile) run() (string, error) {
 	}

 	//start the container
-	hostConfig := &HostConfig{}
-	if err := c.Start(hostConfig); err != nil {
+	if err := c.Start(); err != nil {
 		return "", err
 	}
container.go (99 changed lines)

@@ -60,11 +60,15 @@ type Container struct {
 	Volumes map[string]string
 	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
 	// Easier than migrating older container configs :)
-	VolumesRW map[string]bool
+	VolumesRW  map[string]bool
+	hostConfig *HostConfig

 	activeLinks map[string]*Link
 }

+// Note: the Config structure should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
 type Config struct {
 	Hostname   string
 	Domainname string

@@ -89,13 +93,13 @@ type Config struct {
 	WorkingDir      string
 	Entrypoint      []string
 	NetworkDisabled bool
-	Privileged      bool
 }

 type HostConfig struct {
 	Binds           []string
 	ContainerIDFile string
 	LxcConf         []KeyValuePair
+	Privileged      bool
 	PortBindings    map[Port][]PortBinding
 	Links           []string
 	PublishAllPorts bool

@@ -320,7 +324,6 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		Volumes:     flVolumes,
 		VolumesFrom: strings.Join(flVolumesFrom, ","),
 		Entrypoint:  entrypoint,
-		Privileged:  *flPrivileged,
 		WorkingDir:  *flWorkingDir,
 	}

@@ -328,6 +331,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		Binds:           binds,
 		ContainerIDFile: *flContainerIDFile,
 		LxcConf:         lxcConf,
+		Privileged:      *flPrivileged,
 		PortBindings:    portBindings,
 		Links:           flLinks,
 		PublishAllPorts: *flPublishAll,

@@ -416,7 +420,7 @@ func (container *Container) FromDisk() error {
 	if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
 		return err
 	}
-	return nil
+	return container.readHostConfig()
 }

 func (container *Container) ToDisk() (err error) {

@@ -424,23 +428,31 @@ func (container *Container) ToDisk() (err error) {
 	if err != nil {
 		return
 	}
-	return ioutil.WriteFile(container.jsonPath(), data, 0666)
+	err = ioutil.WriteFile(container.jsonPath(), data, 0666)
+	if err != nil {
+		return
+	}
+	return container.writeHostConfig()
 }

-func (container *Container) ReadHostConfig() (*HostConfig, error) {
+func (container *Container) readHostConfig() error {
+	container.hostConfig = &HostConfig{}
+	// If the hostconfig file does not exist, do not read it.
+	// (We still have to initialize container.hostConfig,
+	// but that's OK, since we just did that above.)
+	_, err := os.Stat(container.hostConfigPath())
+	if os.IsNotExist(err) {
+		return nil
+	}
 	data, err := ioutil.ReadFile(container.hostConfigPath())
 	if err != nil {
-		return &HostConfig{}, err
+		return err
 	}
-	hostConfig := &HostConfig{}
-	if err := json.Unmarshal(data, hostConfig); err != nil {
-		return &HostConfig{}, err
-	}
-	return hostConfig, nil
+	return json.Unmarshal(data, container.hostConfig)
 }

-func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
-	data, err := json.Marshal(hostConfig)
+func (container *Container) writeHostConfig() (err error) {
+	data, err := json.Marshal(container.hostConfig)
 	if err != nil {
 		return
 	}

@@ -456,21 +468,13 @@ func (container *Container) generateEnvConfig(env []string) error {
 	return nil
 }

-func (container *Container) generateLXCConfig(hostConfig *HostConfig) error {
+func (container *Container) generateLXCConfig() error {
 	fo, err := os.Create(container.lxcConfigPath())
 	if err != nil {
 		return err
 	}
 	defer fo.Close()
-	if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
-		return err
-	}
-	if hostConfig != nil {
-		if err := LxcHostConfigTemplateCompiled.Execute(fo, hostConfig); err != nil {
-			return err
-		}
-	}
-	return nil
+	return LxcTemplateCompiled.Execute(fo, container)
 }

 func (container *Container) startPty() error {

@@ -665,7 +669,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 	})
 }

-func (container *Container) Start(hostConfig *HostConfig) (err error) {
+func (container *Container) Start() (err error) {
 	container.State.Lock()
 	defer container.State.Unlock()
 	defer func() {

@@ -674,10 +678,6 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 		}
 	}()

-	if hostConfig == nil { // in docker start of docker restart we want to reuse previous HostConfigFile
-		hostConfig, _ = container.ReadHostConfig()
-	}
-
 	if container.State.Running {
 		return fmt.Errorf("The container %s is already running.", container.ID)
 	}

@@ -687,7 +687,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	if container.runtime.networkManager.disabled {
 		container.Config.NetworkDisabled = true
 	} else {
-		if err := container.allocateNetwork(hostConfig); err != nil {
+		if err := container.allocateNetwork(); err != nil {
 			return err
 		}
 	}

@@ -711,7 +711,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	// Define illegal container destinations
 	illegalDsts := []string{"/", "."}

-	for _, bind := range hostConfig.Binds {
+	for _, bind := range container.hostConfig.Binds {
 		// FIXME: factorize bind parsing in parseBind
 		var src, dst, mode string
 		arr := strings.Split(bind, ":")

@@ -845,7 +845,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 		}
 	}

-	if err := container.generateLXCConfig(hostConfig); err != nil {
+	if err := container.generateLXCConfig(); err != nil {
 		return err
 	}

@@ -942,8 +942,11 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	params = append(params, "--", container.Path)
 	params = append(params, container.Args...)

-	container.cmd = exec.Command("lxc-start", params...)
-
+	var lxcStart string = "lxc-start"
+	if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
+		lxcStart = path.Join(container.runtime.config.GraphPath, "lxc-start-unconfined")
+	}
+	container.cmd = exec.Command(lxcStart, params...)
 	// Setup logging of stdout and stderr to disk
 	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
 		return err

@@ -970,8 +973,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	container.waitLock = make(chan struct{})

 	container.ToDisk()
-	container.SaveHostConfig(hostConfig)
-	go container.monitor(hostConfig)
+	go container.monitor()

 	defer utils.Debugf("Container running: %v", container.State.Running)
 	// We wait for the container to be fully running.

@@ -1008,7 +1010,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 }

 func (container *Container) Run() error {
-	if err := container.Start(&HostConfig{}); err != nil {
+	if err := container.Start(); err != nil {
 		return err
 	}
 	container.Wait()

@@ -1021,8 +1023,7 @@ func (container *Container) Output() (output []byte, err error) {
 		return nil, err
 	}
 	defer pipe.Close()
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		return nil, err
 	}
 	output, err = ioutil.ReadAll(pipe)

@@ -1054,7 +1055,7 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) {
 	return utils.NewBufReader(reader), nil
 }

-func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
+func (container *Container) allocateNetwork() error {
 	if container.Config.NetworkDisabled {
 		return nil
 	}

@@ -1083,11 +1084,11 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {

 	if container.Config.PortSpecs != nil {
 		utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", "))
-		if err := migratePortMappings(container.Config, hostConfig); err != nil {
+		if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
 			return err
 		}
 		container.Config.PortSpecs = nil
-		if err := container.SaveHostConfig(hostConfig); err != nil {
+		if err := container.writeHostConfig(); err != nil {
 			return err
 		}
 	}

@@ -1099,8 +1100,8 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
 		if container.Config.ExposedPorts != nil {
 			portSpecs = container.Config.ExposedPorts
 		}
-		if hostConfig.PortBindings != nil {
-			bindings = hostConfig.PortBindings
+		if container.hostConfig.PortBindings != nil {
+			bindings = container.hostConfig.PortBindings
 		}
 	} else {
 		if container.NetworkSettings.Ports != nil {

@@ -1130,7 +1131,7 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
 		}
 		bindings[port] = binding
 	}
-	container.SaveHostConfig(hostConfig)
+	container.writeHostConfig()

 	container.NetworkSettings.Ports = bindings
 	container.network = iface

@@ -1166,7 +1167,7 @@ func (container *Container) waitLxc() error {
 	}
 }

-func (container *Container) monitor(hostConfig *HostConfig) {
+func (container *Container) monitor() {
 	// Wait for the program to exit

 	// If the command does not exist, try to wait via lxc

@@ -1323,11 +1324,7 @@ func (container *Container) Restart(seconds int) error {
 	if err := container.Stop(seconds); err != nil {
 		return err
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
-		return err
-	}
-	return nil
+	return container.Start()
 }

 // Wait blocks until the container stops running, then returns its exit code.
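The interesting part of container.go is how a privileged container escapes AppArmor. As I read the commit, the stock AppArmor profile that Ubuntu attaches to the lxc-start binary is matched by executable path, so the daemon copies lxc-start into the graph directory as lxc-start-unconfined at startup (see copyLxcStart in runtime.go below) and execs that copy for privileged containers; the generated LXC config then sets lxc.aa_profile = unconfined. A condensed sketch of the selection logic from the Start() hunk above, assuming the same package as container.go (lxcStartBinary is my name, not the commit's):

// Condensed from the Start() hunk above: pick the lxc-start binary to exec.
// The unconfined copy is created by copyLxcStart() when the daemon boots.
func lxcStartBinary(container *Container) string {
	if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
		return path.Join(container.runtime.config.GraphPath, "lxc-start-unconfined")
	}
	return "lxc-start"
}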
container_test.go

@@ -40,7 +40,7 @@ func TestIDFormat(t *testing.T) {
 func TestMultipleAttachRestart(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container, hostConfig, _ := mkContainer(
+	container, _ := mkContainer(
 		runtime,
 		[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
 		t,

@@ -61,7 +61,7 @@ func TestMultipleAttachRestart(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	l1, err := bufio.NewReader(stdout1).ReadString('\n')

@@ -102,7 +102,7 @@ func TestMultipleAttachRestart(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -136,7 +136,7 @@ func TestDiff(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	// Create a container and remove a file
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
 	defer runtime.Destroy(container1)

 	// The changelog should be empty and not fail before run. See #1705

@@ -178,7 +178,7 @@ func TestDiff(t *testing.T) {
 	}

 	// Create a new container from the commited image
-	container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
+	container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
 	defer runtime.Destroy(container2)

 	if err := container2.Run(); err != nil {

@@ -197,7 +197,7 @@ func TestDiff(t *testing.T) {
 	}

 	// Create a new container
-	container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
+	container3, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
 	defer runtime.Destroy(container3)

 	if err := container3.Run(); err != nil {

@@ -223,7 +223,7 @@ func TestDiff(t *testing.T) {
 func TestCommitAutoRun(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
 	defer runtime.Destroy(container1)

 	if container1.State.Running {

@@ -246,7 +246,7 @@ func TestCommitAutoRun(t *testing.T) {
 	}

 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
-	container2, hostConfig, _ := mkContainer(runtime, []string{img.ID}, t)
+	container2, _ := mkContainer(runtime, []string{img.ID}, t)
 	defer runtime.Destroy(container2)
 	stdout, err := container2.StdoutPipe()
 	if err != nil {

@@ -256,7 +256,7 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
 	container2.Wait()

@@ -283,7 +283,7 @@ func TestCommitRun(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)

-	container1, hostConfig, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
 	defer runtime.Destroy(container1)

 	if container1.State.Running {

@@ -306,7 +306,7 @@ func TestCommitRun(t *testing.T) {
 	}

 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
-	container2, hostConfig, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
+	container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
 	defer runtime.Destroy(container2)
 	stdout, err := container2.StdoutPipe()
 	if err != nil {

@@ -316,7 +316,7 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
 	container2.Wait()

@@ -342,7 +342,7 @@ func TestCommitRun(t *testing.T) {
 func TestStart(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container, hostConfig, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
+	container, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
 	defer runtime.Destroy(container)

 	cStdin, err := container.StdinPipe()

@@ -350,7 +350,7 @@ func TestStart(t *testing.T) {
 		t.Fatal(err)
 	}

-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -360,7 +360,7 @@ func TestStart(t *testing.T) {
 	if !container.State.Running {
 		t.Errorf("Container should be running")
 	}
-	if err := container.Start(hostConfig); err == nil {
+	if err := container.Start(); err == nil {
 		t.Fatalf("A running container should be able to be started")
 	}

@@ -372,7 +372,7 @@ func TestStart(t *testing.T) {
 func TestRun(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container)

 	if container.State.Running {

@@ -452,7 +452,7 @@ func TestKillDifferentUser(t *testing.T) {
 	if container.State.Running {
 		t.Errorf("Container shouldn't be running")
 	}
-	if err := container.Start(&HostConfig{}); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -501,7 +501,8 @@ func TestCreateVolume(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer runtime.Destroy(c)
-	if err := c.Start(hc); err != nil {
+	c.hostConfig = hc
+	if err := c.Start(); err != nil {
 		t.Fatal(err)
 	}
 	c.WaitTimeout(500 * time.Millisecond)

@@ -525,8 +526,7 @@ func TestKill(t *testing.T) {
 	if container.State.Running {
 		t.Errorf("Container shouldn't be running")
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -642,8 +642,7 @@ func TestRestartStdin(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := io.WriteString(stdin, "hello world"); err != nil {

@@ -673,7 +672,7 @@ func TestRestartStdin(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := io.WriteString(stdin, "hello world #2"); err != nil {

@@ -850,11 +849,10 @@ func TestMultipleContainers(t *testing.T) {
 	defer runtime.Destroy(container2)

 	// Start both containers
-	hostConfig := &HostConfig{}
-	if err := container1.Start(hostConfig); err != nil {
+	if err := container1.Start(); err != nil {
 		t.Fatal(err)
 	}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -904,8 +902,7 @@ func TestStdin(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	defer stdin.Close()

@@ -950,8 +947,7 @@ func TestTty(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	defer stdin.Close()

@@ -992,8 +988,7 @@ func TestEnv(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer stdout.Close()
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	container.Wait()

@@ -1121,7 +1116,7 @@ func TestLXCConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer runtime.Destroy(container)
-	container.generateLXCConfig(nil)
+	container.generateLXCConfig()
 	grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
 	grepFile(t, container.lxcConfigPath(),
 		fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))

@@ -1144,7 +1139,7 @@ func TestCustomLxcConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer runtime.Destroy(container)
-	hostConfig := &HostConfig{LxcConf: []KeyValuePair{
+	container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{
 		{
 			Key:   "lxc.utsname",
 			Value: "docker",

@@ -1155,7 +1150,7 @@
 		},
 	}}

-	container.generateLXCConfig(hostConfig)
+	container.generateLXCConfig()
 	grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
 	grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
 }

@@ -1208,8 +1203,7 @@ func BenchmarkRunParallel(b *testing.B) {
 			return
 		}
 		defer runtime.Destroy(container)
-		hostConfig := &HostConfig{}
-		if err := container.Start(hostConfig); err != nil {
+		if err := container.Start(); err != nil {
 			complete <- err
 			return
 		}

@@ -1253,7 +1247,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
 	defer nuke(r)

 	// Add directory not owned by root
-	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
+	container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
 	defer r.Destroy(container1)

 	if container1.State.Running {

@@ -1290,7 +1284,7 @@ func TestCopyVolumeContent(t *testing.T) {
 	defer nuke(r)

 	// Put some content in a directory of a container and commit it
-	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
+	container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
 	defer r.Destroy(container1)

 	if container1.State.Running {

@@ -1527,9 +1521,9 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
 	defer runtime.Destroy(c)
-	if err := c.Start(hc); err != nil {
+	c.hostConfig = hc
+	if err := c.Start(); err != nil {
 		t.Fatal(err)
 	}
 	c.WaitTimeout(500 * time.Millisecond)
(documentation: setting up a dev environment)

@@ -56,7 +56,7 @@ To create the Docker binary, run this command:

 .. code-block:: bash

-    sudo docker run -lxc-conf=lxc.aa_profile=unconfined -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary
+    sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary

 This will create the Docker binary in ``./bundles/<version>-dev/binary/``

@@ -64,18 +64,11 @@ This will create the Docker binary in ``./bundles/<version>-dev/binary/``
 Step 5: Run the Tests
 ---------------------

-To run the Docker test cases you first need to disable `AppArmor <https://wiki.ubuntu.com/AppArmor>`_ using the following commands
-
-.. code-block:: bash
-
-    sudo /etc/init.d/apparmor stop
-    sudo /etc/init.d/apparmor teardown
-
 To execute the test cases, run this command:

 .. code-block:: bash

-    sudo docker run -lxc-conf=lxc.aa_profile=unconfined -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
+    sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test


 Note: if you're running the tests in vagrant, you need to specify a dns entry in the command: `-dns 8.8.8.8`
hack/dind

@@ -21,6 +21,14 @@ mountpoint -q $CGROUP ||
 		exit 1
 }

+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+	mount -t securityfs none /sys/kernel/security || {
+		echo "Could not mount /sys/kernel/security."
+		echo "AppArmor detection and -privileged mode might break."
+	}
+fi
+
 # Mount the cgroup hierarchies exactly as they are in the parent system.
 for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
 do
(docker-ci deployment script)

@@ -135,11 +135,6 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
     'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
     ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')

-#### FIXME. Temporarily install docker with proper apparmor handling
-sudo('stop docker')
-sudo('wget -q -O /usr/bin/docker http://test.docker.io/test/docker')
-sudo('start docker')
-
 # Preventively reboot docker-ci daily
 sudo('ln -s /sbin/reboot /etc/cron.daily')
(dockerbuilder Dockerfile)

@@ -11,7 +11,7 @@
 # "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature'
 # "INDEX_AUTH='Encripted_index_authentication' }
 # TO_BUILD: docker build -t dockerbuilder .
-# TO_RELEASE: docker run -i -t -privileged -lxc-conf="lxc.aa_profile = unconfined" -e AWS_S3_BUCKET="test.docker.io" dockerbuilder
+# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder

 from docker
 maintainer Daniel Mizyrycki <daniel@dotcloud.com>

@@ -23,9 +23,6 @@ run apt-get update; apt-get install -y -q wget python2.7
 # Add production docker binary
 run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker

-#### FIXME. Temporarily install docker with proper apparmor handling
-run wget -q -O /usr/bin/docker http://test.docker.io/test/docker; chmod +x /usr/bin/docker
-
 # Add proto docker builder
 add ./dockerbuild /usr/bin/dockerbuild
 run chmod +x /usr/bin/dockerbuild
(docker-ci test script)

@@ -13,9 +13,6 @@ cd /
 git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
 cd /go/src/github.com/dotcloud/docker

-echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliable on AWS
-git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1
-
 # Launch docker daemon using dind inside the container
 ./hack/dind /usr/bin/docker -d &
 sleep 5

@@ -27,7 +24,7 @@ date > timestamp
 docker build -t docker .

 # Run Docker unittests binary and Ubuntu package
-docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh
+docker run -privileged docker hack/make.sh
 exit_status=$?

 # Display load if test fails
lxc_template.go

@@ -11,7 +11,6 @@ lxc.utsname = {{.Config.Hostname}}
 {{else}}
 lxc.utsname = {{.Id}}
 {{end}}
-#lxc.aa_profile = unconfined

 {{if .Config.NetworkDisabled}}
 # network is disabled (-n=false)

@@ -46,7 +45,7 @@ lxc.console = none
 # no controlling tty at all
 lxc.tty = 1

-{{if .Config.Privileged}}
+{{if (getHostConfig .).Privileged}}
 lxc.cgroup.devices.allow = a
 {{else}}
 # no implicit access to devices

@@ -66,7 +65,7 @@ lxc.cgroup.devices.allow = c 4:1 rwm
 lxc.cgroup.devices.allow = c 1:9 rwm
 lxc.cgroup.devices.allow = c 1:8 rwm

-# /dev/pts/* - pts namespaces are "coming soon"
+# /dev/pts/ - pts namespaces are "coming soon"
 lxc.cgroup.devices.allow = c 136:* rwm
 lxc.cgroup.devices.allow = c 5:2 rwm

@@ -109,8 +108,13 @@ lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,{{ if ind
 {{end}}
 {{end}}

-{{if .Config.Privileged}}
+{{if (getHostConfig .).Privileged}}
 # retain all capabilities; no lxc.cap.drop line
+{{if (getCapabilities .).AppArmor}}
+lxc.aa_profile = unconfined
+{{else}}
+#lxc.aa_profile = unconfined
+{{end}}
 {{else}}
 # drop linux capabilities (apply mainly to the user root in the container)
 # (Note: 'lxc.cap.keep' is coming soon and should replace this under the

@@ -130,18 +134,15 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
 {{if .Config.CpuShares}}
 lxc.cgroup.cpu.shares = {{.Config.CpuShares}}
 {{end}}
-`
-
-const LxcHostConfigTemplate = `
-{{if .LxcConf}}
-{{range $pair := .LxcConf}}
+
+{{if (getHostConfig .).LxcConf}}
+{{range $pair := (getHostConfig .).LxcConf}}
 {{$pair.Key}} = {{$pair.Value}}
 {{end}}
 {{end}}
 `

 var LxcTemplateCompiled *template.Template
-var LxcHostConfigTemplateCompiled *template.Template

 func getMemorySwap(config *Config) int64 {
 	// By default, MemorySwap is set to twice the size of RAM.

@@ -152,17 +153,23 @@ func getMemorySwap(config *Config) int64 {
 	return config.Memory * 2
 }

+func getHostConfig(container *Container) *HostConfig {
+	return container.hostConfig
+}
+
+func getCapabilities(container *Container) *Capabilities {
+	return container.runtime.capabilities
+}
+
 func init() {
 	var err error
 	funcMap := template.FuncMap{
-		"getMemorySwap": getMemorySwap,
+		"getMemorySwap":   getMemorySwap,
+		"getHostConfig":   getHostConfig,
+		"getCapabilities": getCapabilities,
 	}
 	LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
 	if err != nil {
 		panic(err)
 	}
-	LxcHostConfigTemplateCompiled, err = template.New("lxc-hostconfig").Funcs(funcMap).Parse(LxcHostConfigTemplate)
-	if err != nil {
-		panic(err)
-	}
 }
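The separate LxcHostConfigTemplate is gone; the single LXC template now reaches the host config and runtime capabilities through template functions. Below is a small self-contained sketch of that mechanism with stand-in types (not the real docker structs), just to show how the getHostConfig/getCapabilities lookups behave:

package main

import (
	"os"
	"text/template"
)

// Stand-ins for the real Container / HostConfig / Capabilities types.
type hostConfig struct{ Privileged bool }
type capabilities struct{ AppArmor bool }
type container struct {
	hostConfig   *hostConfig
	capabilities *capabilities
}

const lxcSnippet = `{{if (getHostConfig .).Privileged}}{{if (getCapabilities .).AppArmor}}lxc.aa_profile = unconfined{{else}}#lxc.aa_profile = unconfined{{end}}{{end}}
`

func main() {
	funcMap := template.FuncMap{
		"getHostConfig":   func(c *container) *hostConfig { return c.hostConfig },
		"getCapabilities": func(c *container) *capabilities { return c.capabilities },
	}
	tmpl := template.Must(template.New("lxc").Funcs(funcMap).Parse(lxcSnippet))
	c := &container{hostConfig: &hostConfig{Privileged: true}, capabilities: &capabilities{AppArmor: true}}
	if err := tmpl.Execute(os.Stdout, c); err != nil { // prints: lxc.aa_profile = unconfined
		panic(err)
	}
}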
runtime.go (42 changed lines)

@@ -24,6 +24,7 @@ type Capabilities struct {
 	MemoryLimit            bool
 	SwapLimit              bool
 	IPv4ForwardingDisabled bool
+	AppArmor               bool
 }

 type Runtime struct {

@@ -149,8 +150,7 @@ func (runtime *Runtime) Register(container *Container) error {
 			utils.Debugf("Restarting")
 			container.State.Ghost = false
 			container.State.setStopped(0)
-			hostConfig, _ := container.ReadHostConfig()
-			if err := container.Start(hostConfig); err != nil {
+			if err := container.Start(); err != nil {
 				return err
 			}
 			nomonitor = true

@@ -169,9 +169,7 @@ func (runtime *Runtime) Register(container *Container) error {
 		if !container.State.Running {
 			close(container.waitLock)
 		} else if !nomonitor {
-			hostConfig, _ := container.ReadHostConfig()
-			container.allocateNetwork(hostConfig)
-			go container.monitor(hostConfig)
+			go container.monitor()
 		}
 	}
 	return nil
 }

@@ -310,6 +308,15 @@ func (runtime *Runtime) UpdateCapabilities(quiet bool) {
 	if runtime.capabilities.IPv4ForwardingDisabled && !quiet {
 		log.Printf("WARNING: IPv4 forwarding is disabled.")
 	}
+
+	// Check if AppArmor seems to be enabled on this system.
+	if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
+		utils.Debugf("/sys/kernel/security/apparmor not found; assuming AppArmor is not enabled.")
+		runtime.capabilities.AppArmor = false
+	} else {
+		utils.Debugf("/sys/kernel/security/apparmor found; assuming AppArmor is enabled.")
+		runtime.capabilities.AppArmor = true
+	}
 }

 // Create creates a new container from the given configuration with a given name.

@@ -400,6 +407,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 		Path:            entrypoint,
 		Args:            args, //FIXME: de-duplicate from config
 		Config:          config,
+		hostConfig:      &HostConfig{},
 		Image:           img.ID, // Always use the resolved image id
 		NetworkSettings: &NetworkSettings{},
 		// FIXME: do we need to store this in the container?

@@ -574,6 +582,9 @@ func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) {
 		return nil, err
 	}

+	if err := copyLxcStart(config.Root); err != nil {
+		return nil, err
+	}
 	g, err := NewGraph(path.Join(config.Root, "graph"))
 	if err != nil {
 		return nil, err

@@ -636,6 +647,27 @@ func (runtime *Runtime) Close() error {
 	return runtime.containerGraph.Close()
 }

+func copyLxcStart(root string) error {
+	sourcePath, err := exec.LookPath("lxc-start")
+	if err != nil {
+		return err
+	}
+	targetPath := path.Join(root, "lxc-start-unconfined")
+	sourceFile, err := os.Open(sourcePath)
+	if err != nil {
+		return err
+	}
+	defer sourceFile.Close()
+	targetFile, err := os.Create(targetPath)
+	if err != nil {
+		return err
+	}
+	defer targetFile.Close()
+	os.Chmod(targetPath, 0755)
+	_, err = io.Copy(targetFile, sourceFile)
+	return err
+}
+
 // History is a convenience type for storing a list of containers,
 // ordered by creation date.
 type History []*Container
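UpdateCapabilities now records whether AppArmor is active by checking for /sys/kernel/security/apparmor, which is also why hack/dind above mounts securityfs inside the build container. A standalone restatement of that probe (the helper name is mine, not the commit's):

package main

import (
	"fmt"
	"os"
)

// appArmorEnabled mirrors the check added to UpdateCapabilities: the presence
// of /sys/kernel/security/apparmor is taken to mean AppArmor is enabled.
func appArmorEnabled() bool {
	_, err := os.Stat("/sys/kernel/security/apparmor")
	return !os.IsNotExist(err)
}

func main() {
	fmt.Println("AppArmor enabled:", appArmorEnabled())
}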
runtime_test.go

@@ -325,13 +325,13 @@ func TestGet(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)

-	container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container1)

-	container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container2)

-	container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container3)

 	if runtime.Get(container1.ID) != container1 {

@@ -390,13 +390,13 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
 			t.Logf("Port %v already in use, trying another one", strPort)
 		}

-		hostConfig := &HostConfig{
+		container.hostConfig = &HostConfig{
 			PortBindings: make(map[Port][]PortBinding),
 		}
-		hostConfig.PortBindings[p] = []PortBinding{
+		container.hostConfig.PortBindings[p] = []PortBinding{
 			{},
 		}
-		if err := container.Start(hostConfig); err != nil {
+		if err := container.Start(); err != nil {
 			nuke(runtime)
 			t.Fatal(err)
 		}

@@ -503,16 +503,15 @@ func TestRestore(t *testing.T) {
 	runtime1 := mkRuntime(t)
 	defer nuke(runtime1)
 	// Create a container with one instance of docker
-	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
+	container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
 	defer runtime1.Destroy(container1)

 	// Create a second container meant to be killed
-	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
+	container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
 	defer runtime1.Destroy(container2)

 	// Start the container non blocking
-	hostConfig := &HostConfig{}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}

@@ -575,25 +574,23 @@ func TestReloadContainerLinks(t *testing.T) {
 	runtime1 := mkRuntime(t)
 	defer nuke(runtime1)
 	// Create a container with one instance of docker
-	container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
+	container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
 	defer runtime1.Destroy(container1)

 	// Create a second container meant to be killed
-	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
+	container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
 	defer runtime1.Destroy(container2)

 	// Start the container non blocking
-	hostConfig := &HostConfig{}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
-	h1 := &HostConfig{}
 	// Add a link to container 2
-	h1.Links = []string{"/" + container2.ID + ":first"}
+	container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
 	if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
 		t.Fatal(err)
 	}
-	if err := container1.Start(h1); err != nil {
+	if err := container1.Start(); err != nil {
 		t.Fatal(err)
 	}
server.go

@@ -1304,7 +1304,7 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
 		// After we load all the links into the runtime
 		// set them to nil on the hostconfig
 		hostConfig.Links = nil
-		if err := container.SaveHostConfig(hostConfig); err != nil {
+		if err := container.writeHostConfig(); err != nil {
 			return err
 		}
 	}

@@ -1317,8 +1317,11 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
 	if container == nil {
 		return fmt.Errorf("No such container: %s", name)
 	}

-	if err := container.Start(hostConfig); err != nil {
+	if hostConfig != nil {
+		container.hostConfig = hostConfig
+		container.ToDisk()
+	}
+	if err := container.Start(); err != nil {
 		return fmt.Errorf("Cannot start container %s: %s", name, err)
 	}
 	srv.LogEvent("start", container.ShortID(), runtime.repositories.ImageName(container.Image))
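On the API side, ContainerStart still accepts an optional HostConfig from the client: when one is supplied it replaces the stored one and is flushed to disk before Start(), while docker start/restart pass nil and reuse whatever was persisted. A condensed sketch of that contract, assuming the same package as server.go (startWithOverride is an illustrative name, not code from the diff):

// Sketch of the new server-side contract, condensed from ContainerStart.
func startWithOverride(container *Container, hostConfig *HostConfig) error {
	if hostConfig != nil {
		container.hostConfig = hostConfig
		container.ToDisk() // ToDisk also persists the host config via writeHostConfig()
	}
	return container.Start() // with a nil override, Start() uses the persisted hostConfig
}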
server_test.go

@@ -246,14 +246,14 @@ func TestContainerTop(t *testing.T) {

 	srv := &Server{runtime: runtime}

-	c, hostConfig, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
-	c, hostConfig, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
+	c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
+	c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
 	if err != nil {
 		t.Fatal(err)
 	}

 	defer runtime.Destroy(c)
-	if err := c.Start(hostConfig); err != nil {
+	if err := c.Start(); err != nil {
 		t.Fatal(err)
 	}
utils_test.go

@@ -116,7 +116,7 @@ func readFile(src string, t *testing.T) (content string) {
 // dynamically replaced by the current test image.
 // The caller is responsible for destroying the container.
 // Call t.Fatal() at the first error.
-func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConfig, error) {
+func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
 	config, hostConfig, _, err := ParseRun(args, nil)
 	defer func() {
 		if err != nil && t != nil {

@@ -124,16 +124,17 @@ func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConf
 		}
 	}()
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 	if config.Image == "_" {
 		config.Image = GetTestImage(r).ID
 	}
 	c, _, err := r.Create(config, "")
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
-	return c, hostConfig, nil
+	c.hostConfig = hostConfig
+	return c, nil
 }

 // Create a test container, start it, wait for it to complete, destroy it,

@@ -146,7 +147,7 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
 			t.Fatal(err)
 		}
 	}()
-	container, hostConfig, err := mkContainer(r, args, t)
+	container, err := mkContainer(r, args, t)
 	if err != nil {
 		return "", err
 	}

@@ -156,7 +157,7 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
 		return "", err
 	}
 	defer stdout.Close()
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		return "", err
 	}
 	container.Wait()