@@ -3,17 +3,19 @@ package docker
 import (
 	"bytes"
 	"fmt"
+	"github.com/dotcloud/docker"
+	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/sysinit"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"log"
 	"net"
+	"net/url"
 	"os"
 	"path/filepath"
 	"runtime"
 	"strconv"
 	"strings"
-	"sync"
 	"syscall"
 	"testing"
 	"time"
@@ -22,6 +24,7 @@ import (
 const (
 	unitTestImageName     = "docker-test-image"
 	unitTestImageID       = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
+	unitTestImageIDShort  = "83599e29c455"
 	unitTestNetworkBridge = "testdockbr0"
 	unitTestStoreBase     = "/var/lib/docker/unit-tests"
 	testDaemonAddr        = "127.0.0.1:4270"
@@ -29,39 +32,33 @@ const (
 )

 var (
-	globalRuntime   *Runtime
+	// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
+	globalRuntime   *docker.Runtime
+	globalEngine    *engine.Engine
 	startFds        int
 	startGoroutines int
 )

-func nuke(runtime *Runtime) error {
-	var wg sync.WaitGroup
-	for _, container := range runtime.List() {
-		wg.Add(1)
-		go func(c *Container) {
-			c.Kill()
-			wg.Done()
-		}(container)
-	}
-	wg.Wait()
-	runtime.Close()
-
-	os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db"))
-	return os.RemoveAll(runtime.config.Root)
+// FIXME: nuke() is deprecated by Runtime.Nuke()
+func nuke(runtime *docker.Runtime) error {
+	return runtime.Nuke()
 }

-func cleanup(runtime *Runtime) error {
+// FIXME: cleanup and nuke are redundant.
+func cleanup(eng *engine.Engine, t *testing.T) error {
+	runtime := mkRuntimeFromEngine(eng, t)
 	for _, container := range runtime.List() {
 		container.Kill()
 		runtime.Destroy(container)
 	}
-	images, err := runtime.graph.Map()
+	srv := mkServerFromEngine(eng, t)
+	images, err := srv.Images(true, "")
 	if err != nil {
 		return err
 	}
 	for _, image := range images {
 		if image.ID != unitTestImageID {
-			runtime.graph.Delete(image.ID)
+			srv.ImageDelete(image.ID, false)
 		}
 	}
 	return nil
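The rewritten cleanup helpers above depend on test utilities defined outside this hunk. Since they are invoked both with a *testing.T here and with a *log.Logger later (in spawnGlobalDaemon), they presumably accept a small interface covering Fatal/Fatalf. A minimal sketch of one plausible shape, assuming the initapi job stashes the legacy server object on the engine — the utils.Fataler interface, the Hack_GetGlobalVar accessor, the "httpapi.server" key, and the Runtime() getter are all assumptions, not part of this diff:

```go
// Hypothetical sketches of the helpers used above. Everything here is an
// assumption about code outside this diff, not the actual implementation.
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server {
	// Assumes initapi registered the legacy server object on the engine.
	srv, ok := eng.Hack_GetGlobalVar("httpapi.server").(*docker.Server)
	if !ok {
		t.Fatal("initapi did not register a *docker.Server on the engine")
	}
	return srv
}

func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime {
	// The runtime is reached through the server rather than stored separately.
	return mkServerFromEngine(eng, t).Runtime()
}
```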
@@ -118,28 +115,24 @@ func init() {
 }

 func setupBaseImage() {
-	config := &DaemonConfig{
-		Root:        unitTestStoreBase,
-		AutoRestart: false,
-		BridgeIface: unitTestNetworkBridge,
-	}
-	runtime, err := NewRuntimeFromDirectory(config)
+	eng, err := engine.New(unitTestStoreBase)
 	if err != nil {
-		log.Fatalf("Unable to create a runtime for tests:", err)
+		log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err)
 	}
-
-	// Create the "Server"
-	srv := &Server{
-		runtime:     runtime,
-		pullingPool: make(map[string]struct{}),
-		pushingPool: make(map[string]struct{}),
+	job := eng.Job("initapi")
+	job.Setenv("Root", unitTestStoreBase)
+	job.SetenvBool("Autorestart", false)
+	job.Setenv("BridgeIface", unitTestNetworkBridge)
+	if err := job.Run(); err != nil {
+		log.Fatalf("Unable to create a runtime for tests: %s", err)
 	}
+	srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))

 	// If the unit test is not found, try to download it.
-	if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
+	if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID {
 		// Retrieve the Image
 		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
-			log.Fatalf("Unable to pull the test image:", err)
+			log.Fatalf("Unable to pull the test image: %s", err)
 		}
 	}
 }
@@ -149,18 +142,22 @@ func spawnGlobalDaemon() {
 		utils.Debugf("Global runtime already exists. Skipping.")
 		return
 	}
-	globalRuntime = mkRuntime(log.New(os.Stderr, "", 0))
-	srv := &Server{
-		runtime:     globalRuntime,
-		pullingPool: make(map[string]struct{}),
-		pushingPool: make(map[string]struct{}),
-	}
+	t := log.New(os.Stderr, "", 0)
+	eng := NewTestEngine(t)
+	globalEngine = eng
+	globalRuntime = mkRuntimeFromEngine(eng, t)

 	// Spawn a Daemon
 	go func() {
 		utils.Debugf("Spawning global daemon for integration tests")
-		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
-			log.Fatalf("Unable to spawn the test daemon:", err)
+		listenURL := &url.URL{
+			Scheme: testDaemonProto,
+			Host:   testDaemonAddr,
+		}
+		job := eng.Job("serveapi", listenURL.String())
+		job.SetenvBool("Logging", os.Getenv("DEBUG") != "")
+		if err := job.Run(); err != nil {
+			log.Fatalf("Unable to spawn the test daemon: %s", err)
 		}
 	}()
 	// Give some time to ListenAndServer to actually start
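The comment at the end of this hunk still papers over daemon startup with a fixed sleep. A sketch of a more deterministic alternative — not part of the diff; it only uses the testDaemonProto/testDaemonAddr constants defined above and assumes the proto is dialable with net.Dial:

```go
// Sketch: poll the daemon address until it accepts connections instead of
// sleeping a fixed amount of time. Illustrative only.
func waitDaemonReady(timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if conn, err := net.Dial(testDaemonProto, testDaemonAddr); err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("daemon at %s did not start within %s", testDaemonAddr, timeout)
}
```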
@@ -170,8 +167,8 @@ func spawnGlobalDaemon() {

 // FIXME: test that ImagePull(json=true) send correct json output

-func GetTestImage(runtime *Runtime) *Image {
-	imgs, err := runtime.graph.Map()
+func GetTestImage(runtime *docker.Runtime) *docker.Image {
+	imgs, err := runtime.Graph().Map()
 	if err != nil {
 		log.Fatalf("Unable to get the test image:", err)
 	}
@@ -180,7 +177,7 @@ func GetTestImage(runtime *Runtime) *Image {
 			return image
 		}
 	}
-	log.Fatalf("Test image %v not found", unitTestImageID)
+	log.Fatalf("Test image %v not found in %s: %v", unitTestImageID, runtime.Graph().Root, imgs)
 	return nil
 }

@@ -193,7 +190,7 @@ func TestRuntimeCreate(t *testing.T) {
 		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
 	}

-	container, _, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&docker.Config{
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"ls", "-al"},
 	},
@@ -234,13 +231,25 @@ func TestRuntimeCreate(t *testing.T) {
 		t.Errorf("Exists() returned false for a newly created container")
 	}

+	// Test that conflict error displays correct details
+	testContainer, _, _ := runtime.Create(
+		&docker.Config{
+			Image: GetTestImage(runtime).ID,
+			Cmd:   []string{"ls", "-al"},
+		},
+		"conflictname",
+	)
+	if _, _, err := runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
+		t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %v", err)
+	}
+
 	// Make sure create with bad parameters returns an error
-	if _, _, err = runtime.Create(&Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
+	if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
 		t.Fatal("Builder.Create should throw an error when Cmd is missing")
 	}

 	if _, _, err := runtime.Create(
-		&Config{
+		&docker.Config{
 			Image: GetTestImage(runtime).ID,
 			Cmd:   []string{},
 		},
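The new name-conflict test above asserts that the error message carries the short form of the offending container ID. Assuming utils.TruncateID returns the 12-character prefix of a full ID — which is what the unitTestImageIDShort constant defined earlier suggests — the relationship looks like:

```go
// Illustration only: TruncateID is assumed to return the first 12 characters
// of a full 64-character ID, matching unitTestImageIDShort above.
func ExampleTruncateID() {
	fmt.Println(utils.TruncateID(unitTestImageID))
	// Output: 83599e29c455
}
```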
@@ -249,7 +258,7 @@ func TestRuntimeCreate(t *testing.T) {
 		t.Fatal("Builder.Create should throw an error when Cmd is empty")
 	}

-	config := &Config{
+	config := &docker.Config{
 		Image:     GetTestImage(runtime).ID,
 		Cmd:       []string{"/bin/ls"},
 		PortSpecs: []string{"80"},
@@ -262,7 +271,7 @@ func TestRuntimeCreate(t *testing.T) {
 	}

 	// test expose 80:8000
-	container, warnings, err := runtime.Create(&Config{
+	container, warnings, err := runtime.Create(&docker.Config{
 		Image:     GetTestImage(runtime).ID,
 		Cmd:       []string{"ls", "-al"},
 		PortSpecs: []string{"80:8000"},
@@ -281,7 +290,7 @@ func TestDestroy(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)

-	container, _, err := runtime.Create(&Config{
+	container, _, err := runtime.Create(&docker.Config{
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"ls", "-al"},
 	}, "")
@@ -308,12 +317,6 @@ func TestDestroy(t *testing.T) {
 		t.Errorf("Unable to get newly created container")
 	}

-	// Make sure the container root directory does not exist anymore
-	_, err = os.Stat(container.root)
-	if err == nil || !os.IsNotExist(err) {
-		t.Errorf("Container root directory still exists after destroy")
-	}
-
 	// Test double destroy
 	if err := runtime.Destroy(container); err == nil {
 		// It should have failed
@@ -325,13 +328,13 @@ func TestGet(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)

-	container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container1)

-	container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container2)

-	container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container3)

 	if runtime.Get(container1.ID) != container1 {
@@ -348,15 +351,21 @@ func TestGet(t *testing.T) {

 }

-func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
+func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
 	var (
-		err       error
-		container *Container
-		strPort   string
-		runtime   = mkRuntime(t)
-		port      = 5554
-		p         Port
+		err     error
+		id      string
+		strPort string
+		eng     = NewTestEngine(t)
+		runtime = mkRuntimeFromEngine(eng, t)
+		port    = 5554
+		p       docker.Port
 	)
+	defer func() {
+		if err != nil {
+			runtime.Nuke()
+		}
+	}()

 	for {
 		port += 1
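One subtlety in the deferred cleanup introduced above: the guard reads the err declared in the var block, but later checks written as `if err := jobCreate.Run(); err != nil { ... }` declare a fresh err in their own scope, so those failure paths never reach the guard (t.Fatal still runs deferred functions via runtime.Goexit; the guard is skipped only because the outer err stays nil). A standalone sketch of the pitfall, illustrative only and not part of the diff:

```go
// Sketch: the deferred guard only sees the outer err. Shadowing it with ":="
// in an inner scope means the guard stays nil on that path; assigning with
// "=" is what would make the deferred cleanup trigger.
func shadowingPitfall() {
	var err error
	defer func() {
		if err != nil {
			fmt.Println("cleanup runs")
		}
	}()
	if err := fmt.Errorf("job failed"); err != nil { // shadows the outer err
		return // the deferred guard sees err == nil; no cleanup
	}
}
```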
@@ -369,40 +378,48 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
 		} else {
 			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
 		}
-		ep := make(map[Port]struct{}, 1)
-		p = Port(fmt.Sprintf("%s/%s", strPort, proto))
+		ep := make(map[docker.Port]struct{}, 1)
+		p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto))
 		ep[p] = struct{}{}

-		container, _, err = runtime.Create(&Config{
-			Image:        GetTestImage(runtime).ID,
-			Cmd:          []string{"sh", "-c", cmd},
-			PortSpecs:    []string{fmt.Sprintf("%s/%s", strPort, proto)},
-			ExposedPorts: ep,
-		}, "")
-		if err != nil {
-			nuke(runtime)
+		jobCreate := eng.Job("create")
+		jobCreate.Setenv("Image", unitTestImageID)
+		jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
+		jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
+		jobCreate.SetenvJson("ExposedPorts", ep)
+		jobCreate.StdoutParseString(&id)
+		if err := jobCreate.Run(); err != nil {
 			t.Fatal(err)
 		}
-
-		if container != nil {
+		// FIXME: this relies on the undocumented behavior of runtime.Create
+		// which will return a nil error AND container if the exposed ports
+		// are invalid. That behavior should be fixed!
+		if id != "" {
 			break
 		}
 		t.Logf("Port %v already in use, trying another one", strPort)
-	}

-	container.hostConfig = &HostConfig{
-		PortBindings: make(map[Port][]PortBinding),
 	}
-	container.hostConfig.PortBindings[p] = []PortBinding{
+
+	jobStart := eng.Job("start", id)
+	portBindings := make(map[docker.Port][]docker.PortBinding)
+	portBindings[p] = []docker.PortBinding{
 		{},
 	}
-	if err := container.Start(); err != nil {
-		nuke(runtime)
+	if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
+		t.Fatal(err)
+	}
+	if err := jobStart.Run(); err != nil {
 		t.Fatal(err)
 	}

+	container := runtime.Get(id)
+	if container == nil {
+		t.Fatalf("Couldn't fetch test container %s", id)
+	}
+
 	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
-		for !container.State.Running {
+		for !container.State.IsRunning() {
 			time.Sleep(10 * time.Millisecond)
 		}
 	})
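Later hunks call createTestContainer and createNamedTestContainer, which wrap the same create-job pattern shown above. A plausible sketch, assuming the create job accepts the container name as an argument and that Job.ImportEnv can serialize a whole docker.Config into the job environment (both assumptions about code outside this diff):

```go
// Hypothetical sketch of the helpers used by the tests below.
func createNamedTestContainer(eng *engine.Engine, config *docker.Config, t *testing.T, name string) (id string) {
	job := eng.Job("create", name)
	if err := job.ImportEnv(config); err != nil {
		t.Fatal(err)
	}
	job.StdoutParseString(&id)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return
}

func createTestContainer(eng *engine.Engine, config *docker.Config, t *testing.T) string {
	return createNamedTestContainer(eng, config, t, "")
}
```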
@@ -500,14 +517,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
 }

 func TestRestore(t *testing.T) {
-	runtime1 := mkRuntime(t)
-	defer nuke(runtime1)
+	eng := NewTestEngine(t)
+	runtime1 := mkRuntimeFromEngine(eng, t)
+	defer runtime1.Nuke()
 	// Create a container with one instance of docker
-	container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
+	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
 	defer runtime1.Destroy(container1)

 	// Create a second container meant to be killed
-	container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
+	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
 	defer runtime1.Destroy(container2)

 	// Start the container non blocking
@@ -515,7 +533,7 @@ func TestRestore(t *testing.T) {
 		t.Fatal(err)
 	}

-	if !container2.State.Running {
+	if !container2.State.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}

@@ -525,7 +543,7 @@ func TestRestore(t *testing.T) {
 	if err := container2.WaitTimeout(2 * time.Second); err != nil {
 		t.Fatal(err)
 	}
-	container2.State.Running = true
+	container2.State.SetRunning(42)
 	container2.ToDisk()

 	if len(runtime1.List()) != 2 {
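Direct field access such as `container2.State.Running = true` becomes `SetRunning(42)` here (the argument presumably being a fake PID), and reads become `IsRunning()`. That switch only makes sense if State now guards its fields with a lock. A minimal sketch of that shape — the exact field set and semantics are assumptions, and it would need the "sync" import this test file just dropped:

```go
// Minimal sketch of a lock-guarded State, showing why callers move to
// accessor methods instead of touching fields directly.
type State struct {
	sync.RWMutex
	Running  bool
	Pid      int
	ExitCode int
}

func (s *State) IsRunning() bool {
	s.RLock()
	defer s.RUnlock()
	return s.Running
}

func (s *State) SetRunning(pid int) {
	s.Lock()
	defer s.Unlock()
	s.Running = true
	s.Pid = pid
}

func (s *State) SetStopped(exitCode int) {
	s.Lock()
	defer s.Unlock()
	s.Running = false
	s.Pid = 0
	s.ExitCode = exitCode
}
```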
@@ -535,24 +553,31 @@ func TestRestore(t *testing.T) {
 		t.Fatal(err)
 	}

-	if !container2.State.Running {
+	if !container2.State.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}

 	// Here are are simulating a docker restart - that is, reloading all containers
 	// from scratch
-	runtime1.config.AutoRestart = false
-	runtime2, err := NewRuntimeFromDirectory(runtime1.config)
+	root := eng.Root()
+	eng, err := engine.New(root)
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer nuke(runtime2)
+	job := eng.Job("initapi")
+	job.Setenv("Root", eng.Root())
+	job.SetenvBool("Autorestart", false)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	runtime2 := mkRuntimeFromEngine(eng, t)
 	if len(runtime2.List()) != 2 {
 		t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
 	}
 	runningCount := 0
 	for _, c := range runtime2.List() {
-		if c.State.Running {
+		if c.State.IsRunning() {
 			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
 			runningCount++
 		}
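The engine.New-plus-initapi sequence above is repeated almost verbatim in TestReloadContainerLinks below (twice). A helper like the following could factor it out — hypothetical, not part of the diff; it only reuses calls already shown in these hunks:

```go
// Hypothetical helper for the "simulated daemon restart" boilerplate:
// re-create an engine on the same root and re-run initapi on it.
func restartEngine(t *testing.T, root string, autorestart bool) *engine.Engine {
	eng, err := engine.New(root)
	if err != nil {
		t.Fatal(err)
	}
	job := eng.Job("initapi")
	job.Setenv("Root", root)
	job.SetenvBool("Autorestart", autorestart)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return eng
}
```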
@@ -567,18 +592,35 @@ func TestRestore(t *testing.T) {
 	if err := container3.Run(); err != nil {
 		t.Fatal(err)
 	}
-	container2.State.Running = false
+	container2.State.SetStopped(0)
 }

 func TestReloadContainerLinks(t *testing.T) {
-	runtime1 := mkRuntime(t)
+	// FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false,
+	// and we want to set it to true.
+	root, err := newTestDirectory(unitTestStoreBase)
+	if err != nil {
+		t.Fatal(err)
+	}
+	eng, err := engine.New(root)
+	if err != nil {
+		t.Fatal(err)
+	}
+	job := eng.Job("initapi")
+	job.Setenv("Root", eng.Root())
+	job.SetenvBool("Autorestart", true)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	runtime1 := mkRuntimeFromEngine(eng, t)
 	defer nuke(runtime1)
 	// Create a container with one instance of docker
-	container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
+	container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
 	defer runtime1.Destroy(container1)

 	// Create a second container meant to be killed
-	container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
+	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
 	defer runtime1.Destroy(container2)

 	// Start the container non blocking
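The FIXME at the top of this hunk spells out what NewTestEngine does: create a fresh test directory and run initapi with Autorestart=false. Reconstructed from that comment and the calls above, the helper presumably looks roughly like this — the utils.Fataler parameter is an assumption, needed because spawnGlobalDaemon passes a *log.Logger rather than a *testing.T:

```go
// Sketch of NewTestEngine as described by the FIXME above; reconstructed
// from usage in this diff, not copied from the real helper.
func NewTestEngine(t utils.Fataler) *engine.Engine {
	root, err := newTestDirectory(unitTestStoreBase)
	if err != nil {
		t.Fatal(err)
	}
	eng, err := engine.New(root)
	if err != nil {
		t.Fatal(err)
	}
	job := eng.Job("initapi")
	job.Setenv("Root", root)
	job.SetenvBool("Autorestart", false)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return eng
}
```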
@@ -586,7 +628,9 @@ func TestReloadContainerLinks(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Add a link to container 2
-	container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
+	// FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink().
+	// Why do we need it @crosbymichael?
+	// container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
 	if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
 		t.Fatal(err)
 	}
@@ -594,11 +638,11 @@ func TestReloadContainerLinks(t *testing.T) {
 		t.Fatal(err)
 	}

-	if !container2.State.Running {
+	if !container2.State.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}

-	if !container1.State.Running {
+	if !container1.State.IsRunning() {
 		t.Fatalf("Container %s should appear as running but isn't", container1.ID)
 	}

@@ -608,18 +652,24 @@ func TestReloadContainerLinks(t *testing.T) {

 	// Here are are simulating a docker restart - that is, reloading all containers
 	// from scratch
-	runtime1.config.AutoRestart = true
-	runtime2, err := NewRuntimeFromDirectory(runtime1.config)
+	eng, err = engine.New(root)
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer nuke(runtime2)
+	job = eng.Job("initapi")
+	job.Setenv("Root", eng.Root())
+	job.SetenvBool("Autorestart", false)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	runtime2 := mkRuntimeFromEngine(eng, t)
 	if len(runtime2.List()) != 2 {
 		t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
 	}
 	runningCount := 0
 	for _, c := range runtime2.List() {
-		if c.State.Running {
+		if c.State.IsRunning() {
 			runningCount++
 		}
 	}
@@ -627,109 +677,85 @@ func TestReloadContainerLinks(t *testing.T) {
 		t.Fatalf("Expected 2 container alive, %d found", runningCount)
 	}

+	// FIXME: we no longer test if containers were registered in the right order,
+	// because there is no public way to do so.
 	// Make sure container 2 ( the child of container 1 ) was registered and started first
 	// with the runtime
-	first := runtime2.containers.Front()
-	if first.Value.(*Container).ID != container2.ID {
+	//
+	containers := runtime2.List()
+	if len(containers) == 0 {
+		t.Fatalf("Runtime has no containers")
+	}
+	first := containers[0]
+	if first.ID != container2.ID {
 		t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
 	}

 	// Verify that the link is still registered in the runtime
-	entity := runtime2.containerGraph.Get(container1.Name)
-	if entity == nil {
-		t.Fatal("Entity should not be nil")
+	if c := runtime2.Get(container1.Name); c == nil {
+		t.Fatal("Named container is no longer registered after restart")
 	}
 }

 func TestDefaultContainerName(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	runtime := mkRuntimeFromEngine(eng, t)
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

-	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
+	config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	shortId, _, err := srv.ContainerCreate(config, "some_name")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
 	containerID := container.ID

 	if container.Name != "/some_name" {
 		t.Fatalf("Expect /some_name got %s", container.Name)
 	}

-	paths := runtime.containerGraph.RefPaths(containerID)
-	if paths == nil || len(paths) == 0 {
-		t.Fatalf("Could not find edges for %s", containerID)
-	}
-	edge := paths[0]
-	if edge.ParentID != "0" {
-		t.Fatalf("Expected engine got %s", edge.ParentID)
-	}
-	if edge.EntityID != containerID {
-		t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
-	}
-	if edge.Name != "some_name" {
-		t.Fatalf("Expected some_name got %s", edge.Name)
+	if c := runtime.Get("/some_name"); c == nil {
+		t.Fatalf("Couldn't retrieve test container as /some_name")
+	} else if c.ID != containerID {
+		t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
 	}
 }

 func TestRandomContainerName(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	runtime := mkRuntimeFromEngine(eng, t)
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

-	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
+	config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	shortId, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createTestContainer(eng, config, t))
 	containerID := container.ID

 	if container.Name == "" {
 		t.Fatalf("Expected not empty container name")
 	}

-	paths := runtime.containerGraph.RefPaths(containerID)
-	if paths == nil || len(paths) == 0 {
-		t.Fatalf("Could not find edges for %s", containerID)
-	}
-	edge := paths[0]
-	if edge.ParentID != "0" {
-		t.Fatalf("Expected engine got %s", edge.ParentID)
-	}
-	if edge.EntityID != containerID {
-		t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
-	}
-	if edge.Name == "" {
-		t.Fatalf("Expected not empty container name")
+	if c := runtime.Get(container.Name); c == nil {
+		t.Fatalf("Could not lookup container %s by its name", container.Name)
+	} else if c.ID != containerID {
+		t.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
 	}
 }

 func TestLinkChildContainer(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	runtime := mkRuntimeFromEngine(eng, t)
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

-	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
+	config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	shortId, _, err := srv.ContainerCreate(config, "/webapp")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))

 	webapp, err := runtime.GetByName("/webapp")
 	if err != nil {
@@ -740,17 +766,12 @@ func TestLinkChildContainer(t *testing.T) {
 		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
 	}

-	config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
+	config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	shortId, _, err = srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	childContainer := runtime.Get(shortId)
+	childContainer := runtime.Get(createTestContainer(eng, config, t))

 	if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
 		t.Fatal(err)
@@ -767,20 +788,16 @@ func TestLinkChildContainer(t *testing.T) {
 }

 func TestGetAllChildren(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	runtime := mkRuntimeFromEngine(eng, t)
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

-	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
+	config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	shortId, _, err := srv.ContainerCreate(config, "/webapp")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))

 	webapp, err := runtime.GetByName("/webapp")
 	if err != nil {
@@ -791,17 +808,12 @@ func TestGetAllChildren(t *testing.T) {
 		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
 	}

-	config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	shortId, _, err = srv.ContainerCreate(config, "")
+	config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	childContainer := runtime.Get(shortId)
+	childContainer := runtime.Get(createTestContainer(eng, config, t))

 	if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
 		t.Fatal(err)
@@ -828,19 +840,3 @@ func TestGetAllChildren(t *testing.T) {
 		}
 	}
 }
-
-func TestGetFullName(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-
-	name, err := runtime.getFullName("testing")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if name != "/testing" {
-		t.Fatalf("Expected /testing got %s", name)
-	}
-	if _, err := runtime.getFullName(""); err == nil {
-		t.Fatal("Error should not be nil")
-	}
-}