소스 검색

Fix 2585, rebase to master

Andy Rothfusz 11 년 전
부모
커밋
34d294c461
38개의 변경된 파일, 583개의 추가 작업 그리고 305개의 삭제
  1. 1 0
      .gitignore
  2. 0 1
      CHANGELOG.md
  3. 41 8
      commands.go
  4. 1 1
      config.go
  5. 28 10
      container.go
  6. 61 0
      container_test.go
  7. 1 1
      docker/docker.go
  8. 44 1
      docs/sources/commandline/cli.rst
  9. 5 1
      docs/sources/conf.py
  10. 6 3
      docs/sources/contributing/contributing.rst
  11. 10 0
      docs/sources/use/builder.rst
  12. 12 14
      engine/engine.go
  13. 5 5
      engine/init_test.go
  14. 29 29
      engine/job.go
  15. 79 42
      gograph/gograph.go
  16. 14 10
      hack/infrastructure/docker-ci/Dockerfile
  17. 1 0
      hack/infrastructure/docker-ci/VERSION
  18. 18 9
      hack/infrastructure/docker-ci/buildbot/master.cfg
  19. 68 53
      hack/infrastructure/docker-ci/deployment.py
  20. 6 11
      hack/infrastructure/docker-ci/docker-test/Dockerfile
  21. 9 14
      hack/infrastructure/docker-ci/docker-test/test_docker.sh
  22. 2 1
      hack/infrastructure/docker-ci/functionaltests/test_registry.sh
  23. 9 13
      hack/infrastructure/docker-ci/nightlyrelease/Dockerfile
  24. 0 50
      hack/infrastructure/docker-ci/nightlyrelease/dockerbuild
  25. 40 0
      hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh
  26. 0 1
      hack/infrastructure/docker-ci/nightlyrelease/release_credentials.json
  27. 18 0
      hack/infrastructure/docker-ci/registry-coverage/Dockerfile
  28. 18 0
      hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh
  29. 1 1
      hack/infrastructure/docker-ci/report/deployment.py
  30. 1 1
      namesgenerator/names-generator.go
  31. 0 2
      netlink/netlink_darwin.go
  32. 2 3
      runtime.go
  33. 9 0
      utils.go
  34. 1 1
      utils/daemon.go
  35. 1 1
      utils/random.go
  36. 2 2
      utils/utils.go
  37. 13 13
      utils/utils_test.go
  38. 27 3
      utils_test.go

+ 1 - 0
.gitignore

@@ -18,3 +18,4 @@ bundles/
 .hg/
 .git/
 vendor/pkg/
+pyenv

+ 0 - 1
CHANGELOG.md

@@ -17,7 +17,6 @@
 + Prevent DNS server conflicts in CreateBridgeIface
 + Validate bind mounts on the server side
 + Use parent image config in docker build
-* Fix regression in /etc/hosts
 
 #### Client
 

+ 41 - 8
commands.go

@@ -913,8 +913,16 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
-	src := cmd.Arg(0)
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
+	var src, repository, tag string
+
+	if cmd.NArg() == 3 {
		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
+		src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	} else {
+		src = cmd.Arg(0)
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
 	v := url.Values{}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
@@ -1349,8 +1357,16 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
-	name := cmd.Arg(0)
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
+	var name, repository, tag string
+
+	if cmd.NArg() == 3 {
		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' has been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
+		name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	} else {
+		name = cmd.Arg(0)
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
 
 	if name == "" {
 		cmd.Usage()
@@ -1387,7 +1403,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 
 func (cli *DockerCli) CmdEvents(args ...string) error {
 	cmd := Subcmd("events", "[OPTIONS]", "Get real time events from the server")
-	since := cmd.String("since", "", "Show events previously created (used for polling).")
+	since := cmd.String("since", "", "Show previously created events and then stream.")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1399,7 +1415,17 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 
 	v := url.Values{}
 	if *since != "" {
-		v.Set("since", *since)
+		loc := time.FixedZone(time.Now().Zone())
+		format := "2006-01-02 15:04:05 -0700 MST"
+		if len(*since) < len(format) {
+			format = format[:len(*since)]
+		}
+
+		if t, err := time.ParseInLocation(format, *since, loc); err == nil {
+			v.Set("since", strconv.FormatInt(t.Unix(), 10))
+		} else {
+			v.Set("since", *since)
+		}
 	}
 
 	if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
@@ -1656,9 +1682,16 @@ func (cli *DockerCli) CmdTag(args ...string) error {
 		return nil
 	}
 
-	v := url.Values{}
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+	var repository, tag string
 
+	if cmd.NArg() == 3 {
	fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' has been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
+		repository, tag = cmd.Arg(1), cmd.Arg(2)
+	} else {
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
+
+	v := url.Values{}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
 

+ 1 - 1
config.go

@@ -1,8 +1,8 @@
 package docker
 
 import (
-	"net"
 	"github.com/dotcloud/docker/engine"
+	"net"
 )
 
 // FIXME: separate runtime configuration from http api configuration

+ 28 - 10
container.go

@@ -133,7 +133,11 @@ type PortBinding struct {
 type Port string
 
 func (p Port) Proto() string {
-	return strings.Split(string(p), "/")[1]
+	parts := strings.Split(string(p), "/")
+	if len(parts) == 1 {
+		return "tcp"
+	}
+	return parts[1]
 }
 
 func (p Port) Port() string {
@@ -199,7 +203,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
 
 	var flVolumesFrom utils.ListOpts
-	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container")
+	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
 
 	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
 
@@ -394,9 +398,9 @@ func (container *Container) Inject(file io.Reader, pth string) error {
 	if _, err := os.Stat(path.Join(container.rwPath(), pth)); err == nil {
 		// Since err is nil, the path could be stat'd and it exists
 		return fmt.Errorf("%s exists", pth)
-	} else if ! os.IsNotExist(err) {
+	} else if !os.IsNotExist(err) {
 		// Expect err might be that the file doesn't exist, so
-		// if it's some other error, return that. 
+		// if it's some other error, return that.
 
 		return err
 	}
@@ -763,9 +767,23 @@ func (container *Container) Start() (err error) {
 
 	// Apply volumes from another container if requested
 	if container.Config.VolumesFrom != "" {
-		volumes := strings.Split(container.Config.VolumesFrom, ",")
-		for _, v := range volumes {
-			c := container.runtime.Get(v)
+		containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
+		for _, containerSpec := range containerSpecs {
+			mountRW := true
+			specParts := strings.SplitN(containerSpec, ":", 2)
+			switch len(specParts) {
+			case 0:
+				return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
+			case 2:
+				switch specParts[1] {
+				case "ro":
+					mountRW = false
+				case "rw": // mountRW is already true
+				default:
+					return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
+				}
+			}
+			c := container.runtime.Get(specParts[0])
 			if c == nil {
 				return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
 			}
@@ -778,7 +796,7 @@ func (container *Container) Start() (err error) {
 				}
 				container.Volumes[volPath] = id
 				if isRW, exists := c.VolumesRW[volPath]; exists {
-					container.VolumesRW[volPath] = isRW
+					container.VolumesRW[volPath] = isRW && mountRW
 				}
 			}
 
@@ -819,7 +837,7 @@ func (container *Container) Start() (err error) {
 		// Create the mountpoint
 		rootVolPath := path.Join(container.RootfsPath(), volPath)
 		if err := os.MkdirAll(rootVolPath, 0755); err != nil {
-			return nil
+			return err
 		}
 
 		// Do not copy or change permissions if we are mounting from the host
@@ -1086,7 +1104,7 @@ func (container *Container) allocateNetwork() error {
 				Gateway: manager.bridgeNetwork.IP,
 				manager: manager,
 			}
-			if iface !=nil && iface.IPNet.IP != nil {
+			if iface != nil && iface.IPNet.IP != nil {
 				ipNum := ipToInt(iface.IPNet.IP)
 				manager.ipAllocator.inUse[ipNum] = struct{}{}
 			} else {

+ 61 - 0
container_test.go

@@ -1338,6 +1338,67 @@ func TestBindMounts(t *testing.T) {
 	}
 }
 
+// Test that -volumes-from supports both read-only mounts
+func TestFromVolumesInReadonlyMode(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	container, _, err := runtime.Create(
+		&Config{
+			Image:   GetTestImage(runtime).ID,
+			Cmd:     []string{"/bin/echo", "-n", "foobar"},
+			Volumes: map[string]struct{}{"/test": {}},
+		},
+		"",
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container)
+	_, err = container.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !container.VolumesRW["/test"] {
+		t.Fail()
+	}
+
+	container2, _, err := runtime.Create(
+		&Config{
+			Image:       GetTestImage(runtime).ID,
+			Cmd:         []string{"/bin/echo", "-n", "foobar"},
+			VolumesFrom: container.ID + ":ro",
+		},
+		"",
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container2)
+
+	_, err = container2.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if container.Volumes["/test"] != container2.Volumes["/test"] {
+		t.Logf("container volumes do not match: %s | %s ",
+			container.Volumes["/test"],
+			container2.Volumes["/test"])
+		t.Fail()
+	}
+
+	_, exists := container2.VolumesRW["/test"]
+	if !exists {
+		t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW)
+		t.Fail()
+	}
+
+	if container2.VolumesRW["/test"] != false {
+		t.Log("'/test' volume mounted in read-write mode, expected read-only")
+		t.Fail()
+	}
+}
+
 // Test that VolumesRW values are copied to the new container.  Regression test for #1201
 func TestVolumesFromReadonlyMount(t *testing.T) {
 	runtime := mkRuntime(t)

+ 1 - 1
docker/docker.go

@@ -4,9 +4,9 @@ import (
 	"flag"
 	"fmt"
 	"github.com/dotcloud/docker"
+	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/sysinit"
 	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/engine"
 	"log"
 	"os"
 	"strings"

+ 44 - 1
docs/sources/commandline/cli.rst

@@ -245,6 +245,9 @@ Full -run example
     Usage: docker events
 
     Get real time events from the server
+    
+    -since="": Show previously created events and then stream.
+               (either seconds since epoch, or date string as below)
 
 .. _cli_events_example:
 
@@ -277,6 +280,23 @@ Shell 1: (Again .. now showing events)
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
 
+Show events in the past from a specified time
+.............................................
+
+.. code-block:: bash
+
+    $ sudo docker events -since 1378216169
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
+
+    $ sudo docker events -since '2013-09-03'
+    [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
+
+    $ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
+    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
 
 .. _cli_export:
 
@@ -460,6 +480,12 @@ Insert file from github
 
 The main process inside the container will be sent SIGKILL.
 
+Known Issues (kill)
+~~~~~~~~~~~~~~~~~~~
+
+* :issue:`197` indicates that ``docker kill`` may leave directories
+  behind and make it difficult to remove the container.
+
 .. _cli_login:
 
 ``login``
@@ -568,6 +594,12 @@ The main process inside the container will be sent SIGKILL.
     Remove one or more containers
         -link="": Remove the link instead of the actual container
 
+Known Issues (rm)
+~~~~~~~~~~~~~~~~~~~
+
+* :issue:`197` indicates that ``docker kill`` may leave directories
+  behind and make it difficult to remove the container.
+
 
 Examples:
 ~~~~~~~~~
@@ -628,7 +660,7 @@ network communication.
       -u="": Username or UID
       -dns=[]: Set custom dns servers for the container
       -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
-      -volumes-from="": Mount all volumes from the given container
+      -volumes-from="": Mount all volumes from the given container(s)
       -entrypoint="": Overwrite the default entrypoint set by the image
       -w="": Working directory inside the container
       -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
@@ -720,6 +752,17 @@ can access the network and environment of the redis container via
 environment variables.  The ``-name`` flag will assign the name ``console``
 to the newly created container.
 
+.. code-block:: bash
+
+   docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
+
+The ``-volumes-from`` flag mounts all the defined volumes from the
+refrence containers. Containers can be specified by a comma seperated
+list or by repetitions of the ``-volumes-from`` argument. The container
+id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
+read-only or read-write mode, respectively. By default, the volumes are mounted
+in the same mode (rw or ro) as the reference container.
+
 .. _cli_search:
 
 ``search``

+ 5 - 1
docs/sources/conf.py

@@ -40,7 +40,11 @@ html_additional_pages = {
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinxcontrib.httpdomain']
+extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks']
+
+# Configure extlinks
+extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s',
+	               'Issue ') }
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']

+ 6 - 3
docs/sources/contributing/contributing.rst

@@ -10,13 +10,16 @@ Want to hack on Docker? Awesome!
 The repository includes `all the instructions you need to get
 started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.
 
-The `developer environment Dockerfile <https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
+The `developer environment Dockerfile
+<https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
 specifies the tools and versions used to test and build Docker.
 
 If you're making changes to the documentation, see the
 `README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_.
 
-The `documentation environment Dockerfile <https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
+The `documentation environment Dockerfile
+<https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
 specifies the tools and versions used to build the Documentation.
 
-Further interesting details can be found in the `Packaging hints <https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.
+Further interesting details can be found in the `Packaging hints
+<https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.

+ 10 - 0
docs/sources/use/builder.rst

@@ -116,6 +116,16 @@ core concepts of Docker where commits are cheap and containers can be
 created from any point in an image's history, much like source
 control.
 
+Known Issues (RUN)
+..................
+
+* :issue:`783` is about file permissions problems that can occur when
+  using the AUFS file system. You might notice it during an attempt to
+  ``rm`` a file, for example. The issue describes a workaround.
+* :issue:`2424` Locale will not be set automatically.
+
+
+
 3.4 CMD
 -------
 

+ 12 - 14
engine/engine.go

@@ -2,13 +2,12 @@ package engine
 
 import (
 	"fmt"
-	"os"
+	"github.com/dotcloud/docker/utils"
 	"log"
+	"os"
 	"runtime"
-	"github.com/dotcloud/docker/utils"
 )
 
-
 type Handler func(*Job) string
 
 var globalHandlers map[string]Handler
@@ -25,8 +24,8 @@ func Register(name string, handler Handler) error {
 // It acts as a store for *containers*, and allows manipulation of these
 // containers by executing *jobs*.
 type Engine struct {
-	root		string
-	handlers	map[string]Handler
+	root     string
+	handlers map[string]Handler
 }
 
 // New initializes a new engine managing the directory specified at `root`.
@@ -56,8 +55,8 @@ func New(root string) (*Engine, error) {
 		return nil, err
 	}
 	eng := &Engine{
-		root:		root,
-		handlers:	globalHandlers,
+		root:     root,
+		handlers: globalHandlers,
 	}
 	return eng, nil
 }
@@ -66,12 +65,12 @@ func New(root string) (*Engine, error) {
 // This function mimics `Command` from the standard os/exec package.
 func (eng *Engine) Job(name string, args ...string) *Job {
 	job := &Job{
-		eng:		eng,
-		Name:		name,
-		Args:		args,
-		Stdin:		os.Stdin,
-		Stdout:		os.Stdout,
-		Stderr:		os.Stderr,
+		eng:    eng,
+		Name:   name,
+		Args:   args,
+		Stdin:  os.Stdin,
+		Stdout: os.Stdout,
+		Stderr: os.Stderr,
 	}
 	handler, exists := eng.handlers[name]
 	if exists {
@@ -79,4 +78,3 @@ func (eng *Engine) Job(name string, args ...string) *Job {
 	}
 	return job
 }
-

+ 5 - 5
engine/init_test.go

@@ -1,18 +1,18 @@
 package engine
 
 import (
-	"testing"
-	"runtime"
-	"strings"
 	"fmt"
-	"io/ioutil"
 	"github.com/dotcloud/docker/utils"
+	"io/ioutil"
+	"runtime"
+	"strings"
+	"testing"
 )
 
 var globalTestID string
 
 func init() {
-	Register("dummy", func(job *Job) string { return ""; })
+	Register("dummy", func(job *Job) string { return "" })
 }
 
 func mkEngine(t *testing.T) *Engine {

+ 29 - 29
engine/job.go

@@ -1,11 +1,11 @@
 package engine
 
 import (
-	"io"
-	"strings"
-	"fmt"
 	"encoding/json"
+	"fmt"
 	"github.com/dotcloud/docker/utils"
+	"io"
+	"strings"
 )
 
 // A job is the fundamental unit of work in the docker engine.
@@ -20,17 +20,17 @@ import (
 // One slight variation is that jobs report their status as a string. The
 // string "0" indicates success, and any other strings indicates an error.
 // This allows for richer error reporting.
-// 
+//
 type Job struct {
-	eng	*Engine
-	Name	string
-	Args	[]string
-	env	[]string
-	Stdin	io.ReadCloser
-	Stdout	io.WriteCloser
-	Stderr	io.WriteCloser
-	handler	func(*Job) string
-	status	string
+	eng     *Engine
+	Name    string
+	Args    []string
+	env     []string
+	Stdin   io.ReadCloser
+	Stdout  io.WriteCloser
+	Stderr  io.WriteCloser
+	handler func(*Job) string
+	status  string
 }
 
 // Run executes the job and blocks until the job completes.
@@ -57,21 +57,21 @@ func (job *Job) String() string {
 }
 
 func (job *Job) Getenv(key string) (value string) {
-        for _, kv := range job.env {
-                if strings.Index(kv, "=") == -1 {
-                        continue
-                }
-                parts := strings.SplitN(kv, "=", 2)
-                if parts[0] != key {
-                        continue
-                }
-                if len(parts) < 2 {
-                        value = ""
-                } else {
-                        value = parts[1]
-                }
-        }
-        return
+	for _, kv := range job.env {
+		if strings.Index(kv, "=") == -1 {
+			continue
+		}
+		parts := strings.SplitN(kv, "=", 2)
+		if parts[0] != key {
+			continue
+		}
+		if len(parts) < 2 {
+			value = ""
+		} else {
+			value = parts[1]
+		}
+	}
+	return
 }
 
 func (job *Job) GetenvBool(key string) (value bool) {
@@ -109,5 +109,5 @@ func (job *Job) SetenvList(key string, value []string) error {
 }
 
 func (job *Job) Setenv(key, value string) {
-	job.env = append(job.env, key + "=" + value)
+	job.env = append(job.env, key+"="+value)
 }

+ 79 - 42
gograph/gograph.go

@@ -48,7 +48,7 @@ type WalkFunc func(fullPath string, entity *Entity) error
 // Graph database for storing entities and their relationships
 type Database struct {
 	conn *sql.DB
-	mux  sync.Mutex
+	mux  sync.RWMutex
 }
 
 // Create a new graph database initialized with a root entity
@@ -138,7 +138,14 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) {
 
 // Return true if a name already exists in the database
 func (db *Database) Exists(name string) bool {
-	return db.Get(name) != nil
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	e, err := db.get(name)
+	if err != nil {
+		return false
+	}
+	return e != nil
 }
 
 func (db *Database) setEdge(parentPath, name string, e *Entity) error {
@@ -165,6 +172,9 @@ func (db *Database) RootEntity() *Entity {
 
 // Return the entity for a given path
 func (db *Database) Get(name string) *Entity {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
 	e, err := db.get(name)
 	if err != nil {
 		return nil
@@ -200,23 +210,36 @@ func (db *Database) get(name string) (*Entity, error) {
 // List all entities by from the name
 // The key will be the full path of the entity
 func (db *Database) List(name string, depth int) Entities {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
 	out := Entities{}
 	e, err := db.get(name)
 	if err != nil {
 		return out
 	}
-	for c := range db.children(e, name, depth) {
+
+	children, err := db.children(e, name, depth, nil)
+	if err != nil {
+		return out
+	}
+
+	for _, c := range children {
 		out[c.FullPath] = c.Entity
 	}
 	return out
 }
 
+// Walk through the child graph of an entity, calling walkFunc for each child entity.
+// It is safe for walkFunc to call graph functions.
 func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
-	e, err := db.get(name)
+	children, err := db.Children(name, depth)
 	if err != nil {
 		return err
 	}
-	for c := range db.children(e, name, depth) {
+
+	// Note: the database lock must not be held while calling walkFunc
+	for _, c := range children {
 		if err := walkFunc(c.FullPath, c.Entity); err != nil {
 			return err
 		}
@@ -224,8 +247,24 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
 	return nil
 }
 
+// Return the children of the specified entity
+func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
+	e, err := db.get(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return db.children(e, name, depth, nil)
+}
+
 // Return the reference count for a specified id
 func (db *Database) Refs(id string) int {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
 	var count int
 	if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
 		return 0
@@ -235,6 +274,9 @@ func (db *Database) Refs(id string) int {
 
 // Return all the id's path references
 func (db *Database) RefPaths(id string) Edges {
+	db.mux.RLock()
+	defer db.mux.RUnlock()
+
 	refs := Edges{}
 
 	rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id)
@@ -356,56 +398,51 @@ type WalkMeta struct {
 	Edge     *Edge
 }
 
-func (db *Database) children(e *Entity, name string, depth int) <-chan WalkMeta {
-	out := make(chan WalkMeta)
+func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) {
 	if e == nil {
-		close(out)
-		return out
+		return entities, nil
+	}
+
+	rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
+	if err != nil {
+		return nil, err
 	}
+	defer rows.Close()
 
-	go func() {
-		rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
-		if err != nil {
-			close(out)
+	for rows.Next() {
+		var entityId, entityName string
+		if err := rows.Scan(&entityId, &entityName); err != nil {
+			return nil, err
+		}
+		child := &Entity{entityId}
+		edge := &Edge{
+			ParentID: e.id,
+			Name:     entityName,
+			EntityID: child.id,
 		}
-		defer rows.Close()
 
-		for rows.Next() {
-			var entityId, entityName string
-			if err := rows.Scan(&entityId, &entityName); err != nil {
-				// Log error
-				continue
-			}
-			child := &Entity{entityId}
-			edge := &Edge{
-				ParentID: e.id,
-				Name:     entityName,
-				EntityID: child.id,
-			}
+		meta := WalkMeta{
+			Parent:   e,
+			Entity:   child,
+			FullPath: path.Join(name, edge.Name),
+			Edge:     edge,
+		}
 
-			meta := WalkMeta{
-				Parent:   e,
-				Entity:   child,
-				FullPath: path.Join(name, edge.Name),
-				Edge:     edge,
-			}
+		entities = append(entities, meta)
 
-			out <- meta
-			if depth == 0 {
-				continue
-			}
+		if depth != 0 {
 			nDepth := depth
 			if depth != -1 {
 				nDepth -= 1
 			}
-			sc := db.children(child, meta.FullPath, nDepth)
-			for c := range sc {
-				out <- c
+			entities, err = db.children(child, meta.FullPath, nDepth, entities)
+			if err != nil {
+				return nil, err
 			}
 		}
-		close(out)
-	}()
-	return out
+	}
+
+	return entities, nil
 }
 
 // Return the entity based on the parent path and name

+ 14 - 10
hack/infrastructure/docker-ci/Dockerfile

@@ -1,14 +1,16 @@
-# VERSION:        0.22
-# DOCKER-VERSION  0.6.3
-# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
-# DESCRIPTION:    Deploy docker-ci on Amazon EC2
+# VERSION:        0.25
+# DOCKER-VERSION  0.6.6
+# AUTHOR:         Daniel Mizyrycki <daniel@docker.com>
+# DESCRIPTION:    Deploy docker-ci on Digital Ocean
 # COMMENTS:
 #     CONFIG_JSON is an environment variable json string loaded as:
 #
 # export CONFIG_JSON='
-#     { "AWS_TAG":             "EC2_instance_name",
-#       "AWS_ACCESS_KEY":      "EC2_access_key",
-#       "AWS_SECRET_KEY":      "EC2_secret_key",
+#     { "DROPLET_NAME":        "docker-ci",
+#       "DO_CLIENT_ID":        "Digital_Ocean_client_id",
+#       "DO_API_KEY":          "Digital_Ocean_api_key",
+#       "DOCKER_KEY_ID":       "Digital_Ocean_ssh_key_id",
+#       "DOCKER_CI_KEY_PATH":  "docker-ci_private_key_path",
 #       "DOCKER_CI_PUB":       "$(cat docker-ci_ssh_public_key.pub)",
 #       "DOCKER_CI_KEY":       "$(cat docker-ci_ssh_private_key.key)",
 #       "BUILDBOT_PWD":        "Buildbot_server_password",
@@ -33,9 +35,11 @@
 
 from ubuntu:12.04
 
-run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
-run apt-get update; apt-get install -y python2.7 python-dev python-pip ssh rsync less vim
-run pip install boto fabric
+run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \
+    > /etc/apt/sources.list
+run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \
+    python-pip ssh rsync less vim
+run pip install requests fabric
 
 # Add deployment code and set default container command
 add . /docker-ci

+ 1 - 0
hack/infrastructure/docker-ci/VERSION

@@ -0,0 +1 @@
+0.4.5

+ 18 - 9
hack/infrastructure/docker-ci/buildbot/master.cfg

@@ -43,7 +43,7 @@ c['slavePortnum'] = PORT_MASTER
 
 # Schedulers
 c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker',
-    'index','registry','coverage','nightlyrelease'])]
+    'index','registry','docker-coverage','registry-coverage','nightlyrelease'])]
 c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None,
     change_filter=filter.ChangeFilter(branch='master',
     repository='https://github.com/dotcloud/docker'), builderNames=['docker'])]
@@ -51,7 +51,7 @@ c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
     change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
     builderNames=['pullrequest'])]
 c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease',
-    'coverage'], hour=7, minute=00)]
+    'docker-coverage','registry-coverage'], hour=7, minute=00)]
 c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
     hour=range(0,24,4), minute=15)]
 
@@ -76,17 +76,25 @@ c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],
 
 # Docker coverage test
 factory = BuildFactory()
-factory.addStep(ShellCommand(description='Coverage', logEnviron=False,
+factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False,
     usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format(
     DOCKER_CI_PATH)))
-c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'],
+c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'],
+    factory=factory)]
+
+# Docker registry coverage test
+factory = BuildFactory()
+factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False,
+    usePTY=True, command='docker run registry_coverage'.format(
+    DOCKER_CI_PATH)))
+c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'],
     factory=factory)]
 
 # Registry functional test
 factory = BuildFactory()
 factory.addStep(ShellCommand(description='registry', logEnviron=False,
     command='. {0}/master/credentials.cfg; '
-    '/docker-ci/functionaltests/test_registry.sh'.format(BUILDBOT_PATH),
+    '{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
     usePTY=True))
 c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
     factory=factory)]
@@ -95,16 +103,17 @@ c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
 factory = BuildFactory()
 factory.addStep(ShellCommand(description='index', logEnviron=False,
     command='. {0}/master/credentials.cfg; '
-    '/docker-ci/functionaltests/test_index.py'.format(BUILDBOT_PATH),
+    '{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
     usePTY=True))
 c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
     factory=factory)]
 
 # Docker nightly release
+nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET='
+    'test.docker.io dockerbuilder hack/dind dockerbuild.sh')
 factory = BuildFactory()
-factory.addStep(ShellCommand(description='NightlyRelease', logEnviron=False,
-    usePTY=True, command='docker run -privileged'
-    ' -e AWS_S3_BUCKET=test.docker.io dockerbuilder'))
+factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
+    usePTY=True, command=nightlyrelease_cmd))
 c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
     factory=factory)]
 

+ 68 - 53
hack/infrastructure/docker-ci/deployment.py

@@ -1,11 +1,11 @@
 #!/usr/bin/env python
 
-import os, sys, re, json, base64
-from boto.ec2.connection import EC2Connection
+import os, sys, re, json, requests, base64
 from subprocess import call
 from fabric import api
 from fabric.api import cd, run, put, sudo
 from os import environ as env
+from datetime import datetime
 from time import sleep
 
 # Remove SSH private key as it needs more processing
@@ -20,42 +20,41 @@ for key in CONFIG:
 env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
     env['CONFIG_JSON'],flags=re.DOTALL)
 
-
-AWS_TAG = env.get('AWS_TAG','docker-ci')
-AWS_KEY_NAME = 'dotcloud-dev'       # Same as CONFIG_JSON['DOCKER_CI_PUB']
-AWS_AMI = 'ami-d582d6bc'            # Ubuntu 13.04
-AWS_REGION = 'us-east-1'
-AWS_TYPE = 'm1.small'
-AWS_SEC_GROUPS = 'gateway'
-AWS_IMAGE_USER = 'ubuntu'
+DROPLET_NAME = env.get('DROPLET_NAME','docker-ci')
+TIMEOUT = 120            # Seconds before droplet creation times out
+IMAGE_ID = 1004145       # Docker on Ubuntu 13.04
+REGION_ID = 4            # New York 2
+SIZE_ID = 62             # memory 2GB
+DO_IMAGE_USER = 'root'   # Image user on Digital Ocean
+API_URL = 'https://api.digitalocean.com/'
 DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
 DOCKER_CI_PATH = '/docker-ci'
 CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH)
 
 
-class AWS_EC2:
-    '''Amazon EC2'''
-    def __init__(self, access_key, secret_key):
+class DigitalOcean():
+
+    def __init__(self, key, client):
         '''Set default API parameters'''
-        self.handler = EC2Connection(access_key, secret_key)
-    def create_instance(self, tag, instance_type):
-        reservation = self.handler.run_instances(**instance_type)
-        instance = reservation.instances[0]
-        sleep(10)
-        while instance.state != 'running':
-            sleep(5)
-            instance.update()
-            print "Instance state: %s" % (instance.state)
-        instance.add_tag("Name",tag)
-        print "instance %s done!" % (instance.id)
-        return instance.ip_address
-    def get_instances(self):
-        return self.handler.get_all_instances()
-    def get_tags(self):
-        return dict([(i.instances[0].id, i.instances[0].tags['Name'])
-            for i in self.handler.get_all_instances() if i.instances[0].tags])
-    def del_instance(self, instance_id):
-        self.handler.terminate_instances(instance_ids=[instance_id])
+        self.key = key
+        self.client = client
+        self.api_url = API_URL
+
+    def api(self, cmd_path, api_arg={}):
+        '''Make api call'''
+        api_arg.update({'api_key':self.key, 'client_id':self.client})
+        resp = requests.get(self.api_url + cmd_path, params=api_arg).text
+        resp = json.loads(resp)
+        if resp['status'] != 'OK':
+            raise Exception(resp['error_message'])
+        return resp
+
+    def droplet_data(self, name):
+        '''Get droplet data'''
+        data = self.api('droplets')
+        data = [droplet for droplet in data['droplets']
+            if droplet['name'] == name]
+        return data[0] if data else {}
 
 
 def json_fmt(data):
@@ -63,20 +62,36 @@ def json_fmt(data):
     return json.dumps(data, sort_keys = True, indent = 2)
 
 
-# Create EC2 API handler
-ec2 = AWS_EC2(env['AWS_ACCESS_KEY'], env['AWS_SECRET_KEY'])
+do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
+
+# Get DROPLET_NAME data
+data = do.droplet_data(DROPLET_NAME)
 
-# Stop processing if AWS_TAG exists on EC2
-if AWS_TAG in ec2.get_tags().values():
-    print ('Instance: {} already deployed. Not further processing.'
-        .format(AWS_TAG))
+# Stop processing if DROPLET_NAME exists on Digital Ocean
+if data:
+    print ('Droplet: {} already deployed. Not further processing.'
+        .format(DROPLET_NAME))
     exit(1)
 
-ip = ec2.create_instance(AWS_TAG, {'image_id':AWS_AMI, 'instance_type':AWS_TYPE,
-    'security_groups':[AWS_SEC_GROUPS], 'key_name':AWS_KEY_NAME})
+# Create droplet
+do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
+    'image_id':IMAGE_ID, 'size_id':SIZE_ID,
+    'ssh_key_ids':[env['DOCKER_KEY_ID']]})
 
-# Wait 30 seconds for the machine to boot
-sleep(30)
+# Wait for droplet to be created.
+start_time = datetime.now()
+while (data.get('status','') != 'active' and (
+ datetime.now()-start_time).seconds < TIMEOUT):
+    data = do.droplet_data(DROPLET_NAME)
+    print data['status']
+    sleep(3)
+
+# Wait for the machine to boot
+sleep(15)
+
+# Get droplet IP
+ip = str(data['ip_address'])
+print 'droplet: {}    ip: {}'.format(DROPLET_NAME, ip)
 
 # Create docker-ci ssh private key so docker-ci docker container can communicate
 # with its EC2 instance
@@ -86,7 +101,7 @@ os.chmod('/root/.ssh/id_rsa',0600)
 open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
 
 api.env.host_string = ip
-api.env.user = AWS_IMAGE_USER
+api.env.user = DO_IMAGE_USER
 api.env.key_filename = '/root/.ssh/id_rsa'
 
 # Correct timezone
@@ -100,20 +115,17 @@ sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB']))
 credentials = {
     'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'],
     'AWS_SECRET_KEY': env['PKG_SECRET_KEY'],
-    'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE'],
-    'INDEX_AUTH': env['INDEX_AUTH']}
+    'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']}
 open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write(
     base64.b64encode(json.dumps(credentials)))
 
 # Transfer docker
 sudo('mkdir -p ' + DOCKER_CI_PATH)
-sudo('chown {}.{} {}'.format(AWS_IMAGE_USER, AWS_IMAGE_USER, DOCKER_CI_PATH))
-call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, AWS_IMAGE_USER, ip,
+sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH))
+call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,
     os.path.dirname(DOCKER_CI_PATH)), shell=True)
 
 # Install Docker and Buildbot dependencies
-sudo('addgroup docker')
-sudo('usermod -a -G docker ubuntu')
 sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
 sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
 sudo('echo deb https://get.docker.io/ubuntu docker main >'
@@ -123,7 +135,7 @@ sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
     ' > /etc/apt/sources.list; apt-get update')
 sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
     ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
-    ' aufs-tools make libfontconfig libevent-dev')
+    ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
 sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | '
     'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
 sudo('GOPATH=/go go get -d github.com/dotcloud/docker')
@@ -135,13 +147,13 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
     'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
     ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')
 
-# Preventively reboot docker-ci daily
-sudo('ln -s /sbin/reboot /etc/cron.daily')
-
 # Build docker-ci containers
 sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
+sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH))
 sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
     DOCKER_CI_PATH))
+sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format(
+    DOCKER_CI_PATH))
 
 # Download docker-ci testing container
 sudo('docker pull mzdaniel/test_docker')
@@ -154,3 +166,6 @@ sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'
     env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'],
     env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'],
     env['REGISTRY_SECRET_KEY']))
+
+# Preventively reboot docker-ci daily
+sudo('ln -s /sbin/reboot /etc/cron.daily')

+ 6 - 11
hack/infrastructure/docker-ci/docker-test/Dockerfile

@@ -1,6 +1,6 @@
-# VERSION:        0.3
-# DOCKER-VERSION  0.6.3
-# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
+# VERSION:        0.4
+# DOCKER-VERSION  0.6.6
+# AUTHOR:         Daniel Mizyrycki <daniel@docker.com>
 # DESCRIPTION:    Testing docker PRs and commits on top of master using
 # REFERENCES:     This code reuses the excellent implementation of
 #                 Docker in Docker made by Jerome Petazzoni.
@@ -15,15 +15,10 @@
 # TO_RUN:         docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch]
 
 from docker
-maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+maintainer Daniel Mizyrycki <daniel@docker.com>
 
-# Setup go environment. Extracted from /Dockerfile
-env     CGO_ENABLED 0
-env     GOROOT  /goroot
-env     PATH    $PATH:/goroot/bin
-env     GOPATH  /go:/go/src/github.com/dotcloud/docker/vendor
-volume  /var/lib/docker
-workdir /go/src/github.com/dotcloud/docker
+# Setup go in PATH. Extracted from /Dockerfile
+env PATH /usr/local/go/bin:$PATH
 
 # Add test_docker.sh
 add test_docker.sh /usr/bin/test_docker.sh

+ 9 - 14
hack/infrastructure/docker-ci/docker-test/test_docker.sh

@@ -8,31 +8,26 @@ BRANCH=${3-master}
 # Compute test paths
 DOCKER_PATH=/go/src/github.com/dotcloud/docker
 
+# Timestamp
+echo
+date; echo
+
 # Fetch latest master
+cd /
 rm -rf /go
-mkdir -p $DOCKER_PATH
+git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH
 cd $DOCKER_PATH
-git init .
-git fetch -q http://github.com/dotcloud/docker master
-git reset --hard FETCH_HEAD
 
 # Merge commit
-#echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliable on AWS
-git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1
-
-# Merge commit in top of master
 git fetch -q "$REPO" "$BRANCH"
-git merge --no-edit $COMMIT || exit 1
+git merge --no-edit $COMMIT || exit 255
 
 # Test commit
-go test -v; exit_status=$?
+./hack/make.sh test; exit_status=$?
 
 # Display load if test fails
-if [ $exit_status -eq 1 ] ; then
+if [ $exit_status -ne 0 ] ; then
     uptime; echo; free
 fi
 
-# Cleanup testing directory
-rm -rf $BASE_PATH
-
 exit $exit_status

+ 2 - 1
hack/infrastructure/docker-ci/functionaltests/test_registry.sh

@@ -8,10 +8,12 @@ rm -rf docker-registry
 # Setup the environment
 export SETTINGS_FLAVOR=test
 export DOCKER_REGISTRY_CONFIG=config_test.yml
+export PYTHONPATH=$(pwd)/docker-registry/test
 
 # Get latest docker registry
 git clone -q https://github.com/dotcloud/docker-registry.git
 cd docker-registry
+sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml
 
 # Get dependencies
 pip install -q -r requirements.txt
@@ -20,7 +22,6 @@ pip install -q tox
 
 # Run registry tests
 tox || exit 1
-export PYTHONPATH=$(pwd)/docker-registry
 python -m unittest discover -p s3.py -s test || exit 1
 python -m unittest discover -p workflow.py -s test
 

+ 9 - 13
hack/infrastructure/docker-ci/nightlyrelease/Dockerfile

@@ -1,20 +1,19 @@
-# VERSION:        1.2
-# DOCKER-VERSION  0.6.3
-# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
+# VERSION:        1.6
+# DOCKER-VERSION  0.6.6
+# AUTHOR:         Daniel Mizyrycki <daniel@docker.com>
 # DESCRIPTION:    Build docker nightly release using Docker in Docker.
 # REFERENCES:     This code reuses the excellent implementation of docker in docker
 #                 made by Jerome Petazzoni.  https://github.com/jpetazzo/dind
 # COMMENTS:
 #   release_credentials.json is a base64 json encoded file containing:
 #       { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id",
-#         "AWS_SECRET_KEY='Test_docker_AWS_S3_bucket_key'
-#         "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature'
-#         "INDEX_AUTH='Encripted_index_authentication' }
+#         "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key",
+#         "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" }
 # TO_BUILD:       docker build -t dockerbuilder .
-# TO_RELEASE:     docker run -i -t -privileged  -e AWS_S3_BUCKET="test.docker.io" dockerbuilder
+# TO_RELEASE:     docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh
 
 from docker
-maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+maintainer Daniel Mizyrycki <daniel@docker.com>
 
 # Add docker dependencies and downloading packages
 run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
@@ -24,11 +23,8 @@ run apt-get update; apt-get install -y -q wget python2.7
 run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker
 
 # Add proto docker builder
-add ./dockerbuild /usr/bin/dockerbuild
-run chmod +x /usr/bin/dockerbuild
+add ./dockerbuild.sh /usr/bin/dockerbuild.sh
+run chmod +x /usr/bin/dockerbuild.sh
 
 # Add release credentials
 add ./release_credentials.json /root/release_credentials.json
-
-# Launch build process in a container
-cmd dockerbuild

+ 0 - 50
hack/infrastructure/docker-ci/nightlyrelease/dockerbuild

@@ -1,50 +0,0 @@
-#!/bin/bash
-
-# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY, PG_PASSPHRASE and INDEX_AUTH
-# are decoded from /root/release_credentials.json
-# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
-
-# Enable debugging
-set -x
-
-# Fetch docker master branch
-rm -rf  /go/src/github.com/dotcloud/docker
-cd /
-git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
-cd /go/src/github.com/dotcloud/docker
-
-# Launch docker daemon using dind inside the container
-./hack/dind /usr/bin/docker -d &
-sleep 5
-
-# Add an uncommitted change to generate a timestamped release
-date > timestamp
-
-# Build the docker package using /Dockerfile
-docker build -t docker .
-
-# Run Docker unittests binary and Ubuntu package
-docker run -privileged docker hack/make.sh
-exit_status=$?
-
-# Display load if test fails
-if [ $exit_status -eq 1 ] ; then
-    uptime; echo; free
-    exit 1
-fi
-
-# Commit binary and ubuntu bundles for release
-docker commit -run '{"Env": ["PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"], "WorkingDir": "/go/src/github.com/dotcloud/docker"}' $(docker ps -l -q) release
-
-# Turn debug off to load credentials from the environment
-set +x
-eval $(cat /root/release_credentials.json  | python -c '
-import sys,json,base64;
-d=json.loads(base64.b64decode(sys.stdin.read()));
-exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
-set -x
-
-# Push docker nightly
-echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX release  hack/release.sh
-set +x
-docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE  release  hack/release.sh

+ 40 - 0
hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh

@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and GPG_PASSPHRASE are decoded
+# from /root/release_credentials.json
+# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
+
+# Turn debug off to load credentials from the environment
+set +x
+eval $(cat /root/release_credentials.json  | python -c '
+import sys,json,base64;
+d=json.loads(base64.b64decode(sys.stdin.read()));
+exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
+
+# Fetch docker master branch
+set -x
+cd /
+rm -rf /go
+git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
+cd /go/src/github.com/dotcloud/docker
+
+# Launch docker daemon using dind inside the container
+/usr/bin/docker version
+/usr/bin/docker -d &
+sleep 5
+
+# Build Docker release container
+docker build -t docker .
+
+# Test docker and if everything works well, release
+echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh
+set +x
+docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh
+exit_status=$?
+
+# Display load if test fails
+set -x
+if [ $exit_status -ne 0 ] ; then
+    uptime; echo; free
+    exit 1
+fi

+ 0 - 1
hack/infrastructure/docker-ci/nightlyrelease/release_credentials.json

@@ -1 +0,0 @@
-eyAiQVdTX0FDQ0VTU19LRVkiOiAiIiwKICAiQVdTX1NFQ1JFVF9LRVkiOiAiIiwKICAiR1BHX1BBU1NQSFJBU0UiOiAiIiwKICAiSU5ERVhfQVVUSCI6ICIiIH0=

+ 18 - 0
hack/infrastructure/docker-ci/registry-coverage/Dockerfile

@@ -0,0 +1,18 @@
+# VERSION:        0.1
+# DOCKER-VERSION  0.6.4
+# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
+# DESCRIPTION:    Docker registry coverage
+# COMMENTS:       Add registry coverage into the docker-ci image
+# TO_BUILD:       docker build -t registry_coverage .
+# TO_RUN:         docker run registry_coverage
+
+from docker-ci
+maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+
+# Add registry_coverage.sh and dependencies
+run pip install coverage flask pyyaml requests simplejson python-glanceclient \
+    blinker redis boto gevent rsa mock
+add registry_coverage.sh /usr/bin/registry_coverage.sh
+run chmod +x /usr/bin/registry_coverage.sh
+
+cmd "/usr/bin/registry_coverage.sh"

+ 18 - 0
hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh

@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -x
+
+# Setup the environment
+REGISTRY_PATH=/data/docker-registry
+export SETTINGS_FLAVOR=test
+export DOCKER_REGISTRY_CONFIG=config_test.yml
+export PYTHONPATH=$REGISTRY_PATH/test
+
+# Fetch latest docker-registry master
+rm -rf $REGISTRY_PATH
+git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH
+cd $REGISTRY_PATH
+
+# Generate coverage
+coverage run -m unittest discover test || exit 1
+coverage report --include='./*' --omit='./test/*'

+ 1 - 1
hack/infrastructure/docker-ci/report/deployment.py

@@ -34,7 +34,7 @@ env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read()
 
 DROPLET_NAME = env.get('DROPLET_NAME','report')
 TIMEOUT = 120            # Seconds before timeout droplet creation
-IMAGE_ID = 894856        # Docker on Ubuntu 13.04
+IMAGE_ID = 1004145       # Docker on Ubuntu 13.04
 REGION_ID = 4            # New York 2
 SIZE_ID = 66             # memory 512MB
 DO_IMAGE_USER = 'root'   # Image user on Digital Ocean

+ 1 - 1
namesgenerator/names-generator.go

@@ -12,7 +12,7 @@ type NameChecker interface {
 
 var (
 	colors  = [...]string{"white", "silver", "gray", "black", "blue", "green", "cyan", "yellow", "gold", "orange", "brown", "red", "violet", "pink", "magenta", "purple", "maroon", "crimson", "plum", "fuchsia", "lavender", "slate", "navy", "azure", "aqua", "olive", "teal", "lime", "beige", "tan", "sienna"}
-  animals = [...]string{"ant", "bear", "bird", "cat", "chicken", "cow", "deer", "dog", "donkey", "duck", "fish", "fox", "frog", "horse", "kangaroo", "koala", "lemur", "lion", "lizard", "monkey", "octopus", "pig", "shark", "sheep", "sloth", "spider", "squirrel", "tiger", "toad", "weasel", "whale", "wolf"}
+	animals = [...]string{"ant", "bear", "bird", "cat", "chicken", "cow", "deer", "dog", "donkey", "duck", "fish", "fox", "frog", "horse", "kangaroo", "koala", "lemur", "lion", "lizard", "monkey", "octopus", "pig", "shark", "sheep", "sloth", "spider", "squirrel", "tiger", "toad", "weasel", "whale", "wolf"}
 )
 
 func GenerateRandomName(checker NameChecker) (string, error) {

+ 0 - 2
netlink/netlink_darwin.go

@@ -9,7 +9,6 @@ func NetworkGetRoutes() ([]*net.IPNet, error) {
 	return nil, fmt.Errorf("Not implemented")
 }
 
-
 func NetworkLinkAdd(name string, linkType string) error {
 	return fmt.Errorf("Not implemented")
 }
@@ -18,7 +17,6 @@ func NetworkLinkUp(iface *net.Interface) error {
 	return fmt.Errorf("Not implemented")
 }
 
-
 func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
 	return fmt.Errorf("Not implemented")
 }

+ 2 - 3
runtime.go

@@ -286,13 +286,12 @@ func (runtime *Runtime) restore() error {
 	// Any containers that are left over do not exist in the graph
 	for _, container := range containers {
 		// Try to set the default name for a container if it exists prior to links
-		name, err := generateRandomName(runtime)
+		container.Name, err = generateRandomName(runtime)
 		if err != nil {
 			container.Name = container.ShortID()
 		}
-		container.Name = name
 
-		if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
+		if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
 			utils.Debugf("Setting default id - %s", err)
 		}
 		register(container)

+ 9 - 0
utils.go

@@ -89,6 +89,15 @@ func MergeConfig(userConf, imageConf *Config) error {
 	}
 	if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 {
 		userConf.ExposedPorts = imageConf.ExposedPorts
+	} else if imageConf.ExposedPorts != nil {
+		if userConf.ExposedPorts == nil {
+			userConf.ExposedPorts = make(map[Port]struct{})
+		}
+		for port := range imageConf.ExposedPorts {
+			if _, exists := userConf.ExposedPorts[port]; !exists {
+				userConf.ExposedPorts[port] = struct{}{}
+			}
+		}
 	}
 
 	if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 {

+ 1 - 1
utils/daemon.go

@@ -1,10 +1,10 @@
 package utils
 
 import (
-	"os"
 	"fmt"
 	"io/ioutil"
 	"log"
+	"os"
 	"strconv"
 )
 

+ 1 - 1
utils/random.go

@@ -1,9 +1,9 @@
 package utils
 
 import (
-	"io"
 	"crypto/rand"
 	"encoding/hex"
+	"io"
 )
 
 func RandomString() string {

+ 2 - 2
utils/utils.go

@@ -15,8 +15,8 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"runtime"
 	"regexp"
+	"runtime"
 	"strconv"
 	"strings"
 	"sync"
@@ -904,7 +904,7 @@ func StripComments(input []byte, commentMarker []byte) []byte {
 	return output
 }
 
-// GetNameserversAsCIDR returns nameservers (if any) listed in 
+// GetNameserversAsCIDR returns nameservers (if any) listed in
 // /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
 // This function's output is intended for net.ParseCIDR
 func GetNameserversAsCIDR(resolvConf []byte) []string {

+ 13 - 13
utils/utils_test.go

@@ -453,20 +453,20 @@ search example.com`: {"1.2.3.4/32", "4.3.2.1/32"},
 		`search example.com`: {},
 		`nameserver 1.2.3.4
 search example.com
-nameserver 4.3.2.1`: []string{"1.2.3.4/32", "4.3.2.1/32"},
-    ``: []string{},
-    `  nameserver 1.2.3.4   `: []string{"1.2.3.4/32"},
-    `search example.com
+nameserver 4.3.2.1`: {"1.2.3.4/32", "4.3.2.1/32"},
+		``: {},
+		`  nameserver 1.2.3.4   `: {"1.2.3.4/32"},
+		`search example.com
 nameserver 1.2.3.4
-#nameserver 4.3.2.1`: []string{"1.2.3.4/32"},
-    `search example.com
-nameserver 1.2.3.4 # not 4.3.2.1`: []string{"1.2.3.4/32"},
-    } {
-        test := GetNameserversAsCIDR([]byte(resolv))
-        if !StrSlicesEqual(test, result) {
-            t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
-        }
-    }
+#nameserver 4.3.2.1`: {"1.2.3.4/32"},
+		`search example.com
+nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"},
+	} {
+		test := GetNameserversAsCIDR([]byte(resolv))
+		if !StrSlicesEqual(test, result) {
+			t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
+		}
+	}
 }
 
 func StrSlicesEqual(a, b []string) bool {

+ 27 - 3
utils_test.go

@@ -67,7 +67,7 @@ func newTestRuntime(prefix string) (runtime *Runtime, err error) {
 	}
 
 	config := &DaemonConfig{
-		Root:   root,
+		Root:        root,
 		AutoRestart: false,
 	}
 	runtime, err = NewRuntimeFromDirectory(config)
@@ -247,7 +247,9 @@ func TestMergeConfig(t *testing.T) {
 		Volumes:   volumesUser,
 	}
 
-	MergeConfig(configUser, configImage)
+	if err := MergeConfig(configUser, configImage); err != nil {
+		t.Error(err)
+	}
 
 	if len(configUser.Dns) != 3 {
 		t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
@@ -259,7 +261,7 @@ func TestMergeConfig(t *testing.T) {
 	}
 
 	if len(configUser.ExposedPorts) != 3 {
-		t.Fatalf("Expected 3 portSpecs, 1111, 2222 and 3333, found %d", len(configUser.PortSpecs))
+		t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
 	}
 	for portSpecs := range configUser.ExposedPorts {
 		if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
@@ -287,6 +289,28 @@ func TestMergeConfig(t *testing.T) {
 	if configUser.VolumesFrom != "1111" {
 		t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
 	}
+
+	ports, _, err := parsePortSpecs([]string{"0000"})
+	if err != nil {
+		t.Error(err)
+	}
+	configImage2 := &Config{
+		ExposedPorts: ports,
+	}
+
+	if err := MergeConfig(configUser, configImage2); err != nil {
+		t.Error(err)
+	}
+
+	if len(configUser.ExposedPorts) != 4 {
+		t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
+	}
+	for portSpecs := range configUser.ExposedPorts {
+		if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
+			t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
+		}
+	}
+
 }
 
 func TestParseLxcConfOpt(t *testing.T) {