diff --git a/CHANGELOG.md b/CHANGELOG.md index e016472406..a6a93dc97d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,50 @@ # Changelog +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported + +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation + +#### Remote API + +- Fix broken images API for version less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait + +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration +- Ensure `docker cp` stream is closed properly +- Avoid extra mount/unmount during container registration. 
This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port ++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container +- Clean up archive closing. This fixes and improves archive handling +- Fix engine tests on systems where temp directories are symlinked +- Add test methods for save and load +- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart +- Support submodules when building from a GitHub repository +- Quote volume path to allow spaces +- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs + ## 0.8.0 (2014-02-04) #### Notable features since 0.7.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 93c1d11b1f..c4095641cb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,8 +7,10 @@ feels wrong or incomplete. ## Reporting Issues When reporting [issues](https://github.com/dotcloud/docker/issues) -on GitHub please include your host OS ( Ubuntu 12.04, Fedora 19, etc... ) -and the output of `docker version` along with the output of `docker info` if possible. +on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), +the output of `uname -a` and the output of `docker version` along with +the output of `docker info`. Please include the steps required to reproduce +the problem if possible and applicable. This information will help us review and fix your issue faster. ## Build Environment @@ -86,6 +88,8 @@ curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/maste Pull requests descriptions should be as clear as possible and include a reference to all the issues that they address. +Pull requests mustn't contain commits from other users or branches. + Code review comments may be added to your pull request. 
Discuss, then make the suggested modifications and push additional commits to your feature branch. Be sure to post a comment after pushing. The new commits will show up in the pull @@ -105,6 +109,18 @@ name and email address match your git configuration. The AUTHORS file is regenerated occasionally from the git commit history, so a mismatch may result in your changes being overwritten. +### Merge approval + +Docker maintainers use LGTM (looks good to me) in comments on the code review +to indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects docs/ and registry/, it +needs an absolute majority from the maintainers of docs/ AND, separately, an +absolute majority of the maintainers of registry + +For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) + ### Sign your work The sign-off is a simple line at the end of the explanation for the @@ -113,7 +129,7 @@ pass it on as an open-source patch. The rules are pretty simple: if you can certify the below: ``` -Docker Developer Grant and Certificate of Origin 1.1 +Docker Developer Certificate of Origin 1.1 By making a contribution to the Docker Project ("Project"), I represent and warrant that: @@ -163,7 +179,7 @@ If you have any questions, please refer to the FAQ in the [docs](http://docs.doc * Step 1: learn the component inside out * Step 2: make yourself useful by contributing code, bugfixes, support etc. * Step 3: volunteer on the irc channel (#docker@freenode) -* Step 4: propose yourself at a scheduled #docker-meeting +* Step 4: propose yourself at a scheduled docker meeting in #docker-dev Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. You don't have to be a maintainer to make a difference on the project! 
diff --git a/FIXME b/FIXME index 91c8d92835..4f27d36856 100644 --- a/FIXME +++ b/FIXME @@ -11,20 +11,14 @@ They are just like FIXME comments in the source code, except we're not sure wher to put them - so we put them here :) -* Merge Runtime, Server and Builder into Runtime * Run linter on codebase * Unify build commands and regular commands * Move source code into src/ subdir for clarity * docker build: on non-existent local path for ADD, don't show full absolute path on the host -* docker tag foo REPO:TAG * use size header for progress bar in pull * Clean up context upload in build!!! * Parallel pull -* Always generate a resolv.conf per container, to avoid changing resolv.conf under thne container's feet -* Save metadata with import/export (#1974) * Upgrade dockerd without stopping containers * Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^/ { print $3 }')`) * Simple command to clean up containers for disk space -* Caching after an ADD (#880) * Clean up the ProgressReader api, it's a PITA to use -* Use netlink instead of iproute2/iptables (#925) diff --git a/Makefile b/Makefile index 168707a80f..e124d1d7e6 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) DOCKER_IMAGE := docker:$(GIT_BRANCH) DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH) -DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles "$(DOCKER_IMAGE)" +DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)" default: binary diff --git a/VERSION b/VERSION index a3df0a6959..6f4eebdf6f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.8.0 +0.8.1 diff --git a/api/api.go b/api/api.go index 741dc69085..8d9bae978f 100644 --- a/api/api.go +++ b/api/api.go @@ -10,6 +10,7 @@ import ( "fmt" "github.com/dotcloud/docker/auth" 
"github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/listenbuffer" "github.com/dotcloud/docker/pkg/systemd" "github.com/dotcloud/docker/utils" "github.com/gorilla/mux" @@ -25,15 +26,28 @@ import ( "strconv" "strings" "syscall" + "time" ) +// FIXME: move code common to client and server to common.go const ( APIVERSION = 1.9 DEFAULTHTTPHOST = "127.0.0.1" - DEFAULTHTTPPORT = 4243 DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) +var ( + activationLock chan struct{} +) + +func ValidateHost(val string) (string, error) { + host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) + if err != nil { + return val, err + } + return host, nil +} + type HttpApiFunc func(eng *engine.Engine, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error func init() { @@ -99,6 +113,15 @@ func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { return v.Encode(w) } +func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { + w.Header().Set("Content-Type", "application/json") + if flush { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } else { + job.Stdout.Add(w) + } +} + func getBoolParam(value string) (bool, error) { if value == "" { return false, nil @@ -205,7 +228,7 @@ func getImagesJSON(eng *engine.Engine, version float64, w http.ResponseWriter, r job.Setenv("all", r.Form.Get("all")) if version >= 1.7 { - job.Stdout.Add(w) + streamJSON(job, w, false) } else if outs, err = job.Stdout.AddListTable(); err != nil { return err } @@ -222,13 +245,14 @@ func getImagesJSON(eng *engine.Engine, version float64, w http.ResponseWriter, r outLegacy := &engine.Env{} outLegacy.Set("Repository", parts[0]) outLegacy.Set("Tag", parts[1]) - outLegacy.Set("ID", out.Get("ID")) + outLegacy.Set("Id", out.Get("Id")) outLegacy.SetInt64("Created", out.GetInt64("Created")) outLegacy.SetInt64("Size", out.GetInt64("Size")) outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) outsLegacy.Add(outLegacy) } } + 
w.Header().Set("Content-Type", "application/json") if _, err := outsLegacy.WriteListTo(w); err != nil { return err } @@ -256,9 +280,8 @@ func getEvents(eng *engine.Engine, version float64, w http.ResponseWriter, r *ht return err } - w.Header().Set("Content-Type", "application/json") var job = eng.Job("events", r.RemoteAddr) - job.Stdout.Add(utils.NewWriteFlusher(w)) + streamJSON(job, w, true) job.Setenv("since", r.Form.Get("since")) return job.Run() } @@ -269,7 +292,7 @@ func getImagesHistory(eng *engine.Engine, version float64, w http.ResponseWriter } var job = eng.Job("history", vars["name"]) - job.Stdout.Add(w) + streamJSON(job, w, false) if err := job.Run(); err != nil { return err @@ -282,7 +305,7 @@ func getContainersChanges(eng *engine.Engine, version float64, w http.ResponseWr return fmt.Errorf("Missing parameter") } var job = eng.Job("changes", vars["name"]) - job.Stdout.Add(w) + streamJSON(job, w, false) return job.Run() } @@ -299,7 +322,7 @@ func getContainersTop(eng *engine.Engine, version float64, w http.ResponseWriter } job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) - job.Stdout.Add(w) + streamJSON(job, w, false) return job.Run() } @@ -320,7 +343,7 @@ func getContainersJSON(eng *engine.Engine, version float64, w http.ResponseWrite job.Setenv("limit", r.Form.Get("limit")) if version >= 1.5 { - job.Stdout.Add(w) + streamJSON(job, w, false) } else if outs, err = job.Stdout.AddTable(); err != nil { return err } @@ -333,6 +356,7 @@ func getContainersJSON(eng *engine.Engine, version float64, w http.ResponseWrite ports.ReadListFrom([]byte(out.Get("Ports"))) out.Set("Ports", displayablePorts(ports)) } + w.Header().Set("Content-Type", "application/json") if _, err = outs.WriteListTo(w); err != nil { return err } @@ -366,7 +390,7 @@ func postCommit(eng *engine.Engine, version float64, w http.ResponseWriter, r *h env engine.Env job = eng.Job("commit", r.Form.Get("container")) ) - if err := config.Import(r.Body); err != nil { + if err := 
config.Decode(r.Body); err != nil { utils.Errorf("%s", err) } @@ -425,8 +449,12 @@ func postImagesCreate(eng *engine.Engine, version float64, w http.ResponseWriter job.Stdin.Add(r.Body) } - job.SetenvBool("json", version > 1.0) - job.Stdout.Add(utils.NewWriteFlusher(w)) + if version > 1.0 { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err @@ -465,7 +493,7 @@ func getImagesSearch(eng *engine.Engine, version float64, w http.ResponseWriter, var job = eng.Job("search", r.Form.Get("term")) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) - job.Stdout.Add(w) + streamJSON(job, w, false) return job.Run() } @@ -482,8 +510,12 @@ func postImagesInsert(eng *engine.Engine, version float64, w http.ResponseWriter } job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path")) - job.SetenvBool("json", version > 1.0) - job.Stdout.Add(w) + if version > 1.0 { + job.SetenvBool("json", true) + streamJSON(job, w, false) + } else { + job.Stdout.Add(w) + } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err @@ -532,8 +564,12 @@ func postImagesPush(eng *engine.Engine, version float64, w http.ResponseWriter, job := eng.Job("push", vars["name"]) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) - job.SetenvBool("json", version > 1.0) - job.Stdout.Add(utils.NewWriteFlusher(w)) + if version > 1.0 { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } if err := job.Run(); err != nil { if !job.Stdout.Used() { @@ -635,7 +671,7 @@ func deleteImages(eng *engine.Engine, version float64, w http.ResponseWriter, r return fmt.Errorf("Missing parameter") } var job = eng.Job("image_delete", vars["name"]) - job.Stdout.Add(w) + streamJSON(job, w, false) job.SetenvBool("autoPrune", version > 1.1) return job.Run() 
@@ -815,7 +851,7 @@ func getContainersByName(eng *engine.Engine, version float64, w http.ResponseWri return fmt.Errorf("Missing parameter") } var job = eng.Job("inspect", vars["name"], "container") - job.Stdout.Add(w) + streamJSON(job, w, false) job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } @@ -825,7 +861,7 @@ func getImagesByName(eng *engine.Engine, version float64, w http.ResponseWriter, return fmt.Errorf("Missing parameter") } var job = eng.Job("inspect", vars["name"], "image") - job.Stdout.Add(w) + streamJSON(job, w, false) job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } @@ -865,11 +901,11 @@ func postBuild(eng *engine.Engine, version float64, w http.ResponseWriter, r *ht } if version >= 1.8 { - w.Header().Set("Content-Type", "application/json") job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) } - - job.Stdout.Add(utils.NewWriteFlusher(w)) job.Stdin.Add(r.Body) job.Setenv("remote", r.FormValue("remote")) job.Setenv("t", r.FormValue("t")) @@ -910,9 +946,12 @@ func postContainersCopy(eng *engine.Engine, version float64, w http.ResponseWrit } job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) - job.Stdout.Add(w) + streamJSON(job, w, false) if err := job.Run(); err != nil { utils.Errorf("%s", err.Error()) + if strings.Contains(err.Error(), "No such container") { + w.WriteHeader(http.StatusNotFound) + } } return nil } @@ -1126,7 +1165,7 @@ func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors } } - l, err := net.Listen(proto, addr) + l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock, 15*time.Minute) if err != nil { return err } @@ -1168,8 +1207,15 @@ func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors // ServeApi loops through all of the protocols sent in 
to docker and spawns // off a go routine to setup a serving http.Server for each. func ServeApi(job *engine.Job) engine.Status { - protoAddrs := job.Args - chErrors := make(chan error, len(protoAddrs)) + var ( + protoAddrs = job.Args + chErrors = make(chan error, len(protoAddrs)) + ) + activationLock = make(chan struct{}) + + if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil { + return job.Error(err) + } for _, protoAddr := range protoAddrs { protoAddrParts := strings.SplitN(protoAddr, "://", 2) @@ -1186,8 +1232,15 @@ func ServeApi(job *engine.Job) engine.Status { } } + return engine.StatusOK +} + +func AcceptConnections(job *engine.Job) engine.Status { // Tell the init daemon we are accepting requests go systemd.SdNotify("READY=1") + // close the lock so the listeners start accepting connections + close(activationLock) + return engine.StatusOK } diff --git a/commands.go b/api/client.go similarity index 85% rename from commands.go rename to api/client.go index 15d4507030..81e337b023 100644 --- a/commands.go +++ b/api/client.go @@ -1,21 +1,21 @@ -package docker +package api import ( - "archive/tar" "bufio" "bytes" "encoding/base64" "encoding/json" "errors" "fmt" - "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -29,7 +29,6 @@ import ( "reflect" "regexp" "runtime" - "sort" "strconv" "strings" "syscall" @@ -38,11 +37,6 @@ import ( "time" ) -var ( - GITCOMMIT string - VERSION string -) - var ( ErrConnectionRefused = errors.New("Can't connect to docker daemon. 
Is 'docker -d' running on this host?") ) @@ -80,7 +74,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error { return nil } } - help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET) + help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET) for _, command := range [][]string{ {"attach", "Attach to a running container"}, {"build", "Build a container from a Dockerfile"}, @@ -139,35 +133,10 @@ func (cli *DockerCli) CmdInsert(args ...string) error { return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil) } -// mkBuildContext returns an archive of an empty context with the contents -// of `dockerfile` at the path ./Dockerfile -func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) { - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - files = append(files, [2]string{"Dockerfile", dockerfile}) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - func (cli *DockerCli) CmdBuild(args ...string) error { cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") - suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress verbose build 
output") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, false, "Remove intermediate containers after a successful build") if err := cmd.Parse(args); err != nil { @@ -191,7 +160,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { if err != nil { return err } - context, err = MkBuildContext(string(dockerfile), nil) + context, err = archive.Generate("Dockerfile", string(dockerfile)) } else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) { isRemote = true } else { @@ -209,7 +178,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // FIXME: ProgressReader shouldn't be this annoying to use if context != nil { sf := utils.NewStreamFormatter(false) - body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf, true, "", "Uploading context") + body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") } // Upload the build context v := &url.Values{} @@ -266,11 +235,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error { } serverAddress := auth.IndexServerAddress() if len(cmd.Args()) > 0 { - serverAddress, err = registry.ExpandAndVerifyRegistryUrl(cmd.Arg(0)) - if err != nil { - return err - } - fmt.Fprintf(cli.out, "Login against server at %s\n", serverAddress) + serverAddress = cmd.Arg(0) } promptDefault := func(prompt string, configDefault string) { @@ -392,12 +357,12 @@ func (cli *DockerCli) CmdVersion(args ...string) error { cmd.Usage() return nil } - if VERSION != "" { - fmt.Fprintf(cli.out, "Client version: %s\n", VERSION) + if dockerversion.VERSION != "" { + fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) } fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) - if GITCOMMIT != "" { - fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT) + if 
dockerversion.GITCOMMIT != "" { + fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) } body, _, err := readBody(cli.call("GET", "/version", nil, false)) @@ -422,7 +387,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error { release := utils.GetReleaseVersion() if release != "" { fmt.Fprintf(cli.out, "Last stable version: %s", release) - if (VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { + if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { fmt.Fprintf(cli.out, ", please update docker") } fmt.Fprintf(cli.out, "\n") @@ -803,7 +768,7 @@ func (cli *DockerCli) CmdPort(args ...string) error { return err } - if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists && frontends != nil { + if frontends, exists := out.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { for _, frontend := range frontends { fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) } @@ -1313,19 +1278,6 @@ func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix stri } } -func displayablePorts(ports *engine.Table) string { - result := []string{} - for _, port := range ports.Data { - if port.Get("IP") == "" { - result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type"))) - } else { - result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) - } - } - sort.Strings(result) - return strings.Join(result, ", ") -} - func (cli *DockerCli) CmdPs(args ...string) error { cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") @@ -1455,11 +1407,11 @@ func 
(cli *DockerCli) CmdCommit(args ...string) error { v.Set("comment", *flComment) v.Set("author", *flAuthor) var ( - config *Config + config *runconfig.Config env engine.Env ) if *flConfig != "" { - config = &Config{} + config = &runconfig.Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err } @@ -1620,7 +1572,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { return err } - if !container.State.IsRunning() { + if !container.State.Running { return fmt.Errorf("Impossible to attach to a stopped container, start it first") } @@ -1749,210 +1701,9 @@ func (cli *DockerCli) CmdTag(args ...string) error { return nil } -//FIXME Only used in tests -func ParseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { - cmd := flag.NewFlagSet("run", flag.ContinueOnError) - cmd.SetOutput(ioutil.Discard) - cmd.Usage = nil - return parseRun(cmd, args, sysInfo) -} - -func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { - var ( - // FIXME: use utils.ListOpts for attach and volumes? 
- flAttach = NewListOpts(ValidateAttach) - flVolumes = NewListOpts(ValidatePath) - flLinks = NewListOpts(ValidateLink) - flEnv = NewListOpts(ValidateEnv) - - flPublish ListOpts - flExpose ListOpts - flDns ListOpts - flVolumesFrom ListOpts - flLxcOpts ListOpts - - flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") - flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container") - flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") - flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces") - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty") - flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") - flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image") - flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") - flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: , where unit = b, k, m or g)") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") - flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") - flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - - // For documentation purpose - _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") - _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") - ) - - cmd.Var(&flAttach, 
[]string{"a", "-attach"}, "Attach to stdin, stdout or stderr.") - cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)") - cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)") - cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") - - cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat)) - cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") - cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") - cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") - - if err := cmd.Parse(args); err != nil { - return nil, nil, cmd, err - } - - // Check if the kernel supports memory limit cgroup. 
- if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit { - *flMemoryString = "" - } - - // Validate input params - if *flDetach && flAttach.Len() > 0 { - return nil, nil, cmd, ErrConflictAttachDetach - } - if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { - return nil, nil, cmd, ErrInvalidWorikingDirectory - } - if *flDetach && *flAutoRemove { - return nil, nil, cmd, ErrConflictDetachAutoRemove - } - - // If neither -d or -a are set, attach to everything by default - if flAttach.Len() == 0 && !*flDetach { - if !*flDetach { - flAttach.Set("stdout") - flAttach.Set("stderr") - if *flStdin { - flAttach.Set("stdin") - } - } - } - - var flMemory int64 - if *flMemoryString != "" { - parsedMemory, err := utils.RAMInBytes(*flMemoryString) - if err != nil { - return nil, nil, cmd, err - } - flMemory = parsedMemory - } - - var binds []string - // add any bind targets to the list of container volumes - for bind := range flVolumes.GetMap() { - if arr := strings.Split(bind, ":"); len(arr) > 1 { - if arr[0] == "/" { - return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'") - } - dstDir := arr[1] - flVolumes.Set(dstDir) - binds = append(binds, bind) - flVolumes.Delete(bind) - } else if bind == "/" { - return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'") - } - } - - var ( - parsedArgs = cmd.Args() - runCmd []string - entrypoint []string - image string - ) - if len(parsedArgs) >= 1 { - image = cmd.Arg(0) - } - if len(parsedArgs) > 1 { - runCmd = parsedArgs[1:] - } - if *flEntrypoint != "" { - entrypoint = []string{*flEntrypoint} - } - - lxcConf, err := parseLxcConfOpts(flLxcOpts) - if err != nil { - return nil, nil, cmd, err - } - - var ( - domainname string - hostname = *flHostname - parts = strings.SplitN(hostname, ".", 2) - ) - if len(parts) > 1 { - hostname = parts[0] - domainname = parts[1] - } - - ports, portBindings, err := parsePortSpecs(flPublish.GetAll()) - if err != nil { - return nil, nil, cmd, err - } - - // Merge 
in exposed ports to the map of published ports - for _, e := range flExpose.GetAll() { - if strings.Contains(e, ":") { - return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) - } - p := NewPort(splitProtoPort(e)) - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} - } - } - - config := &Config{ - Hostname: hostname, - Domainname: domainname, - PortSpecs: nil, // Deprecated - ExposedPorts: ports, - User: *flUser, - Tty: *flTty, - NetworkDisabled: !*flNetwork, - OpenStdin: *flStdin, - Memory: flMemory, - CpuShares: *flCpuShares, - AttachStdin: flAttach.Get("stdin"), - AttachStdout: flAttach.Get("stdout"), - AttachStderr: flAttach.Get("stderr"), - Env: flEnv.GetAll(), - Cmd: runCmd, - Dns: flDns.GetAll(), - Image: image, - Volumes: flVolumes.GetMap(), - VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), - Entrypoint: entrypoint, - WorkingDir: *flWorkingDir, - } - - hostConfig := &HostConfig{ - Binds: binds, - ContainerIDFile: *flContainerIDFile, - LxcConf: lxcConf, - Privileged: *flPrivileged, - PortBindings: portBindings, - Links: flLinks.GetAll(), - PublishAllPorts: *flPublishAll, - } - - if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { - //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") - config.MemorySwap = -1 - } - - // When allocating stdin in attached mode, close stdin at client disconnect - if config.OpenStdin && config.AttachStdin { - config.StdinOnce = true - } - return config, hostConfig, cmd, nil -} - func (cli *DockerCli) CmdRun(args ...string) error { - config, hostConfig, cmd, err := parseRun(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) + // FIXME: just use runconfig.Parse already + config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) if err != nil { return err } @@ -1995,12 +1746,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) //if image not found try to pull it if statusCode == 404 { - _, tag := utils.ParseRepositoryTag(config.Image) - if tag == "" { - tag = DEFAULTTAG - } - - fmt.Fprintf(cli.err, "Unable to find image '%s' (tag: %s) locally\n", config.Image, tag) + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) v := url.Values{} repos, tag := utils.ParseRepositoryTag(config.Image) @@ -2215,6 +1961,9 @@ func (cli *DockerCli) CmdCp(args ...string) error { if stream != nil { defer stream.Close() } + if statusCode == 404 { + return fmt.Errorf("No such container: %v", info[0]) + } if err != nil { return err } @@ -2283,7 +2032,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", api.APIVERSION, path), params) + req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params) if err != nil { return nil, -1, err } @@ -2307,7 +2056,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } } } - 
req.Header.Set("User-Agent", "Docker-Client/"+VERSION) + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Host = cli.addr if data != nil { req.Header.Set("Content-Type", "application/json") @@ -2337,7 +2086,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b return nil, -1, err } if len(body) == 0 { - return nil, resp.StatusCode, fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) + return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for api route and version %s, check if the server supports the requested api version", http.StatusText(resp.StatusCode), req.URL) } return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) } @@ -2360,11 +2109,11 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", api.APIVERSION, path), in) + req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in) if err != nil { return err } - req.Header.Set("User-Agent", "Docker-Client/"+VERSION) + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Host = cli.addr if method == "POST" { req.Header.Set("Content-Type", "plain/text") @@ -2405,7 +2154,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) } - if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { + if MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) } if _, err := io.Copy(out, resp.Body); err != nil { @@ -2424,11 +2173,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, 
fmt.Sprintf("/v%g%s", api.APIVERSION, path), nil) + req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil) if err != nil { return err } - req.Header.Set("User-Agent", "Docker-Client/"+VERSION) + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Header.Set("Content-Type", "plain/text") req.Host = cli.addr @@ -2607,7 +2356,7 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { if err := json.Unmarshal(body, c); err != nil { return false, -1, err } - return c.State.IsRunning(), c.State.GetExitCode(), nil + return c.State.Running, c.State.ExitCode, nil } func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { diff --git a/api/container.go b/api/container.go new file mode 100644 index 0000000000..4cc73b2252 --- /dev/null +++ b/api/container.go @@ -0,0 +1,18 @@ +package api + +import ( + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/runconfig" +) + +type Container struct { + Config runconfig.Config + HostConfig runconfig.HostConfig + State struct { + Running bool + ExitCode int + } + NetworkSettings struct { + Ports nat.PortMap + } +} diff --git a/archive/archive.go b/archive/archive.go index b1400c2210..16c01993b7 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -1,10 +1,11 @@ package archive import ( - "archive/tar" "bytes" + "code.google.com/p/go/src/pkg/archive/tar" "compress/bzip2" "compress/gzip" + "errors" "fmt" "github.com/dotcloud/docker/utils" "io" @@ -17,14 +18,19 @@ import ( "syscall" ) -type Archive io.Reader +type ( + Archive io.ReadCloser + ArchiveReader io.Reader + Compression int + TarOptions struct { + Includes []string + Compression Compression + } +) -type Compression int - -type TarOptions struct { - Includes []string - Compression Compression -} +var ( + ErrNotImplemented = errors.New("Function not implemented") +) const ( Uncompressed Compression = iota @@ -60,13 +66,13 @@ func DetectCompression(source []byte) 
Compression { return Uncompressed } -func xzDecompress(archive io.Reader) (io.Reader, error) { +func xzDecompress(archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return CmdStream(exec.Command(args[0], args[1:]...), archive) } -func DecompressStream(archive io.Reader) (io.Reader, error) { +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { buf := make([]byte, 10) totalN := 0 for totalN < 10 { @@ -85,11 +91,11 @@ func DecompressStream(archive io.Reader) (io.Reader, error) { switch compression { case Uncompressed: - return wrap, nil + return ioutil.NopCloser(wrap), nil case Gzip: return gzip.NewReader(wrap) case Bzip2: - return bzip2.NewReader(wrap), nil + return ioutil.NopCloser(bzip2.NewReader(wrap)), nil case Xz: return xzDecompress(wrap) default: @@ -101,7 +107,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose switch compression { case Uncompressed: - return dest, nil + return utils.NopWriteCloser(dest), nil case Gzip: return gzip.NewWriter(dest), nil case Bzip2, Xz: @@ -180,20 +186,25 @@ func addTarFile(path, name string, tw *tar.Writer) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. 
// In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, os.FileMode(hdr.Mode)); err != nil { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode)) + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } @@ -236,14 +247,14 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) } - if err := syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { return err } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if hdr.Typeflag != tar.TypeSymlink { - if err := syscall.Chmod(path, uint32(hdr.Mode&07777)); err != nil { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } } @@ -251,7 +262,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and if hdr.Typeflag != tar.TypeSymlink { - if err := syscall.UtimesNano(path, ts); err != nil { + if err := UtimesNano(path, ts); err != nil { return err } } else { @@ -264,7 +275,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. 
-func Tar(path string, compression Compression) (io.Reader, error) { +func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarFilter(path, &TarOptions{Compression: compression}) } @@ -286,7 +297,7 @@ func escapeName(name string) string { // Tar creates an archive from the directory at `path`, only including files whose relative // paths are included in `filter`. If `filter` is nil, then all files are included. -func TarFilter(srcPath string, options *TarOptions) (io.Reader, error) { +func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) { pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) @@ -332,6 +343,9 @@ func TarFilter(srcPath string, options *TarOptions) (io.Reader, error) { if err := compressWriter.Close(); err != nil { utils.Debugf("Can't close compress writer: %s\n", err) } + if err := pipeWriter.Close(); err != nil { + utils.Debugf("Can't close pipe writer: %s\n", err) + } }() return pipeReader, nil @@ -347,12 +361,13 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { return fmt.Errorf("Empty archive") } - archive, err := DecompressStream(archive) + decompressedArchive, err := DecompressStream(archive) if err != nil { return err } + defer decompressedArchive.Close() - tr := tar.NewReader(archive) + tr := tar.NewReader(decompressedArchive) var dirs []*tar.Header @@ -427,15 +442,19 @@ func TarUntar(src string, dst string) error { if err != nil { return err } + defer archive.Close() return Untar(archive, dst, nil) } // UntarPath is a convenience function which looks for an archive // at filesystem path `src`, and unpacks it at `dst`. 
func UntarPath(src, dst string) error { - if archive, err := os.Open(src); err != nil { + archive, err := os.Open(src) + if err != nil { return err - } else if err := Untar(archive, dst, nil); err != nil { + } + defer archive.Close() + if err := Untar(archive, dst, nil); err != nil { return err } return nil @@ -523,7 +542,7 @@ func CopyFileWithTar(src, dst string) (err error) { // CmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. -func CmdStream(cmd *exec.Cmd, input io.Reader) (io.Reader, error) { +func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { if input != nil { stdin, err := cmd.StdinPipe() if err != nil { diff --git a/archive/archive_test.go b/archive/archive_test.go index 891f977dcf..8badd58bd7 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -1,8 +1,8 @@ package archive import ( - "archive/tar" "bytes" + "code.google.com/p/go/src/pkg/archive/tar" "fmt" "io" "io/ioutil" @@ -67,12 +67,13 @@ func tarUntar(t *testing.T, origin string, compression Compression) error { if err != nil { t.Fatal(err) } + defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return err } - archive = io.MultiReader(bytes.NewReader(buf), archive) + wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) if detectedCompression.Extension() != compression.Extension() { @@ -84,7 +85,7 @@ func tarUntar(t *testing.T, origin string, compression Compression) error { return err } defer os.RemoveAll(tmp) - if err := Untar(archive, tmp, nil); err != nil { + if err := Untar(wrap, tmp, nil); err != nil { return err } if _, err := os.Stat(tmp); err != nil { diff --git a/archive/changes.go b/archive/changes.go index 25406f5cec..b46b13bbe7 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -1,7 +1,7 @@ package archive import 
( - "archive/tar" + "code.google.com/p/go/src/pkg/archive/tar" "fmt" "github.com/dotcloud/docker/utils" "io" diff --git a/archive/diff.go b/archive/diff.go index cdf06dd055..6a778390bb 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -1,8 +1,10 @@ package archive import ( - "archive/tar" + "code.google.com/p/go/src/pkg/archive/tar" + "fmt" "io" + "io/ioutil" "os" "path/filepath" "strings" @@ -28,7 +30,7 @@ func timeToTimespec(time time.Time) (ts syscall.Timespec) { // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. -func ApplyLayer(dest string, layer Archive) error { +func ApplyLayer(dest string, layer ArchiveReader) error { // We need to be able to set any perms oldmask := syscall.Umask(0) defer syscall.Umask(oldmask) @@ -42,6 +44,9 @@ func ApplyLayer(dest string, layer Archive) error { var dirs []*tar.Header + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + // Iterate through the files in the archive. for { hdr, err := tr.Next() @@ -72,6 +77,22 @@ func ApplyLayer(dest string, layer Archive) error { // Skip AUFS metadata dirs if strings.HasPrefix(hdr.Name, ".wh..wh.") { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. 
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil { + return err + } + } continue } @@ -96,7 +117,26 @@ func ApplyLayer(dest string, layer Archive) error { } } - if err := createTarFile(path, dest, hdr, tr); err != nil { + srcData := io.Reader(tr) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := createTarFile(path, dest, srcHdr, srcData); err != nil { return err } diff --git a/archive/stat_linux.go b/archive/stat_linux.go index 2f7a520ccd..f87a99c55a 100644 --- a/archive/stat_linux.go +++ b/archive/stat_linux.go @@ -30,3 +30,10 @@ func LUtimesNano(path string, ts []syscall.Timespec) error { return nil } + +func UtimesNano(path string, ts []syscall.Timespec) error { + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + return nil +} diff --git a/archive/stat_darwin.go b/archive/stat_unsupported.go similarity index 67% rename from archive/stat_darwin.go rename to archive/stat_unsupported.go index 32203299dd..50ca461867 100644 --- a/archive/stat_darwin.go +++ b/archive/stat_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux !amd64 +// +build !linux package archive 
@@ -13,5 +13,9 @@ func getLastModification(stat *syscall.Stat_t) syscall.Timespec { } func LUtimesNano(path string, ts []syscall.Timespec) error { - return nil + return ErrNotImplemented +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotImplemented } diff --git a/archive/wrap.go b/archive/wrap.go new file mode 100644 index 0000000000..981420b3fe --- /dev/null +++ b/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "bytes" + "code.google.com/p/go/src/pkg/archive/tar" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./emptyfile with empty content +// +// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) 
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/auth/auth.go b/auth/auth.go index 770a6a0c0f..cbca81f3e3 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -151,12 +151,15 @@ func SaveConfig(configFile *ConfigFile) error { // try to register/login to the registry server func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) { - client := &http.Client{} - reqStatusCode := 0 - var status string - var reqBody []byte + var ( + status string + reqBody []byte + err error + client = &http.Client{} + reqStatusCode = 0 + serverAddress = authConfig.ServerAddress + ) - serverAddress := authConfig.ServerAddress if serverAddress == "" { serverAddress = IndexServerAddress() } diff --git a/buildfile.go b/buildfile.go index 2a4b163bec..c7181b9146 100644 --- a/buildfile.go +++ b/buildfile.go @@ -9,6 +9,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -38,7 +39,7 @@ type buildFile struct { image string maintainer string - config *Config + config *runconfig.Config contextPath string context *utils.TarSum @@ -64,8 +65,11 @@ type buildFile struct { func (b *buildFile) clearTmp(containers map[string]struct{}) { 
for c := range containers { tmp := b.runtime.Get(c) - b.runtime.Destroy(tmp) - fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + if err := b.runtime.Destroy(tmp); err != nil { + fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) + } else { + fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + } } } @@ -101,7 +105,7 @@ func (b *buildFile) CmdFrom(name string) error { } } b.image = image.ID - b.config = &Config{} + b.config = &runconfig.Config{} if image.Config != nil { b.config = image.Config } @@ -158,14 +162,14 @@ func (b *buildFile) CmdRun(args string) error { if b.image == "" { return fmt.Errorf("Please provide a source image with `from` prior to run") } - config, _, _, err := ParseRun(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) + config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) if err != nil { return err } cmd := b.config.Cmd b.config.Cmd = nil - MergeConfig(b.config, config) + runconfig.Merge(b.config, config) defer func(cmd []string) { b.config.Cmd = cmd }(cmd) @@ -179,11 +183,20 @@ func (b *buildFile) CmdRun(args string) error { return nil } - cid, err := b.run() + c, err := b.create() if err != nil { return err } - if err := b.commit(cid, cmd, "run"); err != nil { + // Ensure that we keep the container mounted until the commit + // to avoid unmounting and then mounting directly again + c.Mount() + defer c.Unmount() + + err = b.run(c) + if err != nil { + return err + } + if err := b.commit(c.ID, cmd, "run"); err != nil { return err } @@ -342,7 +355,7 @@ func (b *buildFile) checkPathForAddition(orig string) error { return nil } -func (b *buildFile) addContext(container *Container, orig, dest string) error { +func (b *buildFile) addContext(container *Container, orig, dest string, remote bool) error { var ( origPath = path.Join(b.contextPath, orig) destPath = 
path.Join(container.BasefsPath(), dest) @@ -358,20 +371,39 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error { } return err } + if fi.IsDir() { if err := archive.CopyWithTar(origPath, destPath); err != nil { return err } - // First try to unpack the source as an archive - } else if err := archive.UntarPath(origPath, destPath); err != nil { + return nil + } + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in / . + tarDest := destPath + if strings.HasSuffix(tarDest, "/") { + tarDest = filepath.Dir(destPath) + } + + // If we are adding a remote file, do not try to untar it + if !remote { + // try to successfully untar the orig + if err := archive.UntarPath(origPath, tarDest); err == nil { + return nil + } utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err) - // If that fails, just copy it as a regular file - if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { - return err - } - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } + } + + // If that fails, just copy it as a regular file + // but do not use all the magic path handling for the tar path + if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { + return err + } + if err := archive.CopyWithTar(origPath, destPath); err != nil { + return err } return nil } @@ -399,14 +431,15 @@ func (b *buildFile) CmdAdd(args string) error { b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)} b.config.Image = b.image - // FIXME: do we really need this? 
var ( origPath = orig destPath = dest remoteHash string + isRemote bool ) if utils.IsURL(orig) { + isRemote = true resp, err := utils.Download(orig) if err != nil { return err @@ -435,6 +468,7 @@ func (b *buildFile) CmdAdd(args string) error { } tarSum := utils.TarSum{Reader: r, DisableCompression: true} remoteHash = tarSum.Sum(nil) + r.Close() // If the destination is a directory, figure out the filename. if strings.HasSuffix(dest, "/") { @@ -515,7 +549,7 @@ func (b *buildFile) CmdAdd(args string) error { } defer container.Unmount() - if err := b.addContext(container, origPath, destPath); err != nil { + if err := b.addContext(container, origPath, destPath, isRemote); err != nil { return err } @@ -554,16 +588,16 @@ func (sf *StderrFormater) Write(buf []byte) (int, error) { return len(buf), err } -func (b *buildFile) run() (string, error) { +func (b *buildFile) create() (*Container, error) { if b.image == "" { - return "", fmt.Errorf("Please provide a source image with `from` prior to run") + return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } b.config.Image = b.image // Create the container and start it c, _, err := b.runtime.Create(b.config, "") if err != nil { - return "", err + return nil, err } b.tmpContainers[c.ID] = struct{}{} fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) @@ -572,6 +606,10 @@ func (b *buildFile) run() (string, error) { c.Path = b.config.Cmd[0] c.Args = b.config.Cmd[1:] + return c, nil +} + +func (b *buildFile) run(c *Container) error { var errCh chan error if b.verbose { @@ -582,12 +620,12 @@ func (b *buildFile) run() (string, error) { //start the container if err := c.Start(); err != nil { - return "", err + return err } if errCh != nil { if err := <-errCh; err != nil { - return "", err + return err } } @@ -597,10 +635,10 @@ func (b *buildFile) run() (string, error) { Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), Code: ret, } - return "", err 
+ return err } - return c.ID, nil + return nil } // Commit the container with the autorun command @@ -742,7 +780,7 @@ func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeC return &buildFile{ runtime: srv.runtime, srv: srv, - config: &Config{}, + config: &runconfig.Config{}, outStream: outStream, errStream: errStream, tmpContainers: make(map[string]struct{}), diff --git a/commands_unit_test.go b/commands_unit_test.go index e44d9a1854..60d8d60398 100644 --- a/commands_unit_test.go +++ b/commands_unit_test.go @@ -1,16 +1,17 @@ package docker import ( + "github.com/dotcloud/docker/runconfig" "strings" "testing" ) -func parse(t *testing.T, args string) (*Config, *HostConfig, error) { - config, hostConfig, _, err := ParseRun(strings.Split(args+" ubuntu bash", " "), nil) +func parse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig, error) { + config, hostConfig, _, err := runconfig.Parse(strings.Split(args+" ubuntu bash", " "), nil) return config, hostConfig, err } -func mustParse(t *testing.T, args string) (*Config, *HostConfig) { +func mustParse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig) { config, hostConfig, err := parse(t, args) if err != nil { t.Fatal(err) diff --git a/config.go b/config.go index fc04c9ff16..dc6e8b554f 100644 --- a/config.go +++ b/config.go @@ -39,6 +39,7 @@ func DaemonConfigFromJob(job *engine.Job) *DaemonConfig { EnableIptables: job.GetenvBool("EnableIptables"), EnableIpForward: job.GetenvBool("EnableIpForward"), BridgeIP: job.Getenv("BridgeIP"), + BridgeIface: job.Getenv("BridgeIface"), DefaultIp: net.ParseIP(job.Getenv("DefaultIp")), InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), GraphDriver: job.Getenv("GraphDriver"), @@ -51,7 +52,7 @@ func DaemonConfigFromJob(job *engine.Job) *DaemonConfig { } else { config.Mtu = GetDefaultNetworkMtu() } - config.DisableNetwork = job.Getenv("BridgeIface") == DisableNetworkBridge + config.DisableNetwork = 
config.BridgeIface == DisableNetworkBridge return config } diff --git a/container.go b/container.go index 81e8749d2a..9c7fc8ffd7 100644 --- a/container.go +++ b/container.go @@ -8,8 +8,10 @@ import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/links" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "github.com/kr/pty" "io" @@ -17,7 +19,6 @@ import ( "log" "os" "path" - "path/filepath" "strings" "sync" "syscall" @@ -25,8 +26,10 @@ import ( ) var ( - ErrNotATTY = errors.New("The PTY is not a file") - ErrNoTTY = errors.New("No PTY found") + ErrNotATTY = errors.New("The PTY is not a file") + ErrNoTTY = errors.New("No PTY found") + ErrContainerStart = errors.New("The container failed to start. Unknown error") + ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") ) type Container struct { @@ -41,7 +44,7 @@ type Container struct { Path string Args []string - Config *Config + Config *runconfig.Config State State Image string @@ -67,160 +70,12 @@ type Container struct { // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. // Easier than migrating older container configs :) VolumesRW map[string]bool - hostConfig *HostConfig + hostConfig *runconfig.HostConfig - activeLinks map[string]*Link -} - -// Note: the Config structure should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -type Config struct { - Hostname string - Domainname string - User string - Memory int64 // Memory limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 // CPU shares (relative weight vs. 
other containers) - AttachStdin bool - AttachStdout bool - AttachStderr bool - PortSpecs []string // Deprecated - Can be in the format of 8080/tcp - ExposedPorts map[Port]struct{} - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string - Cmd []string - Dns []string - Image string // Name of the image as it was passed by the operator (eg. could be symbolic) - Volumes map[string]struct{} - VolumesFrom string - WorkingDir string - Entrypoint []string - NetworkDisabled bool - OnBuild []string -} - -func ContainerConfigFromJob(job *engine.Job) *Config { - config := &Config{ - Hostname: job.Getenv("Hostname"), - Domainname: job.Getenv("Domainname"), - User: job.Getenv("User"), - Memory: job.GetenvInt64("Memory"), - MemorySwap: job.GetenvInt64("MemorySwap"), - CpuShares: job.GetenvInt64("CpuShares"), - AttachStdin: job.GetenvBool("AttachStdin"), - AttachStdout: job.GetenvBool("AttachStdout"), - AttachStderr: job.GetenvBool("AttachStderr"), - Tty: job.GetenvBool("Tty"), - OpenStdin: job.GetenvBool("OpenStdin"), - StdinOnce: job.GetenvBool("StdinOnce"), - Image: job.Getenv("Image"), - VolumesFrom: job.Getenv("VolumesFrom"), - WorkingDir: job.Getenv("WorkingDir"), - NetworkDisabled: job.GetenvBool("NetworkDisabled"), - } - job.GetenvJson("ExposedPorts", &config.ExposedPorts) - job.GetenvJson("Volumes", &config.Volumes) - if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { - config.PortSpecs = PortSpecs - } - if Env := job.GetenvList("Env"); Env != nil { - config.Env = Env - } - if Cmd := job.GetenvList("Cmd"); Cmd != nil { - config.Cmd = Cmd - } - if Dns := job.GetenvList("Dns"); Dns != nil { - config.Dns = Dns - } - if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { - config.Entrypoint = Entrypoint - } - - return config -} - -type HostConfig struct { - Binds []string - ContainerIDFile string 
- LxcConf []KeyValuePair - Privileged bool - PortBindings map[Port][]PortBinding - Links []string - PublishAllPorts bool -} - -func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { - hostConfig := &HostConfig{ - ContainerIDFile: job.Getenv("ContainerIDFile"), - Privileged: job.GetenvBool("Privileged"), - PublishAllPorts: job.GetenvBool("PublishAllPorts"), - } - job.GetenvJson("LxcConf", &hostConfig.LxcConf) - job.GetenvJson("PortBindings", &hostConfig.PortBindings) - if Binds := job.GetenvList("Binds"); Binds != nil { - hostConfig.Binds = Binds - } - if Links := job.GetenvList("Links"); Links != nil { - hostConfig.Links = Links - } - - return hostConfig -} - -type BindMap struct { - SrcPath string - DstPath string - Mode string -} - -var ( - ErrContainerStart = errors.New("The container failed to start. Unknown error") - ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") - ErrInvalidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.") - ErrConflictAttachDetach = errors.New("Conflicting options: -a and -d") - ErrConflictDetachAutoRemove = errors.New("Conflicting options: -rm and -d") -) - -type KeyValuePair struct { - Key string - Value string -} - -type PortBinding struct { - HostIp string - HostPort string -} - -// 80/tcp -type Port string - -func (p Port) Proto() string { - parts := strings.Split(string(p), "/") - if len(parts) == 1 { - return "tcp" - } - return parts[1] -} - -func (p Port) Port() string { - return strings.Split(string(p), "/")[0] -} - -func (p Port) Int() int { - i, err := parsePort(p.Port()) - if err != nil { - panic(err) - } - return i -} - -func NewPort(proto, port string) Port { - return Port(fmt.Sprintf("%s/%s", port, proto)) + activeLinks map[string]*links.Link } +// FIXME: move deprecated port stuff to nat to clean up the core. 
type PortMapping map[string]string // Deprecated type NetworkSettings struct { @@ -229,13 +84,13 @@ type NetworkSettings struct { Gateway string Bridge string PortMapping map[string]PortMapping // Deprecated - Ports map[Port][]PortBinding + Ports nat.PortMap } func (settings *NetworkSettings) PortMappingAPI() *engine.Table { var outs = engine.NewTable("", 0) for port, bindings := range settings.Ports { - p, _ := parsePort(port.Port()) + p, _ := nat.ParsePort(port.Port()) if len(bindings) == 0 { out := &engine.Env{} out.SetInt("PublicPort", p) @@ -245,7 +100,7 @@ func (settings *NetworkSettings) PortMappingAPI() *engine.Table { } for _, binding := range bindings { out := &engine.Env{} - h, _ := parsePort(binding.HostPort) + h, _ := nat.ParsePort(binding.HostPort) out.SetInt("PrivatePort", p) out.SetInt("PublicPort", h) out.Set("Type", port.Proto()) @@ -322,7 +177,7 @@ func (container *Container) ToDisk() (err error) { } func (container *Container) readHostConfig() error { - container.hostConfig = &HostConfig{} + container.hostConfig = &runconfig.HostConfig{} // If the hostconfig file does not exist, do not read it. // (We still have to initialize container.hostConfig, // but that's OK, since we just did that above.) @@ -366,6 +221,7 @@ func (container *Container) setupPty() error { container.ptyMaster = ptyMaster container.command.Stdout = ptySlave container.command.Stderr = ptySlave + container.command.Console = ptySlave.Name() // Copy the PTYs to our broadcasters go func() { @@ -637,17 +493,7 @@ func (container *Container) Start() (err error) { log.Printf("WARNING: IPv4 forwarding is disabled. 
Networking will not work") } - if container.Volumes == nil || len(container.Volumes) == 0 { - container.Volumes = make(map[string]string) - container.VolumesRW = make(map[string]bool) - } - - // Apply volumes from another container if requested - if err := container.applyExternalVolumes(); err != nil { - return err - } - - if err := container.createVolumes(); err != nil { + if err := prepareVolumesForContainer(container); err != nil { return err } @@ -671,7 +517,7 @@ func (container *Container) Start() (err error) { } if len(children) > 0 { - container.activeLinks = make(map[string]*Link, len(children)) + container.activeLinks = make(map[string]*links.Link, len(children)) // If we encounter an error make sure that we rollback any network // config and ip table changes @@ -682,8 +528,19 @@ func (container *Container) Start() (err error) { container.activeLinks = nil } - for p, child := range children { - link, err := NewLink(container, child, p, runtime.eng) + for linkAlias, child := range children { + if !child.State.IsRunning() { + return fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + link, err := links.NewLink( + container.NetworkSettings.IPAddress, + child.NetworkSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + runtime.eng) + if err != nil { rollback() return err @@ -721,62 +578,10 @@ func (container *Container) Start() (err error) { return err } - // Setup the root fs as a bind mount of the base fs - root := container.RootfsPath() - if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { - return nil - } - - // Create a bind mount of the base fs as a place where we can add mounts - // without affecting the ability to access the base fs - if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { + if err := mountVolumesForContainer(container, envPath); err != nil { return err } - // Make sure the root fs is private so the mounts here don't propagate 
to basefs - if err := mount.ForceMount(root, root, "none", "private"); err != nil { - return err - } - - // Mount docker specific files into the containers root fs - if err := mount.Mount(runtime.sysInitPath, path.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(envPath, path.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(container.ResolvConfPath, path.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil { - return err - } - - if container.HostnamePath != "" && container.HostsPath != "" { - if err := mount.Mount(container.HostnamePath, path.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(container.HostsPath, path.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil { - return err - } - } - - // Mount user specified volumes - for r, v := range container.Volumes { - mountAs := "ro" - if container.VolumesRW[r] { - mountAs = "rw" - } - - r = path.Join(root, r) - if p, err := utils.FollowSymlinkInScope(r, root); err != nil { - return err - } else { - r = p - } - - if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { - return err - } - } - populateCommand(container) // Setup logging of stdout and stderr to disk @@ -829,205 +634,6 @@ func (container *Container) Start() (err error) { return nil } -func (container *Container) getBindMap() (map[string]BindMap, error) { - // Create the requested bind mounts - binds := make(map[string]BindMap) - // Define illegal container destinations - illegalDsts := []string{"/", "."} - - for _, bind := range container.hostConfig.Binds { - // FIXME: factorize bind parsing in parseBind - var src, dst, mode string - arr := strings.Split(bind, ":") - if len(arr) == 2 { - src = arr[0] - dst = arr[1] - mode = "rw" - } else if len(arr) == 3 { - src = arr[0] - dst = arr[1] - mode = arr[2] - } else { - return nil, fmt.Errorf("Invalid bind 
specification: %s", bind) - } - - // Bail if trying to mount to an illegal destination - for _, illegal := range illegalDsts { - if dst == illegal { - return nil, fmt.Errorf("Illegal bind destination: %s", dst) - } - } - - bindMap := BindMap{ - SrcPath: src, - DstPath: dst, - Mode: mode, - } - binds[path.Clean(dst)] = bindMap - } - return binds, nil -} - -func (container *Container) createVolumes() error { - binds, err := container.getBindMap() - if err != nil { - return err - } - volumesDriver := container.runtime.volumes.driver - // Create the requested volumes if they don't exist - for volPath := range container.Config.Volumes { - volPath = path.Clean(volPath) - volIsDir := true - // Skip existing volumes - if _, exists := container.Volumes[volPath]; exists { - continue - } - var srcPath string - var isBindMount bool - srcRW := false - // If an external bind is defined for this volume, use that as a source - if bindMap, exists := binds[volPath]; exists { - isBindMount = true - srcPath = bindMap.SrcPath - if strings.ToLower(bindMap.Mode) == "rw" { - srcRW = true - } - if stat, err := os.Stat(bindMap.SrcPath); err != nil { - return err - } else { - volIsDir = stat.IsDir() - } - // Otherwise create an directory in $ROOT/volumes/ and use that - } else { - - // Do not pass a container as the parameter for the volume creation. - // The graph driver using the container's information ( Image ) to - // create the parent. 
- c, err := container.runtime.volumes.Create(nil, nil, "", "", nil) - if err != nil { - return err - } - srcPath, err = volumesDriver.Get(c.ID) - if err != nil { - return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) - } - srcRW = true // RW by default - } - - if p, err := filepath.EvalSymlinks(srcPath); err != nil { - return err - } else { - srcPath = p - } - - container.Volumes[volPath] = srcPath - container.VolumesRW[volPath] = srcRW - - // Create the mountpoint - volPath = path.Join(container.basefs, volPath) - rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) - if err != nil { - return err - } - - if _, err := os.Stat(rootVolPath); err != nil { - if os.IsNotExist(err) { - if volIsDir { - if err := os.MkdirAll(rootVolPath, 0755); err != nil { - return err - } - } else { - if err := os.MkdirAll(path.Dir(rootVolPath), 0755); err != nil { - return err - } - if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil { - return err - } else { - f.Close() - } - } - } - } - - // Do not copy or change permissions if we are mounting from the host - if srcRW && !isBindMount { - volList, err := ioutil.ReadDir(rootVolPath) - if err != nil { - return err - } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(srcPath) - if err != nil { - return err - } - if len(srcList) == 0 { - // If the source volume is empty copy files from the root into the volume - if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { - return err - } - - var stat syscall.Stat_t - if err := syscall.Stat(rootVolPath, &stat); err != nil { - return err - } - var srcStat syscall.Stat_t - if err := syscall.Stat(srcPath, &srcStat); err != nil { - return err - } - // Change the source volume's ownership if it differs from the root - // files that were just copied - if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { - if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - } - } - } 
- } - } - return nil -} - -func (container *Container) applyExternalVolumes() error { - if container.Config.VolumesFrom != "" { - containerSpecs := strings.Split(container.Config.VolumesFrom, ",") - for _, containerSpec := range containerSpecs { - mountRW := true - specParts := strings.SplitN(containerSpec, ":", 2) - switch len(specParts) { - case 0: - return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom) - case 2: - switch specParts[1] { - case "ro": - mountRW = false - case "rw": // mountRW is already true - default: - return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec) - } - } - c := container.runtime.Get(specParts[0]) - if c == nil { - return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID) - } - for volPath, id := range c.Volumes { - if _, exists := container.Volumes[volPath]; exists { - continue - } - if err := os.MkdirAll(path.Join(container.basefs, volPath), 0755); err != nil { - return err - } - container.Volumes[volPath] = id - if isRW, exists := c.VolumesRW[volPath]; exists { - container.VolumesRW[volPath] = isRW && mountRW - } - } - - } - } - return nil -} - func (container *Container) Run() error { if err := container.Start(); err != nil { return err @@ -1152,8 +758,8 @@ func (container *Container) allocateNetwork() error { } var ( - portSpecs = make(map[Port]struct{}) - bindings = make(map[Port][]PortBinding) + portSpecs = make(nat.PortSet) + bindings = make(nat.PortMap) ) if !container.State.IsGhost() { @@ -1177,7 +783,7 @@ func (container *Container) allocateNetwork() error { for port := range portSpecs { binding := bindings[port] if container.hostConfig.PublishAllPorts && len(binding) == 0 { - binding = append(binding, PortBinding{}) + binding = append(binding, nat.PortBinding{}) } for i := 0; i < len(binding); i++ { @@ -1300,29 +906,7 @@ func (container *Container) cleanup() { } } - var ( - root = container.RootfsPath() - mounts = []string{ - root, - 
path.Join(root, "/.dockerinit"), - path.Join(root, "/.dockerenv"), - path.Join(root, "/etc/resolv.conf"), - } - ) - - if container.HostnamePath != "" && container.HostsPath != "" { - mounts = append(mounts, path.Join(root, "/etc/hostname"), path.Join(root, "/etc/hosts")) - } - - for r := range container.Volumes { - mounts = append(mounts, path.Join(root, r)) - } - - for i := len(mounts) - 1; i >= 0; i-- { - if lastError := mount.Unmount(mounts[i]); lastError != nil { - log.Printf("Failed to umount %v: %v", mounts[i], lastError) - } - } + unmountVolumesForContainer(container) if err := container.Unmount(); err != nil { log.Printf("%v: Failed to umount filesystem: %v", container.ID, err) @@ -1390,6 +974,13 @@ func (container *Container) Stop(seconds int) error { } func (container *Container) Restart(seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := container.Mount(); err == nil { + defer container.Unmount() + } + if err := container.Stop(seconds); err != nil { return err } @@ -1422,7 +1013,11 @@ func (container *Container) ExportRw() (archive.Archive, error) { container.Unmount() return nil, err } - return EofReader(archive, func() { container.Unmount() }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), nil } func (container *Container) Export() (archive.Archive, error) { @@ -1435,7 +1030,11 @@ func (container *Container) Export() (archive.Archive, error) { container.Unmount() return nil, err } - return EofReader(archive, func() { container.Unmount() }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), nil } func (container *Container) WaitTimeout(timeout time.Duration) error { @@ -1562,7 +1161,7 @@ func (container *Container) GetSize() (int64, int64) { return sizeRw, sizeRootfs } -func 
(container *Container) Copy(resource string) (archive.Archive, error) { +func (container *Container) Copy(resource string) (io.ReadCloser, error) { if err := container.Mount(); err != nil { return nil, err } @@ -1589,11 +1188,15 @@ func (container *Container) Copy(resource string) (archive.Archive, error) { if err != nil { return nil, err } - return EofReader(archive, func() { container.Unmount() }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), nil } // Returns true if the container exposes a certain port -func (container *Container) Exposes(p Port) bool { +func (container *Container) Exposes(p nat.Port) bool { _, exists := container.Config.ExposedPorts[p] return exists } diff --git a/container_unit_test.go b/container_unit_test.go index 679ff57e73..3877b7f0da 100644 --- a/container_unit_test.go +++ b/container_unit_test.go @@ -1,28 +1,12 @@ package docker import ( + "github.com/dotcloud/docker/nat" "testing" ) -func TestParseLxcConfOpt(t *testing.T) { - opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} - - for _, o := range opts { - k, v, err := parseLxcOpt(o) - if err != nil { - t.FailNow() - } - if k != "lxc.utsname" { - t.Fail() - } - if v != "docker" { - t.Fail() - } - } -} - func TestParseNetworkOptsPrivateOnly(t *testing.T) { - ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"}) + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) if err != nil { t.Fatal(err) } @@ -64,7 +48,7 @@ func TestParseNetworkOptsPrivateOnly(t *testing.T) { } func TestParseNetworkOptsPublic(t *testing.T) { - ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"}) + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) if err != nil { t.Fatal(err) } @@ -106,7 +90,7 @@ func TestParseNetworkOptsPublic(t *testing.T) { } func TestParseNetworkOptsUdp(t *testing.T) { - ports, bindings, err := 
parsePortSpecs([]string{"192.168.1.100::6000/udp"}) + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) if err != nil { t.Fatal(err) } diff --git a/contrib/desktop-integration/README.txt b/contrib/desktop-integration/README.md similarity index 71% rename from contrib/desktop-integration/README.txt rename to contrib/desktop-integration/README.md index 2f55c979e3..02181a5f75 100644 --- a/contrib/desktop-integration/README.txt +++ b/contrib/desktop-integration/README.md @@ -8,4 +8,4 @@ Examples ======== * Data container: ./data/Dockerfile creates a data image sharing /data volume -* Firefox: ./firefox/Dockerfile shows a way to dockerize a common multimedia application +* Iceweasel: ./iceweasel/Dockerfile shows a way to dockerize a common multimedia application diff --git a/contrib/desktop-integration/data/Dockerfile b/contrib/desktop-integration/data/Dockerfile index 453afdd3d6..a9843a52ad 100644 --- a/contrib/desktop-integration/data/Dockerfile +++ b/contrib/desktop-integration/data/Dockerfile @@ -11,28 +11,28 @@ # # Build data image # docker build -t data -rm . # -# # Create a data container. (eg: firefox-data) -# docker run -name firefox-data data true +# # Create a data container. 
(eg: iceweasel-data) +# docker run -name iceweasel-data data true # # # List data from it -# docker run -volumes-from firefox-data busybox ls -al /data +# docker run -volumes-from iceweasel-data busybox ls -al /data docker-version 0.6.5 # Smallest base image, just to launch a container -from busybox -maintainer Daniel Mizyrycki +FROM busybox +MAINTAINER Daniel Mizyrycki # Create a regular user -run echo 'sysadmin:x:1000:1000::/data:/bin/sh' >> /etc/passwd -run echo 'sysadmin:x:1000:' >> /etc/group +RUN echo 'sysadmin:x:1000:1000::/data:/bin/sh' >> /etc/passwd +RUN echo 'sysadmin:x:1000:' >> /etc/group # Create directory for that user -run mkdir /data -run chown sysadmin.sysadmin /data +RUN mkdir /data +RUN chown sysadmin.sysadmin /data # Add content to /data. This will keep sysadmin ownership -run touch /data/init_volume +RUN touch /data/init_volume # Create /data volume VOLUME /data diff --git a/contrib/desktop-integration/firefox/Dockerfile b/contrib/desktop-integration/firefox/Dockerfile deleted file mode 100644 index f8924f4b4a..0000000000 --- a/contrib/desktop-integration/firefox/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -# VERSION: 0.7 -# DESCRIPTION: Create firefox container with its dependencies -# AUTHOR: Daniel Mizyrycki -# COMMENTS: -# This file describes how to build a Firefox container with all -# dependencies installed. It uses native X11 unix socket and alsa -# sound devices. Tested on Debian 7.2 -# USAGE: -# # Download Firefox Dockerfile -# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/firefox/Dockerfile -# -# # Build firefox image -# docker build -t firefox -rm . -# -# # Run stateful data-on-host firefox. 
For ephemeral, remove -v /data/firefox:/data -# docker run -v /data/firefox:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ -# -e DISPLAY=unix$DISPLAY firefox -# -# # To run stateful dockerized data containers -# docker run -volumes-from firefox-data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ -# -e DISPLAY=unix$DISPLAY firefox - -docker-version 0.6.5 - -# Base docker image -from tianon/debian:wheezy -maintainer Daniel Mizyrycki - -# Install firefox dependencies -run echo "deb http://ftp.debian.org/debian/ wheezy main contrib" > /etc/apt/sources.list -run apt-get update -run DEBIAN_FRONTEND=noninteractive apt-get install -y libXrender1 libasound2 \ - libdbus-glib-1-2 libgtk2.0-0 libpango1.0-0 libxt6 wget bzip2 sudo - -# Install Firefox -run mkdir /application -run cd /application; wget -O - \ - http://ftp.mozilla.org/pub/mozilla.org/firefox/releases/25.0/linux-x86_64/en-US/firefox-25.0.tar.bz2 | tar jx - -# create sysadmin account -run useradd -m -d /data -p saIVpsc0EVTwA sysadmin -run sed -Ei 's/sudo:x:27:/sudo:x:27:sysadmin/' /etc/group -run sed -Ei 's/(\%sudo\s+ALL=\(ALL\:ALL\) )ALL/\1 NOPASSWD:ALL/' /etc/sudoers - -# Autorun firefox. -no-remote is necessary to create a new container, as firefox -# appears to communicate with itself through X11. -cmd ["/bin/sh", "-c", "/usr/bin/sudo -u sysadmin -H -E /application/firefox/firefox -no-remote"] diff --git a/contrib/desktop-integration/iceweasel/Dockerfile b/contrib/desktop-integration/iceweasel/Dockerfile new file mode 100644 index 0000000000..721cc6d2cf --- /dev/null +++ b/contrib/desktop-integration/iceweasel/Dockerfile @@ -0,0 +1,41 @@ +# VERSION: 0.7 +# DESCRIPTION: Create iceweasel container with its dependencies +# AUTHOR: Daniel Mizyrycki +# COMMENTS: +# This file describes how to build an Iceweasel container with all +# dependencies installed. 
It uses native X11 unix socket and alsa +# sound devices. Tested on Debian 7.2 +# USAGE: +# # Download Iceweasel Dockerfile +# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile +# +# # Build iceweasel image +# docker build -t iceweasel -rm . +# +# # Run stateful data-on-host iceweasel. For ephemeral, remove -v /data/iceweasel:/data +# docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ +# -e DISPLAY=unix$DISPLAY iceweasel +# +# # To run stateful dockerized data containers +# docker run -volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ +# -e DISPLAY=unix$DISPLAY iceweasel + +docker-version 0.6.5 + +# Base docker image +FROM debian:wheezy +MAINTAINER Daniel Mizyrycki + +# Install Iceweasel and "sudo" +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo + +# create sysadmin account +RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin +RUN sed -Ei 's/sudo:x:27:/sudo:x:27:sysadmin/' /etc/group +RUN sed -Ei 's/(\%sudo\s+ALL=\(ALL\:ALL\) )ALL/\1 NOPASSWD:ALL/' /etc/sudoers + +# Autorun iceweasel. -no-remote is necessary to create a new container, as +# iceweasel appears to communicate with itself through X11. 
+CMD ["/usr/bin/sudo", "-u", "sysadmin", "-H", "-E", "/usr/bin/iceweasel", "-no-remote"] diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker new file mode 100755 index 0000000000..2b75c6903f --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,123 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.io +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.io + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.io +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +exec="/usr/bin/$prog" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + [ -x $exec ] || exit 5 + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + $exec -d $other_args &>> $logfile & + pid=$! + touch $lockfile + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile $prog + retval=$? 
+ echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/contrib/init/sysvinit-redhat/docker.sysconfig b/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 0000000000..9c99dd1966 --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker -d + +other_args="" diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh index 54e99f1f04..d7ffdc1902 100755 --- a/contrib/mkimage-yum.sh +++ b/contrib/mkimage-yum.sh @@ -51,7 +51,7 @@ done yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \ --setopt=group_package_types=mandatory -y groupinstall Core -yum -c "$yum_config" --installroot="$mount" -y clean all +yum -c "$yum_config" --installroot="$target" -y clean all cat > "$target"/etc/sysconfig/network < + + + + name + Comments + scope + source.dockerfile + settings + + shellVariables + + + name + TM_COMMENT_START + value + # + + + + uuid + 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 + + diff --git a/contrib/syntax/textmate/Dockerfile.tmLanguage b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage similarity index 50% rename from contrib/syntax/textmate/Dockerfile.tmLanguage rename to 
contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage index fa8f38992e..13b586e5cb 100644 --- a/contrib/syntax/textmate/Dockerfile.tmLanguage +++ b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage @@ -12,15 +12,37 @@ match - ^\s*(FROM|MAINTAINER|RUN|CMD|EXPOSE|ENV|ADD)\s - name - keyword.control.dockerfile + ^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR)\s + captures + + 0 + + name + keyword.control.dockerfile + + 1 + + name + keyword.other.special-method.dockerfile + + match - ^\s*(ENTRYPOINT|VOLUME|USER|WORKDIR)\s - name - keyword.operator.dockerfile + ^\s*(ONBUILD\s+)?(CMD|ENTRYPOINT)\s + captures + + 0 + + name + keyword.operator.dockerfile + + 1 + + name + keyword.other.special-method.dockerfile + + begin @@ -39,6 +61,23 @@ + + begin + ' + end + ' + name + string.quoted.single.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + match ^\s*#.*$ diff --git a/contrib/syntax/textmate/Docker.tmbundle/info.plist b/contrib/syntax/textmate/Docker.tmbundle/info.plist new file mode 100644 index 0000000000..239f4b0a9b --- /dev/null +++ b/contrib/syntax/textmate/Docker.tmbundle/info.plist @@ -0,0 +1,16 @@ + + + + + contactEmailRot13 + germ@andz.com.ar + contactName + GermanDZ + description + Helpers for Docker. 
+ name + Docker + uuid + 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 + + diff --git a/contrib/syntax/textmate/Dockerfile.YAML-tmLanguage b/contrib/syntax/textmate/Dockerfile.YAML-tmLanguage deleted file mode 100644 index 2323c40558..0000000000 --- a/contrib/syntax/textmate/Dockerfile.YAML-tmLanguage +++ /dev/null @@ -1,23 +0,0 @@ -# [PackageDev] target_format: plist, ext: tmLanguage --- -name: Dockerfile -scopeName: source.dockerfile -uuid: a39d8795-59d2-49af-aa00-fe74ee29576e - -patterns: -# Keywords -- name: keyword.control.dockerfile - match: ^\s*(FROM|MAINTAINER|RUN|CMD|EXPOSE|ENV|ADD)\s -- name: keyword.operator.dockerfile - match: ^\s*(ENTRYPOINT|VOLUME|USER|WORKDIR)\s -# String -- name: string.quoted.double.dockerfile - begin: "\"" - end: "\"" - patterns: - - name: constant.character.escaped.dockerfile - match: \\. -# Comment -- name: comment.block.dockerfile - match: ^\s*#.*$ -... \ No newline at end of file diff --git a/contrib/syntax/textmate/README.md b/contrib/syntax/textmate/README.md index 157b5c9da0..e78b76af45 100644 --- a/contrib/syntax/textmate/README.md +++ b/contrib/syntax/textmate/README.md @@ -1,9 +1,16 @@ -# Dockerfile.tmLanguage +# Docker.tmbundle -Pretty basic Dockerfile.tmLanguage for Sublime Text syntax highlighting. +Dockerfile syntax highlighting for TextMate and Sublime Text. -PR's with syntax updates, suggestions etc. are all very much appreciated! +## Install -I'll get to making this installable via Package Control soon! +### Sublime Text + +Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). +Search for *Dockerfile Syntax Highlighting* + +### TextMate 2 + +Copy the directory `Docker.tmbundle` (shown as a Package in OS X) to `~/Library/Application Support/TextMate/Managed/Bundles` and enjoy. 
diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim index fb5cab23cb..ec79ae7476 100644 --- a/contrib/syntax/vim/syntax/dockerfile.vim +++ b/contrib/syntax/vim/syntax/dockerfile.vim @@ -11,8 +11,7 @@ let b:current_syntax = "dockerfile" syntax case ignore -syntax match dockerfileKeyword /\v^\s*(FROM|MAINTAINER|RUN|CMD|EXPOSE|ENV|ADD)\s/ -syntax match dockerfileKeyword /\v^\s*(ENTRYPOINT|VOLUME|USER|WORKDIR)\s/ +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR)\s/ highlight link dockerfileKeyword Keyword syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ diff --git a/docker/docker.go b/docker/docker.go index d92f4d98ea..02c99b9316 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -6,19 +6,16 @@ import ( "os" "strings" - "github.com/dotcloud/docker" + _ "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" flag "github.com/dotcloud/docker/pkg/mflag" + "github.com/dotcloud/docker/pkg/opts" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" ) -var ( - GITCOMMIT string - VERSION string -) - func main() { if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" { // Running in init mode @@ -36,13 +33,13 @@ func main() { pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") - flDns = docker.NewListOpts(docker.ValidateIp4Address) + flDns = opts.NewListOpts(opts.ValidateIp4Address) flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules") flEnableIpForward 
= flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward") flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication") flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver") - flHosts = docker.NewListOpts(docker.ValidateHost) + flHosts = opts.NewListOpts(api.ValidateHost) flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available") ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") @@ -61,6 +58,9 @@ func main() { // If we do not have a host, default to unix socket defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) } + if _, err := api.ValidateHost(defaultHost); err != nil { + log.Fatal(err) + } flHosts.Set(defaultHost) } @@ -71,8 +71,6 @@ func main() { if *flDebug { os.Setenv("DEBUG", "1") } - docker.GITCOMMIT = GITCOMMIT - docker.VERSION = VERSION if *flDaemon { if flag.NArg() != 0 { flag.Usage() @@ -83,28 +81,39 @@ func main() { if err != nil { log.Fatal(err) } - // Load plugin: httpapi - job := eng.Job("initserver") - job.Setenv("Pidfile", *pidfile) - job.Setenv("Root", *flRoot) - job.SetenvBool("AutoRestart", *flAutoRestart) - job.SetenvList("Dns", flDns.GetAll()) - job.SetenvBool("EnableIptables", *flEnableIptables) - job.SetenvBool("EnableIpForward", *flEnableIpForward) - job.Setenv("BridgeIface", *bridgeName) - job.Setenv("BridgeIP", *bridgeIp) - job.Setenv("DefaultIp", *flDefaultIp) - job.SetenvBool("InterContainerCommunication", *flInterContainerComm) - job.Setenv("GraphDriver", *flGraphDriver) - job.SetenvInt("Mtu", *flMtu) - if err := job.Run(); err != nil { - log.Fatal(err) - } + // load the daemon in the 
background so we can immediately start + // the http api so that connections don't fail while the daemon + // is booting + go func() { + // Load plugin: httpapi + job := eng.Job("initserver") + job.Setenv("Pidfile", *pidfile) + job.Setenv("Root", *flRoot) + job.SetenvBool("AutoRestart", *flAutoRestart) + job.SetenvList("Dns", flDns.GetAll()) + job.SetenvBool("EnableIptables", *flEnableIptables) + job.SetenvBool("EnableIpForward", *flEnableIpForward) + job.Setenv("BridgeIface", *bridgeName) + job.Setenv("BridgeIP", *bridgeIp) + job.Setenv("DefaultIp", *flDefaultIp) + job.SetenvBool("InterContainerCommunication", *flInterContainerComm) + job.Setenv("GraphDriver", *flGraphDriver) + job.SetenvInt("Mtu", *flMtu) + if err := job.Run(); err != nil { + log.Fatal(err) + } + // after the daemon is done setting up we can tell the api to start + // accepting connections + if err := eng.Job("acceptconnections").Run(); err != nil { + log.Fatal(err) + } + }() + // Serve api - job = eng.Job("serveapi", flHosts.GetAll()...) + job := eng.Job("serveapi", flHosts.GetAll()...) 
job.SetenvBool("Logging", true) job.SetenvBool("EnableCors", *flEnableCors) - job.Setenv("Version", VERSION) + job.Setenv("Version", dockerversion.VERSION) if err := job.Run(); err != nil { log.Fatal(err) } @@ -113,7 +122,7 @@ func main() { log.Fatal("Please specify only one -H") } protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2) - if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil { + if err := api.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil { if sterr, ok := err.(*utils.StatusError); ok { if sterr.Status != "" { log.Println(sterr.Status) @@ -126,5 +135,5 @@ func main() { } func showVersion() { - fmt.Printf("Docker version %s, build %s\n", VERSION, GITCOMMIT) + fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT) } diff --git a/dockerinit/dockerinit.go b/dockerinit/dockerinit.go index 0c363f4ac3..1d0689387a 100644 --- a/dockerinit/dockerinit.go +++ b/dockerinit/dockerinit.go @@ -4,11 +4,6 @@ import ( "github.com/dotcloud/docker/sysinit" ) -var ( - GITCOMMIT string - VERSION string -) - func main() { // Running in init mode sysinit.SysInit() diff --git a/dockerversion/dockerversion.go b/dockerversion/dockerversion.go new file mode 100644 index 0000000000..c130ac2810 --- /dev/null +++ b/dockerversion/dockerversion.go @@ -0,0 +1,15 @@ +package dockerversion + +// FIXME: this should be embedded in the docker/docker.go, +// but we can't because distro policy requires us to +// package a separate dockerinit binary, and that binary needs +// to know its version too. 
+ +var ( + GITCOMMIT string + VERSION string + + IAMSTATIC bool // whether or not Docker itself was compiled statically via ./hack/make.sh binary + INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary + INITPATH string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch) +) diff --git a/docs/sources/contributing/devenvironment.rst b/docs/sources/contributing/devenvironment.rst index 6675173584..cab4c71afb 100644 --- a/docs/sources/contributing/devenvironment.rst +++ b/docs/sources/contributing/devenvironment.rst @@ -24,7 +24,17 @@ a working, up-to-date docker installation, then continue to the next step. -Step 2: Check out the Source +Step 2: Install tools used for this tutorial +-------------------------------------------- + +Install ``git``; honest, it's very good. You can use other ways to get the Docker +source, but they're not anywhere near as easy. + +Install ``make``. This tutorial uses our base Makefile to kick off the docker +containers in a repeatable and consistent way. Again, you can do it in other ways +but you need to do more work. + +Step 3: Check out the Source ---------------------------- .. code-block:: bash @@ -35,7 +45,7 @@ Step 2: Check out the Source To checkout a different revision just use ``git checkout`` with the name of branch or revision number. -Step 3: Build the Environment +Step 4: Build the Environment ----------------------------- This following command will build a development environment using the Dockerfile in the current directory. Essentially, it will install all the build and runtime dependencies necessary to build and test Docker. This command will take some time to complete when you first execute it. @@ -48,7 +58,7 @@ If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment. 
-Step 4: Build the Docker Binary +Step 5: Build the Docker Binary ------------------------------- To create the Docker binary, run this command: diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile new file mode 100644 index 0000000000..af1423f258 --- /dev/null +++ b/docs/sources/examples/postgresql_service.Dockerfile @@ -0,0 +1,53 @@ +# +# example Dockerfile for http://docs.docker.io/en/latest/examples/postgresql_service/ +# + +FROM ubuntu +MAINTAINER SvenDowideit@docker.com + +# Add the PostgreSQL PGP key to verify their Debian packages. +# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 + +# Add PostgreSQL's repository. It contains the most recent stable release +# of PostgreSQL, ``9.3``. +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list + +# Update the Ubuntu and PostgreSQL repository indexes +RUN apt-get update + +# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 +# There are some warnings (in red) that show up during the build. You can hide +# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive +RUN apt-get -y -q install python-software-properties software-properties-common +RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 + +# Note: The official Debian and Ubuntu images automatically ``apt-get clean`` +# after each ``apt-get`` + +# Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` +USER postgres + +# Create a PostgreSQL role named ``docker`` with ``docker`` as the password and +# then create a database `docker` owned by the ``docker`` role. 
+# Note: here we use ``&&\`` to run commands one after the other - the ``\`` +# allows the RUN command to span multiple lines. +RUN /etc/init.d/postgresql start &&\ + psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ + createdb -O docker docker + +# Adjust PostgreSQL configuration so that remote connections to the +# database are possible. +RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf + +# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` +RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf + +# Expose the PostgreSQL port +EXPOSE 5432 + +# Add VOLUMEs to allow backup of config, logs and databases +VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] + +# Set the default command to run when starting the container +CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] diff --git a/docs/sources/examples/postgresql_service.rst b/docs/sources/examples/postgresql_service.rst index 1c427563e7..5a2323471b 100644 --- a/docs/sources/examples/postgresql_service.rst +++ b/docs/sources/examples/postgresql_service.rst @@ -9,152 +9,109 @@ PostgreSQL Service .. include:: example_header.inc -.. note:: - - A shorter version of `this blog post`_. - -.. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/ - Installing PostgreSQL on Docker ------------------------------- -Run an interactive shell in a Docker container. +Assuming there is no Docker image that suits your needs in `the index`_, you +can create one yourself. -.. code-block:: bash +.. _the index: http://index.docker.io - sudo docker run -i -t ubuntu /bin/bash - -Update its dependencies. - -.. code-block:: bash - - apt-get update - -Install ``python-software-properties``, ``software-properties-common``, ``wget`` and ``vim``. - -.. 
code-block:: bash - - apt-get -y install python-software-properties software-properties-common wget vim - -Add PostgreSQL's repository. It contains the most recent stable release -of PostgreSQL, ``9.3``. - -.. code-block:: bash - - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - - echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list - apt-get update - -Finally, install PostgreSQL 9.3 - -.. code-block:: bash - - apt-get -y install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 - -Now, create a PostgreSQL superuser role that can create databases and -other roles. Following Vagrant's convention the role will be named -``docker`` with ``docker`` password assigned to it. - -.. code-block:: bash - - su postgres -c "createuser -P -d -r -s docker" - -Create a test database also named ``docker`` owned by previously created ``docker`` -role. - -.. code-block:: bash - - su postgres -c "createdb -O docker docker" - -Adjust PostgreSQL configuration so that remote connections to the -database are possible. Make sure that inside -``/etc/postgresql/9.3/main/pg_hba.conf`` you have following line: - -.. code-block:: bash - - host all all 0.0.0.0/0 md5 - -Additionaly, inside ``/etc/postgresql/9.3/main/postgresql.conf`` -uncomment ``listen_addresses`` like so: - -.. code-block:: bash - - listen_addresses='*' +Start by creating a new Dockerfile: .. note:: This PostgreSQL setup is for development only purposes. Refer - to PostgreSQL documentation how to fine-tune these settings so that it - is secure enough. + to the PostgreSQL documentation to fine-tune these settings so that it - is suitably secure. -Exit. +.. literalinclude:: postgresql_service.Dockerfile + +Build an image from the Dockerfile and assign it a name. .. code-block:: bash - exit + $ sudo docker build -t eg_postgresql . -Create an image from our container and assign it a name. 
The ```` -is in the Bash prompt; you can also locate it using ``docker ps -a``. +And run the PostgreSQL server container (in the foreground): .. code-block:: bash - sudo docker commit /postgresql + $ sudo docker run -rm -P -name pg_test eg_postgresql -Finally, run the PostgreSQL server via ``docker``. +There are 2 ways to connect to the PostgreSQL server. We can use +:ref:`working_with_links_names`, or we can access it from our host (or the network). + +.. note:: The ``-rm`` removes the container and its image when the container + exits successfully. + +Using container linking +^^^^^^^^^^^^^^^^^^^^^^^ +​+Containers can be linked to another container's ports directly using +``-link remote_name:local_alias`` in the client's ``docker run``. This will +set a number of environment variables that can then be used to connect: .. code-block:: bash - CONTAINER=$(sudo docker run -d -p 5432 \ - -t /postgresql \ - /bin/su postgres -c '/usr/lib/postgresql/9.3/bin/postgres \ - -D /var/lib/postgresql/9.3/main \ - -c config_file=/etc/postgresql/9.3/main/postgresql.conf') + $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash -Connect the PostgreSQL server using ``psql`` (You will need the -postgresql client installed on the machine. For ubuntu, use something -like ``sudo apt-get install postgresql-client``). + postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password + +Connecting from your host system +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Assuming you have the postgresql-client installed, you can use the host-mapped port +to test as well. You need to use ``docker ps`` to find out what local host port the +container is mapped to first: .. 
code-block:: bash - CONTAINER_IP=$(sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $CONTAINER) - psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test + $ psql -h localhost -p 49153 -d docker -U docker --password -As before, create roles or databases if needed. +Testing the database +^^^^^^^^^^^^^^^^^^^^ + +Once you have authenticated and have a ``docker =#`` prompt, you can +create a table and populate it. .. code-block:: bash psql (9.3.1) Type "help" for help. - docker=# CREATE DATABASE foo OWNER=docker; - CREATE DATABASE + docker=# CREATE TABLE cities ( + docker(# name varchar(80), + docker(# location point + docker(# ); + CREATE TABLE + docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); + INSERT 0 1 + docker=# select * from cities; + name | location + ---------------+----------- + San Francisco | (-194,53) + (1 row) -Additionally, publish your newly created image on the Docker Index. +Using the container volumes +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can use the defined volumes to inspect the PostgreSQL log files and to backup your +configuration and data: .. code-block:: bash - sudo docker login - Username: - [...] + docker run -rm --volumes-from pg_test -t -i busybox sh -.. code-block:: bash + / # ls + bin etc lib linuxrc mnt proc run sys usr + dev home lib64 media opt root sbin tmp var + / # ls /etc/postgresql/9.3/main/ + environment pg_hba.conf postgresql.conf + pg_ctl.conf pg_ident.conf start.conf + /tmp # ls /var/log + ldconfig postgresql - sudo docker push /postgresql - -PostgreSQL service auto-launch ------------------------------- - -Running our image seems complicated. We have to specify the whole command with -``docker run``. Let's simplify it so the service starts automatically when the -container starts. - -.. 
code-block:: bash - - sudo docker commit -run='{"Cmd": \ - ["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.3/bin/postgres -D \ - /var/lib/postgresql/9.3/main -c \ - config_file=/etc/postgresql/9.3/main/postgresql.conf"], "PortSpecs": ["5432"]}' \ - /postgresql - -From now on, just type ``docker run /postgresql`` and -PostgreSQL should automatically start. diff --git a/docs/sources/examples/using_supervisord.rst b/docs/sources/examples/using_supervisord.rst index eed063292d..750b6c2334 100644 --- a/docs/sources/examples/using_supervisord.rst +++ b/docs/sources/examples/using_supervisord.rst @@ -112,7 +112,7 @@ Once we've got a built image we can launch a container from it. .. code-block:: bash - sudo docker run -p 22 -p 80 -t -i /supervisor + sudo docker run -p 22 -p 80 -t -i /supervisord 2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file) 2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing 2013-11-25 18:53:22,342 INFO supervisord started with pid 1 diff --git a/docs/sources/faq.rst b/docs/sources/faq.rst index 037f4d797b..5c8e69703f 100644 --- a/docs/sources/faq.rst +++ b/docs/sources/faq.rst @@ -175,6 +175,7 @@ Linux: - Gentoo - ArchLinux - openSUSE 12.3+ +- CRUX 3.0+ Cloud: @@ -182,6 +183,12 @@ Cloud: - Google Compute Engine - Rackspace +How do I report a security issue with Docker? +............................................. + +You can learn about the project's security policy `here `_ +and report security issues to this `mailbox `_. + Can I help by adding some questions and answers? ................................................ 
diff --git a/docs/sources/installation/amazon.rst b/docs/sources/installation/amazon.rst index e8fdc2c1ca..31090a070c 100644 --- a/docs/sources/installation/amazon.rst +++ b/docs/sources/installation/amazon.rst @@ -1,5 +1,5 @@ :title: Installation on Amazon EC2 -:description: Docker installation on Amazon EC2 +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: amazon ec2, virtualization, cloud, docker, documentation, installation Amazon EC2 diff --git a/docs/sources/installation/archlinux.rst b/docs/sources/installation/archlinux.rst index 2d823bfd46..c9b4c1d2c5 100644 --- a/docs/sources/installation/archlinux.rst +++ b/docs/sources/installation/archlinux.rst @@ -1,5 +1,5 @@ :title: Installation on Arch Linux -:description: Docker installation on Arch Linux. +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: arch linux, virtualization, docker, documentation, installation .. _arch_linux: diff --git a/docs/sources/installation/cruxlinux.rst b/docs/sources/installation/cruxlinux.rst new file mode 100644 index 0000000000..d1970cd1bf --- /dev/null +++ b/docs/sources/installation/cruxlinux.rst @@ -0,0 +1,98 @@ +:title: Installation on CRUX Linux +:description: Docker installation on CRUX Linux. +:keywords: crux linux, virtualization, Docker, documentation, installation + +.. _crux_linux: + + +CRUX Linux +========== + +.. include:: install_header.inc + +.. include:: install_unofficial.inc + +Installing on CRUX Linux can be handled via the ports from `James Mills `_: + +* `docker `_ + +* `docker-bin `_ + +* `docker-git `_ + +The ``docker`` port will install the latest tagged version of Docker. +The ``docker-bin`` port will install the latest tagged version of Docker from upstream built binaries. +The ``docker-git`` package will build from the current master branch. 
+ + +Installation +------------ + +For the time being (*until the CRUX Docker port(s) get into the official contrib repository*) you will need to install +`James Mills' `_ ports repository. You can do so via: + +Download the ``httpup`` file to ``/etc/ports/``: +:: + + curl -q -o - http://crux.nu/portdb/?a=getup&q=prologic > /etc/ports/prologic.httpup + + +Add ``prtdir /usr/ports/prologic`` to ``/etc/prt-get.conf``: +:: + + vim /etc/prt-get.conf + + # or: + echo "prtdir /usr/ports/prologic" >> /etc/prt-get.conf + + +Update ports and prt-get cache: +:: + + ports -u + prt-get cache + + +To install (*and its dependencies*): +:: + + prt-get depinst docker + + +Use ``docker-bin`` for the upstream binary or ``docker-git`` to build and install from the master branch from git. + + +Kernel Requirements +------------------- + +To have a working **CRUX+Docker** Host you must ensure your Kernel +has the necessary modules enabled for LXC containers to function +correctly and Docker Daemon to work properly. + +Please read the ``README.rst``: +:: + + prt-get readme docker + +There is a ``test_kernel_config.sh`` script in the above ports which you can use to test your Kernel configuration: + +:: + + cd /usr/ports/prologic/docker + ./test_kernel_config.sh /usr/src/linux/.config + + +Starting Docker +--------------- + +There is a rc script created for Docker. To start the Docker service: + +:: + + sudo su - + /etc/rc.d/docker start + +To start on system boot: + +- Edit ``/etc/rc.conf`` +- Put ``docker`` into the ``SERVICES=(...)`` array after ``net``. diff --git a/docs/sources/installation/fedora.rst b/docs/sources/installation/fedora.rst index 6dd2bf91d9..7e0aee78fd 100644 --- a/docs/sources/installation/fedora.rst +++ b/docs/sources/installation/fedora.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Fedora +:title: Installation on Fedora :description: Please note this project is currently under heavy development. It should not be used in production. 
:keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux diff --git a/docs/sources/installation/frugalware.rst b/docs/sources/installation/frugalware.rst index de2b92ae10..ed9bb2bfaa 100644 --- a/docs/sources/installation/frugalware.rst +++ b/docs/sources/installation/frugalware.rst @@ -1,5 +1,5 @@ :title: Installation on FrugalWare -:description: Docker installation on FrugalWare. +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: frugalware linux, virtualization, docker, documentation, installation .. _frugalware: diff --git a/docs/sources/installation/gentoolinux.rst b/docs/sources/installation/gentoolinux.rst index 421af0a1e7..5abfddeb91 100644 --- a/docs/sources/installation/gentoolinux.rst +++ b/docs/sources/installation/gentoolinux.rst @@ -1,5 +1,5 @@ -:title: Installation on Gentoo Linux -:description: Docker installation instructions and nuances for Gentoo Linux. +:title: Installation on Gentoo +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: gentoo linux, virtualization, docker, documentation, installation .. _gentoo_linux: diff --git a/docs/sources/installation/google.rst b/docs/sources/installation/google.rst index 88118778a2..cc1df5da24 100644 --- a/docs/sources/installation/google.rst +++ b/docs/sources/installation/google.rst @@ -50,18 +50,9 @@ docker-playground:~$ curl get.docker.io | bash docker-playground:~$ sudo update-rc.d docker defaults -6. If running in zones: ``us-central1-a``, ``europe-west1-1``, and ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses. -`See this issue `_ for more details. - -.. 
code-block:: bash - - docker-playground:~$ echo 'DOCKER_OPTS="$DOCKER_OPTS -mtu 1460"' | sudo tee -a /etc/default/docker - docker-playground:~$ sudo service docker restart - -7. Start a new container: +6. Start a new container: .. code-block:: bash docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' docker on GCE \o/ - diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst index 04c155d885..39c1f6a292 100644 --- a/docs/sources/installation/index.rst +++ b/docs/sources/installation/index.rst @@ -21,6 +21,7 @@ Contents: rhel fedora archlinux + cruxlinux gentoolinux openSUSE frugalware diff --git a/docs/sources/installation/mac.rst b/docs/sources/installation/mac.rst index 6fc5ed10bf..efb999ee1f 100644 --- a/docs/sources/installation/mac.rst +++ b/docs/sources/installation/mac.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Mac OS X 10.6 Snow Leopard +:title: Installation on Mac OS X 10.6 Snow Leopard :description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linux, os x, osx, mac @@ -49,10 +49,10 @@ Run the following commands to get boot2docker: # Enter the installation directory cd ~/bin - + # Get the file curl https://raw.github.com/steeve/boot2docker/master/boot2docker > boot2docker - + # Mark it executable chmod +x boot2docker @@ -66,14 +66,14 @@ Run the following commands to get it downloaded and set up: .. 
code-block:: bash # Get the file - curl -o docker http://get.docker.io/builds/Darwin/x86_64/docker-latest - + curl -o docker https://get.docker.io/builds/Darwin/x86_64/docker-latest + # Mark it executable chmod +x docker # Set the environment variable for the docker daemon - export DOCKER_HOST=tcp:// - + export DOCKER_HOST=tcp://127.0.0.1:4243 + # Copy the executable file sudo cp docker /usr/local/bin/ @@ -94,7 +94,7 @@ Inside the ``~/bin`` directory, run the following commands: # Run the VM (the docker daemon) ./boot2docker up - + # To see all available commands: ./boot2docker @@ -116,6 +116,21 @@ client just like any other application. # Git commit (server): c348c04 # Go version (server): go1.2 +Forwarding VM Port Range to Host +-------------------------------- + +If we take the port range that docker uses by default with the -P option +(49000-49900), and forward same range from host to vm, we'll be able to interact +with our containers as if they were running locally: + +.. code-block:: bash + + # vm must be powered off + for i in {4900..49900}; do + VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i"; + VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i"; + done + SSH-ing The VM -------------- @@ -147,6 +162,18 @@ If SSH complains about keys: ssh-keygen -R '[localhost]:2022' +Upgrading to a newer release of boot2docker +------------------------------------------- + +To upgrade an initialised VM, you can use the following 3 commands. Your persistence +disk will not be changed, so you won't lose your images and containers: + +.. 
code-block:: bash + + ./boot2docker stop + ./boot2docker download + ./boot2docker start + About the way Docker works on Mac OS X: --------------------------------------- diff --git a/docs/sources/installation/openSUSE.rst b/docs/sources/installation/openSUSE.rst index ded5de44a4..c791beacbf 100644 --- a/docs/sources/installation/openSUSE.rst +++ b/docs/sources/installation/openSUSE.rst @@ -1,5 +1,5 @@ :title: Installation on openSUSE -:description: Docker installation on openSUSE. +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: openSUSE, virtualbox, docker, documentation, installation .. _openSUSE: diff --git a/docs/sources/installation/rackspace.rst b/docs/sources/installation/rackspace.rst index d0005a14bc..687131a413 100644 --- a/docs/sources/installation/rackspace.rst +++ b/docs/sources/installation/rackspace.rst @@ -1,5 +1,5 @@ -:title: Rackspace Cloud Installation -:description: Installing Docker on Ubuntu proviced by Rackspace +:title: Installation on Rackspace Cloud +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Rackspace Cloud, installation, docker, linux, ubuntu Rackspace Cloud diff --git a/docs/sources/installation/rhel.rst b/docs/sources/installation/rhel.rst index 9036fb79ea..7930da6309 100644 --- a/docs/sources/installation/rhel.rst +++ b/docs/sources/installation/rhel.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Red Hat Enterprise Linux +:title: Installation on Red Hat Enterprise Linux :description: Please note this project is currently under heavy development. It should not be used in production. 
:keywords: Docker, Docker documentation, requirements, linux, rhel, centos diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 3d6ee6415d..f37be90d7d 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -1,4 +1,4 @@ -:title: Requirements and Installation on Ubuntu Linux +:title: Installation on Ubuntu :description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux diff --git a/docs/sources/installation/windows.rst b/docs/sources/installation/windows.rst index c980a32df9..9d965ea3fe 100644 --- a/docs/sources/installation/windows.rst +++ b/docs/sources/installation/windows.rst @@ -1,11 +1,11 @@ -:title: Requirements and Installation on Windows -:description: Docker's tutorial to run docker on Windows +:title: Installation on Windows +:description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin .. _windows: -Installing Docker on Windows -============================ +Windows +======= Docker can run on Windows using a VM like VirtualBox. You then run Linux within the VM. diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.rst b/docs/sources/reference/api/docker_remote_api_v1.0.rst index dc06a27fc0..fa4b969758 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.0.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.0.rst @@ -732,11 +732,11 @@ Tag an image into a repository .. 
sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 500: server error diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.rst b/docs/sources/reference/api/docker_remote_api_v1.1.rst index 31b34caf5a..92b5039aa6 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.1.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.1.rst @@ -742,11 +742,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.rst b/docs/sources/reference/api/docker_remote_api_v1.2.rst index 555ec14b75..1ae2db696f 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.2.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.2.rst @@ -761,11 +761,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.rst b/docs/sources/reference/api/docker_remote_api_v1.3.rst index ab452798b9..cb4c54642d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.3.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.3.rst @@ -808,11 +808,11 @@ Tag an image into a repository .. 
sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.rst b/docs/sources/reference/api/docker_remote_api_v1.4.rst index 5c8884b16f..39c8839653 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.4.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.4.rst @@ -852,11 +852,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.rst b/docs/sources/reference/api/docker_remote_api_v1.5.rst index 609fc6b056..0cdbaf747a 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.5.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.5.rst @@ -831,11 +831,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.rst b/docs/sources/reference/api/docker_remote_api_v1.6.rst index df53275a4f..a9ddfb2c13 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.6.rst @@ -958,11 +958,11 @@ Tag an image into a repository .. 
sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.rst b/docs/sources/reference/api/docker_remote_api_v1.7.rst index 28c5ba30f2..cacd7ab6f7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.7.rst @@ -877,11 +877,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.rst b/docs/sources/reference/api/docker_remote_api_v1.8.rst index 6ccc6eca94..5033f34210 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.8.rst @@ -892,11 +892,11 @@ Tag an image into a repository .. sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst index cb406da82b..47cdb46b28 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst @@ -892,11 +892,11 @@ Tag an image into a repository .. 
sourcecode:: http - HTTP/1.1 200 OK + HTTP/1.1 201 OK :query repo: The repository to tag in :query force: 1/True/true or 0/False/false, default false - :statuscode 200: no error + :statuscode 201: no error :statuscode 400: bad parameter :statuscode 404: no such image :statuscode 409: conflict diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst index 45ce8ff9d1..362fa6fe3d 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.rst +++ b/docs/sources/reference/api/remote_api_client_libraries.rst @@ -1,6 +1,6 @@ :title: Remote API Client Libraries :description: Various client libraries available to use with the Docker remote API -:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, Javascript, Erlang, Go +:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, JavaScript, Erlang, Go ================================== @@ -21,12 +21,18 @@ and we will add the libraries here. 
+----------------------+----------------+--------------------------------------------+----------+ | Ruby | docker-api | https://github.com/swipely/docker-api | Active | +----------------------+----------------+--------------------------------------------+----------+ -| Javascript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io | Active | +| JavaScript (NodeJS) | dockerode | https://github.com/apocas/dockerode | Active | +| | | Install via NPM: `npm install dockerode` | | ++----------------------+----------------+--------------------------------------------+----------+ +| JavaScript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io | Active | | | | Install via NPM: `npm install docker.io` | | +----------------------+----------------+--------------------------------------------+----------+ -| Javascript | docker-js | https://github.com/dgoujard/docker-js | Active | +| JavaScript | docker-js | https://github.com/dgoujard/docker-js | Active | +----------------------+----------------+--------------------------------------------+----------+ -| Javascript (Angular) | dockerui | https://github.com/crosbymichael/dockerui | Active | +| JavaScript (Angular) | docker-cp | https://github.com/13W/docker-cp | Active | +| **WebUI** | | | | ++----------------------+----------------+--------------------------------------------+----------+ +| JavaScript (Angular) | dockerui | https://github.com/crosbymichael/dockerui | Active | | **WebUI** | | | | +----------------------+----------------+--------------------------------------------+----------+ | Java | docker-java | https://github.com/kpelykh/docker-java | Active | diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 2f71b87a93..4b6a151006 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -251,9 +251,14 @@ value ````. This value will be passed to all future ``RUN`` instructions. 
This is functionally equivalent to prefixing the command with ``=`` +The environment variables set using ``ENV`` will persist when a container is run +from the resulting image. You can view the values using ``docker inspect``, and change them using ``docker run --env =``. + .. note:: - The environment variables will persist when a container is run - from the resulting image. + One example where this can cause unexpected consequenses, is setting + ``ENV DEBIAN_FRONTEND noninteractive``. + Which will persist when the container is run interactively; for example: + ``docker run -t -i image bash`` .. _dockerfile_add: @@ -269,7 +274,7 @@ the container's filesystem at path ````. source directory being built (also called the *context* of the build) or a remote file URL. -```` is the path at which the source will be copied in the +```` is the absolute path to which the source will be copied inside the destination container. All new files and directories are created with mode 0755, uid and gid @@ -399,8 +404,10 @@ the image. ``WORKDIR /path/to/workdir`` -The ``WORKDIR`` instruction sets the working directory in which -the command given by ``CMD`` is executed. +The ``WORKDIR`` instruction sets the working directory for the ``RUN``, ``CMD`` and +``ENTRYPOINT`` Dockerfile commands that follow it. + +It can be used multiple times in the one Dockerfile. 3.11 ONBUILD ------------ diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index ae77080309..7ba0123065 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -12,7 +12,7 @@ To list available commands, either run ``docker`` with no parameters or execute $ sudo docker Usage: docker [OPTIONS] COMMAND [arg...] - -H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind/connect to or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used. 
+ -H=[unix:///var/run/docker.sock]: tcp://[host]:port to bind/connect to or unix://[/path/to/socket] to use. When host=[127.0.0.1] is omitted for tcp or path=[/var/run/docker.sock] is omitted for unix sockets, default values are used. A self-sufficient runtime for linux containers. @@ -102,12 +102,17 @@ the ``-H`` flag for the client. docker ps # both are equal - To run the daemon with `systemd socket activation `_, use ``docker -d -H fd://``. Using ``fd://`` will work perfectly for most setups but you can also specify individual sockets too ``docker -d -H fd://3``. If the specified socket activated files aren't found then docker will exit. You can find examples of using systemd socket activation with docker and systemd in the `docker source tree `_. +.. warning:: + Docker and LXC do not support the use of softlinks for either the Docker data directory (``/var/lib/docker``) or for ``/tmp``. + If your system is likely to be set up in that way, you can use ``readlink -f`` to canonicalise the links: + + ``TMPDIR=$(readlink -f /tmp) /usr/local/bin/docker -d -D -g $(readlink -f /var/lib/docker) -H unix:// $EXPOSE_ALL > /var/lib/boot2docker/docker.log 2>&1`` + .. _cli_attach: ``attach`` @@ -181,7 +186,7 @@ Examples: Build a new container image from the source code at PATH -t, --time="": Repository name (and optionally a tag) to be applied to the resulting image in case of success. - -q, --quiet=false: Suppress verbose build output. + -q, --quiet=false: Suppress the verbose output generated by the containers. --no-cache: Do not use the cache when building the image. --rm: Remove intermediate containers after a successful build @@ -189,7 +194,8 @@ The files at ``PATH`` or ``URL`` are called the "context" of the build. The build process may refer to any of the files in the context, for example when using an :ref:`ADD ` instruction. When a single ``Dockerfile`` is given as ``URL``, then no context is set. 
When a Git repository is set as -``URL``, then the repository is used as the context +``URL``, then the repository is used as the context. Git repositories are +cloned with their submodules (`git clone --recursive`). .. _cli_build_examples: @@ -1083,6 +1089,10 @@ is, ``docker run`` is equivalent to the API ``/containers/create`` then The ``docker run`` command can be used in combination with ``docker commit`` to :ref:`change the command that a container runs `. +See :ref:`port_redirection` for more detailed information about the ``--expose``, +``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for +specific examples using ``--link``. + Known Issues (run -volumes-from) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst index 307edace00..d8de280671 100644 --- a/docs/sources/reference/run.rst +++ b/docs/sources/reference/run.rst @@ -143,6 +143,7 @@ Network Settings ---------------- :: + -n=true : Enable networking for this container -dns=[] : Set custom dns servers for the container diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst index 6bd1f0b7a0..d62f778b9d 100644 --- a/docs/sources/use/basics.rst +++ b/docs/sources/use/basics.rst @@ -59,10 +59,10 @@ Bind Docker to another host/port or a Unix socket .. warning:: Changing the default ``docker`` daemon binding to a TCP port or Unix *docker* user group will increase your security risks - by allowing non-root users to potentially gain *root* access on the - host (`e.g. #1369 - `_). Make sure you - control access to ``docker``. + by allowing non-root users to gain *root* access on the + host. Make sure you control access to ``docker``. If you are binding + to a TCP port, anyone with access to that port has full Docker access; + so it is not advisable on an open network. With ``-H`` it is possible to make the Docker daemon to listen on a specific IP and port. 
By default, it will listen on diff --git a/docs/sources/use/port_redirection.rst b/docs/sources/use/port_redirection.rst index 5cddb238e4..2612d63aec 100644 --- a/docs/sources/use/port_redirection.rst +++ b/docs/sources/use/port_redirection.rst @@ -31,6 +31,15 @@ container, Docker provide ways to bind the container port to an interface of the host system. To simplify communication between containers, Docker provides the linking mechanism. +Auto map all exposed ports on the host +-------------------------------------- + +To bind all the exposed container ports to the host automatically, use +``docker run -P ``. The mapped host ports will be auto-selected +from a pool of unused ports (49000..49900), and you will need to use +``docker ps``, ``docker inspect `` or +``docker port `` to determine what they are. + Binding a port to a host interface ----------------------------------- diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst index 34728cbd3d..40e282c986 100644 --- a/docs/sources/use/working_with_volumes.rst +++ b/docs/sources/use/working_with_volumes.rst @@ -101,13 +101,23 @@ might not work on any other machine. For example:: - sudo docker run -v /var/logs:/var/host_logs:ro ubuntu bash + sudo docker run -t -i -v /var/logs:/var/host_logs:ro ubuntu bash The command above mounts the host directory ``/var/logs`` into the container with read only permissions as ``/var/host_logs``. .. versionadded:: v0.5.0 + +Note for OS/X users and remote daemon users: +-------------------------------------------- + +OS/X users run ``boot2docker`` to create a minimalist virtual machine running the docker daemon. That +virtual machine then launches docker commands on behalf of the OS/X command line. The means that ``host +directories`` refer to directories in the ``boot2docker`` virtual machine, not the OS/X filesystem. 
+ +Similarly, anytime when the docker daemon is on a remote machine, the ``host directories`` always refer to directories on the daemon's machine. + Known Issues ............ diff --git a/engine/engine_test.go b/engine/engine_test.go index 065a19f492..da59610727 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "testing" ) @@ -64,6 +65,18 @@ func TestEngineRoot(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(tmp) + // We expect Root to resolve to an absolute path. + // FIXME: this should not be necessary. + // Until the above FIXME is implemented, let's check for the + // current behavior. + tmp, err = filepath.EvalSymlinks(tmp) + if err != nil { + t.Fatal(err) + } + tmp, err = filepath.Abs(tmp) + if err != nil { + t.Fatal(err) + } dir := path.Join(tmp, "dir") eng, err := New(dir) if err != nil { diff --git a/execdriver/driver.go b/execdriver/driver.go index 1ea086075d..32b39771b6 100644 --- a/execdriver/driver.go +++ b/execdriver/driver.go @@ -99,6 +99,8 @@ type Command struct { Network *Network `json:"network"` // if network is nil then networking is disabled Config []string `json:"config"` // generic values that specific drivers can consume Resources *Resources `json:"resources"` + + Console string `json:"-"` } // Return the pid of the process diff --git a/execdriver/lxc/driver.go b/execdriver/lxc/driver.go index 4c3979e718..ee4d02a6b6 100644 --- a/execdriver/lxc/driver.go +++ b/execdriver/lxc/driver.go @@ -279,7 +279,8 @@ func (i *info) IsRunning() bool { output, err := i.driver.getInfo(i.ID) if err != nil { - panic(err) + utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) + return false } if strings.Contains(string(output), "RUNNING") { running = true diff --git a/execdriver/lxc/init.go b/execdriver/lxc/init.go index 7c2b039c50..e138915212 100644 --- a/execdriver/lxc/init.go +++ b/execdriver/lxc/init.go @@ -4,11 +4,10 @@ import ( "fmt" 
"github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/pkg/user" "github.com/syndtr/gocapability/capability" "net" "os" - "strconv" "strings" "syscall" ) @@ -79,35 +78,28 @@ func setupWorkingDirectory(args *execdriver.InitArgs) error { // Takes care of dropping privileges to the desired user func changeUser(args *execdriver.InitArgs) error { - if args.User == "" { - return nil - } - userent, err := utils.UserLookup(args.User) + uid, gid, suppGids, err := user.GetUserGroupSupplementary( + args.User, + syscall.Getuid(), syscall.Getgid(), + ) if err != nil { - return fmt.Errorf("Unable to find user %v: %v", args.User, err) + return err } - uid, err := strconv.Atoi(userent.Uid) - if err != nil { - return fmt.Errorf("Invalid uid: %v", userent.Uid) + if err := syscall.Setgroups(suppGids); err != nil { + return fmt.Errorf("Setgroups failed: %v", err) } - gid, err := strconv.Atoi(userent.Gid) - if err != nil { - return fmt.Errorf("Invalid gid: %v", userent.Gid) - } - if err := syscall.Setgid(gid); err != nil { - return fmt.Errorf("setgid failed: %v", err) + return fmt.Errorf("Setgid failed: %v", err) } if err := syscall.Setuid(uid); err != nil { - return fmt.Errorf("setuid failed: %v", err) + return fmt.Errorf("Setuid failed: %v", err) } return nil } func setupCapabilities(args *execdriver.InitArgs) error { - if args.Privileged { return nil } @@ -127,6 +119,7 @@ func setupCapabilities(args *execdriver.InitArgs) error { capability.CAP_AUDIT_CONTROL, capability.CAP_MAC_OVERRIDE, capability.CAP_MAC_ADMIN, + capability.CAP_NET_ADMIN, } c, err := capability.NewPid(os.Getpid()) diff --git a/execdriver/lxc/lxc_template.go b/execdriver/lxc/lxc_template.go index 705bdf5363..639780f5d8 100644 --- a/execdriver/lxc/lxc_template.go +++ b/execdriver/lxc/lxc_template.go @@ -15,6 +15,7 @@ lxc.network.name = eth0 {{else}} # network is disabled (-n=false) lxc.network.type = empty +lxc.network.flags 
= up {{end}} # root filesystem @@ -79,6 +80,10 @@ lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noex # if your userspace allows it. eg. see http://bit.ly/T9CkqJ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 +{{if .Tty}} +lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 +{{end}} + lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 diff --git a/graph.go b/graph.go index 42da42c8af..a16a6ab7f7 100644 --- a/graph.go +++ b/graph.go @@ -3,7 +3,9 @@ package docker import ( "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -125,12 +127,12 @@ func (graph *Graph) Get(name string) (*Image, error) { } // Create creates a new image and registers it in the graph. -func (graph *Graph) Create(layerData archive.Archive, container *Container, comment, author string, config *Config) (*Image, error) { +func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*Image, error) { img := &Image{ ID: GenerateID(), Comment: comment, Created: time.Now().UTC(), - DockerVersion: VERSION, + DockerVersion: dockerversion.VERSION, Author: author, Config: config, Architecture: runtime.GOARCH, @@ -149,7 +151,7 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm // Register imports a pre-existing image into the graph. 
// FIXME: pass img as first argument -func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Image) (err error) { +func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *Image) (err error) { defer func() { // If any error occurs, remove the new dir from the driver. // Don't check for errors since the dir might not have been created. @@ -224,7 +226,9 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, if err != nil { return nil, err } - return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, false, utils.TruncateID(id), "Buffering to disk"), tmp) + progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") + defer progress.Close() + return archive.NewTempArchive(progress, tmp) } // Mktemp creates a temporary sub-directory inside the graph's filesystem. diff --git a/graphdriver/aufs/aufs.go b/graphdriver/aufs/aufs.go index d1cf87d1a0..f2a88a7ace 100644 --- a/graphdriver/aufs/aufs.go +++ b/graphdriver/aufs/aufs.go @@ -271,7 +271,7 @@ func (a *Driver) Diff(id string) (archive.Archive, error) { }) } -func (a *Driver) ApplyDiff(id string, diff archive.Archive) error { +func (a *Driver) ApplyDiff(id string, diff archive.ArchiveReader) error { return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) } diff --git a/graphdriver/devmapper/deviceset.go b/graphdriver/devmapper/deviceset.go index 8432d92a4e..303e363e92 100644 --- a/graphdriver/devmapper/deviceset.go +++ b/graphdriver/devmapper/deviceset.go @@ -12,6 +12,7 @@ import ( "path" "path/filepath" "strconv" + "strings" "sync" "time" ) @@ -29,6 +30,15 @@ type DevInfo struct { TransactionId uint64 `json:"transaction_id"` Initialized bool `json:"initialized"` devices *DeviceSet `json:"-"` + + mountCount int `json:"-"` + mountPath string `json:"-"` + // A floating mount means one reference is not owned and + // will be stolen by the next mount. 
This allows us to + // avoid unmounting directly after creation before the + // first get (since we need to mount to set up the device + // a bit first). + floating bool `json:"-"` } type MetaData struct { @@ -43,7 +53,7 @@ type DeviceSet struct { TransactionId uint64 NewTransactionId uint64 nextFreeDevice int - activeMounts map[string]int + sawBusy bool } type DiskUsage struct { @@ -69,6 +79,14 @@ type DevStatus struct { HighestMappedSector uint64 } +type UnmountMode int + +const ( + UnmountRegular UnmountMode = iota + UnmountFloat + UnmountSink +) + func getDevName(name string) string { return "/dev/mapper/" + name } @@ -290,7 +308,7 @@ func (devices *DeviceSet) setupBaseImage() error { if oldInfo != nil && !oldInfo.Initialized { utils.Debugf("Removing uninitialized base image") - if err := devices.removeDevice(""); err != nil { + if err := devices.deleteDevice(""); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -355,6 +373,10 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes return // Ignore _LOG_DEBUG } + if strings.Contains(message, "busy") { + devices.sawBusy = true + } + utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } @@ -562,7 +584,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return nil } -func (devices *DeviceSet) removeDevice(hash string) error { +func (devices *DeviceSet) deleteDevice(hash string) error { info := devices.Devices[hash] if info == nil { return fmt.Errorf("hash %s doesn't exists", hash) @@ -579,7 +601,7 @@ func (devices *DeviceSet) removeDevice(hash string) error { devinfo, _ := getInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { - if err := removeDevice(info.Name()); err != nil { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { utils.Debugf("Error removing device: %s\n", err) return err } @@ -610,50 +632,87 @@ func (devices *DeviceSet) removeDevice(hash string) error { return nil } -func 
(devices *DeviceSet) RemoveDevice(hash string) error { +func (devices *DeviceSet) DeleteDevice(hash string) error { devices.Lock() defer devices.Unlock() - return devices.removeDevice(hash) + return devices.deleteDevice(hash) } -func (devices *DeviceSet) deactivateDevice(hash string) error { - utils.Debugf("[devmapper] deactivateDevice(%s)", hash) - defer utils.Debugf("[devmapper] deactivateDevice END") - var devname string - // FIXME: shouldn't we just register the pool into devices? - devname, err := devices.byHash(hash) - if err != nil { - return err - } +func (devices *DeviceSet) deactivatePool() error { + utils.Debugf("[devmapper] deactivatePool()") + defer utils.Debugf("[devmapper] deactivatePool END") + devname := devices.getPoolDevName() devinfo, err := getInfo(devname) if err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } if devinfo.Exists != 0 { - if err := removeDevice(devname); err != nil { + return removeDevice(devname) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(hash string) error { + utils.Debugf("[devmapper] deactivateDevice(%s)", hash) + defer utils.Debugf("[devmapper] deactivateDevice END") + + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + devinfo, err := getInfo(info.Name()) + if err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + if devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } - if err := devices.waitRemove(hash); err != nil { - return err - } } return nil } -// waitRemove blocks until either: -// a) the device registered at - is removed, -// or b) the 1 second timeout expires. 
-func (devices *DeviceSet) waitRemove(hash string) error { - utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, hash) - defer utils.Debugf("[deviceset %s] waitRemove(%) END", devices.devicePrefix, hash) - devname, err := devices.byHash(hash) +// Issues the underlying dm remove operation and then waits +// for it to finish. +func (devices *DeviceSet) removeDeviceAndWait(devname string) error { + var err error + + for i := 0; i < 10; i++ { + devices.sawBusy = false + err = removeDevice(devname) + if err == nil { + break + } + if !devices.sawBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + time.Sleep(5 * time.Millisecond) + } if err != nil { return err } + + if err := devices.waitRemove(devname); err != nil { + return err + } + return nil +} + +// waitRemove blocks until either: +// a) the device registered at - is removed, +// or b) the 1 second timeout expires. +func (devices *DeviceSet) waitRemove(devname string) error { + utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) + defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) i := 0 for ; i < 1000; i += 1 { devinfo, err := getInfo(devname) @@ -681,18 +740,18 @@ func (devices *DeviceSet) waitRemove(hash string) error { // a) the device registered at - is closed, // or b) the 1 second timeout expires. 
func (devices *DeviceSet) waitClose(hash string) error { - devname, err := devices.byHash(hash) - if err != nil { - return err + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) } i := 0 for ; i < 1000; i += 1 { - devinfo, err := getInfo(devname) + devinfo, err := getInfo(info.Name()) if err != nil { return err } if i%100 == 0 { - utils.Debugf("Waiting for unmount of %s: opencount=%d", devname, devinfo.OpenCount) + utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount) } if devinfo.OpenCount == 0 { break @@ -700,26 +759,11 @@ func (devices *DeviceSet) waitClose(hash string) error { time.Sleep(1 * time.Millisecond) } if i == 1000 { - return fmt.Errorf("Timeout while waiting for device %s to close", devname) + return fmt.Errorf("Timeout while waiting for device %s to close", hash) } return nil } -// byHash is a hack to allow looking up the deviceset's pool by the hash "pool". -// FIXME: it seems probably cleaner to register the pool in devices.Devices, -// but I am afraid of arcane implications deep in the devicemapper code, -// so this will do. 
-func (devices *DeviceSet) byHash(hash string) (devname string, err error) { - if hash == "pool" { - return devices.getPoolDevName(), nil - } - info := devices.Devices[hash] - if info == nil { - return "", fmt.Errorf("hash %s doesn't exists", hash) - } - return info.Name(), nil -} - func (devices *DeviceSet) Shutdown() error { devices.Lock() defer devices.Unlock() @@ -728,13 +772,12 @@ func (devices *DeviceSet) Shutdown() error { utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) - for path, count := range devices.activeMounts { - for i := count; i > 0; i-- { - if err := sysUnmount(path, 0); err != nil { - utils.Debugf("Shutdown unmounting %s, error: %s\n", path, err) + for _, info := range devices.Devices { + if info.mountCount > 0 { + if err := sysUnmount(info.mountPath, 0); err != nil { + utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } } - delete(devices.activeMounts, path) } for _, d := range devices.Devices { @@ -746,32 +789,42 @@ func (devices *DeviceSet) Shutdown() error { } } - pool := devices.getPoolDevName() - if devinfo, err := getInfo(pool); err == nil && devinfo.Exists != 0 { - if err := devices.deactivateDevice("pool"); err != nil { - utils.Debugf("Shutdown deactivate %s , error: %s\n", pool, err) - } + if err := devices.deactivatePool(); err != nil { + utils.Debugf("Shutdown deactivate pool , error: %s\n", err) } return nil } -func (devices *DeviceSet) MountDevice(hash, path string, readOnly bool) error { +func (devices *DeviceSet) MountDevice(hash, path string) error { devices.Lock() defer devices.Unlock() + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + + if info.mountCount > 0 { + if path != info.mountPath { + return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) + } + + if info.floating { + // Steal floating ref + info.floating 
= false + } else { + info.mountCount++ + } + return nil + } + if err := devices.activateDeviceIfNeeded(hash); err != nil { return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } - info := devices.Devices[hash] - var flags uintptr = sysMsMgcVal - if readOnly { - flags = flags | sysMsRdOnly - } - err := sysMount(info.DevName(), path, "ext4", flags, "discard") if err != nil && err == sysEInval { err = sysMount(info.DevName(), path, "ext4", flags, "") @@ -780,20 +833,53 @@ func (devices *DeviceSet) MountDevice(hash, path string, readOnly bool) error { return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) } - count := devices.activeMounts[path] - devices.activeMounts[path] = count + 1 + info.mountCount = 1 + info.mountPath = path + info.floating = false return devices.setInitialized(hash) } -func (devices *DeviceSet) UnmountDevice(hash, path string, deactivate bool) error { - utils.Debugf("[devmapper] UnmountDevice(hash=%s path=%s)", hash, path) +func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { + utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode) defer utils.Debugf("[devmapper] UnmountDevice END") devices.Lock() defer devices.Unlock() - utils.Debugf("[devmapper] Unmount(%s)", path) - if err := sysUnmount(path, 0); err != nil { + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("UnmountDevice: no such device %s\n", hash) + } + + if mode == UnmountFloat { + if info.floating { + return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) + } + + // Leave this reference floating + info.floating = true + return nil + } + + if mode == UnmountSink { + if !info.floating { + // Someone already sunk this + return nil + } + // Otherwise, treat this as a regular unmount + } + + if info.mountCount == 0 { + return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) + } + + info.mountCount-- + if info.mountCount > 0 { + return nil + } + 
+ utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) + if err := sysUnmount(info.mountPath, 0); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -804,15 +890,9 @@ func (devices *DeviceSet) UnmountDevice(hash, path string, deactivate bool) erro return err } - if count := devices.activeMounts[path]; count > 1 { - devices.activeMounts[path] = count - 1 - } else { - delete(devices.activeMounts, path) - } + devices.deactivateDevice(hash) - if deactivate { - devices.deactivateDevice(hash) - } + info.mountPath = "" return nil } @@ -955,9 +1035,8 @@ func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) { SetDevDir("/dev") devices := &DeviceSet{ - root: root, - MetaData: MetaData{Devices: make(map[string]*DevInfo)}, - activeMounts: make(map[string]int), + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, } if err := devices.initDevmapper(doInit); err != nil { diff --git a/graphdriver/devmapper/devmapper.go b/graphdriver/devmapper/devmapper.go index 7f83a09df9..7317118dcf 100644 --- a/graphdriver/devmapper/devmapper.go +++ b/graphdriver/devmapper/devmapper.go @@ -324,7 +324,7 @@ func createPool(poolName string, dataFile, metadataFile *osFile) error { return fmt.Errorf("Can't get data size") } - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" + params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target") } diff --git a/graphdriver/devmapper/driver.go b/graphdriver/devmapper/driver.go index 664899cfbf..4d414f9a75 100644 --- a/graphdriver/devmapper/driver.go +++ b/graphdriver/devmapper/driver.go @@ -7,8 +7,8 @@ import ( "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/utils" "io/ioutil" + "os" "path" - "sync" ) func init() { @@ -22,9 +22,7 @@ func init() { type Driver struct { *DeviceSet - home string - sync.Mutex // Protects concurrent modification 
to active - active map[string]int + home string } var Init = func(home string) (graphdriver.Driver, error) { @@ -35,7 +33,6 @@ var Init = func(home string) (graphdriver.Driver, error) { d := &Driver{ DeviceSet: deviceSet, home: home, - active: make(map[string]int), } return d, nil } @@ -83,55 +80,45 @@ func (d *Driver) Create(id, parent string) error { return err } + // We float this reference so that the next Get call can + // steal it, so we don't have to unmount + if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil { + return err + } + return nil } func (d *Driver) Remove(id string) error { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - if d.active[id] != 0 { - utils.Errorf("Warning: removing active id %s\n", id) + // Sink the float from create in case no Get() call was made + if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil { + return err + } + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id); err != nil { + return err } mp := path.Join(d.home, "mnt", id) - if err := d.unmount(id, mp); err != nil { + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { return err } - return d.DeviceSet.RemoveDevice(id) + + return nil } func (d *Driver) Get(id string) (string, error) { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - count := d.active[id] - mp := path.Join(d.home, "mnt", id) - if count == 0 { - if err := d.mount(id, mp); err != nil { - return "", err - } + if err := d.mount(id, mp); err != nil { + return "", err } - d.active[id] = count + 1 - return path.Join(mp, "rootfs"), nil } func (d *Driver) Put(id string) { - // Protect the d.active from concurrent access - d.Lock() - defer d.Unlock() - - if count := d.active[id]; count > 1 { - d.active[id] = count - 1 - } else { - mp := path.Join(d.home, "mnt", id) - d.unmount(id, mp) - delete(d.active, id) + if err := 
d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil { + utils.Errorf("Warning: error unmounting device %s: %s\n", id, err) } } @@ -140,25 +127,8 @@ func (d *Driver) mount(id, mountPoint string) error { if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { return err } - // If mountpoint is already mounted, do nothing - if mounted, err := Mounted(mountPoint); err != nil { - return fmt.Errorf("Error checking mountpoint: %s", err) - } else if mounted { - return nil - } // Mount the device - return d.DeviceSet.MountDevice(id, mountPoint, false) -} - -func (d *Driver) unmount(id, mountPoint string) error { - // If mountpoint is not mounted, do nothing - if mounted, err := Mounted(mountPoint); err != nil { - return fmt.Errorf("Error checking mountpoint: %s", err) - } else if !mounted { - return nil - } - // Unmount the device - return d.DeviceSet.UnmountDevice(id, mountPoint, true) + return d.DeviceSet.MountDevice(id, mountPoint) } func (d *Driver) Exists(id string) bool { diff --git a/graphdriver/devmapper/driver_test.go b/graphdriver/devmapper/driver_test.go index 785845ce6e..7939241987 100644 --- a/graphdriver/devmapper/driver_test.go +++ b/graphdriver/devmapper/driver_test.go @@ -136,7 +136,12 @@ type Set map[string]bool func (r Set) Assert(t *testing.T, names ...string) { for _, key := range names { - if _, exists := r[key]; !exists { + required := true + if strings.HasPrefix(key, "?") { + key = key[1:] + required = false + } + if _, exists := r[key]; !exists && required { t.Fatalf("Key not set: %s", key) } delete(r, key) @@ -486,6 +491,7 @@ func TestDriverCreate(t *testing.T) { "ioctl.blkgetsize", "ioctl.loopsetfd", "ioctl.loopsetstatus", + "?ioctl.loopctlgetfree", ) if err := d.Create("1", ""); err != nil { @@ -495,7 +501,6 @@ func TestDriverCreate(t *testing.T) { "DmTaskCreate", "DmTaskGetInfo", "sysMount", - "Mounted", "DmTaskRun", "DmTaskSetTarget", "DmTaskSetSector", @@ -604,6 +609,7 @@ func TestDriverRemove(t *testing.T) { 
"ioctl.blkgetsize", "ioctl.loopsetfd", "ioctl.loopsetstatus", + "?ioctl.loopctlgetfree", ) if err := d.Create("1", ""); err != nil { @@ -614,7 +620,6 @@ func TestDriverRemove(t *testing.T) { "DmTaskCreate", "DmTaskGetInfo", "sysMount", - "Mounted", "DmTaskRun", "DmTaskSetTarget", "DmTaskSetSector", @@ -645,7 +650,6 @@ func TestDriverRemove(t *testing.T) { "DmTaskSetTarget", "DmTaskSetAddNode", "DmUdevWait", - "Mounted", "sysUnmount", ) }() diff --git a/graphdriver/driver.go b/graphdriver/driver.go index c0ed00b0ad..89fd03a624 100644 --- a/graphdriver/driver.go +++ b/graphdriver/driver.go @@ -28,7 +28,7 @@ type Driver interface { type Differ interface { Diff(id string) (archive.Archive, error) Changes(id string) ([]archive.Change, error) - ApplyDiff(id string, diff archive.Archive) error + ApplyDiff(id string, diff archive.ArchiveReader) error DiffSize(id string) (bytes int64, err error) } diff --git a/hack/infrastructure/docker-ci.rst b/hack/infrastructure/docker-ci.rst deleted file mode 100644 index 0be530d302..0000000000 --- a/hack/infrastructure/docker-ci.rst +++ /dev/null @@ -1,56 +0,0 @@ -docker-ci -========= - -docker-ci is our buildbot continuous integration server, -building and testing docker, hosted on EC2 and reachable at -http://docker-ci.dotcloud.com - - -Deployment -========== - -# Load AWS credentials -export AWS_ACCESS_KEY_ID='' -export AWS_SECRET_ACCESS_KEY='' -export AWS_KEYPAIR_NAME='' -export AWS_SSH_PRIVKEY='' - -# Load buildbot credentials and config -export BUILDBOT_PWD='' -export IRC_PWD='' -export IRC_CHANNEL='docker-dev' -export SMTP_USER='' -export SMTP_PWD='' -export EMAIL_RCP='' - -# Load registry test credentials -export REGISTRY_USER='' -export REGISTRY_PWD='' - -cd docker/testing -vagrant up --provider=aws - - -github pull request -=================== - -The entire docker pull request test workflow is event driven by github. 
Its -usage is fully automatic and the results are logged in docker-ci.dotcloud.com - -Each time there is a pull request on docker's github project, github connects -to docker-ci using github's rest API documented in http://developer.github.com/v3/repos/hooks -The issued command to program github's notification PR event was: -curl -u GITHUB_USER:GITHUB_PASSWORD -d '{"name":"web","active":true,"events":["pull_request"],"config":{"url":"http://docker-ci.dotcloud.com:8011/change_hook/github?project=docker"}}' https://api.github.com/repos/dotcloud/docker/hooks - -buildbot (0.8.7p1) was patched using ./testing/buildbot/github.py, so it -can understand the PR data github sends to it. Originally PR #1603 (ee64e099e0) -implemented this capability. Also we added a new scheduler to exclusively filter -PRs. and the 'pullrequest' builder to rebase the PR on top of master and test it. - - -nighthly release -================ - -The nightly release process is done by buildbot, running a DinD container that downloads -the docker repository and builds the release container. The resulting docker -binary is then tested, and if everything is fine, the release is done. 
diff --git a/hack/infrastructure/docker-ci/Dockerfile b/hack/infrastructure/docker-ci/Dockerfile index d894330ffa..fd795f4d45 100644 --- a/hack/infrastructure/docker-ci/Dockerfile +++ b/hack/infrastructure/docker-ci/Dockerfile @@ -1,47 +1,29 @@ -# VERSION: 0.25 -# DOCKER-VERSION 0.6.6 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Deploy docker-ci on Digital Ocean -# COMMENTS: -# CONFIG_JSON is an environment variable json string loaded as: -# -# export CONFIG_JSON=' -# { "DROPLET_NAME": "docker-ci", -# "DO_CLIENT_ID": "Digital_Ocean_client_id", -# "DO_API_KEY": "Digital_Ocean_api_key", -# "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id", -# "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path", -# "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", -# "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)", -# "BUILDBOT_PWD": "Buildbot_server_password", -# "IRC_PWD": "Buildbot_IRC_password", -# "SMTP_USER": "SMTP_server_user", -# "SMTP_PWD": "SMTP_server_password", -# "PKG_ACCESS_KEY": "Docker_release_S3_bucket_access_key", -# "PKG_SECRET_KEY": "Docker_release_S3_bucket_secret_key", -# "PKG_GPG_PASSPHRASE": "Docker_release_gpg_passphrase", -# "INDEX_AUTH": "Index_encripted_user_password", -# "REGISTRY_USER": "Registry_test_user", -# "REGISTRY_PWD": "Registry_test_password", -# "REGISTRY_BUCKET": "Registry_S3_bucket_name", -# "REGISTRY_ACCESS_KEY": "Registry_S3_bucket_access_key", -# "REGISTRY_SECRET_KEY": "Registry_S3_bucket_secret_key", -# "IRC_CHANNEL": "Buildbot_IRC_channel", -# "EMAIL_RCP": "Buildbot_mailing_receipient" }' -# -# -# TO_BUILD: docker build -t docker-ci . -# TO_DEPLOY: docker run -e CONFIG_JSON="${CONFIG_JSON}" docker-ci +# DOCKER-VERSION: 0.7.6 +# AUTHOR: Daniel Mizyrycki +# DESCRIPTION: docker-ci continuous integration service +# TO_BUILD: docker build -rm -t docker-ci/docker-ci . 
+# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \ +# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci from ubuntu:12.04 +maintainer Daniel Mizyrycki -run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \ - > /etc/apt/sources.list -run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \ - python-pip ssh rsync less vim -run pip install requests fabric +ENV DEBIAN_FRONTEND noninteractive +RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ + /etc/apt/sources.list; apt-get update +RUN apt-get install -y --no-install-recommends python2.7 python-dev \ + libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx +RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py +RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py -# Add deployment code and set default container command -add . /docker-ci -cmd "/docker-ci/deployment.py" +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 +RUN echo 'deb http://get.docker.io/ubuntu docker main' > \ + /etc/apt/sources.list.d/docker.list; apt-get update +RUN apt-get install -y lxc-docker-0.8.0 +RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto +RUN ln -s /var/socket/docker.sock /run/docker.sock +ADD . /docker-ci +RUN /docker-ci/setup.sh + +ENTRYPOINT ["supervisord", "-n"] diff --git a/hack/infrastructure/docker-ci/README.rst b/hack/infrastructure/docker-ci/README.rst index 33a14359bf..3e429ffdd5 100644 --- a/hack/infrastructure/docker-ci/README.rst +++ b/hack/infrastructure/docker-ci/README.rst @@ -1,26 +1,65 @@ -======= -testing -======= +========= +docker-ci +========= -This directory contains docker-ci testing related files. +This directory contains docker-ci continuous integration system. +As expected, it is a fully dockerized and deployed using +docker-container-runner. 
+docker-ci is based on Buildbot, a continuous integration system designed +to automate the build/test cycle. By automatically rebuilding and testing +the tree each time something has changed, build problems are pinpointed +quickly, before other developers are inconvenienced by the failure. +We are running buildbot at Rackspace to verify docker and docker-registry +pass tests, and check for coverage code details. + +docker-ci instance is at https://docker-ci.docker.io/waterfall + +Inside docker-ci container we have the following directory structure: + +/docker-ci source code of docker-ci +/data/backup/docker-ci/ daily backup (replicated over S3) +/data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes +/data/buildbot/{master,slave}/ main docker-ci buildbot config and database +/var/socket/{docker.sock} host volume access to docker socket -Buildbot -======== +Production deployment +===================== -Buildbot is a continuous integration system designed to automate the -build/test cycle. By automatically rebuilding and testing the tree each time -something has changed, build problems are pinpointed quickly, before other -developers are inconvenienced by the failure. +:: -We are running buildbot in Amazon's EC2 to verify docker passes all -tests when commits get pushed to the master branch and building -nightly releases using Docker in Docker awesome implementation made -by Jerome Petazzoni. + # Clone docker-ci repository + git clone https://github.com/dotcloud/docker + cd docker/hack/infrastructure/docker-ci -https://github.com/jpetazzo/dind + export DOCKER_PROD=[PRODUCTION_SERVER_IP] -Docker's buildbot instance is at http://docker-ci.dotcloud.com/waterfall + # Create data host volume. 
(only once) + docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ + mkdir -p /data/docker-ci/coverage/docker + docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ + mkdir -p /data/docker-ci/coverage/docker-registry + docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ + chown -R 1000.1000 /data/docker-ci -For deployment instructions, please take a look at -hack/infrastructure/docker-ci/Dockerfile + # dcr deployment. Define credentials and special environment dcr variables + # ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml ) + export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME] + export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD] + export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD] + export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS] + export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET] + export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE] + export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS] + export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET] + export SMTP_USER=[MAILGUN_SMTP_USERNAME] + export SMTP_PWD=[MAILGUN_SMTP_PASSWORD] + export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS] + + # Build docker-ci and testbuilder docker images + docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci . + (cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .) 
+ + # Run docker-ci container ( assuming no previous container running ) + (cd dcr/prod; dcr docker-ci.yml start) + (cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io) diff --git a/hack/infrastructure/docker-ci/VERSION b/hack/infrastructure/docker-ci/VERSION index 0bfccb0804..b49b25336d 100644 --- a/hack/infrastructure/docker-ci/VERSION +++ b/hack/infrastructure/docker-ci/VERSION @@ -1 +1 @@ -0.4.5 +0.5.6 diff --git a/hack/infrastructure/docker-ci/buildbot/README.rst b/hack/infrastructure/docker-ci/buildbot/README.rst deleted file mode 100644 index 6cbcb8d93a..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/README.rst +++ /dev/null @@ -1 +0,0 @@ -Buildbot configuration and setup files diff --git a/hack/infrastructure/docker-ci/buildbot/buildbot.conf b/hack/infrastructure/docker-ci/buildbot/buildbot.conf deleted file mode 100644 index e07b2e3c8c..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/buildbot.conf +++ /dev/null @@ -1,18 +0,0 @@ -[program:buildmaster] -command=twistd --nodaemon --no_save -y buildbot.tac -directory=/data/buildbot/master -chown= root:root -redirect_stderr=true -stdout_logfile=/var/log/supervisor/buildbot-master.log -stderr_logfile=/var/log/supervisor/buildbot-master.log - -[program:buildworker] -command=twistd --nodaemon --no_save -y buildbot.tac -directory=/data/buildbot/slave -chown= root:root -redirect_stderr=true -stdout_logfile=/var/log/supervisor/buildbot-slave.log -stderr_logfile=/var/log/supervisor/buildbot-slave.log - -[group:buildbot] -programs=buildmaster,buildworker diff --git a/hack/infrastructure/docker-ci/buildbot/github.py b/hack/infrastructure/docker-ci/buildbot/github.py index ff6b6c62dd..5316e13282 100644 --- a/hack/infrastructure/docker-ci/buildbot/github.py +++ b/hack/infrastructure/docker-ci/buildbot/github.py @@ -17,7 +17,7 @@ """ github_buildbot.py is based on git_buildbot.py -github_buildbot.py will determine the repository information from the JSON +github_buildbot.py will determine the 
repository information from the JSON HTTP POST it receives from github.com and build the appropriate repository. If your github repository is private, you must add a ssh key to the github repository for the user who initiated the build on the buildslave. @@ -88,7 +88,8 @@ def getChanges(request, options = None): payload = json.loads(request.args['payload'][0]) import urllib,datetime fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19] - open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2)) + # Github event debug + # open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2)) if 'pull_request' in payload: user = payload['pull_request']['user']['login'] @@ -142,13 +143,13 @@ def process_change(payload, user, repo, repo_url, project): 'category' : 'github_pullrequest', 'who' : '{0} - PR#{1}'.format(user,payload['number']), 'files' : [], - 'comments' : payload['pull_request']['title'], + 'comments' : payload['pull_request']['title'], 'revision' : newrev, 'when' : convertTime(payload['pull_request']['updated_at']), 'branch' : branch, 'revlink' : '{0}/commit/{1}'.format(repo_url,newrev), 'repository' : repo_url, - 'project' : project }] + 'project' : project }] return changes for commit in payload['commits']: files = [] diff --git a/hack/infrastructure/docker-ci/buildbot/master.cfg b/hack/infrastructure/docker-ci/buildbot/master.cfg index 9ca5fc035a..75605da8ab 100644 --- a/hack/infrastructure/docker-ci/buildbot/master.cfg +++ b/hack/infrastructure/docker-ci/buildbot/master.cfg @@ -1,4 +1,4 @@ -import os +import os, re from buildbot.buildslave import BuildSlave from buildbot.schedulers.forcesched import ForceScheduler from buildbot.schedulers.basic import SingleBranchScheduler @@ -6,127 +6,156 @@ from buildbot.schedulers.timed import Nightly from buildbot.changes import 
filter from buildbot.config import BuilderConfig from buildbot.process.factory import BuildFactory -from buildbot.process.properties import Interpolate +from buildbot.process.properties import Property from buildbot.steps.shell import ShellCommand from buildbot.status import html, words from buildbot.status.web import authz, auth from buildbot.status.mail import MailNotifier -PORT_WEB = 80 # Buildbot webserver port -PORT_GITHUB = 8011 # Buildbot github hook port -PORT_MASTER = 9989 # Port where buildbot master listen buildworkers -TEST_USER = 'buildbot' # Credential to authenticate build triggers -TEST_PWD = 'docker' # Credential to authenticate build triggers -GITHUB_DOCKER = 'github.com/dotcloud/docker' -BUILDBOT_PATH = '/data/buildbot' -DOCKER_PATH = '/go/src/github.com/dotcloud/docker' -DOCKER_CI_PATH = '/docker-ci' + +def ENV(x): + '''Promote an environment variable for global use returning its value''' + retval = os.environ.get(x, '') + globals()[x] = retval + return retval + + +class TestCommand(ShellCommand): + '''Extend ShellCommand with optional summary logs''' + def __init__(self, *args, **kwargs): + super(TestCommand, self).__init__(*args, **kwargs) + + def createSummary(self, log): + exit_status = re.sub(r'.+\n\+ exit (\d+).+', + r'\1', log.getText()[-100:], flags=re.DOTALL) + if exit_status != '0': + return + # Infer coverage path from log + if '+ COVERAGE_PATH' in log.getText(): + path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+', + r'\2/\1', log.getText(), flags=re.DOTALL) + url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) + self.addURL('coverage', url) + elif 'COVERAGE_FILE' in log.getText(): + path = re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+', + r'\2/\1', log.getText(), flags=re.DOTALL) + url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) + self.addURL('coverage', url) + + +PORT_WEB = 8000 # Buildbot webserver port +PORT_GITHUB = 8011 # Buildbot github hook port +PORT_MASTER = 9989 # Port where buildbot master listen 
buildworkers + +BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB) +DOCKER_REPO = 'https://github.com/docker-test/docker' +DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO) +REGISTRY_REPO = 'https://github.com/docker-test/docker-registry' +REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO) +if ENV('DEPLOYMENT') == 'staging': + BUILDBOT_URL = "//docker-ci-stage.docker.io/" +if ENV('DEPLOYMENT') == 'production': + BUILDBOT_URL = '//docker-ci.docker.io/' + DOCKER_REPO = 'https://github.com/dotcloud/docker' + DOCKER_TEST_ARGV = '' + REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry' + REGISTRY_TEST_ARGV = '' # Credentials set by setup.sh from deployment.py -BUILDBOT_PWD = '' -IRC_PWD = '' -IRC_CHANNEL = '' -SMTP_USER = '' -SMTP_PWD = '' -EMAIL_RCP = '' +ENV('WEB_USER') +ENV('WEB_IRC_PWD') +ENV('BUILDBOT_PWD') +ENV('SMTP_USER') +ENV('SMTP_PWD') +ENV('EMAIL_RCP') +ENV('IRC_CHANNEL') c = BuildmasterConfig = {} -c['title'] = "Docker" +c['title'] = "docker-ci" c['titleURL'] = "waterfall" -c['buildbotURL'] = "http://docker-ci.dotcloud.com/" +c['buildbotURL'] = BUILDBOT_URL c['db'] = {'db_url':"sqlite:///state.sqlite"} c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)] c['slavePortnum'] = PORT_MASTER # Schedulers -c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker', - 'index','registry','docker-coverage','registry-coverage','nightlyrelease'])] -c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None, +c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[ + 'docker', 'docker-registry', 'nightlyrelease', 'backup'])] +c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None, change_filter=filter.ChangeFilter(branch='master', - repository='https://github.com/dotcloud/docker'), builderNames=['docker'])] -c['schedulers'] += [SingleBranchScheduler(name='pullrequest', - change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None, - builderNames=['pullrequest'])] 
-c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease', - 'docker-coverage','registry-coverage'], hour=7, minute=00)] -c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'], - hour=range(0,24,4), minute=15)] + repository=DOCKER_REPO), builderNames=['docker'])] +c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None, + change_filter=filter.ChangeFilter(branch='master', + repository=REGISTRY_REPO), builderNames=['docker-registry'])] +c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None, + change_filter=filter.ChangeFilter(category='github_pullrequest', + project='docker'), builderNames=['docker-pr'])] +c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None, + change_filter=filter.ChangeFilter(category='github_pullrequest', + project='docker-registry'), builderNames=['docker-registry-pr'])] +c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[ + 'nightlyrelease', 'backup'], hour=7, minute=00)] + # Builders -# Docker commit test -test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind' - ' test_docker.sh %(src::revision)s') + +# Backup factory = BuildFactory() -factory.addStep(ShellCommand(description='Docker', logEnviron=False, - usePTY=True, command=["sh", "-c", Interpolate(test_cmd)])) -c['builders'] = [BuilderConfig(name='docker',slavenames=['buildworker'], +factory.addStep(TestCommand(description='backup', logEnviron=False, + usePTY=True, command='/docker-ci/tool/backup.py')) +c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'], + factory=factory)] + +# Docker test +factory = BuildFactory() +factory.addStep(TestCommand(description='docker', logEnviron=False, + usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV))) +c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'], factory=factory)] # Docker pull request test 
-test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind' - ' test_docker.sh %(src::revision)s %(src::repository)s %(src::branch)s') factory = BuildFactory() -factory.addStep(ShellCommand(description='pull_request', logEnviron=False, - usePTY=True, command=["sh", "-c", Interpolate(test_cmd)])) -c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'], +factory.addStep(TestCommand(description='docker-pr', logEnviron=False, + usePTY=True, command=['/docker-ci/dockertest/docker', + Property('revision'), Property('repository'), Property('branch')])) +c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'], factory=factory)] -# Docker coverage test +# docker-registry test factory = BuildFactory() -factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False, - usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format( - DOCKER_CI_PATH))) -c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'], +factory.addStep(TestCommand(description='docker-registry', logEnviron=False, + usePTY=True, command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV))) +c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'], factory=factory)] -# Docker registry coverage test +# Docker registry pull request test factory = BuildFactory() -factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False, - usePTY=True, command='docker run registry_coverage'.format( - DOCKER_CI_PATH))) -c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'], - factory=factory)] - -# Registry functional test -factory = BuildFactory() -factory.addStep(ShellCommand(description='registry', logEnviron=False, - command='. 
{0}/master/credentials.cfg; ' - '{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH), - usePTY=True)) -c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'], - factory=factory)] - -# Index functional test -factory = BuildFactory() -factory.addStep(ShellCommand(description='index', logEnviron=False, - command='. {0}/master/credentials.cfg; ' - '{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH), - usePTY=True)) -c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'], +factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False, + usePTY=True, command=['/docker-ci/dockertest/docker-registry', + Property('revision'), Property('repository'), Property('branch')])) +c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'], factory=factory)] # Docker nightly release -nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET=' - 'test.docker.io dockerbuilder hack/dind dockerbuild.sh') factory = BuildFactory() factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False, - usePTY=True, command=nightlyrelease_cmd)) + usePTY=True, command=['/docker-ci/dockertest/nightlyrelease'])) c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'], factory=factory)] # Status -authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]), +authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]), forceBuild='auth') c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)] c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True, change_hook_dialects={ 'github': True })) -c['status'].append(MailNotifier(fromaddr='buildbot@docker.io', +c['status'].append(MailNotifier(fromaddr='docker-test@docker.io', sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP], mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True, smtpUser=SMTP_USER, 
smtpPassword=SMTP_PWD)) c['status'].append(words.IRC("irc.freenode.net", "dockerqabot", - channels=[IRC_CHANNEL], password=IRC_PWD, allowForce=True, + channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True, notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1})) diff --git a/hack/infrastructure/docker-ci/buildbot/requirements.txt b/hack/infrastructure/docker-ci/buildbot/requirements.txt deleted file mode 100644 index d2dcf1d125..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -sqlalchemy<=0.7.9 -sqlalchemy-migrate>=0.7.2 -buildbot==0.8.7p1 -buildbot_slave==0.8.7p1 -nose==1.2.1 -requests==1.1.0 -flask==0.10.1 -simplejson==2.3.2 -selenium==2.35.0 diff --git a/hack/infrastructure/docker-ci/buildbot/setup.sh b/hack/infrastructure/docker-ci/buildbot/setup.sh deleted file mode 100755 index c5d9cb988e..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/setup.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# Setup of buildbot configuration. 
Package installation is being done by -# Vagrantfile -# Dependencies: buildbot, buildbot-slave, supervisor - -USER=$1 -CFG_PATH=$2 -DOCKER_PATH=$3 -BUILDBOT_PWD=$4 -IRC_PWD=$5 -IRC_CHANNEL=$6 -SMTP_USER=$7 -SMTP_PWD=$8 -EMAIL_RCP=$9 -REGISTRY_USER=${10} -REGISTRY_PWD=${11} -REGISTRY_BUCKET=${12} -REGISTRY_ACCESS_KEY=${13} -REGISTRY_SECRET_KEY=${14} -BUILDBOT_PATH="/data/buildbot" -SLAVE_NAME="buildworker" -SLAVE_SOCKET="localhost:9989" - -export PATH="/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin" - -function run { su $USER -c "$1"; } - -# Exit if buildbot has already been installed -[ -d "$BUILDBOT_PATH" ] && exit 0 - -# Setup buildbot -run "mkdir -p $BUILDBOT_PATH" -cd $BUILDBOT_PATH -run "buildbot create-master master" -run "cp $CFG_PATH/master.cfg master" -run "sed -i -E 's#(BUILDBOT_PWD = ).+#\1\"$BUILDBOT_PWD\"#' master/master.cfg" -run "sed -i -E 's#(IRC_PWD = ).+#\1\"$IRC_PWD\"#' master/master.cfg" -run "sed -i -E 's#(IRC_CHANNEL = ).+#\1\"$IRC_CHANNEL\"#' master/master.cfg" -run "sed -i -E 's#(SMTP_USER = ).+#\1\"$SMTP_USER\"#' master/master.cfg" -run "sed -i -E 's#(SMTP_PWD = ).+#\1\"$SMTP_PWD\"#' master/master.cfg" -run "sed -i -E 's#(EMAIL_RCP = ).+#\1\"$EMAIL_RCP\"#' master/master.cfg" -run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD" -run "echo 'export DOCKER_CREDS=\"$REGISTRY_USER:$REGISTRY_PWD\"' > $BUILDBOT_PATH/master/credentials.cfg" -run "echo 'export S3_BUCKET=\"$REGISTRY_BUCKET\"' >> $BUILDBOT_PATH/master/credentials.cfg" -run "echo 'export S3_ACCESS_KEY=\"$REGISTRY_ACCESS_KEY\"' >> $BUILDBOT_PATH/master/credentials.cfg" -run "echo 'export S3_SECRET_KEY=\"$REGISTRY_SECRET_KEY\"' >> $BUILDBOT_PATH/master/credentials.cfg" - -# Patch github webstatus to capture pull requests -cp $CFG_PATH/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks - -# Allow buildbot subprocesses (docker tests) to properly run in containers, -# in particular with docker -u -run "sed -i 's/^umask = None/umask = 000/' 
slave/buildbot.tac" - -# Setup supervisor -cp $CFG_PATH/buildbot.conf /etc/supervisor/conf.d/buildbot.conf -sed -i -E "s/^chmod=0700.+/chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf -kill -HUP $(pgrep -f "/usr/bin/python /usr/bin/supervisord") diff --git a/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml new file mode 100644 index 0000000000..523535446a --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml @@ -0,0 +1,22 @@ +docker-ci: + image: "docker-ci/docker-ci" + release_name: "docker-ci-0.5.6" + ports: ["80","2222:22","8011:8011"] + register: "80" + volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] + command: [] + env: + - "DEPLOYMENT=production" + - "IRC_CHANNEL=docker-testing" + - "BACKUP_BUCKET=backup-ci" + - "$WEB_USER" + - "$WEB_IRC_PWD" + - "$BUILDBOT_PWD" + - "$AWS_ACCESS_KEY" + - "$AWS_SECRET_KEY" + - "$GPG_PASSPHRASE" + - "$BACKUP_AWS_ID" + - "$BACKUP_AWS_SECRET" + - "$SMTP_USER" + - "$SMTP_PWD" + - "$EMAIL_RCP" diff --git a/hack/infrastructure/docker-ci/dcr/prod/settings.yml b/hack/infrastructure/docker-ci/dcr/prod/settings.yml new file mode 100644 index 0000000000..9831afa6dd --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/prod/settings.yml @@ -0,0 +1,5 @@ +default: + hipaches: ['192.168.100.67:6379'] + daemons: ['192.168.100.67:4243'] + use_ssh: False + diff --git a/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml new file mode 100644 index 0000000000..8eba84825c --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml @@ -0,0 +1,22 @@ +docker-ci: + image: "docker-ci/docker-ci" + release_name: "docker-ci-stage" + ports: ["80","2222:22","8011:8011"] + register: "80" + volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] + command: [] + env: + - "DEPLOYMENT=staging" + - "IRC_CHANNEL=docker-testing-staging" + - "BACKUP_BUCKET=ci-backup-stage" + - 
"$BACKUP_AWS_ID" + - "$BACKUP_AWS_SECRET" + - "$WEB_USER" + - "$WEB_IRC_PWD" + - "$BUILDBOT_PWD" + - "$AWS_ACCESS_KEY" + - "$AWS_SECRET_KEY" + - "$GPG_PASSPHRASE" + - "$SMTP_USER" + - "$SMTP_PWD" + - "$EMAIL_RCP" diff --git a/hack/infrastructure/docker-ci/dcr/stage/settings.yml b/hack/infrastructure/docker-ci/dcr/stage/settings.yml new file mode 100644 index 0000000000..a7d37acff3 --- /dev/null +++ b/hack/infrastructure/docker-ci/dcr/stage/settings.yml @@ -0,0 +1,5 @@ +default: + hipaches: ['192.168.100.65:6379'] + daemons: ['192.168.100.65:4243'] + use_ssh: False + diff --git a/hack/infrastructure/docker-ci/deployment.py b/hack/infrastructure/docker-ci/deployment.py deleted file mode 100755 index fd0fdb0fe8..0000000000 --- a/hack/infrastructure/docker-ci/deployment.py +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env python - -import os, sys, re, json, requests, base64 -from subprocess import call -from fabric import api -from fabric.api import cd, run, put, sudo -from os import environ as env -from datetime import datetime -from time import sleep - -# Remove SSH private key as it needs more processing -CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','', - env['CONFIG_JSON'], flags=re.DOTALL)) - -# Populate environment variables -for key in CONFIG: - env[key] = CONFIG[key] - -# Load SSH private key -env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1', - env['CONFIG_JSON'],flags=re.DOTALL) - -DROPLET_NAME = env.get('DROPLET_NAME','docker-ci') -TIMEOUT = 120 # Seconds before timeout droplet creation -IMAGE_ID = 1004145 # Docker on Ubuntu 13.04 -REGION_ID = 4 # New York 2 -SIZE_ID = 62 # memory 2GB -DO_IMAGE_USER = 'root' # Image user on Digital Ocean -API_URL = 'https://api.digitalocean.com/' -DOCKER_PATH = '/go/src/github.com/dotcloud/docker' -DOCKER_CI_PATH = '/docker-ci' -CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH) - - -class DigitalOcean(): - - def __init__(self, key, client): - '''Set default API parameters''' - self.key = key - 
self.client = client - self.api_url = API_URL - - def api(self, cmd_path, api_arg={}): - '''Make api call''' - api_arg.update({'api_key':self.key, 'client_id':self.client}) - resp = requests.get(self.api_url + cmd_path, params=api_arg).text - resp = json.loads(resp) - if resp['status'] != 'OK': - raise Exception(resp['error_message']) - return resp - - def droplet_data(self, name): - '''Get droplet data''' - data = self.api('droplets') - data = [droplet for droplet in data['droplets'] - if droplet['name'] == name] - return data[0] if data else {} - - -def json_fmt(data): - '''Format json output''' - return json.dumps(data, sort_keys = True, indent = 2) - - -do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID']) - -# Get DROPLET_NAME data -data = do.droplet_data(DROPLET_NAME) - -# Stop processing if DROPLET_NAME exists on Digital Ocean -if data: - print ('Droplet: {} already deployed. Not further processing.' - .format(DROPLET_NAME)) - exit(1) - -# Create droplet -do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID, - 'image_id':IMAGE_ID, 'size_id':SIZE_ID, - 'ssh_key_ids':[env['DOCKER_KEY_ID']]}) - -# Wait for droplet to be created. 
-start_time = datetime.now() -while (data.get('status','') != 'active' and ( - datetime.now()-start_time).seconds < TIMEOUT): - data = do.droplet_data(DROPLET_NAME) - print data['status'] - sleep(3) - -# Wait for the machine to boot -sleep(15) - -# Get droplet IP -ip = str(data['ip_address']) -print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip) - -# Create docker-ci ssh private key so docker-ci docker container can communicate -# with its EC2 instance -os.makedirs('/root/.ssh') -open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY']) -os.chmod('/root/.ssh/id_rsa',0600) -open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n') - -api.env.host_string = ip -api.env.user = DO_IMAGE_USER -api.env.key_filename = '/root/.ssh/id_rsa' - -# Correct timezone -sudo('echo "America/Los_Angeles" >/etc/timezone') -sudo('dpkg-reconfigure --frontend noninteractive tzdata') - -# Load public docker-ci key -sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB'])) - -# Create docker nightly release credentials file -credentials = { - 'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'], - 'AWS_SECRET_KEY': env['PKG_SECRET_KEY'], - 'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']} -open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write( - base64.b64encode(json.dumps(credentials))) - -# Transfer docker -sudo('mkdir -p ' + DOCKER_CI_PATH) -sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH)) -call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip, - os.path.dirname(DOCKER_CI_PATH)), shell=True) - -# Install Docker and Buildbot dependencies -sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker') -sudo('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9') -sudo('echo deb https://get.docker.io/ubuntu docker main >' - ' /etc/apt/sources.list.d/docker.list') -sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n' - 'deb 
http://us.archive.ubuntu.com/ubuntu/ raring-security main universe\n"' - ' > /etc/apt/sources.list; apt-get update') -sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev' - ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)' - ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev') -sudo('wget -O - https://go.googlecode.com/files/go1.2.linux-amd64.tar.gz | ' - 'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go') -sudo('GOPATH=/go go get -d github.com/dotcloud/docker') -sudo('pip install -r {}/requirements.txt'.format(CFG_PATH)) - -# Install docker and testing dependencies -sudo('apt-get install -y -q lxc-docker') -sudo('curl -s https://phantomjs.googlecode.com/files/' - 'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin' - ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs') - -# Build docker-ci containers -sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH)) -sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH)) -sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format( - DOCKER_CI_PATH)) -sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format( - DOCKER_CI_PATH)) - -# Download docker-ci testing container -sudo('docker pull mzdaniel/test_docker') - -# Setup buildbot -sudo('mkdir /data') -sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}' - ' {11} {12}'.format(CFG_PATH, DOCKER_PATH, env['BUILDBOT_PWD'], - env['IRC_PWD'], env['IRC_CHANNEL'], env['SMTP_USER'], - env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'], - env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'], - env['REGISTRY_SECRET_KEY'])) - -# Preventively reboot docker-ci daily -sudo('ln -s /sbin/reboot /etc/cron.daily') diff --git a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh b/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh deleted file mode 100755 index c29ede5b81..0000000000 --- 
a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -set -x -# Generate a random string of $1 characters -function random { - cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1 -} - -# Compute test paths -BASE_PATH=`pwd`/test_docker_$(random 12) -DOCKER_PATH=$BASE_PATH/go/src/github.com/dotcloud/docker -export GOPATH=$BASE_PATH/go:$DOCKER_PATH/vendor - -# Fetch latest master -mkdir -p $DOCKER_PATH -cd $DOCKER_PATH -git init . -git fetch -q http://github.com/dotcloud/docker master -git reset --hard FETCH_HEAD - -# Fetch go coverage -cd $BASE_PATH/go -GOPATH=$BASE_PATH/go go get github.com/axw/gocov/gocov -sudo -E GOPATH=$GOPATH ./bin/gocov test -deps -exclude-goroot -v\ - -exclude github.com/gorilla/context,github.com/gorilla/mux,github.com/kr/pty,\ -code.google.com/p/go.net/websocket\ - github.com/dotcloud/docker | ./bin/gocov report; exit_status=$? - -# Cleanup testing directory -rm -rf $BASE_PATH - -exit $exit_status diff --git a/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh b/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh new file mode 100755 index 0000000000..fdacc290b4 --- /dev/null +++ b/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +export PATH='/go/bin':$PATH +export DOCKER_PATH='/go/src/github.com/dotcloud/docker' + +# Signal coverage report name, parsed by docker-ci +set -x +COVERAGE_PATH=$(date +"docker-%Y%m%d%H%M%S") +set +x + +REPORTS="/data/$COVERAGE_PATH" +INDEX="$REPORTS/index.html" + +# Test docker +cd $DOCKER_PATH +./hack/make.sh test; exit_status=$? +PROFILE_PATH="$(ls -d $DOCKER_PATH/bundles/* | sed -n '$ p')/test/coverprofiles" + +if [ "$exit_status" -eq "0" ]; then + # Download coverage dependencies + go get github.com/axw/gocov/gocov + go get -u github.com/matm/gocov-html + + # Create coverage report + mkdir -p $REPORTS + cd $PROFILE_PATH + cat > $INDEX << "EOF" + + + + + +Docker Coverage Report + +

Docker Coverage Report

+ + +EOF + for profile in *; do + gocov convert $profile | gocov-html >$REPORTS/$profile.html + echo "" >> $INDEX + done + echo "
packagepct
$profile" >> $INDEX + go tool cover -func=$profile | sed -En '$ s/.+\t(.+)/\1/p' >> $INDEX + echo "
" >> $INDEX +fi + +# Signal test and coverage result, parsed by docker-ci +set -x +exit $exit_status + diff --git a/hack/infrastructure/docker-ci/docker-test/Dockerfile b/hack/infrastructure/docker-ci/docker-test/Dockerfile deleted file mode 100644 index 0f3a63f5f1..0000000000 --- a/hack/infrastructure/docker-ci/docker-test/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -# VERSION: 0.4 -# DOCKER-VERSION 0.6.6 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Testing docker PRs and commits on top of master using -# REFERENCES: This code reuses the excellent implementation of -# Docker in Docker made by Jerome Petazzoni. -# https://github.com/jpetazzo/dind -# COMMENTS: -# This Dockerfile adapts /Dockerfile to enable docker PRs and commits testing -# Optional arguments: -# [commit] (default: 'HEAD') -# [repo] (default: 'http://github.com/dotcloud/docker') -# [branch] (default: 'master') -# TO_BUILD: docker build -t test_docker . -# TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch] - -from docker -maintainer Daniel Mizyrycki - -# Setup go in PATH. 
Extracted from /Dockerfile -env PATH /usr/local/go/bin:$PATH - -# Add test_docker.sh -add test_docker.sh /usr/bin/test_docker.sh -run chmod +x /usr/bin/test_docker.sh diff --git a/hack/infrastructure/docker-ci/docker-test/test_docker.sh b/hack/infrastructure/docker-ci/docker-test/test_docker.sh deleted file mode 100755 index 14816706ed..0000000000 --- a/hack/infrastructure/docker-ci/docker-test/test_docker.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash - -set -x -COMMIT=${1-HEAD} -REPO=${2-http://github.com/dotcloud/docker} -BRANCH=${3-master} - -# Compute test paths -DOCKER_PATH=/go/src/github.com/dotcloud/docker - -# Timestamp -echo -date; echo - -# Fetch latest master -cd / -rm -rf /go -git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH -cd $DOCKER_PATH - -# Merge commit -git fetch -q "$REPO" "$BRANCH" -git merge --no-edit $COMMIT || exit 255 - -# Test commit -./hack/make.sh test; exit_status=$? - -# Display load if test fails -if [ $exit_status -ne 0 ] ; then - uptime; echo; free -fi - -exit $exit_status diff --git a/hack/infrastructure/docker-ci/dockertest/docker b/hack/infrastructure/docker-ci/dockertest/docker new file mode 120000 index 0000000000..e3f094ee63 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/docker @@ -0,0 +1 @@ +project \ No newline at end of file diff --git a/hack/infrastructure/docker-ci/dockertest/docker-registry b/hack/infrastructure/docker-ci/dockertest/docker-registry new file mode 120000 index 0000000000..e3f094ee63 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/docker-registry @@ -0,0 +1 @@ +project \ No newline at end of file diff --git a/hack/infrastructure/docker-ci/dockertest/nightlyrelease b/hack/infrastructure/docker-ci/dockertest/nightlyrelease new file mode 100755 index 0000000000..475b088065 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/nightlyrelease @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +if [ "$DEPLOYMENT" == "production" ]; then + 
AWS_S3_BUCKET='test.docker.io' +else + AWS_S3_BUCKET='get-staging.docker.io' +fi + +docker run -rm -privileged -v /run:/var/socket \ + -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \ + -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \ + -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker + diff --git a/hack/infrastructure/docker-ci/dockertest/project b/hack/infrastructure/docker-ci/dockertest/project new file mode 100755 index 0000000000..160f2d5d59 --- /dev/null +++ b/hack/infrastructure/docker-ci/dockertest/project @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -x + +PROJECT_NAME=$(basename $0) + +docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ + -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 + diff --git a/hack/infrastructure/docker-ci/nginx/nginx.conf b/hack/infrastructure/docker-ci/nginx/nginx.conf new file mode 100644 index 0000000000..6649741134 --- /dev/null +++ b/hack/infrastructure/docker-ci/nginx/nginx.conf @@ -0,0 +1,12 @@ +server { + listen 80; + root /data/docker-ci; + + location / { + proxy_pass http://localhost:8000/; + } + + location /coverage { + root /data/docker-ci; + } +} diff --git a/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile b/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile deleted file mode 100644 index 2100a9e8e9..0000000000 --- a/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -# VERSION: 1.6 -# DOCKER-VERSION 0.6.6 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Build docker nightly release using Docker in Docker. -# REFERENCES: This code reuses the excellent implementation of docker in docker -# made by Jerome Petazzoni. 
https://github.com/jpetazzo/dind -# COMMENTS: -# release_credentials.json is a base64 json encoded file containing: -# { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id", -# "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key", -# "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" } -# TO_BUILD: docker build -t dockerbuilder . -# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh - -from docker -maintainer Daniel Mizyrycki - -# Add docker dependencies and downloading packages -run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list -run apt-get update; apt-get install -y -q wget python2.7 - -# Add production docker binary -run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker - -# Add proto docker builder -add ./dockerbuild.sh /usr/bin/dockerbuild.sh -run chmod +x /usr/bin/dockerbuild.sh - -# Add release credentials -add ./release_credentials.json /root/release_credentials.json diff --git a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh b/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh deleted file mode 100644 index d5e58da7e1..0000000000 --- a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and PG_PASSPHRASE are decoded -# from /root/release_credentials.json -# Variable AWS_S3_BUCKET is passed to the environment from docker run -e - -# Turn debug off to load credentials from the environment -set +x -eval $(cat /root/release_credentials.json | python -c ' -import sys,json,base64; -d=json.loads(base64.b64decode(sys.stdin.read())); -exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")') - -# Fetch docker master branch -set -x -cd / -rm -rf /go -git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker -cd 
/go/src/github.com/dotcloud/docker - -# Launch docker daemon using dind inside the container -/usr/bin/docker version -/usr/bin/docker -d & -sleep 5 - -# Build Docker release container -docker build -t docker . - -# Test docker and if everything works well, release -echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh -set +x -docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh -exit_status=$? - -# Display load if test fails -set -x -if [ $exit_status -ne 0 ] ; then - uptime; echo; free - exit 1 -fi diff --git a/hack/infrastructure/docker-ci/registry-coverage/Dockerfile b/hack/infrastructure/docker-ci/registry-coverage/Dockerfile deleted file mode 100644 index e544645b67..0000000000 --- a/hack/infrastructure/docker-ci/registry-coverage/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# VERSION: 0.1 -# DOCKER-VERSION 0.6.4 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Docker registry coverage -# COMMENTS: Add registry coverage into the docker-ci image -# TO_BUILD: docker build -t registry_coverage . 
-# TO_RUN: docker run registry_coverage - -from docker-ci -maintainer Daniel Mizyrycki - -# Add registry_coverager.sh and dependencies -run pip install coverage flask pyyaml requests simplejson python-glanceclient \ - blinker redis boto gevent rsa mock -add registry_coverage.sh /usr/bin/registry_coverage.sh -run chmod +x /usr/bin/registry_coverage.sh - -cmd "/usr/bin/registry_coverage.sh" diff --git a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh b/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh deleted file mode 100755 index c67b17eba0..0000000000 --- a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -set -x - -# Setup the environment -REGISTRY_PATH=/data/docker-registry -export SETTINGS_FLAVOR=test -export DOCKER_REGISTRY_CONFIG=config_test.yml -export PYTHONPATH=$REGISTRY_PATH/test - -# Fetch latest docker-registry master -rm -rf $REGISTRY_PATH -git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH -cd $REGISTRY_PATH - -# Generate coverage -coverage run -m unittest discover test || exit 1 -coverage report --include='./*' --omit='./test/*' diff --git a/hack/infrastructure/docker-ci/setup.sh b/hack/infrastructure/docker-ci/setup.sh new file mode 100755 index 0000000000..65a00f6dd0 --- /dev/null +++ b/hack/infrastructure/docker-ci/setup.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Set timezone +echo "GMT" >/etc/timezone +dpkg-reconfigure --frontend noninteractive tzdata + +# Set ssh superuser +mkdir -p /data/buildbot /var/run/sshd /run +useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin +sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers +cd /home/sysadmin +mkdir .ssh +chmod 700 .ssh +cat > .ssh/authorized_keys << 'EOF' +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io +EOF +chmod 600 .ssh/authorized_keys +chown -R sysadmin .ssh + +# Fix docker group id for use of host dockerd by sysadmin +sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group + +# Create buildbot configuration +cd /data/buildbot; buildbot create-master master +cp -a /data/buildbot/master/master.cfg.sample \ + /data/buildbot/master/master.cfg +cd /data/buildbot; \ + buildslave create-slave slave localhost:9989 buildworker pass +cp /docker-ci/buildbot/master.cfg /data/buildbot/master + +# Patch github webstatus to capture pull requests +cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks +chown -R sysadmin.sysadmin /data + +# Create nginx configuration +rm /etc/nginx/sites-enabled/default +cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf +/bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf + +# Set supervisord buildbot, nginx and sshd processes +/bin/echo -e "\ +[program:buildmaster]\n\ +command=twistd --nodaemon --no_save -y buildbot.tac\n\ +directory=/data/buildbot/master\n\ +user=sysadmin\n\n\ +[program:buildworker]\n\ +command=twistd --nodaemon --no_save -y buildbot.tac\n\ +directory=/data/buildbot/slave\n\ +user=sysadmin\n" > \ + /etc/supervisor/conf.d/buildbot.conf +/bin/echo -e 
"[program:nginx]\ncommand=/usr/sbin/nginx\n" > \ + /etc/supervisor/conf.d/nginx.conf +/bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \ + /etc/supervisor/conf.d/sshd.conf diff --git a/hack/infrastructure/docker-ci/testbuilder/Dockerfile b/hack/infrastructure/docker-ci/testbuilder/Dockerfile new file mode 100644 index 0000000000..a008da6843 --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/Dockerfile @@ -0,0 +1,12 @@ +# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder . +# TO_RUN: docker run -rm -u sysadmin \ +# -v /run:/var/socket docker-ci/testbuilder docker-registry +# + +FROM docker-ci/docker-ci +ENV HOME /home/sysadmin + +RUN mkdir /testbuilder +ADD . /testbuilder + +ENTRYPOINT ["/testbuilder/testbuilder.sh"] diff --git a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh new file mode 100755 index 0000000000..72087462ad --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -x +set -e +PROJECT_PATH=$1 + +# Build the docker project +cd /data/$PROJECT_PATH +sg docker -c "docker build -q -rm -t registry ." +cd test; sg docker -c "docker build -q -rm -t docker-registry-test ." + +# Run the tests +sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test" diff --git a/hack/infrastructure/docker-ci/testbuilder/docker.sh b/hack/infrastructure/docker-ci/testbuilder/docker.sh new file mode 100755 index 0000000000..b365dd7eaf --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/docker.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -x +set -e +PROJECT_PATH=$1 + +# Build the docker project +cd /data/$PROJECT_PATH +sg docker -c "docker build -q -rm -t docker ." 
+ +if [ "$DOCKER_RELEASE" == "1" ]; then + # Do nightly release + echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh" + set +x + sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh" +else + # Run the tests + sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh" +fi diff --git a/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh new file mode 100755 index 0000000000..70701343c2 --- /dev/null +++ b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Download, build and run a docker project's tests +# Environment variables: DEPLOYMENT + +cat $0 +set -e +set -x + +PROJECT=$1 +COMMIT=${2-HEAD} +REPO=${3-https://github.com/dotcloud/$PROJECT} +BRANCH=${4-master} +REPO_PROJ="https://github.com/docker-test/$PROJECT" +if [ "$DEPLOYMENT" == "production" ]; then + REPO_PROJ="https://github.com/dotcloud/$PROJECT" +fi +set +x + +# Generate a random string of $1 characters +function random { + cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1 +} + +PROJECT_PATH="$PROJECT-tmp-$(random 12)" + +# Set docker-test git user +set -x +git config --global user.email "docker-test@docker.io" +git config --global user.name "docker-test" + +# Fetch project +git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH +cd /data/$PROJECT_PATH +echo "Git commit: $(git rev-parse HEAD)" +git fetch -q $REPO $BRANCH +git merge --no-edit $COMMIT + +# Build the project dockertest +/testbuilder/$PROJECT.sh $PROJECT_PATH +rm -rf /data/$PROJECT_PATH diff --git a/hack/infrastructure/docker-ci/tool/backup.py 
b/hack/infrastructure/docker-ci/tool/backup.py new file mode 100755 index 0000000000..2db633e526 --- /dev/null +++ b/hack/infrastructure/docker-ci/tool/backup.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python + +import os,sys,json +from datetime import datetime +from filecmp import cmp +from subprocess import check_call +from boto.s3.key import Key +from boto.s3.connection import S3Connection + +def ENV(x): + '''Promote an environment variable for global use returning its value''' + retval = os.environ.get(x, '') + globals()[x] = retval + return retval + +ROOT_PATH = '/data/backup/docker-ci' +TODAY = str(datetime.today())[:10] +BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY) +BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH) +ENV('BACKUP_BUCKET') +ENV('BACKUP_AWS_ID') +ENV('BACKUP_AWS_SECRET') + +'''Create full master buildbot backup, avoiding duplicates''' +# Ensure backup path exist +if not os.path.exists(ROOT_PATH): + os.makedirs(ROOT_PATH) +# Make actual backups +check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave' + ' . 
1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True) +# remove previous dump if it is the same as the latest +if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and + os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE): + os.unlink(os.path._resolve_link(BACKUP_LINK)) +# Recreate backup link pointing to latest backup +try: + os.unlink(BACKUP_LINK) +except: + pass +os.symlink(BACKUP_FILE, BACKUP_LINK) + +# Make backup on S3 +bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET) +k = Key(bucket) +k.key = BACKUP_FILE +k.set_contents_from_filename(BACKUP_FILE) +bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:]) diff --git a/hack/make.sh b/hack/make.sh index ef13c1a283..c0092b106f 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -53,9 +53,9 @@ DEFAULT_BUNDLES=( ) VERSION=$(cat ./VERSION) -if [ -d .git ] && command -v git &> /dev/null; then +if command -v git &> /dev/null && git rev-parse &> /dev/null; then GITCOMMIT=$(git rev-parse --short HEAD) - if [ -n "$(git status --porcelain)" ]; then + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then GITCOMMIT="$GITCOMMIT-dirty" fi elif [ "$DOCKER_GITCOMMIT" ]; then @@ -68,9 +68,22 @@ else exit 1 fi +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/github.com/dotcloud + ln -sf ../../../.. .gopath/src/github.com/dotcloud/docker + export GOPATH="$(pwd)/.gopath:$(pwd)/vendor" +fi + +if [ ! 
"$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + # Use these flags when compiling the tests and final binary -LDFLAGS='-X main.GITCOMMIT "'$GITCOMMIT'" -X main.VERSION "'$VERSION'" -w' -LDFLAGS_STATIC='-X github.com/dotcloud/docker/utils.IAMSTATIC true -linkmode external -extldflags "-lpthread -static -Wl,--unresolved-symbols=ignore-in-object-files"' +LDFLAGS='-X github.com/dotcloud/docker/dockerversion.GITCOMMIT "'$GITCOMMIT'" -X github.com/dotcloud/docker/dockerversion.VERSION "'$VERSION'" -w' +LDFLAGS_STATIC='-X github.com/dotcloud/docker/dockerversion.IAMSTATIC true -linkmode external -extldflags "-lpthread -static -Wl,--unresolved-symbols=ignore-in-object-files"' BUILDFLAGS='-tags netgo -a' HAVE_GO_TEST_COVER= diff --git a/hack/make/dynbinary b/hack/make/dynbinary index c02094c0c5..d5ea6ebe54 100644 --- a/hack/make/dynbinary +++ b/hack/make/dynbinary @@ -7,11 +7,22 @@ CGO_ENABLED=0 go build -o $DEST/dockerinit-$VERSION -ldflags "$LDFLAGS -d" $BUIL echo "Created binary: $DEST/dockerinit-$VERSION" ln -sf dockerinit-$VERSION $DEST/dockerinit +sha1sum= +if command -v sha1sum &> /dev/null; then + sha1sum=sha1sum +elif command -v shasum &> /dev/null; then + # Mac OS X - why couldn't they just use the same command name and be happy? 
+ sha1sum=shasum +else + echo >&2 'error: cannot find sha1sum command or equivalent' + exit 1 +fi + # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another -export DOCKER_INITSHA1="$(sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" +export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" # exported so that "dyntest" can easily access it later without recalculating it ( - export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/utils.INITPATH \"$DOCKER_INITPATH\"" + export LDFLAGS_STATIC="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/dockerversion.INITPATH \"$DOCKER_INITPATH\"" source "$(dirname "$BASH_SOURCE")/binary" ) diff --git a/hack/make/dyntest b/hack/make/dyntest index eb5c2b73ed..555517fd05 100644 --- a/hack/make/dyntest +++ b/hack/make/dyntest @@ -12,6 +12,6 @@ fi ( export TEST_DOCKERINIT_PATH="$INIT" - export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" + export LDFLAGS_STATIC="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"" source "$(dirname "$BASH_SOURCE")/test" ) diff --git a/hack/make/dyntest-integration b/hack/make/dyntest-integration index 0887c45be0..b3a2d7be63 100644 --- a/hack/make/dyntest-integration +++ b/hack/make/dyntest-integration @@ -12,6 +12,6 @@ fi ( export TEST_DOCKERINIT_PATH="$INIT" - export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" + export LDFLAGS_STATIC="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"" source "$(dirname "$BASH_SOURCE")/test-integration" ) diff --git a/hack/make/ubuntu b/hack/make/ubuntu index 1d309d2b5c..23af0ff7e0 100644 --- a/hack/make/ubuntu +++ b/hack/make/ubuntu @@ -119,6 +119,8 @@ EOF --deb-recommends xz-utils \ --description "$PACKAGE_DESCRIPTION" \ --maintainer 
"$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ --conflicts lxc-docker-virtual-package \ --provides lxc-docker \ --provides lxc-docker-virtual-package \ diff --git a/hack/vendor.sh b/hack/vendor.sh index d3e7ea9f43..184cb750a5 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -50,3 +50,11 @@ clone git github.com/syndtr/gocapability 3454319be2 clone hg code.google.com/p/go.net 84a4013f96e0 clone hg code.google.com/p/gosqlite 74691fb6f837 + +# get Go tip's archive/tar, for xattr support +# TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep +clone hg code.google.com/p/go a15f344a9efa +mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar +rm -rf src/code.google.com/p/go +mkdir -p src/code.google.com/p/go/src/pkg/archive +mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar diff --git a/image.go b/image.go index dbd2173597..fa5b65787c 100644 --- a/image.go +++ b/image.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -18,17 +19,17 @@ import ( ) type Image struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - OS string `json:"os,omitempty"` + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig runconfig.Config `json:"container_config,omitempty"` + DockerVersion string 
`json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *runconfig.Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + OS string `json:"os,omitempty"` graph *Graph Size int64 } @@ -66,7 +67,7 @@ func LoadImage(root string) (*Image, error) { return img, nil } -func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root, layer string) error { +func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root, layer string) error { // Store the layer var ( size int64 @@ -173,7 +174,11 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) { if err != nil { return nil, err } - return EofReader(archive, func() { driver.Put(img.ID) }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(img.ID) + return err + }), nil } parentFs, err := driver.Get(img.Parent) @@ -189,7 +194,11 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) { if err != nil { return nil, err } - return EofReader(archive, func() { driver.Put(img.ID) }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(img.ID) + return err + }), nil } func ValidateID(id string) error { diff --git a/integration/api_test.go b/integration/api_test.go index 82de56a8ba..5779e6b226 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -1,14 +1,16 @@ package docker import ( - "archive/tar" "bufio" "bytes" + "code.google.com/p/go/src/pkg/archive/tar" "encoding/json" "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "net" @@ -45,7 +47,7 @@ func TestGetVersion(t *testing.T) { t.Fatal(err) } out.Close() - expected := docker.VERSION + expected := dockerversion.VERSION if result := v.Get("Version"); result != expected 
{ t.Errorf("Expected version %s, %s found", expected, result) } @@ -308,7 +310,7 @@ func TestGetContainersJSON(t *testing.T) { } beginLen := len(outs.Data) - containerID := createTestContainer(eng, &docker.Config{ + containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, t) @@ -345,7 +347,7 @@ func TestGetContainersExport(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, @@ -387,13 +389,84 @@ func TestGetContainersExport(t *testing.T) { } } +func TestSaveImageAndThenLoad(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + // save image + r := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } + tarball := r.Body + + // delete the image + r = httptest.NewRecorder() + req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } + + // make sure there is no image + r = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusNotFound { + t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code) + } + + // load the image + r = httptest.NewRecorder() + req, err = http.NewRequest("POST", "/images/load", tarball) + 
if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } + + // finally make sure the image is there + r = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) + if err != nil { + t.Fatal(err) + } + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusOK { + t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) + } +} + func TestGetContainersChanges(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/rm", "/etc/passwd"}, }, @@ -432,7 +505,7 @@ func TestGetContainersTop(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, @@ -509,7 +582,7 @@ func TestGetContainersByName(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, @@ -541,7 +614,7 @@ func TestPostCommit(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, @@ -577,7 +650,7 @@ func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - configJSON, err := json.Marshal(&docker.Config{ + configJSON, err := json.Marshal(&runconfig.Config{ Image: unitTestImageID, Memory: 33554432, Cmd: []string{"touch", "/test"}, @@ -619,7 +692,7 @@ func 
TestPostContainersKill(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -658,7 +731,7 @@ func TestPostContainersRestart(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, @@ -704,7 +777,7 @@ func TestPostContainersStart(t *testing.T) { containerID := createTestContainer( eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -712,7 +785,7 @@ func TestPostContainersStart(t *testing.T) { t, ) - hostConfigJSON, err := json.Marshal(&docker.HostConfig{}) + hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{}) req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { @@ -757,7 +830,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) { containerID := createTestContainer( eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -765,7 +838,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) { t, ) - hostConfigJSON, err := json.Marshal(&docker.HostConfig{ + hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{ Binds: []string{"/:/tmp"}, }) @@ -791,7 +864,7 @@ func TestPostContainersStop(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, @@ -831,7 +904,7 @@ func TestPostContainersWait(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sleep", "1"}, OpenStdin: true, @@ -869,7 +942,7 @@ func 
TestPostContainersAttach(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, @@ -947,7 +1020,7 @@ func TestPostContainersAttachStderr(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"}, OpenStdin: true, @@ -1028,7 +1101,7 @@ func TestDeleteContainers(t *testing.T) { defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, @@ -1163,7 +1236,7 @@ func TestPostContainersCopy(t *testing.T) { // Create a container and remove a file containerID := createTestContainer(eng, - &docker.Config{ + &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test.txt"}, }, @@ -1215,6 +1288,34 @@ func TestPostContainersCopy(t *testing.T) { } } +func TestPostContainersCopyWhenContainerNotFound(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + r := httptest.NewRecorder() + + var copyData engine.Env + copyData.Set("Resource", "/test.txt") + copyData.Set("HostPath", ".") + + jsonData := bytes.NewBuffer(nil) + if err := copyData.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData) + if err != nil { + t.Fatal(err) + } + req.Header.Add("Content-Type", "application/json") + if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusNotFound { + t.Fatalf("404 expected for id_not_found Container, received %v", r.Code) + } +} + // Mocked types for tests type NopConn struct { io.ReadCloser diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 
6a7da70558..805932b57a 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -14,16 +14,6 @@ import ( "testing" ) -// mkTestContext generates a build context from the contents of the provided dockerfile. -// This context is suitable for use as an argument to BuildFile.Build() -func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive { - context, err := docker.MkBuildContext(dockerfile, files) - if err != nil { - t.Fatal(err) - } - return context -} - // A testContextTemplate describes a build context and how to test it type testContextTemplate struct { // Contents of the Dockerfile @@ -34,6 +24,18 @@ type testContextTemplate struct { remoteFiles [][2]string } +func (context testContextTemplate) Archive(dockerfile string, t *testing.T) archive.Archive { + input := []string{"Dockerfile", dockerfile} + for _, pair := range context.files { + input = append(input, pair[0], pair[1]) + } + a, err := archive.Generate(input...) + if err != nil { + t.Fatal(err) + } + return a +} + // A table of all the contexts to build and test. // A new docker runtime will be created and torn down for each context. 
var testContexts = []testContextTemplate{ @@ -148,6 +150,65 @@ RUN [ "$(/hello.sh)" = "hello world" ] nil, }, + // Users and groups + { + ` +FROM {IMAGE} + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0:root' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1000:1000::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1000:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0:root' ] + +# Add a "supplementary" group for our dockerio user +RUN echo 'supplementary:x:1001:dockerio' >> /etc/group + +# ... 
and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] +USER 1000 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER 1000:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER dockerio:1000 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER 1000:1000 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] +USER 1000:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] +USER 1000:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ] +`, + nil, + nil, + }, + // Environment variable { ` @@ -322,7 +383,7 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, 
ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t)) + id, err := buildfile.Build(context.Archive(dockerfile, t)) if err != nil { return nil, err } @@ -726,7 +787,7 @@ func TestForbiddenContextPath(t *testing.T) { dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) + _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { t.Log("Error should not be nil") @@ -772,7 +833,7 @@ func TestBuildADDFileNotFound(t *testing.T) { dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) + _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { t.Log("Error should not be nil") diff --git a/integration/commands_test.go b/integration/commands_test.go index a0fc4b9523..a3359ec631 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "github.com/dotcloud/docker" + "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/utils" @@ -119,7 +120,7 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error func TestRunHostname(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -164,7 +165,7 @@ func 
TestRunHostname(t *testing.T) { func TestRunWorkdir(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -209,7 +210,7 @@ func TestRunWorkdir(t *testing.T) { func TestRunWorkdirExists(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -254,7 +255,7 @@ func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -307,7 +308,7 @@ func TestRunDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -353,7 +354,7 @@ func TestRunDisconnectTty(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -405,7 +406,7 @@ func TestRunAttachStdin(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, 
ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -469,7 +470,7 @@ func TestRunDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -516,7 +517,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -549,7 +550,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch = make(chan struct{}) go func() { @@ -597,7 +598,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) // Discard the CmdRun output @@ -615,7 +616,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch := make(chan 
struct{}) go func() { @@ -662,7 +663,7 @@ func TestAttachDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) go func() { @@ -731,7 +732,7 @@ func TestAttachDisconnect(t *testing.T) { func TestRunAutoRemove(t *testing.T) { t.Skip("Fixme. Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -767,7 +768,7 @@ func TestRunAutoRemove(t *testing.T) { func TestCmdLogs(t *testing.T) { t.Skip("Test not impemented") - cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil { @@ -785,7 +786,7 @@ func TestCmdLogs(t *testing.T) { // Expected behaviour: error out when attempting to bind mount non-existing source paths func TestRunErrorBindNonExistingSource(t *testing.T) { - cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -805,7 +806,7 @@ func TestRunErrorBindNonExistingSource(t *testing.T) { func TestImagesViz(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer 
cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) @@ -855,7 +856,7 @@ func TestImagesViz(t *testing.T) { func TestImagesTree(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) @@ -938,7 +939,7 @@ func TestRunCidFile(t *testing.T) { } tmpCidFile := path.Join(tmpDir, "cid") - cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -988,7 +989,7 @@ func TestContainerOrphaning(t *testing.T) { defer os.RemoveAll(tmpDir) // setup a CLI and server - cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) srv := mkServerFromEngine(globalEngine, t) @@ -1045,8 +1046,8 @@ func TestCmdKill(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - cli2 := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli2 := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) diff --git a/integration/container_test.go b/integration/container_test.go index 97f4cd282f..b961e1d147 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -3,7 +3,7 @@ package docker import ( "bufio" "fmt" - 
"github.com/dotcloud/docker" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -20,7 +20,7 @@ func TestIDFormat(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/sh", "-c", "echo hello world"}, }, @@ -234,7 +234,7 @@ func TestCommitAutoRun(t *testing.T) { t.Errorf("Container shouldn't be running") } - img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &docker.Config{Cmd: []string{"cat", "/world"}}) + img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}}) if err != nil { t.Error(err) } @@ -415,7 +415,7 @@ func TestOutput(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -438,7 +438,7 @@ func TestContainerNetwork(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, }, @@ -460,7 +460,7 @@ func TestKillDifferentUser(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, @@ -520,7 +520,7 @@ func TestCreateVolume(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) + config, hc, _, err := runconfig.Parse([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) if err != nil { t.Fatal(err) } @@ -552,7 +552,7 @@ func 
TestCreateVolume(t *testing.T) { func TestKill(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -596,7 +596,7 @@ func TestExitCode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - trueContainer, _, err := runtime.Create(&docker.Config{ + trueContainer, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/true"}, }, "") @@ -611,7 +611,7 @@ func TestExitCode(t *testing.T) { t.Fatalf("Unexpected exit code %d (expected 0)", code) } - falseContainer, _, err := runtime.Create(&docker.Config{ + falseContainer, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/false"}, }, "") @@ -630,7 +630,7 @@ func TestExitCode(t *testing.T) { func TestRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -661,7 +661,7 @@ func TestRestart(t *testing.T) { func TestRestartStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -739,7 +739,7 @@ func TestUser(t *testing.T) { defer nuke(runtime) // Default user must be root - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, }, @@ -758,7 +758,7 @@ func TestUser(t *testing.T) { } // Set a username - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -779,7 
+779,7 @@ func TestUser(t *testing.T) { } // Set a UID - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -800,7 +800,7 @@ func TestUser(t *testing.T) { } // Set a different user by uid - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -823,7 +823,7 @@ func TestUser(t *testing.T) { } // Set a different user by username - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -844,7 +844,7 @@ func TestUser(t *testing.T) { } // Test an wrong username - container, _, err = runtime.Create(&docker.Config{ + container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -866,7 +866,7 @@ func TestMultipleContainers(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _, err := runtime.Create(&docker.Config{ + container1, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -877,7 +877,7 @@ func TestMultipleContainers(t *testing.T) { } defer runtime.Destroy(container1) - container2, _, err := runtime.Create(&docker.Config{ + container2, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -921,7 +921,7 @@ func TestMultipleContainers(t *testing.T) { func TestStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -966,7 +966,7 @@ func TestStdin(t *testing.T) { func TestTty(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := 
runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -1013,7 +1013,7 @@ func TestEnv(t *testing.T) { os.Setenv("TRICKY", "tri\ncky\n") runtime := mkRuntime(t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) + config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) if err != nil { t.Fatal(err) } @@ -1067,7 +1067,7 @@ func TestEntrypoint(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo"}, Cmd: []string{"-n", "foobar"}, @@ -1091,7 +1091,7 @@ func TestEntrypointNoCmd(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo", "foobar"}, }, @@ -1114,7 +1114,7 @@ func BenchmarkRunSequencial(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) for i := 0; i < b.N; i++ { - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1147,7 +1147,7 @@ func BenchmarkRunParallel(b *testing.B) { complete := make(chan error) tasks = append(tasks, complete) go func(i int, complete chan error) { - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1301,7 +1301,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: 
[]string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1321,7 +1321,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID + ":ro", @@ -1362,7 +1362,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1382,7 +1382,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID, @@ -1418,7 +1418,7 @@ func TestRestartWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1462,7 +1462,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1491,7 +1491,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat", "/test/foo"}, VolumesFrom: container.ID, @@ -1529,7 +1529,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer 
nuke(runtime) - config, hc, _, err := docker.ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) + config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) if err != nil { t.Fatal(err) } @@ -1617,7 +1617,7 @@ func TestMultipleVolumesFrom(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1646,7 +1646,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container2, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, Volumes: map[string]struct{}{"/other": {}}, @@ -1668,7 +1668,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container3, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), @@ -1696,7 +1696,7 @@ func TestRestartGhost(t *testing.T) { defer nuke(runtime) container, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, diff --git a/integration/graph_test.go b/integration/graph_test.go index eec4c5c7dc..ff1c0d9361 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -4,6 +4,7 @@ import ( "errors" "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/utils" "io" @@ -105,8 +106,8 @@ func TestGraphCreate(t *testing.T) { if image.Comment != "Testing" { t.Fatalf("Wrong comment: should 
be '%s', not '%s'", "Testing", image.Comment) } - if image.DockerVersion != docker.VERSION { - t.Fatalf("Wrong docker_version: should be '%s', not '%s'", docker.VERSION, image.DockerVersion) + if image.DockerVersion != dockerversion.VERSION { + t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, image.DockerVersion) } images, err := graph.Map() if err != nil { diff --git a/integration/runtime_test.go b/integration/runtime_test.go index da95967a30..ca2119ce1f 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" "io" @@ -169,9 +171,14 @@ func spawnGlobalDaemon() { log.Fatalf("Unable to spawn the test daemon: %s", err) } }() + // Give some time to ListenAndServe to actually start // FIXME: use inmem transports instead of tcp time.Sleep(time.Second) + + if err := eng.Job("acceptconnections").Run(); err != nil { + log.Fatalf("Unable to accept connections for test api: %s", err) + } } // FIXME: test that ImagePull(json=true) send correct json output @@ -199,7 +206,7 @@ func TestRuntimeCreate(t *testing.T) { t.Errorf("Expected 0 containers, %v found", len(runtime.List())) } - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, @@ -242,23 +249,23 @@ func TestRuntimeCreate(t *testing.T) { // Test that conflict error displays correct details testContainer, _, _ := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "conflictname", ) - if _, _, err := runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil ||
!strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { + if _, _, err := runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error()) } // Make sure create with bad parameters returns an error - if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil { + if _, _, err = runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID}, ""); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is missing") } if _, _, err := runtime.Create( - &docker.Config{ + &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{}, }, @@ -267,7 +274,7 @@ func TestRuntimeCreate(t *testing.T) { t.Fatal("Builder.Create should throw an error when Cmd is empty") } - config := &docker.Config{ + config := &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/ls"}, PortSpecs: []string{"80"}, @@ -280,7 +287,7 @@ func TestRuntimeCreate(t *testing.T) { } // test expose 80:8000 - container, warnings, err := runtime.Create(&docker.Config{ + container, warnings, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, PortSpecs: []string{"80:8000"}, @@ -299,7 +306,7 @@ func TestDestroy(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "") @@ -368,7 +375,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *doc eng = NewTestEngine(t) runtime = mkRuntimeFromEngine(eng, t) port = 5554 - p docker.Port + p nat.Port ) defer func() { if err != nil { @@ -387,8 +394,8 @@ func startEchoServerContainer(t *testing.T, proto 
string) (*docker.Runtime, *doc } else { t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) } - ep := make(map[docker.Port]struct{}, 1) - p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto)) + ep := make(map[nat.Port]struct{}, 1) + p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto)) ep[p] = struct{}{} jobCreate := eng.Job("create") @@ -411,8 +418,8 @@ func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *doc } jobStart := eng.Job("start", id) - portBindings := make(map[docker.Port][]docker.PortBinding) - portBindings[p] = []docker.PortBinding{ + portBindings := make(map[nat.Port][]nat.PortBinding) + portBindings[p] = []nat.PortBinding{ {}, } if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { @@ -711,7 +718,7 @@ func TestDefaultContainerName(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -735,7 +742,7 @@ func TestRandomContainerName(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -766,7 +773,7 @@ func TestContainerNameValidation(t *testing.T) { {"abc-123_AAA.1", true}, {"\000asdf", false}, } { - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { if !test.Valid { continue @@ -807,7 +814,7 @@ func TestLinkChildContainer(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := 
runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -823,7 +830,7 @@ func TestLinkChildContainer(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err = runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -849,7 +856,7 @@ func TestGetAllChildren(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -865,7 +872,7 @@ func TestGetAllChildren(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err = runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -902,7 +909,7 @@ func TestDestroyWithInitLayer(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&docker.Config{ + container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "") diff --git a/integration/server_test.go b/integration/server_test.go index 45d4930ad7..2b7ef13cbd 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -3,6 +3,7 @@ package docker import ( "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" "strings" "testing" "time" @@ -71,7 +72,7 @@ func TestCreateRm(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := 
runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -118,7 +119,7 @@ func TestCreateNumberHostname(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -130,7 +131,7 @@ func TestCreateNumberUsername(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -142,7 +143,7 @@ func TestCreateRmVolumes(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil) if err != nil { t.Fatal(err) } @@ -202,7 +203,7 @@ func TestCommit(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -224,7 +225,7 @@ func TestRestartKillWait(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() - config, hostConfig, _, err := docker.ParseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -302,7 +303,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, 
t).Nuke() - config, hostConfig, _, err := docker.ParseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -401,7 +402,7 @@ func TestRmi(t *testing.T) { initialImages := getAllImages(eng, t) - config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo", "test"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{unitTestImageID, "echo", "test"}, nil) if err != nil { t.Fatal(err) } @@ -548,7 +549,7 @@ func TestListContainers(t *testing.T) { srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() - config := docker.Config{ + config := runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, @@ -671,7 +672,7 @@ func TestDeleteTagWithExistingContainers(t *testing.T) { } // Create a container from the image - config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } diff --git a/integration/utils_test.go b/integration/utils_test.go index 450cb7527f..947ace11d9 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -1,8 +1,8 @@ package docker import ( - "archive/tar" "bytes" + "code.google.com/p/go/src/pkg/archive/tar" "fmt" "io" "io/ioutil" @@ -16,6 +16,7 @@ import ( "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) @@ -48,7 +49,7 @@ func mkRuntime(f utils.Fataler) *docker.Runtime { return r } -func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) { +func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler, name string) (shortId string) { job := eng.Job("create", name) if err := job.ImportEnv(config); err 
!= nil { f.Fatal(err) @@ -60,7 +61,7 @@ func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils return } -func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) { +func createTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler) (shortId string) { return createNamedTestContainer(eng, config, f, "") } @@ -252,8 +253,8 @@ func readFile(src string, t *testing.T) (content string) { // dynamically replaced by the current test image. // The caller is responsible for destroying the container. // Call t.Fatal() at the first error. -func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *docker.HostConfig, error) { - config, hc, _, err := docker.ParseRun(args, nil) +func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *runconfig.HostConfig, error) { + config, hc, _, err := runconfig.Parse(args, nil) defer func() { if err != nil && t != nil { t.Fatal(err) @@ -318,7 +319,7 @@ func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testi } // FIXME: this is duplicated from graph_test.go in the docker package. 
-func fakeTar() (io.Reader, error) { +func fakeTar() (io.ReadCloser, error) { content := []byte("Hello world!\n") buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -332,7 +333,7 @@ func fakeTar() (io.Reader, error) { tw.Write([]byte(content)) } tw.Close() - return buf, nil + return ioutil.NopCloser(buf), nil } func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table { diff --git a/links.go b/links/links.go similarity index 79% rename from links.go rename to links/links.go index aa1c08374b..68ac98ee07 100644 --- a/links.go +++ b/links/links.go @@ -1,8 +1,9 @@ -package docker +package links import ( "fmt" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" "path" "strings" ) @@ -12,31 +13,28 @@ type Link struct { ChildIP string Name string ChildEnvironment []string - Ports []Port + Ports []nat.Port IsEnabled bool eng *engine.Engine } -func NewLink(parent, child *Container, name string, eng *engine.Engine) (*Link, error) { - if parent.ID == child.ID { - return nil, fmt.Errorf("Cannot link to self: %s == %s", parent.ID, child.ID) - } - if !child.State.IsRunning() { - return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, name) - } +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}, eng *engine.Engine) (*Link, error) { - ports := make([]Port, len(child.Config.ExposedPorts)) - var i int - for p := range child.Config.ExposedPorts { + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { ports[i] = p i++ } l := &Link{ Name: name, - ChildIP: child.NetworkSettings.IPAddress, - ParentIP: parent.NetworkSettings.IPAddress, - ChildEnvironment: child.Config.Env, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, Ports: ports, eng: eng, } @@ -85,14 +83,14 @@ func (l *Link) ToEnv() []string { } // Default port rules -func (l *Link) getDefaultPort() *Port { - var p Port +func (l *Link) getDefaultPort() *nat.Port { + 
var p nat.Port i := len(l.Ports) if i == 0 { return nil } else if i > 1 { - sortPorts(l.Ports, func(ip, jp Port) bool { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { // If the two ports have the same number, tcp takes priority // Sort in desc order return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") diff --git a/links_test.go b/links/links_test.go similarity index 60% rename from links_test.go rename to links/links_test.go index 8a266a9a3d..e66f9bfb78 100644 --- a/links_test.go +++ b/links/links_test.go @@ -1,36 +1,16 @@ -package docker +package links import ( + "github.com/dotcloud/docker/nat" "strings" "testing" ) -func newMockLinkContainer(id string, ip string) *Container { - return &Container{ - Config: &Config{}, - ID: id, - NetworkSettings: &NetworkSettings{ - IPAddress: ip, - }, - } -} - func TestLinkNew(t *testing.T) { - toID := GenerateID() - fromID := GenerateID() + ports := make(nat.PortSet) + ports[nat.Port("6379/tcp")] = struct{}{} - from := newMockLinkContainer(fromID, "172.0.17.2") - from.Config.Env = []string{} - from.State = State{Running: true} - ports := make(map[Port]struct{}) - - ports[Port("6379/tcp")] = struct{}{} - - from.Config.ExposedPorts = ports - - to := newMockLinkContainer(toID, "172.0.17.3") - - link, err := NewLink(to, from, "/db/docker", nil) + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports, nil) if err != nil { t.Fatal(err) } @@ -51,28 +31,17 @@ func TestLinkNew(t *testing.T) { t.Fail() } for _, p := range link.Ports { - if p != Port("6379/tcp") { + if p != nat.Port("6379/tcp") { t.Fail() } } } func TestLinkEnv(t *testing.T) { - toID := GenerateID() - fromID := GenerateID() + ports := make(nat.PortSet) + ports[nat.Port("6379/tcp")] = struct{}{} - from := newMockLinkContainer(fromID, "172.0.17.2") - from.Config.Env = []string{"PASSWORD=gordon"} - from.State = State{Running: true} - ports := make(map[Port]struct{}) - - ports[Port("6379/tcp")] = struct{}{} - - 
from.Config.ExposedPorts = ports - - to := newMockLinkContainer(toID, "172.0.17.3") - - link, err := NewLink(to, from, "/db/docker", nil) + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) if err != nil { t.Fatal(err) } diff --git a/nat/nat.go b/nat/nat.go new file mode 100644 index 0000000000..f3af362f8b --- /dev/null +++ b/nat/nat.go @@ -0,0 +1,133 @@ +package nat + +// nat is a convenience package for docker's manipulation of strings describing +// network ports. + +import ( + "fmt" + "github.com/dotcloud/docker/utils" + "strconv" + "strings" +) + +const ( + PortSpecTemplate = "ip:hostPort:containerPort" + PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort" +) + +type PortBinding struct { + HostIp string + HostPort string +} + +type PortMap map[Port][]PortBinding + +type PortSet map[Port]struct{} + +// 80/tcp +type Port string + +func NewPort(proto, port string) Port { + return Port(fmt.Sprintf("%s/%s", port, proto)) +} + +func ParsePort(rawPort string) (int, error) { + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +func (p Port) Proto() string { + parts := strings.Split(string(p), "/") + if len(parts) == 1 { + return "tcp" + } + return parts[1] +} + +func (p Port) Port() string { + return strings.Split(string(p), "/")[0] +} + +func (p Port) Int() int { + i, err := ParsePort(p.Port()) + if err != nil { + panic(err) + } + return i +} + +// Splits a port in the format of port/proto +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if l == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + return parts[0], parts[1] +} + +// We will receive port specs in the format of ip:public:private/proto and these need to be +// parsed in the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, 
map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + + for _, rawPort := range ports { + proto := "tcp" + + if i := strings.LastIndex(rawPort, "/"); i != -1 { + proto = rawPort[i+1:] + rawPort = rawPort[:i] + } + if !strings.Contains(rawPort, ":") { + rawPort = fmt.Sprintf("::%s", rawPort) + } else if len(strings.Split(rawPort, ":")) == 2 { + rawPort = fmt.Sprintf(":%s", rawPort) + } + + parts, err := utils.PartParser(PortSpecTemplate, rawPort) + if err != nil { + return nil, nil, err + } + + var ( + containerPort = parts["containerPort"] + rawIp = parts["ip"] + hostPort = parts["hostPort"] + ) + + if containerPort == "" { + return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + } + if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil { + return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil { + return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + + port := NewPort(proto, containerPort) + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + + binding := PortBinding{ + HostIp: rawIp, + HostPort: hostPort, + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, binding) + } + return exposedPorts, bindings, nil +} diff --git a/nat/sort.go b/nat/sort.go new file mode 100644 index 0000000000..f36c12f7bb --- /dev/null +++ b/nat/sort.go @@ -0,0 +1,28 @@ +package nat + +import "sort" + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +func Sort(ports []Port, predicate 
func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} diff --git a/sorter_unit_test.go b/nat/sort_test.go similarity index 86% rename from sorter_unit_test.go rename to nat/sort_test.go index 0669feedb3..5d490e321b 100644 --- a/sorter_unit_test.go +++ b/nat/sort_test.go @@ -1,4 +1,4 @@ -package docker +package nat import ( "fmt" @@ -11,7 +11,7 @@ func TestSortUniquePorts(t *testing.T) { Port("22/tcp"), } - sortPorts(ports, func(ip, jp Port) bool { + Sort(ports, func(ip, jp Port) bool { return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") }) @@ -30,7 +30,7 @@ func TestSortSamePortWithDifferentProto(t *testing.T) { Port("6379/udp"), } - sortPorts(ports, func(ip, jp Port) bool { + Sort(ports, func(ip, jp Port) bool { return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") }) diff --git a/networkdriver/lxc/driver.go b/networkdriver/lxc/driver.go index c767fd2208..0bc24b91b6 100644 --- a/networkdriver/lxc/driver.go +++ b/networkdriver/lxc/driver.go @@ -172,7 +172,6 @@ func setupIPTables(addr net.Addr, icc bool) error { iptables.Raw(append([]string{"-D"}, acceptArgs...)...) if !iptables.Exists(dropArgs...) 
{ - utils.Debugf("Disable inter-container communication") if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) @@ -353,6 +352,10 @@ func Release(job *engine.Job) engine.Status { proto string ) + if containerInterface == nil { + return job.Errorf("No network information to release for %s", id) + } + for _, nat := range containerInterface.PortMappings { if err := portmapper.Unmap(nat); err != nil { log.Printf("Unable to unmap port %s: %s", nat, err) @@ -466,6 +469,20 @@ func LinkContainers(job *engine.Job) engine.Status { job.Errorf("Error toggle iptables forward: %s", output) return engine.StatusErr } + + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", childIP, + "--sport", port, + "-d", parentIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + job.Error(err) + return engine.StatusErr + } else if len(output) != 0 { + job.Errorf("Error toggle iptables forward: %s", output) + return engine.StatusErr + } } return engine.StatusOK } diff --git a/networkdriver/network_test.go b/networkdriver/network_test.go index c15f8b1cf5..6224c2dffb 100644 --- a/networkdriver/network_test.go +++ b/networkdriver/network_test.go @@ -105,7 +105,7 @@ func TestNetworkOverlaps(t *testing.T) { //netY starts before and ends at same IP of netX AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) //netY starts before and ends outside of netX - AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) + AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) //netY starts and ends before netX AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) //netX starts and ends before netY diff --git a/networkdriver/portmapper/mapper.go b/networkdriver/portmapper/mapper.go index f052c48143..e29959a245 100644 --- a/networkdriver/portmapper/mapper.go +++ b/networkdriver/portmapper/mapper.go @@ -4,7 +4,7 @@ import ( "errors" "fmt" 
"github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/proxy" + "github.com/dotcloud/docker/pkg/proxy" "net" "sync" ) diff --git a/networkdriver/portmapper/mapper_test.go b/networkdriver/portmapper/mapper_test.go index 05718063e3..4c09f3c651 100644 --- a/networkdriver/portmapper/mapper_test.go +++ b/networkdriver/portmapper/mapper_test.go @@ -2,7 +2,7 @@ package portmapper import ( "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/proxy" + "github.com/dotcloud/docker/pkg/proxy" "net" "testing" ) diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go new file mode 100644 index 0000000000..c350805a7d --- /dev/null +++ b/pkg/listenbuffer/buffer.go @@ -0,0 +1,61 @@ +/* + Package to allow go applications to immediately start + listening on a socket, unix, tcp, udp but hold connections + until the application has booted and is ready to accept them +*/ +package listenbuffer + +import ( + "fmt" + "net" + "time" +) + +// NewListenBuffer returns a listener listening on addr with the protocol. 
It sets the +// timeout to wait on first connection before an error is returned +func NewListenBuffer(proto, addr string, activate chan struct{}, timeout time.Duration) (net.Listener, error) { + wrapped, err := net.Listen(proto, addr) + if err != nil { + return nil, err + } + + return &defaultListener{ + wrapped: wrapped, + activate: activate, + timeout: timeout, + }, nil +} + +type defaultListener struct { + wrapped net.Listener // the real listener to wrap + ready bool // is the listener ready to start accepting connections + activate chan struct{} + timeout time.Duration // how long to wait before we consider this an error +} + +func (l *defaultListener) Close() error { + return l.wrapped.Close() +} + +func (l *defaultListener) Addr() net.Addr { + return l.wrapped.Addr() +} + +func (l *defaultListener) Accept() (net.Conn, error) { + // if the listener has been told it is ready then we can go ahead and + // start returning connections + if l.ready { + return l.wrapped.Accept() + } + + select { + case <-time.After(l.timeout): + // close the connection so any clients are disconnected + l.Close() + return nil, fmt.Errorf("timeout (%s) reached waiting for listener to become ready", l.timeout.String()) + case <-l.activate: + l.ready = true + return l.Accept() + } + panic("unreachable") +} diff --git a/pkg/netlink/MAINTAINERS b/pkg/netlink/MAINTAINERS new file mode 100644 index 0000000000..e53d933d47 --- /dev/null +++ b/pkg/netlink/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume Charmes (@creack) diff --git a/opts.go b/pkg/opts/opts.go similarity index 89% rename from opts.go rename to pkg/opts/opts.go index b1d71c491d..a1b8752bad 100644 --- a/opts.go +++ b/pkg/opts/opts.go @@ -1,8 +1,7 @@ -package docker +package opts import ( "fmt" - "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/utils" "os" "path/filepath" @@ -99,6 +98,16 @@ func ValidateLink(val string) (string, error) { return val, nil } +// FIXME: this is a duplicate of
docker.utils.parseLink. +// it can't be moved to a separate links/ package because +// links depends on Container which is defined in the core. +// +// Links come in the format of +// name:alias +func parseLink(rawLink string) (map[string]string, error) { + return utils.PartParser("name:alias", rawLink) +} + func ValidatePath(val string) (string, error) { var containerPath string @@ -129,14 +138,6 @@ func ValidateEnv(val string) (string, error) { return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil } -func ValidateHost(val string) (string, error) { - host, err := utils.ParseHost(api.DEFAULTHTTPHOST, api.DEFAULTHTTPPORT, api.DEFAULTUNIXSOCKET, val) - if err != nil { - return val, err - } - return host, nil -} - func ValidateIp4Address(val string) (string, error) { re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`) var ns = re.FindSubmatch([]byte(val)) diff --git a/opts_unit_test.go b/pkg/opts/opts_test.go similarity index 97% rename from opts_unit_test.go rename to pkg/opts/opts_test.go index 67b061771b..a5c1fac9ca 100644 --- a/opts_unit_test.go +++ b/pkg/opts/opts_test.go @@ -1,4 +1,4 @@ -package docker +package opts import ( "testing" diff --git a/proxy/MAINTAINERS b/pkg/proxy/MAINTAINERS similarity index 100% rename from proxy/MAINTAINERS rename to pkg/proxy/MAINTAINERS diff --git a/proxy/network_proxy_test.go b/pkg/proxy/network_proxy_test.go similarity index 100% rename from proxy/network_proxy_test.go rename to pkg/proxy/network_proxy_test.go diff --git a/proxy/proxy.go b/pkg/proxy/proxy.go similarity index 100% rename from proxy/proxy.go rename to pkg/proxy/proxy.go diff --git a/proxy/stub_proxy.go b/pkg/proxy/stub_proxy.go similarity index 100% rename from proxy/stub_proxy.go rename to pkg/proxy/stub_proxy.go diff --git a/proxy/tcp_proxy.go b/pkg/proxy/tcp_proxy.go similarity index 77% rename from proxy/tcp_proxy.go rename to pkg/proxy/tcp_proxy.go index e7c460f61d..1aa6d9fd70 100644 --- a/proxy/tcp_proxy.go +++ b/pkg/proxy/tcp_proxy.go @@ -1,7 +1,6 
@@ package proxy import ( - "github.com/dotcloud/docker/utils" "io" "log" "net" @@ -31,7 +30,7 @@ func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) { func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { backend, err := net.DialTCP("tcp", nil, proxy.backendAddr) if err != nil { - log.Printf("Can't forward traffic to backend tcp/%v: %v\n", proxy.backendAddr, err.Error()) + log.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err) client.Close() return } @@ -49,7 +48,7 @@ func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { to.CloseRead() event <- written } - utils.Debugf("Forwarding traffic between tcp/%v and tcp/%v", client.RemoteAddr(), backend.RemoteAddr()) + go broker(client, backend) go broker(backend, client) @@ -65,23 +64,20 @@ func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { for ; i < 2; i++ { transferred += <-event } - goto done + return } } client.Close() backend.Close() -done: - utils.Debugf("%v bytes transferred between tcp/%v and tcp/%v", transferred, client.RemoteAddr(), backend.RemoteAddr()) } func (proxy *TCPProxy) Run() { quit := make(chan bool) defer close(quit) - utils.Debugf("Starting proxy on tcp/%v for tcp/%v", proxy.frontendAddr, proxy.backendAddr) for { client, err := proxy.listener.Accept() if err != nil { - utils.Debugf("Stopping proxy on tcp/%v for tcp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error()) + log.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) return } go proxy.clientLoop(client.(*net.TCPConn), quit) diff --git a/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go similarity index 79% rename from proxy/udp_proxy.go rename to pkg/proxy/udp_proxy.go index 7d34988f70..14f2306a5a 100644 --- a/proxy/udp_proxy.go +++ b/pkg/proxy/udp_proxy.go @@ -2,9 +2,9 @@ package proxy import ( "encoding/binary" - "github.com/dotcloud/docker/utils" "log" "net" + "strings" "sync" 
"syscall" "time" @@ -66,7 +66,6 @@ func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr proxy.connTrackLock.Lock() delete(proxy.connTrackTable, *clientKey) proxy.connTrackLock.Unlock() - utils.Debugf("Done proxying between udp/%v and udp/%v", clientAddr.String(), proxy.backendAddr.String()) proxyConn.Close() }() @@ -92,24 +91,20 @@ func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr return } i += written - utils.Debugf("Forwarded %v/%v bytes to udp/%v", i, read, clientAddr.String()) } } } func (proxy *UDPProxy) Run() { readBuf := make([]byte, UDPBufSize) - utils.Debugf("Starting proxy on udp/%v for udp/%v", proxy.frontendAddr, proxy.backendAddr) for { read, from, err := proxy.listener.ReadFromUDP(readBuf) if err != nil { // NOTE: Apparently ReadFrom doesn't return // ECONNREFUSED like Read do (see comment in // UDPProxy.replyLoop) - if utils.IsClosedError(err) { - utils.Debugf("Stopping proxy on udp/%v for udp/%v (socket was closed)", proxy.frontendAddr, proxy.backendAddr) - } else { - utils.Errorf("Stopping proxy on udp/%v for udp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error()) + if !isClosedError(err) { + log.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) } break } @@ -120,7 +115,7 @@ func (proxy *UDPProxy) Run() { if !hit { proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) if err != nil { - log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err) + log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) continue } proxy.connTrackTable[*fromKey] = proxyConn @@ -130,11 +125,10 @@ func (proxy *UDPProxy) Run() { for i := 0; i != read; { written, err := proxyConn.Write(readBuf[i:read]) if err != nil { - log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err) + log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) break } i += written - 
utils.Debugf("Forwarded %v/%v bytes to udp/%v", i, read, proxy.backendAddr.String()) } } } @@ -150,3 +144,13 @@ func (proxy *UDPProxy) Close() { func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } func (proxy *UDPProxy) BackendAddr() net.Addr { return proxy.backendAddr } + +func isClosedError(err error) bool { + /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. + * See: + * http://golang.org/src/pkg/net/net.go + * https://code.google.com/p/go/issues/detail?id=4337 + * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ + */ + return strings.HasSuffix(err.Error(), "use of closed network connection") +} diff --git a/pkg/systemd/MAINTAINERS b/pkg/systemd/MAINTAINERS new file mode 100644 index 0000000000..51228b368a --- /dev/null +++ b/pkg/systemd/MAINTAINERS @@ -0,0 +1 @@ +Brandon Philips (@philips) diff --git a/pkg/user/MAINTAINERS b/pkg/user/MAINTAINERS new file mode 100644 index 0000000000..18e05a3070 --- /dev/null +++ b/pkg/user/MAINTAINERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/pkg/user/user.go b/pkg/user/user.go new file mode 100644 index 0000000000..1672f7e679 --- /dev/null +++ b/pkg/user/user.go @@ -0,0 +1,241 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", "/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of 
naughty configuration files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, not a runtime one + panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswd() ([]*User, error) { + return ParsePasswdFilter(nil) +} + +func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) { + f, err := os.Open("/etc/passwd") + if err != nil { + return nil, err + } + defer f.Close() + return parsePasswdFile(f, filter) +} + +func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { + var ( + s = bufio.NewScanner(r) + out = []*User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := &User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroup() ([]*Group, error) { + return ParseGroupFilter(nil) +} + +func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) { + f, err := os.Open("/etc/group") + if err != nil { + return nil, err + } + defer f.Close() + return parseGroupFile(f, filter) +} + +func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { + var ( + s = bufio.NewScanner(r) + out = []*Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // 
adm:x:4:root,adm,daemon + p := &Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, and list of supplementary group IDs, if possible. +func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) (int, int, []int, error) { + var ( + uid = defaultUid + gid = defaultGid + suppGids = []int{} + + userArg, groupArg string + ) + + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax + parseLine(userSpec, &userArg, &groupArg) + + users, err := ParsePasswdFilter(func(u *User) bool { + if userArg == "" { + return u.Uid == uid + } + return u.Name == userArg || strconv.Itoa(u.Uid) == userArg + }) + if err != nil && !os.IsNotExist(err) { + if userArg == "" { + userArg = strconv.Itoa(uid) + } + return 0, 0, nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) + } + + haveUser := users != nil && len(users) > 0 + if haveUser { + // if we found any user entries that matched our filter, let's take the first one as "correct" + uid = users[0].Uid + gid = users[0].Gid + } else if userArg != "" { + // we asked for a user but didn't find them... 
let's check to see if we wanted a numeric user + uid, err = strconv.Atoi(userArg) + if err != nil { + // not numeric - we have to bail + return 0, 0, nil, fmt.Errorf("Unable to find user %v", userArg) + } + + // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit + } + + if groupArg != "" || (haveUser && users[0].Name != "") { + groups, err := ParseGroupFilter(func(g *Group) bool { + if groupArg != "" { + return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg + } + for _, u := range g.List { + if u == users[0].Name { + return true + } + } + return false + }) + if err != nil && !os.IsNotExist(err) { + return 0, 0, nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + } + + haveGroup := groups != nil && len(groups) > 0 + if groupArg != "" { + if haveGroup { + // if we found any group entries that matched our filter, let's take the first one as "correct" + gid = groups[0].Gid + } else { + // we asked for a group but didn't find id... 
let's check to see if we wanted a numeric group + gid, err = strconv.Atoi(groupArg) + if err != nil { + // not numeric - we have to bail + return 0, 0, nil, fmt.Errorf("Unable to find group %v", groupArg) + } + + // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit + } + } else if haveGroup { + suppGids = make([]int, len(groups)) + for i, group := range groups { + suppGids[i] = group.Gid + } + } + } + + return uid, gid, suppGids, nil +} diff --git a/pkg/user/user_test.go b/pkg/user/user_test.go new file mode 100644 index 0000000000..136632c27e --- /dev/null +++ b/pkg/user/user_test.go @@ -0,0 +1,94 @@ +package user + +import ( + "strings" + "testing" +) + +func TestUserParseLine(t *testing.T) { + var ( + a, b string + c []string + d int + ) + + parseLine("", &a, &b) + if a != "" || b != "" { + t.Fatalf("a and b should be empty ('%v', '%v')", a, b) + } + + parseLine("a", &a, &b) + if a != "a" || b != "" { + t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) + } + + parseLine("bad boys:corny cows", &a, &b) + if a != "bad boys" || b != "corny cows" { + t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) + } + + parseLine("", &c) + if len(c) != 0 { + t.Fatalf("c should be empty (%#v)", c) + } + + parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) + if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { + t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("::::::::::", &a, &b, &c) + if a != "" || b != "" || len(c) != 0 { + t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) + } + + parseLine("not a number", &d) + if d != 0 { + t.Fatalf("d should be 0 (%v)", d) + } + + parseLine("b:12:c", &a, &d, &b) + if a != "b" || b != "c" || d != 12 { + t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) + } +} + +func 
TestUserParsePasswd(t *testing.T) { + users, err := parsePasswdFile(strings.NewReader(` +root:x:0:0:root:/root:/bin/bash +adm:x:3:4:adm:/var/adm:/bin/false +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(users) != 3 { + t.Fatalf("Expected 3 users, got %v", len(users)) + } + if users[0].Uid != 0 || users[0].Name != "root" { + t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) + } + if users[1].Uid != 3 || users[1].Name != "adm" { + t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) + } +} + +func TestUserParseGroup(t *testing.T) { + groups, err := parseGroupFile(strings.NewReader(` +root:x:0:root +adm:x:4:root,adm,daemon +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(groups) != 3 { + t.Fatalf("Expected 3 groups, got %v", len(groups)) + } + if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { + t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) + } + if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { + t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) + } +} diff --git a/reflink_copy_linux.go b/reflink_copy_linux.go deleted file mode 100644 index 74a0cb98f7..0000000000 --- a/reflink_copy_linux.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build amd64 - -package docker - -// FIXME: This could be easily rewritten in pure Go - -/* -#include -#include -#include - -// See linux.git/fs/btrfs/ioctl.h -#define BTRFS_IOCTL_MAGIC 0x94 -#define BTRFS_IOC_CLONE _IOW(BTRFS_IOCTL_MAGIC, 9, int) - -int -btrfs_reflink(int fd_out, int fd_in) -{ - int res; - res = ioctl(fd_out, BTRFS_IOC_CLONE, fd_in); - if (res < 0) - return errno; - return 0; -} - -*/ -import "C" - -import ( - "io" - 
"os" - "syscall" -) - -// FIXME: Move this to btrfs package? - -func BtrfsReflink(fd_out, fd_in uintptr) error { - res := C.btrfs_reflink(C.int(fd_out), C.int(fd_in)) - if res != 0 { - return syscall.Errno(res) - } - return nil -} - -func CopyFile(dstFile, srcFile *os.File) error { - err := BtrfsReflink(dstFile.Fd(), srcFile.Fd()) - if err == nil { - return nil - } - - // Fall back to normal copy - // FIXME: Check the return of Copy and compare with dstFile.Stat().Size - _, err = io.Copy(dstFile, srcFile) - return err -} diff --git a/reflink_copy_unsupported.go b/reflink_copy_unsupported.go deleted file mode 100644 index 271ed0178f..0000000000 --- a/reflink_copy_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux !amd64 - -package docker - -import ( - "io" - "os" -) - -func CopyFile(dstFile, srcFile *os.File) error { - // No BTRFS reflink suppport, Fall back to normal copy - - // FIXME: Check the return of Copy and compare with dstFile.Stat().Size - _, err := io.Copy(dstFile, srcFile) - return err -} diff --git a/runconfig/compare.go b/runconfig/compare.go new file mode 100644 index 0000000000..c09f897716 --- /dev/null +++ b/runconfig/compare.go @@ -0,0 +1,67 @@ +package runconfig + +// Compare two Config struct. 
Do not compare the "Image" nor "Hostname" fields +// If OpenStdin is set, then it differs +func Compare(a, b *Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.Memory != b.Memory || + a.MemorySwap != b.MemorySwap || + a.CpuShares != b.CpuShares || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty || + a.VolumesFrom != b.VolumesFrom { + return false + } + if len(a.Cmd) != len(b.Cmd) || + len(a.Dns) != len(b.Dns) || + len(a.Env) != len(b.Env) || + len(a.PortSpecs) != len(b.PortSpecs) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Dns); i++ { + if a.Dns[i] != b.Dns[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for i := 0; i < len(a.PortSpecs); i++ { + if a.PortSpecs[i] != b.PortSpecs[i] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/runconfig/config.go b/runconfig/config.go new file mode 100644 index 0000000000..9faa823a57 --- /dev/null +++ b/runconfig/config.go @@ -0,0 +1,76 @@ +package runconfig + +import ( + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" +) + +// Note: the Config structure should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. 
+type Config struct { + Hostname string + Domainname string + User string + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 // CPU shares (relative weight vs. other containers) + AttachStdin bool + AttachStdout bool + AttachStderr bool + PortSpecs []string // Deprecated - Can be in the format of 8080/tcp + ExposedPorts map[nat.Port]struct{} + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string + Cmd []string + Dns []string + Image string // Name of the image as it was passed by the operator (eg. could be symbolic) + Volumes map[string]struct{} + VolumesFrom string + WorkingDir string + Entrypoint []string + NetworkDisabled bool + OnBuild []string +} + +func ContainerConfigFromJob(job *engine.Job) *Config { + config := &Config{ + Hostname: job.Getenv("Hostname"), + Domainname: job.Getenv("Domainname"), + User: job.Getenv("User"), + Memory: job.GetenvInt64("Memory"), + MemorySwap: job.GetenvInt64("MemorySwap"), + CpuShares: job.GetenvInt64("CpuShares"), + AttachStdin: job.GetenvBool("AttachStdin"), + AttachStdout: job.GetenvBool("AttachStdout"), + AttachStderr: job.GetenvBool("AttachStderr"), + Tty: job.GetenvBool("Tty"), + OpenStdin: job.GetenvBool("OpenStdin"), + StdinOnce: job.GetenvBool("StdinOnce"), + Image: job.Getenv("Image"), + VolumesFrom: job.Getenv("VolumesFrom"), + WorkingDir: job.Getenv("WorkingDir"), + NetworkDisabled: job.GetenvBool("NetworkDisabled"), + } + job.GetenvJson("ExposedPorts", &config.ExposedPorts) + job.GetenvJson("Volumes", &config.Volumes) + if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { + config.PortSpecs = PortSpecs + } + if Env := job.GetenvList("Env"); Env != nil { + config.Env = Env + } + if Cmd := job.GetenvList("Cmd"); Cmd != nil { + config.Cmd = Cmd + } + if Dns := 
job.GetenvList("Dns"); Dns != nil { + config.Dns = Dns + } + if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { + config.Entrypoint = Entrypoint + } + + return config +} diff --git a/config_test.go b/runconfig/config_test.go similarity index 82% rename from config_test.go rename to runconfig/config_test.go index 31c961135a..3ef31491fc 100644 --- a/config_test.go +++ b/runconfig/config_test.go @@ -1,10 +1,11 @@ -package docker +package runconfig import ( + "github.com/dotcloud/docker/nat" "testing" ) -func TestCompareConfig(t *testing.T) { +func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} config1 := Config{ @@ -44,24 +45,24 @@ func TestCompareConfig(t *testing.T) { VolumesFrom: "11111111", Volumes: volumes2, } - if CompareConfig(&config1, &config2) { - t.Fatalf("CompareConfig should return false, Dns are different") + if Compare(&config1, &config2) { + t.Fatalf("Compare should return false, Dns are different") } - if CompareConfig(&config1, &config3) { - t.Fatalf("CompareConfig should return false, PortSpecs are different") + if Compare(&config1, &config3) { + t.Fatalf("Compare should return false, PortSpecs are different") } - if CompareConfig(&config1, &config4) { - t.Fatalf("CompareConfig should return false, VolumesFrom are different") + if Compare(&config1, &config4) { + t.Fatalf("Compare should return false, VolumesFrom are different") } - if CompareConfig(&config1, &config5) { - t.Fatalf("CompareConfig should return false, Volumes are different") + if Compare(&config1, &config5) { + t.Fatalf("Compare should return false, Volumes are different") } - if !CompareConfig(&config1, &config1) { - t.Fatalf("CompareConfig should return true") + if !Compare(&config1, &config1) { + t.Fatalf("Compare should return true") } } -func TestMergeConfig(t *testing.T) { +func TestMerge(t *testing.T) { volumesImage := make(map[string]struct{}) volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = 
struct{}{} @@ -82,7 +83,7 @@ func TestMergeConfig(t *testing.T) { Volumes: volumesUser, } - if err := MergeConfig(configUser, configImage); err != nil { + if err := Merge(configUser, configImage); err != nil { t.Error(err) } @@ -125,7 +126,7 @@ func TestMergeConfig(t *testing.T) { t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) } - ports, _, err := parsePortSpecs([]string{"0000"}) + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) if err != nil { t.Error(err) } @@ -133,7 +134,7 @@ func TestMergeConfig(t *testing.T) { ExposedPorts: ports, } - if err := MergeConfig(configUser, configImage2); err != nil { + if err := Merge(configUser, configImage2); err != nil { t.Error(err) } diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go new file mode 100644 index 0000000000..6c8618ee81 --- /dev/null +++ b/runconfig/hostconfig.go @@ -0,0 +1,39 @@ +package runconfig + +import ( + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" +) + +type HostConfig struct { + Binds []string + ContainerIDFile string + LxcConf []KeyValuePair + Privileged bool + PortBindings nat.PortMap + Links []string + PublishAllPorts bool +} + +type KeyValuePair struct { + Key string + Value string +} + +func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { + hostConfig := &HostConfig{ + ContainerIDFile: job.Getenv("ContainerIDFile"), + Privileged: job.GetenvBool("Privileged"), + PublishAllPorts: job.GetenvBool("PublishAllPorts"), + } + job.GetenvJson("LxcConf", &hostConfig.LxcConf) + job.GetenvJson("PortBindings", &hostConfig.PortBindings) + if Binds := job.GetenvList("Binds"); Binds != nil { + hostConfig.Binds = Binds + } + if Links := job.GetenvList("Links"); Links != nil { + hostConfig.Links = Links + } + + return hostConfig +} diff --git a/runconfig/merge.go b/runconfig/merge.go new file mode 100644 index 0000000000..a8d677baa8 --- /dev/null +++ b/runconfig/merge.go @@ -0,0 +1,119 @@ +package runconfig + +import ( + 
"github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/utils" + "strings" +) + +func Merge(userConf, imageConf *Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if userConf.Memory == 0 { + userConf.Memory = imageConf.Memory + } + if userConf.MemorySwap == 0 { + userConf.MemorySwap = imageConf.MemorySwap + } + if userConf.CpuShares == 0 { + userConf.CpuShares = imageConf.CpuShares + } + if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + ports, _, err := nat.ParsePortSpecs(userConf.PortSpecs) + if err != nil { + return err + } + for port := range ports { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + userConf.PortSpecs = nil + } + if imageConf.PortSpecs != nil && len(imageConf.PortSpecs) > 0 { + // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. 
+ utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(imageConf.PortSpecs) + if err != nil { + return err + } + for port := range ports { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + if !userConf.Tty { + userConf.Tty = imageConf.Tty + } + if !userConf.OpenStdin { + userConf.OpenStdin = imageConf.OpenStdin + } + if !userConf.StdinOnce { + userConf.StdinOnce = imageConf.StdinOnce + } + if userConf.Env == nil || len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if imageEnvKey == userEnvKey { + found = true + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + if userConf.Cmd == nil || len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + } + if userConf.Dns == nil || len(userConf.Dns) == 0 { + userConf.Dns = imageConf.Dns + } else { + //duplicates aren't an issue here + userConf.Dns = append(userConf.Dns, imageConf.Dns...) 
+ } + if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { + userConf.Entrypoint = imageConf.Entrypoint + } + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if userConf.VolumesFrom == "" { + userConf.VolumesFrom = imageConf.VolumesFrom + } + if userConf.Volumes == nil || len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + return nil +} diff --git a/runconfig/parse.go b/runconfig/parse.go new file mode 100644 index 0000000000..fb08c068b2 --- /dev/null +++ b/runconfig/parse.go @@ -0,0 +1,246 @@ +package runconfig + +import ( + "fmt" + "github.com/dotcloud/docker/nat" + flag "github.com/dotcloud/docker/pkg/mflag" + "github.com/dotcloud/docker/pkg/opts" + "github.com/dotcloud/docker/pkg/sysinfo" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "path" + "strings" +) + +var ( + ErrInvalidWorikingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: -rm and -d") +) + +//FIXME Only used in tests +func Parse(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + cmd := flag.NewFlagSet("run", flag.ContinueOnError) + cmd.SetOutput(ioutil.Discard) + cmd.Usage = nil + return parseRun(cmd, args, sysInfo) +} + +// FIXME: this maps the legacy commands.go code. It should be merged with Parse to only expose a single parse function. +func ParseSubcommand(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + return parseRun(cmd, args, sysInfo) +} + +func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + var ( + // FIXME: use utils.ListOpts for attach and volumes? 
+ flAttach = opts.NewListOpts(opts.ValidateAttach) + flVolumes = opts.NewListOpts(opts.ValidatePath) + flLinks = opts.NewListOpts(opts.ValidateLink) + flEnv = opts.NewListOpts(opts.ValidateEnv) + + flPublish opts.ListOpts + flExpose opts.ListOpts + flDns opts.ListOpts + flVolumesFrom opts.ListOpts + flLxcOpts opts.ListOpts + + flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") + flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container") + flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") + flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces") + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty") + flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") + flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image") + flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") + flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: , where unit = b, k, m or g)") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") + flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") + flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + + // For documentation purpose + _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") + _ = cmd.String([]string{"#name", "-name"}, "", "Assign 
a name to the container") + ) + + cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.") + cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)") + cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)") + cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") + + cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) + cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") + cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") + cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + + if err := cmd.Parse(args); err != nil { + return nil, nil, cmd, err + } + + // Check if the kernel supports memory limit cgroup. 
+ if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit { + *flMemoryString = "" + } + + // Validate input params + if *flDetach && flAttach.Len() > 0 { + return nil, nil, cmd, ErrConflictAttachDetach + } + if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { + return nil, nil, cmd, ErrInvalidWorikingDirectory + } + if *flDetach && *flAutoRemove { + return nil, nil, cmd, ErrConflictDetachAutoRemove + } + + // If neither -d or -a are set, attach to everything by default + if flAttach.Len() == 0 && !*flDetach { + if !*flDetach { + flAttach.Set("stdout") + flAttach.Set("stderr") + if *flStdin { + flAttach.Set("stdin") + } + } + } + + var flMemory int64 + if *flMemoryString != "" { + parsedMemory, err := utils.RAMInBytes(*flMemoryString) + if err != nil { + return nil, nil, cmd, err + } + flMemory = parsedMemory + } + + var binds []string + // add any bind targets to the list of container volumes + for bind := range flVolumes.GetMap() { + if arr := strings.Split(bind, ":"); len(arr) > 1 { + if arr[0] == "/" { + return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'") + } + dstDir := arr[1] + flVolumes.Set(dstDir) + binds = append(binds, bind) + flVolumes.Delete(bind) + } else if bind == "/" { + return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'") + } + } + + var ( + parsedArgs = cmd.Args() + runCmd []string + entrypoint []string + image string + ) + if len(parsedArgs) >= 1 { + image = cmd.Arg(0) + } + if len(parsedArgs) > 1 { + runCmd = parsedArgs[1:] + } + if *flEntrypoint != "" { + entrypoint = []string{*flEntrypoint} + } + + lxcConf, err := parseLxcConfOpts(flLxcOpts) + if err != nil { + return nil, nil, cmd, err + } + + var ( + domainname string + hostname = *flHostname + parts = strings.SplitN(hostname, ".", 2) + ) + if len(parts) > 1 { + hostname = parts[0] + domainname = parts[1] + } + + ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) + if err != nil { + return nil, nil, cmd, err + } + + // 
Merge in exposed ports to the map of published ports + for _, e := range flExpose.GetAll() { + if strings.Contains(e, ":") { + return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) + } + p := nat.NewPort(nat.SplitProtoPort(e)) + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + + config := &Config{ + Hostname: hostname, + Domainname: domainname, + PortSpecs: nil, // Deprecated + ExposedPorts: ports, + User: *flUser, + Tty: *flTty, + NetworkDisabled: !*flNetwork, + OpenStdin: *flStdin, + Memory: flMemory, + CpuShares: *flCpuShares, + AttachStdin: flAttach.Get("stdin"), + AttachStdout: flAttach.Get("stdout"), + AttachStderr: flAttach.Get("stderr"), + Env: flEnv.GetAll(), + Cmd: runCmd, + Dns: flDns.GetAll(), + Image: image, + Volumes: flVolumes.GetMap(), + VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), + Entrypoint: entrypoint, + WorkingDir: *flWorkingDir, + } + + hostConfig := &HostConfig{ + Binds: binds, + ContainerIDFile: *flContainerIDFile, + LxcConf: lxcConf, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + } + + if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { + //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") + config.MemorySwap = -1 + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + return config, hostConfig, cmd, nil +} + +func parseLxcConfOpts(opts opts.ListOpts) ([]KeyValuePair, error) { + out := make([]KeyValuePair, opts.Len()) + for i, o := range opts.GetAll() { + k, v, err := parseLxcOpt(o) + if err != nil { + return nil, err + } + out[i] = KeyValuePair{Key: k, Value: v} + } + return out, nil +} + +func parseLxcOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go new file mode 100644 index 0000000000..2b89e88ec3 --- /dev/null +++ b/runconfig/parse_test.go @@ -0,0 +1,22 @@ +package runconfig + +import ( + "testing" +) + +func TestParseLxcConfOpt(t *testing.T) { + opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} + + for _, o := range opts { + k, v, err := parseLxcOpt(o) + if err != nil { + t.FailNow() + } + if k != "lxc.utsname" { + t.Fail() + } + if v != "docker" { + t.Fail() + } + } +} diff --git a/runtime.go b/runtime.go index 7e4ae79b40..eed28f92ab 100644 --- a/runtime.go +++ b/runtime.go @@ -4,9 +4,9 @@ import ( "container/list" "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/execdriver/chroot" "github.com/dotcloud/docker/execdriver/lxc" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" @@ -17,6 +17,7 @@ import ( "github.com/dotcloud/docker/networkdriver/portallocator" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/sysinfo" + 
"github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -133,14 +134,6 @@ func (runtime *Runtime) Register(container *Container) error { return err } - // Get the root filesystem from the driver - basefs, err := runtime.driver.Get(container.ID) - if err != nil { - return fmt.Errorf("Error getting container filesystem %s from driver %s: %s", container.ID, runtime.driver, err) - } - defer runtime.driver.Put(container.ID) - container.basefs = basefs - container.runtime = runtime // Attach to stdout and stderr @@ -336,7 +329,7 @@ func (runtime *Runtime) restore() error { } // Create creates a new container from the given configuration with a given name. -func (runtime *Runtime) Create(config *Config, name string) (*Container, []string, error) { +func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) { // Lookup image img, err := runtime.repositories.LookupImage(config.Image) if err != nil { @@ -354,7 +347,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) } - checkDeprecatedExpose := func(config *Config) bool { + checkDeprecatedExpose := func(config *runconfig.Config) bool { if config != nil { if config.PortSpecs != nil { for _, p := range config.PortSpecs { @@ -373,14 +366,12 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin } if img.Config != nil { - if err := MergeConfig(config, img.Config); err != nil { + if err := runconfig.Merge(config, img.Config); err != nil { return nil, nil, err } } - if len(config.Entrypoint) != 0 && config.Cmd == nil { - config.Cmd = []string{} - } else if config.Cmd == nil || len(config.Cmd) == 0 { + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { return nil, nil, fmt.Errorf("No command specified") } @@ -450,7 +441,7 @@ func (runtime *Runtime) Create(config *Config, name string) 
(*Container, []strin Path: entrypoint, Args: args, //FIXME: de-duplicate from config Config: config, - hostConfig: &HostConfig{}, + hostConfig: &runconfig.HostConfig{}, Image: img.ID, // Always use the resolved image id NetworkSettings: &NetworkSettings{}, Name: name, @@ -527,7 +518,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository -func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *Config) (*Image, error) { +func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*Image, error) { // FIXME: freeze the container before copying it to avoid data corruption? // FIXME: this shouldn't be in commands. if err := container.Mount(); err != nil { @@ -539,6 +530,8 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a if err != nil { return nil, err } + defer rwTar.Close() + // Create a new image from the container's base layers + a new layer from container changes img, err := runtime.graph.Create(rwTar, container, comment, author, config) if err != nil { @@ -688,7 +681,7 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime return nil, err } - localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", VERSION)) + localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) sysInitPath := utils.DockerInitPath(localCopy) if sysInitPath == "" { return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. 
See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.") @@ -710,19 +703,6 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime sysInfo := sysinfo.New(false) - /* - temporarilly disabled. - */ - if false { - var ed execdriver.Driver - if driver := os.Getenv("EXEC_DRIVER"); driver == "lxc" { - ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor) - } else { - ed, err = chroot.NewDriver() - } - if ed != nil { - } - } ed, err := lxc.NewDriver(config.Root, sysInfo.AppArmor) if err != nil { return nil, err @@ -825,7 +805,11 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) { if err != nil { return nil, err } - return EofReader(archive, func() { runtime.driver.Put(container.ID) }), nil + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + runtime.driver.Put(container.ID) + return err + }), nil } func (runtime *Runtime) Run(c *Container, startCallback execdriver.StartCallback) (int, error) { diff --git a/server.go b/server.go index 2942eaeb5b..0ed96fee31 100644 --- a/server.go +++ b/server.go @@ -6,9 +6,11 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -200,8 +202,20 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { } func (srv *Server) Auth(job *engine.Job) engine.Status { - authConfig := &auth.AuthConfig{} + var ( + err error + authConfig = &auth.AuthConfig{} + ) + job.GetenvJson("authConfig", authConfig) + // TODO: this is only done here because auth and registry need to be merged into one pkg + if addr := authConfig.ServerAddress; addr != "" && addr != auth.IndexServerAddress() { + addr, err = 
registry.ExpandAndVerifyRegistryUrl(addr) + if err != nil { + return job.Error(err) + } + authConfig.ServerAddress = addr + } status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) if err != nil { return job.Error(err) @@ -278,6 +292,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status { if err != nil { return job.Errorf("%s: %s", name, err) } + defer data.Close() // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(job.Stdout, data); err != nil { @@ -347,6 +362,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } + defer fs.Close() if _, err := io.Copy(job.Stdout, fs); err != nil { return job.Error(err) @@ -386,6 +402,7 @@ func (srv *Server) exportImage(image *Image, tempdir string) error { if err != nil { return err } + defer fs.Close() fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) if err != nil { @@ -422,14 +439,14 @@ func (srv *Server) Build(job *engine.Job) engine.Status { authConfig = &auth.AuthConfig{} configFile = &auth.ConfigFile{} tag string - context io.Reader + context io.ReadCloser ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("configFile", configFile) repoName, tag = utils.ParseRepositoryTag(repoName) if remoteURL == "" { - context = job.Stdin + context = ioutil.NopCloser(job.Stdin) } else if utils.IsGIT(remoteURL) { if !strings.HasPrefix(remoteURL, "git://") { remoteURL = "https://" + remoteURL @@ -440,7 +457,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { } defer os.RemoveAll(root) - if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil { + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { return job.Errorf("Error trying to use git: %s (%s)", err, output) } @@ -459,12 +476,13 @@ func (srv *Server) Build(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - c, 
err := MkBuildContext(string(dockerFile), nil) + c, err := archive.Generate("Dockerfile", string(dockerFile)) if err != nil { return job.Error(err) } context = c } + defer context.Close() sf := utils.NewStreamFormatter(job.GetenvBool("json")) b := NewBuildFile(srv, @@ -649,7 +667,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status { } defer file.Body.Close() - config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) + config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) if err != nil { return job.Error(err) } @@ -815,7 +833,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { v.SetInt("NEventsListener", len(srv.events)) v.Set("KernelVersion", kernelVersion) v.Set("IndexServerAddress", auth.IndexServerAddress()) - v.Set("InitSha1", utils.INITSHA1) + v.Set("InitSha1", dockerversion.INITSHA1) v.Set("InitPath", initPath) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) @@ -1030,7 +1048,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if container == nil { return job.Errorf("No such container: %s", name) } - var config Config + var config runconfig.Config if err := job.GetenvJson("config", &config); err != nil { return job.Error(err) } @@ -1561,7 +1579,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { repo = job.Args[1] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) - archive io.Reader + archive archive.ArchiveReader resp *http.Response ) if len(job.Args) > 2 { @@ -1587,7 +1605,9 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") + progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") + defer progressReader.Close() + archive = progressReader } img, 
err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil) if err != nil { @@ -1610,7 +1630,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { } else if len(job.Args) > 1 { return job.Errorf("Usage: %s", job.Name) } - config := ContainerConfigFromJob(job) + config := runconfig.ContainerConfigFromJob(job) if config.Memory != 0 && config.Memory < 524288 { return job.Errorf("Minimum memory limit allowed is 512k") } @@ -1627,7 +1647,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { return job.Error(err) } if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - job.Errorf("WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v\n", defaultDns) + job.Errorf("WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", defaultDns) config.Dns = defaultDns } @@ -1976,7 +1996,7 @@ func (srv *Server) canDeleteImage(imgID string) error { return nil } -func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) { +func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Image, error) { // Retrieve all images images, err := srv.runtime.graph.Map() @@ -2000,7 +2020,7 @@ func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) if err != nil { return nil, err } - if CompareConfig(&img.ContainerConfig, config) { + if runconfig.Compare(&img.ContainerConfig, config) { if match == nil || match.Created.Before(img.Created) { match = img } @@ -2009,7 +2029,7 @@ func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) return match, nil } -func (srv *Server) RegisterLinks(container *Container, hostConfig *HostConfig) error { +func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { runtime := srv.runtime if hostConfig != nil && 
hostConfig.Links != nil { @@ -2053,7 +2073,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { } // If no environment was set, then no hostconfig was passed. if len(job.Environ()) > 0 { - hostConfig := ContainerHostConfigFromJob(job) + hostConfig := runconfig.ContainerHostConfigFromJob(job) // Validate the HostConfig binds. Make sure that: // 1) the source of a bind mount isn't / // The bind mount "/:/foo" isn't allowed. @@ -2297,7 +2317,7 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status { } object = &struct { *Container - HostConfig *HostConfig + HostConfig *runconfig.HostConfig }{container, container.hostConfig} default: return job.Errorf("Unknown kind: %s", kind) @@ -2327,6 +2347,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } + defer data.Close() if _, err := io.Copy(job.Stdout, data); err != nil { return job.Error(err) diff --git a/sorter.go b/sorter.go index 9b3e1a9486..b49ac58c24 100644 --- a/sorter.go +++ b/sorter.go @@ -2,31 +2,6 @@ package docker import "sort" -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -func sortPorts(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - type containerSorter struct { containers []*Container by func(i, j *Container) bool diff --git a/sysinit/sysinit.go b/sysinit/sysinit.go index dcf0eddf56..b02cf027aa 100644 --- a/sysinit/sysinit.go +++ b/sysinit/sysinit.go @@ -27,18 +27,12 @@ func setupEnv(args *execdriver.InitArgs) { func executeProgram(args *execdriver.InitArgs) error { setupEnv(args) + dockerInitFct, err := execdriver.GetInitFunc(args.Driver) if err != nil { panic(err) } return 
dockerInitFct(args) - - if args.Driver == "lxc" { - // Will never reach - } else if args.Driver == "chroot" { - } - - return nil } // Sys Init code diff --git a/tags_unit_test.go b/tags_unit_test.go index 1341b989fe..b6236280a8 100644 --- a/tags_unit_test.go +++ b/tags_unit_test.go @@ -31,6 +31,8 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { t.Fatal(err) } img := &Image{ID: testImageID} + // FIXME: this fails on Darwin with: + // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied if err := graph.Register(nil, archive, img); err != nil { t.Fatal(err) } diff --git a/utils.go b/utils.go index e3ba08d51c..ef666b0de1 100644 --- a/utils.go +++ b/utils.go @@ -1,320 +1,33 @@ package docker import ( - "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/namesgenerator" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" - "io" - "strconv" - "strings" - "sync/atomic" ) type Change struct { archive.Change } -// Compare two Config struct. 
Do not compare the "Image" nor "Hostname" fields -// If OpenStdin is set, then it differs -func CompareConfig(a, b *Config) bool { - if a == nil || b == nil || - a.OpenStdin || b.OpenStdin { - return false - } - if a.AttachStdout != b.AttachStdout || - a.AttachStderr != b.AttachStderr || - a.User != b.User || - a.Memory != b.Memory || - a.MemorySwap != b.MemorySwap || - a.CpuShares != b.CpuShares || - a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty || - a.VolumesFrom != b.VolumesFrom { - return false - } - if len(a.Cmd) != len(b.Cmd) || - len(a.Dns) != len(b.Dns) || - len(a.Env) != len(b.Env) || - len(a.PortSpecs) != len(b.PortSpecs) || - len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || - len(a.Volumes) != len(b.Volumes) { - return false - } - - for i := 0; i < len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { - return false - } - } - for i := 0; i < len(a.Dns); i++ { - if a.Dns[i] != b.Dns[i] { - return false - } - } - for i := 0; i < len(a.Env); i++ { - if a.Env[i] != b.Env[i] { - return false - } - } - for i := 0; i < len(a.PortSpecs); i++ { - if a.PortSpecs[i] != b.PortSpecs[i] { - return false - } - } - for k := range a.ExposedPorts { - if _, exists := b.ExposedPorts[k]; !exists { - return false - } - } - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { - return false - } - } - for key := range a.Volumes { - if _, exists := b.Volumes[key]; !exists { - return false - } - } - return true -} - -func MergeConfig(userConf, imageConf *Config) error { - if userConf.User == "" { - userConf.User = imageConf.User - } - if userConf.Memory == 0 { - userConf.Memory = imageConf.Memory - } - if userConf.MemorySwap == 0 { - userConf.MemorySwap = imageConf.MemorySwap - } - if userConf.CpuShares == 0 { - userConf.CpuShares = imageConf.CpuShares - } - if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 { - userConf.ExposedPorts = imageConf.ExposedPorts - } else if imageConf.ExposedPorts != nil { 
- if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(map[Port]struct{}) - } - for port := range imageConf.ExposedPorts { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - - if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 { - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(map[Port]struct{}) - } - ports, _, err := parsePortSpecs(userConf.PortSpecs) - if err != nil { - return err - } - for port := range ports { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - userConf.PortSpecs = nil - } - if imageConf.PortSpecs != nil && len(imageConf.PortSpecs) > 0 { - utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) - if userConf.ExposedPorts == nil { - userConf.ExposedPorts = make(map[Port]struct{}) - } - - ports, _, err := parsePortSpecs(imageConf.PortSpecs) - if err != nil { - return err - } - for port := range ports { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - if !userConf.Tty { - userConf.Tty = imageConf.Tty - } - if !userConf.OpenStdin { - userConf.OpenStdin = imageConf.OpenStdin - } - if !userConf.StdinOnce { - userConf.StdinOnce = imageConf.StdinOnce - } - if userConf.Env == nil || len(userConf.Env) == 0 { - userConf.Env = imageConf.Env - } else { - for _, imageEnv := range imageConf.Env { - found := false - imageEnvKey := strings.Split(imageEnv, "=")[0] - for _, userEnv := range userConf.Env { - userEnvKey := strings.Split(userEnv, "=")[0] - if imageEnvKey == userEnvKey { - found = true - } - } - if !found { - userConf.Env = append(userConf.Env, imageEnv) - } - } - } - if userConf.Cmd == nil || len(userConf.Cmd) == 0 { - userConf.Cmd = imageConf.Cmd - } - if userConf.Dns == nil || len(userConf.Dns) == 0 { - userConf.Dns = imageConf.Dns - } else { - //duplicates aren't an issue here - 
userConf.Dns = append(userConf.Dns, imageConf.Dns...) - } - if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { - userConf.Entrypoint = imageConf.Entrypoint - } - if userConf.WorkingDir == "" { - userConf.WorkingDir = imageConf.WorkingDir - } - if userConf.VolumesFrom == "" { - userConf.VolumesFrom = imageConf.VolumesFrom - } - if userConf.Volumes == nil || len(userConf.Volumes) == 0 { - userConf.Volumes = imageConf.Volumes - } else { - for k, v := range imageConf.Volumes { - userConf.Volumes[k] = v - } - } - return nil -} - -func parseLxcConfOpts(opts ListOpts) ([]KeyValuePair, error) { - out := make([]KeyValuePair, opts.Len()) - for i, o := range opts.GetAll() { - k, v, err := parseLxcOpt(o) - if err != nil { - return nil, err - } - out[i] = KeyValuePair{Key: k, Value: v} - } - return out, nil -} - -func parseLxcOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -// FIXME: network related stuff (including parsing) should be grouped in network file -const ( - PortSpecTemplate = "ip:hostPort:containerPort" - PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort" -) - -// We will receive port specs in the format of ip:public:private/proto and these need to be -// parsed in the internal types -func parsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - - for _, rawPort := range ports { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = fmt.Sprintf(":%s", rawPort) 
- } - - parts, err := utils.PartParser(PortSpecTemplate, rawPort) - if err != nil { - return nil, nil, err - } - - var ( - containerPort = parts["containerPort"] - rawIp = parts["ip"] - hostPort = parts["hostPort"] - ) - - if containerPort == "" { - return nil, nil, fmt.Errorf("No port specified: %s", rawPort) - } - if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil { - return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil { - return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - - port := NewPort(proto, containerPort) - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - - binding := PortBinding{ - HostIp: rawIp, - HostPort: hostPort, - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, binding) - } - return exposedPorts, bindings, nil -} - -// Splits a port in the format of port/proto -func splitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if l == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - return parts[0], parts[1] -} - -func parsePort(rawPort string) (int, error) { - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -func migratePortMappings(config *Config, hostConfig *HostConfig) error { +func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { if config.PortSpecs != nil { - ports, bindings, err := parsePortSpecs(config.PortSpecs) + ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) if err != nil { return err } config.PortSpecs = nil if len(bindings) > 0 { if hostConfig == nil { - hostConfig = &HostConfig{} + hostConfig = &runconfig.HostConfig{} } hostConfig.PortBindings = bindings } if config.ExposedPorts == nil { - config.ExposedPorts = 
make(map[Port]struct{}, len(ports)) + config.ExposedPorts = make(nat.PortSet, len(ports)) } for k, v := range ports { config.ExposedPorts[k] = v @@ -341,28 +54,3 @@ func (c *checker) Exists(name string) bool { func generateRandomName(runtime *Runtime) (string, error) { return namesgenerator.GenerateRandomName(&checker{runtime}) } - -// Read an io.Reader and call a function when it returns EOF -func EofReader(r io.Reader, callback func()) *eofReader { - return &eofReader{ - Reader: r, - callback: callback, - } -} - -type eofReader struct { - io.Reader - gotEOF int32 - callback func() -} - -func (r *eofReader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - if err == io.EOF { - // Use atomics to make the gotEOF check threadsafe - if atomic.CompareAndSwapInt32(&r.gotEOF, 0, 1) { - r.callback() - } - } - return -} diff --git a/utils/tarsum.go b/utils/tarsum.go index 786196b6b4..ddeecfb450 100644 --- a/utils/tarsum.go +++ b/utils/tarsum.go @@ -1,8 +1,8 @@ package utils import ( - "archive/tar" "bytes" + "code.google.com/p/go/src/pkg/archive/tar" "compress/gzip" "crypto/sha256" "encoding/hex" diff --git a/utils/utils.go b/utils/utils.go index 542ab49702..1aba80ff41 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/dotcloud/docker/dockerversion" "index/suffixarray" "io" "io/ioutil" @@ -23,12 +24,6 @@ import ( "time" ) -var ( - IAMSTATIC bool // whether or not Docker itself was compiled statically via ./hack/make.sh binary - INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary - INITPATH string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch) -) - // A common interface to access the Fatal method of // both testing.B and testing.T. 
type Fataler interface { @@ -201,7 +196,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and if target == "" { return false } - if IAMSTATIC { + if dockerversion.IAMSTATIC { if selfPath == "" { return false } @@ -218,7 +213,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and } return os.SameFile(targetFileInfo, selfPathFileInfo) } - return INITSHA1 != "" && dockerInitSha1(target) == INITSHA1 + return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 } // Figure out the path of our dockerinit (which may be SelfPath()) @@ -230,7 +225,7 @@ func DockerInitPath(localCopy string) string { } var possibleInits = []string{ localCopy, - INITPATH, + dockerversion.INITPATH, filepath.Join(filepath.Dir(selfPath), "dockerinit"), // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." 
@@ -750,7 +745,7 @@ func GetNameserversAsCIDR(resolvConf []byte) []string { } // FIXME: Change this not to receive default value as parameter -func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (string, error) { +func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { var ( proto string host string @@ -758,6 +753,8 @@ func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (s ) addr = strings.TrimSpace(addr) switch { + case addr == "tcp://": + return "", fmt.Errorf("Invalid bind address format: %s", addr) case strings.HasPrefix(addr, "unix://"): proto = "unix" addr = strings.TrimPrefix(addr, "unix://") @@ -793,12 +790,13 @@ func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (s if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { port = p } else { - port = defaultPort + return "", fmt.Errorf("Invalid bind address format: %s", addr) } + } else if proto == "tcp" && !strings.Contains(addr, ":") { + return "", fmt.Errorf("Invalid bind address format: %s", addr) } else { host = addr - port = defaultPort } if proto == "unix" { return fmt.Sprintf("%s://%s", proto, host), nil @@ -836,37 +834,6 @@ func ParseRepositoryTag(repos string) (string, string) { return repos, "" } -type User struct { - Uid string // user id - Gid string // primary group id - Username string - Name string - HomeDir string -} - -// UserLookup check if the given username or uid is present in /etc/passwd -// and returns the user struct. -// If the username is not found, an error is returned. 
-func UserLookup(uid string) (*User, error) { - file, err := ioutil.ReadFile("/etc/passwd") - if err != nil { - return nil, err - } - for _, line := range strings.Split(string(file), "\n") { - data := strings.Split(line, ":") - if len(data) > 5 && (data[0] == uid || data[2] == uid) { - return &User{ - Uid: data[2], - Gid: data[3], - Username: data[0], - Name: data[4], - HomeDir: data[5], - }, nil - } - } - return nil, fmt.Errorf("User not found in /etc/passwd") -} - // An StatusError reports an unsuccessful exit by a command. type StatusError struct { Status string @@ -912,16 +879,6 @@ func ShellQuoteArguments(args []string) string { return buf.String() } -func IsClosedError(err error) bool { - /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. - * See: - * http://golang.org/src/pkg/net/net.go - * https://code.google.com/p/go/issues/detail?id=4337 - * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ - */ - return strings.HasSuffix(err.Error(), "use of closed network connection") -} - func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( diff --git a/utils/utils_test.go b/utils/utils_test.go index b0a5acb170..7e63a45cf7 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -301,34 +301,36 @@ func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes func TestParseHost(t *testing.T) { var ( defaultHttpHost = "127.0.0.1" - defaultHttpPort = 4243 defaultUnix = "/var/run/docker.sock" ) - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.0"); err != nil || addr != "tcp://0.0.0.0:4243" { - t.Errorf("0.0.0.0 -> expected tcp://0.0.0.0:4243, got %s", addr) + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { + t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.1:5555"); err != 
nil || addr != "tcp://0.0.0.1:5555" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { + t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); 
err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1"); err == nil { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) } - if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1:4243"); err == nil { + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:4243"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) } } diff --git a/utils_test.go b/utils_test.go index 4b8cfba39f..6917007575 100644 --- a/utils_test.go +++ b/utils_test.go @@ -1,8 +1,8 @@ package docker import ( - "archive/tar" "bytes" + "code.google.com/p/go/src/pkg/archive/tar" "io" ) diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go new file mode 100644 index 0000000000..e8b973c1fa --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go @@ -0,0 +1,304 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. 
+// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "time" +) + +const ( + blockSize = 512 + + // Types + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + Xattrs map[string]string +} + +// File name constants from the tar spec. +const ( + fileNameSize = 100 // Maximum number of bytes in a standard tar name. + fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. +) + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. 
+type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. + m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeLink, TypeSymlink: + // hard link, symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. 
+var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. 
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. 
+ unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go new file mode 100644 index 0000000000..351eaa0e6c --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go @@ -0,0 +1,79 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar_test + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "os" +) + +func Example() { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new tar archive. + tw := tar.NewWriter(buf) + + // Add some files to the archive. + var files = []struct { + Name, Body string + }{ + {"readme.txt", "This archive contains some text files."}, + {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, + {"todo.txt", "Get animal handling licence."}, + } + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + log.Fatalln(err) + } + if _, err := tw.Write([]byte(file.Body)); err != nil { + log.Fatalln(err) + } + } + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Fatalln(err) + } + + // Open the tar archive for reading. 
+ r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Contents of %s:\n", hdr.Name) + if _, err := io.Copy(os.Stdout, tr); err != nil { + log.Fatalln(err) + } + fmt.Println() + } + + // Output: + // Contents of readme.txt: + // This archive contains some text files. + // Contents of gopher.txt: + // Gopher names: + // George + // Geoffrey + // Gonzo + // Contents of todo.txt: + // Get animal handling licence. +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go new file mode 100644 index 0000000000..7cb6e649c7 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go @@ -0,0 +1,402 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +const maxNanoSecondIntSize = 9 + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + err error + nb int64 // number of unread bytes for current file entry + pad int64 // amount of padding (ignored) after current file entry +} + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. 
+func (tr *Reader) Next() (*Header, error) { + var hdr *Header + if tr.err == nil { + tr.skipUnread() + } + if tr.err != nil { + return hdr, tr.err + } + hdr = tr.readHeader() + if hdr == nil { + return hdr, tr.err + } + // Check for PAX/GNU header. + switch hdr.Typeflag { + case TypeXHeader: + // PAX extended header + headers, err := parsePAX(tr) + if err != nil { + return nil, err + } + // We actually read the whole file, + // but this skips alignment padding + tr.skipUnread() + hdr = tr.readHeader() + mergePAX(hdr, headers) + return hdr, nil + case TypeGNULongName: + // We have a GNU long name header. Its contents are the real file name. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + hdr, err := tr.Next() + hdr.Name = cString(realname) + return hdr, err + case TypeGNULongLink: + // We have a GNU long link header. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + hdr, err := tr.Next() + hdr.Linkname = cString(realname) + return hdr, err + } + return hdr, tr.err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. 
+func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. +func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. 
+ // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(buf) > 0 { + // or the header was empty to start with. + var sp int + // The size field ends at the first space. + sp = bytes.IndexByte(buf, ' ') + if sp == -1 { + return nil, ErrHeader + } + // Parse the first token as a decimal integer. + n, err := strconv.ParseInt(string(buf[:sp]), 10, 0) + if err != nil { + return nil, ErrHeader + } + // Extract everything between the decimal and the n -1 on the + // beginning to to eat the ' ', -1 on the end to skip the newline. + var record []byte + record, buf = buf[sp+1:n-1], buf[n:] + // The first equals is guaranteed to mark the end of the key. + // Everything else is value. + eq := bytes.IndexByte(record, '=') + if eq == -1 { + return nil, ErrHeader + } + key, value := record[:eq], record[eq+1:] + headers[string(key)] = string(value) + } + return headers, nil +} + +// cString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func cString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +func (tr *Reader) octal(b []byte) int64 { + // Check for binary format first. 
+ if len(b) > 0 && b[0]&0x80 != 0 { + var x int64 + for i, c := range b { + if i == 0 { + c &= 0x7f // ignore signal bit in first byte + } + x = x<<8 | int64(c) + } + return x + } + + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, err := strconv.ParseUint(cString(b), 8, 64) + if err != nil { + tr.err = err + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding. +func (tr *Reader) skipUnread() { + nr := tr.nb + tr.pad // number of bytes to skip + tr.nb, tr.pad = 0, 0 + if sr, ok := tr.r.(io.Seeker); ok { + if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { + return + } + } + _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr) +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + given := tr.octal(header[148:156]) + unsigned, signed := checksum(header) + return given == unsigned || given == signed +} + +func (tr *Reader) readHeader() *Header { + header := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil + } + + // Two blocks of zero bytes marks the end of the archive. 
+ if bytes.Equal(header, zeroBlock[0:blockSize]) { + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + hdr := new(Header) + s := slicer(header) + + hdr.Name = cString(s.next(100)) + hdr.Mode = tr.octal(s.next(8)) + hdr.Uid = int(tr.octal(s.next(8))) + hdr.Gid = int(tr.octal(s.next(8))) + hdr.Size = tr.octal(s.next(12)) + hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = cString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. + var format string + switch magic { + case "ustar\x0000": // POSIX tar (1003.1-1988) + if string(header[508:512]) == "tar\x00" { + format = "star" + } else { + format = "posix" + } + case "ustar \x00": // old GNU tar + format = "gnu" + } + + switch format { + case "posix", "gnu", "star": + hdr.Uname = cString(s.next(32)) + hdr.Gname = cString(s.next(32)) + devmajor := s.next(8) + devminor := s.next(8) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = tr.octal(devmajor) + hdr.Devminor = tr.octal(devminor) + } + var prefix string + switch format { + case "posix", "gnu": + prefix = cString(s.next(155)) + case "star": + prefix = cString(s.next(131)) + hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0) + hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + + if tr.err != nil { + tr.err = ErrHeader + return nil + } + + // Maximum value of hdr.Size is 64 GB (12 octal digits), + // so there's 
no risk of int64 overflowing. + tr.nb = int64(hdr.Size) + tr.pad = -tr.nb & (blockSize - 1) // blockSize is a power of two + + return hdr +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.nb == 0 { + // file consumed + return 0, io.EOF + } + + if int64(len(b)) > tr.nb { + b = b[0:tr.nb] + } + n, err = tr.r.Read(b) + tr.nb -= int64(n) + + if err == io.EOF && tr.nb > 0 { + err = io.ErrUnexpectedEOF + } + tr.err = err + return +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go new file mode 100644 index 0000000000..f84dbebe98 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go @@ -0,0 +1,425 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type untarTest struct { + file string + headers []*Header + cksums []string +} + +var gnuTarTest = &untarTest{ + file: "testdata/gnu.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244428340, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244436044, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + }, + cksums: []string{ + "e38b27eaccb4391bdec553a7f3ae6b2f", + "c65bd2e50a56a2138bf1716f2fd56fe9", + }, +} + +var untarTests = []*untarTest{ + gnuTarTest, + { + file: "testdata/star.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + }, + }, + { + file: "testdata/v7.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + { + Name: "small2.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + }, + }, + { + file: "testdata/pax.tar", + headers: []*Header{ + { + Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + Mode: 0664, + Uid: 1000, + 
Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 7, + ModTime: time.Unix(1350244992, 23960108), + ChangeTime: time.Unix(1350244992, 23960108), + AccessTime: time.Unix(1350244992, 23960108), + Typeflag: TypeReg, + }, + { + Name: "a/b", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 0, + ModTime: time.Unix(1350266320, 910238425), + ChangeTime: time.Unix(1350266320, 910238425), + AccessTime: time.Unix(1350266320, 910238425), + Typeflag: TypeSymlink, + Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + }, + }, + }, + { + file: "testdata/nil-uid.tar", // golang.org/issue/5290 + headers: []*Header{ + { + Name: "P1050238.JPG.log", + Mode: 0664, + Uid: 0, + Gid: 0, + Size: 14, + ModTime: time.Unix(1365454838, 0), + Typeflag: TypeReg, + Linkname: "", + Uname: "eyefi", + Gname: "eyefi", + Devmajor: 0, + Devminor: 0, + }, + }, + }, + { + file: "testdata/xattrs.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 5, + ModTime: time.Unix(1386065770, 448252320), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1389782956, 794414986), + Xattrs: map[string]string{ + "user.key": "value", + "user.key2": "value2", + // Interestingly, selinux encodes the terminating null inside the xattr + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + { + Name: "small2.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 11, + ModTime: time.Unix(1386065770, 449252304), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1386065770, 449252304), + Xattrs: map[string]string{ + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + }, + }, +} + +func TestReader(t *testing.T) 
{ +testLoop: + for i, test := range untarTests { + f, err := os.Open(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + defer f.Close() + tr := NewReader(f) + for j, header := range test.headers { + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err) + f.Close() + continue testLoop + } + if !reflect.DeepEqual(*hdr, *header) { + t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v", + i, j, *hdr, *header) + } + } + hdr, err := tr.Next() + if err == io.EOF { + continue testLoop + } + if hdr != nil || err != nil { + t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err) + } + } +} + +func TestPartialRead(t *testing.T) { + f, err := os.Open("testdata/gnu.tar") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + // Read the first four bytes; Next() should skip the last byte. + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get first file: %v", err) + } + buf := make([]byte, 4) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Kilt"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } + + // Second file + hdr, err = tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get second file: %v", err) + } + buf = make([]byte, 6) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Google"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } +} + +func TestIncrementalRead(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + headers := test.headers + cksums := test.cksums + nread := 0 + + // loop over all files + for ; ; 
nread++ { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + + // check the header + if !reflect.DeepEqual(*hdr, *headers[nread]) { + t.Errorf("Incorrect header:\nhave %+v\nwant %+v", + *hdr, headers[nread]) + } + + // read file contents in little chunks EOF, + // checksumming all the way + h := md5.New() + rdbuf := make([]uint8, 8) + for { + nr, err := tr.Read(rdbuf) + if err == io.EOF { + break + } + if err != nil { + t.Errorf("Read: unexpected error %v\n", err) + break + } + h.Write(rdbuf[0:nr]) + } + // verify checksum + have := fmt.Sprintf("%x", h.Sum(nil)) + want := cksums[nread] + if want != have { + t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) + } + } + if nread != len(headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) + } +} + +func TestNonSeekable(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + type readerOnly struct { + io.Reader + } + tr := NewReader(readerOnly{f}) + nread := 0 + + for ; ; nread++ { + _, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + } + + if nread != len(test.headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread) + } +} + +func TestParsePAXHeader(t *testing.T) { + paxTests := [][3]string{ + {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths + {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length + {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}} + for _, test := range paxTests { + key, expected, raw := test[0], test[1], test[2] + reader := bytes.NewReader([]byte(raw)) + headers, err := parsePAX(reader) + if err != nil { + t.Errorf("Couldn't parse correctly formatted headers: %v", err) + continue + } + if 
strings.EqualFold(headers[key], expected) { + t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected) + continue + } + trailer := make([]byte, 100) + n, err := reader.Read(trailer) + if err != io.EOF || n != 0 { + t.Error("Buffer wasn't consumed") + } + } + badHeader := bytes.NewReader([]byte("3 somelongkey=")) + if _, err := parsePAX(badHeader); err != ErrHeader { + t.Fatal("Unexpected success when parsing bad header") + } +} + +func TestParsePAXTime(t *testing.T) { + // Some valid PAX time values + timestamps := map[string]time.Time{ + "1350244992.023960108": time.Unix(1350244992, 23960108), // The commoon case + "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value + "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value + "1350244992": time.Unix(1350244992, 0), // Low precision value + } + for input, expected := range timestamps { + ts, err := parsePAXTime(input) + if err != nil { + t.Fatal(err) + } + if !ts.Equal(expected) { + t.Fatalf("Time parsing failure %s %s", ts, expected) + } + } +} + +func TestMergePAX(t *testing.T) { + hdr := new(Header) + // Test a string, integer, and time based value. + headers := map[string]string{ + "path": "a/b/c", + "uid": "1000", + "mtime": "1350244992.023960108", + } + err := mergePAX(hdr, headers) + if err != nil { + t.Fatal(err) + } + want := &Header{ + Name: "a/b/c", + Uid: 1000, + ModTime: time.Unix(1350244992, 23960108), + } + if !reflect.DeepEqual(hdr, want) { + t.Errorf("incorrect merge: got %+v, want %+v", hdr, want) + } +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go new file mode 100644 index 0000000000..cf9cc79c59 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go new file mode 100644 index 0000000000..6f17dbe307 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go new file mode 100644 index 0000000000..cb843db4cf --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. 
os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go new file mode 100644 index 0000000000..ed333f3ea4 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go @@ -0,0 +1,284 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + "time" +) + +func TestFileInfoHeader(t *testing.T) { + fi, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "small.txt"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(5); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } + // FileInfoHeader should error when passing nil FileInfo + if _, err := FileInfoHeader(nil, ""); err == nil { + t.Fatalf("Expected error when passing nil to FileInfoHeader") + } +} + +func TestFileInfoHeaderDir(t *testing.T) { + fi, err := os.Stat("testdata") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "testdata/"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + // Ignoring c_ISGID for golang.org/issue/4867 + if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e { + t.Errorf("Mode = 
%#o; want %#o", g, e) + } + if g, e := h.Size, int64(0); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } +} + +func TestFileInfoHeaderSymlink(t *testing.T) { + h, err := FileInfoHeader(symlink{}, "some-target") + if err != nil { + t.Fatal(err) + } + if g, e := h.Name, "some-symlink"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Linkname, "some-target"; g != e { + t.Errorf("Linkname = %q; want %q", g, e) + } +} + +type symlink struct{} + +func (symlink) Name() string { return "some-symlink" } +func (symlink) Size() int64 { return 0 } +func (symlink) Mode() os.FileMode { return os.ModeSymlink } +func (symlink) ModTime() time.Time { return time.Time{} } +func (symlink) IsDir() bool { return false } +func (symlink) Sys() interface{} { return nil } + +func TestRoundTrip(t *testing.T) { + data := []byte("some file contents") + + var b bytes.Buffer + tw := NewWriter(&b) + hdr := &Header{ + Name: "file.txt", + Uid: 1 << 21, // too big for 8 octal digits + Size: int64(len(data)), + ModTime: time.Now(), + } + // tar only supports second precision. + hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond) + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("tw.WriteHeader: %v", err) + } + if _, err := tw.Write(data); err != nil { + t.Fatalf("tw.Write: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("tw.Close: %v", err) + } + + // Read it back. 
+ tr := NewReader(&b) + rHdr, err := tr.Next() + if err != nil { + t.Fatalf("tr.Next: %v", err) + } + if !reflect.DeepEqual(rHdr, hdr) { + t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr) + } + rData, err := ioutil.ReadAll(tr) + if err != nil { + t.Fatalf("Read: %v", err) + } + if !bytes.Equal(rData, data) { + t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data) + } +} + +type headerRoundTripTest struct { + h *Header + fm os.FileMode +} + +func TestHeaderRoundTrip(t *testing.T) { + golden := []headerRoundTripTest{ + // regular file. + { + h: &Header{ + Name: "test.txt", + Mode: 0644 | c_ISREG, + Size: 12, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeReg, + }, + fm: 0644, + }, + // hard link. + { + h: &Header{ + Name: "hard.txt", + Mode: 0644 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeLink, + }, + fm: 0644 | os.ModeSymlink, + }, + // symbolic link. + { + h: &Header{ + Name: "link.txt", + Mode: 0777 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600852, 0), + Typeflag: TypeSymlink, + }, + fm: 0777 | os.ModeSymlink, + }, + // character device node. + { + h: &Header{ + Name: "dev/null", + Mode: 0666 | c_ISCHR, + Size: 0, + ModTime: time.Unix(1360578951, 0), + Typeflag: TypeChar, + }, + fm: 0666 | os.ModeDevice | os.ModeCharDevice, + }, + // block device node. + { + h: &Header{ + Name: "dev/sda", + Mode: 0660 | c_ISBLK, + Size: 0, + ModTime: time.Unix(1360578954, 0), + Typeflag: TypeBlock, + }, + fm: 0660 | os.ModeDevice, + }, + // directory. + { + h: &Header{ + Name: "dir/", + Mode: 0755 | c_ISDIR, + Size: 0, + ModTime: time.Unix(1360601116, 0), + Typeflag: TypeDir, + }, + fm: 0755 | os.ModeDir, + }, + // fifo node. + { + h: &Header{ + Name: "dev/initctl", + Mode: 0600 | c_ISFIFO, + Size: 0, + ModTime: time.Unix(1360578949, 0), + Typeflag: TypeFifo, + }, + fm: 0600 | os.ModeNamedPipe, + }, + // setuid. 
+ { + h: &Header{ + Name: "bin/su", + Mode: 0755 | c_ISREG | c_ISUID, + Size: 23232, + ModTime: time.Unix(1355405093, 0), + Typeflag: TypeReg, + }, + fm: 0755 | os.ModeSetuid, + }, + // setguid. + { + h: &Header{ + Name: "group.txt", + Mode: 0750 | c_ISREG | c_ISGID, + Size: 0, + ModTime: time.Unix(1360602346, 0), + Typeflag: TypeReg, + }, + fm: 0750 | os.ModeSetgid, + }, + // sticky. + { + h: &Header{ + Name: "sticky.txt", + Mode: 0600 | c_ISREG | c_ISVTX, + Size: 7, + ModTime: time.Unix(1360602540, 0), + Typeflag: TypeReg, + }, + fm: 0600 | os.ModeSticky, + }, + } + + for i, g := range golden { + fi := g.h.FileInfo() + h2, err := FileInfoHeader(fi, "") + if err != nil { + t.Error(err) + continue + } + if strings.Contains(fi.Name(), "/") { + t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name()) + } + name := path.Base(g.h.Name) + if fi.IsDir() { + name += "/" + } + if got, want := h2.Name, name; got != want { + t.Errorf("i=%d: Name: got %v, want %v", i, got, want) + } + if got, want := h2.Size, g.h.Size; got != want { + t.Errorf("i=%d: Size: got %v, want %v", i, got, want) + } + if got, want := h2.Mode, g.h.Mode; got != want { + t.Errorf("i=%d: Mode: got %o, want %o", i, got, want) + } + if got, want := fi.Mode(), g.fm; got != want { + t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want) + } + if got, want := h2.ModTime, g.h.ModTime; got != want { + t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want) + } + if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h { + t.Errorf("i=%d: Sys didn't return original *Header", i) + } + } +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar new file mode 100644 index 0000000000..fc899dc8dc Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar 
b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar new file mode 100644 index 0000000000..cc9cfaa33c Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar new file mode 100644 index 0000000000..9bc24b6587 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt new file mode 100644 index 0000000000..b249bfc518 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt @@ -0,0 +1 @@ +Kilts \ No newline at end of file diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt new file mode 100644 index 0000000000..394ee3ecd0 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt @@ -0,0 +1 @@ +Google.com diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar new file mode 100644 index 0000000000..59e2d4e604 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar new file mode 100644 index 0000000000..29679d9a30 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar new 
file mode 100644 index 0000000000..eb65fc9410 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar new file mode 100644 index 0000000000..753e883ceb Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar new file mode 100644 index 0000000000..e6d816ad07 Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar new file mode 100644 index 0000000000..9701950edd Binary files /dev/null and b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar differ diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go new file mode 100644 index 0000000000..9ee9499297 --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go @@ -0,0 +1,383 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) 
+ +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errNameTooLong = errors.New("archive/tar: name too long") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. +type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use pax header instead of binary numeric header +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } + +// Flush finishes writing the current file (optional). +func (tw *Writer) Flush() error { + if tw.nb > 0 { + tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) + return tw.err + } + + n := tw.nb + tw.pad + for n > 0 && tw.err == nil { + nr := n + if nr > blockSize { + nr = blockSize + } + var nw int + nw, tw.err = tw.w.Write(zeroBlock[0:nr]) + n -= int64(nw) + } + tw.nb = 0 + tw.pad = 0 + return tw.err +} + +// Write s into b, terminating it with a NUL if there is room. 
+// If the value is too long for the field and allowPax is true add a paxheader record instead +func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) { + needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + if len(s) > len(b) { + if tw.err == nil { + tw.err = ErrFieldTooLong + } + return + } + ascii := toASCII(s) + copy(b, ascii) + if len(ascii) < len(b) { + b[len(ascii)] = 0 + } +} + +// Encode x as an octal ASCII string and write it into b with leading zeros. +func (tw *Writer) octal(b []byte, x int64) { + s := strconv.FormatInt(x, 8) + // leading zeros, but leave room for a NUL. + for len(s)+1 < len(b) { + s = "0" + s + } + tw.cString(b, s, false, paxNone, nil) +} + +// Write x into b, either as octal or as binary (GNUtar/star extension). +// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead +func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + tw.octal(b, x) + return + } + + // If it is too long for octal, and pax is preferred, use a pax header + if allowPax && tw.preferPax { + tw.octal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + // Too big: use binary (big-endian). + tw.usedBinary = true + for i := len(b) - 1; x > 0 && i >= 0; i-- { + b[i] = byte(x) + x >>= 8 + } + b[0] |= 0x80 // highest bit indicates binary format +} + +var ( + minTime = time.Unix(0, 0) + // There is room for 11 octal digits (33 bits) of mtime. + maxTime = minTime.Add((1<<33 - 1) * time.Second) +) + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. 
+func (tw *Writer) WriteHeader(hdr *Header) error { + return tw.writeHeader(hdr, true) +} + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +// As this method is called internally by writePax header to allow it to +// suppress writing the pax header. +func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // a map to hold pax header records, if any are needed + paxHeaders := make(map[string]string) + + // TODO(shanemhansen): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // too long fields or non ascii characters + + header := make([]byte, blockSize) + s := slicer(header) + + // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + pathHeaderBytes := s.next(fileNameSize) + + tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders) + + // Handle out of range ModTime carefully. 
+ var modTime int64 + if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { + modTime = hdr.ModTime.Unix() + } + + tw.octal(s.next(8), hdr.Mode) // 100:108 + tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116 + tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124 + tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136 + tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297 + tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329 + tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337 + tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345 + + // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + prefixHeaderBytes := s.next(155) + tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions. + if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // try to use a ustar header when only the name is too long + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + suffix := hdr.Name + prefix := "" + if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) { + var err error + prefix, suffix, err = tw.splitUSTARLongName(hdr.Name) + if err == nil { + // ok we can use a ustar long name instead of pax, now correct the fields + + // remove the path field from the pax header. 
this will suppress the pax header + delete(paxHeaders, paxPath) + + // update the path fields + tw.cString(pathHeaderBytes, suffix, false, paxNone, nil) + tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil) + + // Use the ustar magic if we used ustar long names. + if len(prefix) > 0 { + copy(header[257:265], []byte("ustar\000")) + } + } + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + tw.octal(header[148:155], chksum) + header[155] = ' ' + + if tw.err != nil { + // problem with header; probably integer too big for a field. + return tw.err + } + + if allowPax { + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +} + +// writeUSTARLongName splits a USTAR long name hdr.Name. +// name must be < 256 characters. errNameTooLong is returned +// if hdr.Name can't be split. The splitting heuristic +// is compatible with gnu tar. +func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) { + length := len(name) + if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + i := strings.LastIndex(name[:length], "/") + // nlen contains the resulting length in the name field. + // plen contains the resulting length in the prefix field. + nlen := len(name) - i - 1 + plen := i + if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { + err = errNameTooLong + return + } + prefix, suffix = name[:i], name[i+1:] + return +} + +// writePaxHeader writes an extended pax header to the +// archive. 
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. + pid := os.Getpid() + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, + fmt.Sprintf("PaxHeaders.%d", pid), file) + + ascii := toASCII(fullName) + if len(ascii) > 100 { + ascii = ascii[:100] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + for k, v := range paxHeaders { + fmt.Fprint(&buf, paxHeader(k+"="+v)) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// paxHeader formats a single pax record, prefixing it with the appropriate length +func paxHeader(msg string) string { + const padding = 2 // Extra padding for space and newline + size := len(msg) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s\n", size, msg) + if len(record) != size { + // Final adjustment if adding size increased + // the number of digits in size + size = len(record) + record = fmt.Sprintf("%d %s\n", size, msg) + } + return record +} + +// Write writes to the current entry in the tar archive. +// Write returns the error ErrWriteTooLong if more than +// hdr.Size bytes are written after WriteHeader. 
+func (tw *Writer) Write(b []byte) (n int, err error) { + if tw.closed { + err = ErrWriteTooLong + return + } + overwrite := false + if int64(len(b)) > tw.nb { + b = b[0:tw.nb] + overwrite = true + } + n, err = tw.w.Write(b) + tw.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + tw.err = err + return +} + +// Close closes the tar archive, flushing any unwritten +// data to the underlying writer. +func (tw *Writer) Close() error { + if tw.err != nil || tw.closed { + return tw.err + } + tw.Flush() + tw.closed = true + if tw.err != nil { + return tw.err + } + + // trailer: two zero blocks + for i := 0; i < 2; i++ { + _, tw.err = tw.w.Write(zeroBlock) + if tw.err != nil { + break + } + } + return tw.err +} diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go new file mode 100644 index 0000000000..2b9ea658db --- /dev/null +++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go @@ -0,0 +1,433 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" + "testing/iotest" + "time" +) + +type writerTestEntry struct { + header *Header + contents string +} + +type writerTest struct { + file string // filename of expected output + entries []*writerTestEntry +} + +var writerTests = []*writerTest{ + // The writer test file was produced with this command: + // tar (GNU tar) 1.26 + // ln -s small.txt link.txt + // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt + { + file: "testdata/writer.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1246508266, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Kilts", + }, + { + header: &Header{ + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1245217492, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Google.com\n", + }, + { + header: &Header{ + Name: "link.txt", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Size: 0, + ModTime: time.Unix(1314603082, 0), + Typeflag: '2', + Linkname: "small.txt", + Uname: "strings", + Gname: "strings", + }, + // no contents + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt + // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar + { + file: "testdata/writer-big.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "tmp/16gig.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 16 << 30, + ModTime: time.Unix(1254699560, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // This file was produced using gnu tar 1.17 + // gnutar -b 4 --format=ustar (longname/)*15 + file.txt + { + file: "testdata/ustar.tar", + entries: 
[]*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "file.txt", + Mode: 0644, + Uid: 0765, + Gid: 024, + Size: 06, + ModTime: time.Unix(1360135598, 0), + Typeflag: '0', + Uname: "shane", + Gname: "staff", + }, + contents: "hello\n", + }, + }, + }, +} + +// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection. +func bytestr(offset int, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("%04x ", offset) + for _, ch := range b { + switch { + case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z': + s += fmt.Sprintf(" %c", ch) + default: + s += fmt.Sprintf(" %02x", ch) + } + } + return s +} + +// Render a pseudo-diff between two blocks of bytes. +func bytediff(a []byte, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b)) + for offset := 0; len(a)+len(b) > 0; offset += rowLen { + na, nb := rowLen, rowLen + if na > len(a) { + na = len(a) + } + if nb > len(b) { + nb = len(b) + } + sa := bytestr(offset, a[0:na]) + sb := bytestr(offset, b[0:nb]) + if sa != sb { + s += fmt.Sprintf("-%v\n+%v\n", sa, sb) + } + a = a[na:] + b = b[nb:] + } + return s +} + +func TestWriter(t *testing.T) { +testLoop: + for i, test := range writerTests { + expected, err := ioutil.ReadFile(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + + buf := new(bytes.Buffer) + tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB + big := false + for j, entry := range test.entries { + big = big || entry.header.Size > 1<<10 + if err := tw.WriteHeader(entry.header); err != nil { + t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) + continue testLoop + } + if _, err := io.WriteString(tw, entry.contents); err != nil { + t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err) + continue testLoop + } + } + // Only interested in Close failures for the small tests. 
+ if err := tw.Close(); err != nil && !big { + t.Errorf("test %d: Failed closing archive: %v", i, err) + continue testLoop + } + + actual := buf.Bytes() + if !bytes.Equal(expected, actual) { + t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v", + i, bytediff(expected, actual)) + } + if testing.Short() { // The second test is expensive. + break + } + } +} + +func TestPax(t *testing.T) { + // Create an archive with a large name + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + // Force a PAX long name to be written + longName := strings.Repeat("ab", 100) + contents := strings.Repeat(" ", int(hdr.Size)) + hdr.Name = longName + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long file name") + } +} + +func TestPaxSymlink(t *testing.T) { + // Create an archive with a large linkname + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeSymlink + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long linkname to be written + longLinkname := strings.Repeat("1234567890/1234567890", 10) + hdr.Linkname = longLinkname + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Linkname != longLinkname { + t.Fatal("Couldn't recover long link name") + } +} + +func TestPaxNonAscii(t *testing.T) { + // Create an archive with non ascii. These should trigger a pax header + // because pax headers have a defined utf-8 encoding. 
+ fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + + // some sample data + chineseFilename := "文件名" + chineseGroupname := "組" + chineseUsername := "用戶名" + + hdr.Name = chineseFilename + hdr.Gname = chineseGroupname + hdr.Uname = chineseUsername + + contents := strings.Repeat(" ", int(hdr.Size)) + + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != chineseFilename { + t.Fatal("Couldn't recover unicode name") + } + if hdr.Gname != chineseGroupname { + t.Fatal("Couldn't recover unicode group") + } + if hdr.Uname != chineseUsername { + t.Fatal("Couldn't recover unicode user") + } +} + +func TestPaxXattrs(t *testing.T) { + xattrs := map[string]string{ + "user.key": "value", + } + + // Create an archive with an xattr + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + contents := "Kilts" + hdr.Xattrs = xattrs + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get the xattrs back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(hdr.Xattrs, xattrs) { + t.Fatalf("xattrs did not survive round trip: got %+v, want %+v", + hdr.Xattrs, xattrs) + } +} + +func TestPAXHeader(t *testing.T) { + medName := strings.Repeat("CD", 50) + longName := strings.Repeat("AB", 100) + paxTests := [][2]string{ + {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"}, + {"a=b", "6 a=b\n"}, // Single digit length + {"a=names", "11 a=names\n"}, // Test case involving carries + {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)}, + {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}} + + for _, test := range paxTests { + key, expected := test[0], test[1] + if result := paxHeader(key); result != expected { + t.Fatalf("paxHeader: got %s, expected %s", result, expected) + } + } +} + +func TestUSTARLongName(t *testing.T) { + // Create an archive with a path that failed to split with USTAR extension in previous versions. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeDir + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long name to be written. The name was taken from a practical example + // that fails and replaced ever char through numbers to anonymize the sample. + longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" + hdr.Name = longName + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get a long name back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long name") + } +} diff --git a/version.go b/version.go index a4288245f7..88298a16cb 100644 --- a/version.go +++ b/version.go @@ -1,6 +1,7 @@ package docker import ( + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "runtime" @@ -22,8 +23,8 @@ func jobVersion(job *engine.Job) engine.Status { // environment. func dockerVersion() *engine.Env { v := &engine.Env{} - v.Set("Version", VERSION) - v.Set("GitCommit", GITCOMMIT) + v.Set("Version", dockerversion.VERSION) + v.Set("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", runtime.Version()) v.Set("Os", runtime.GOOS) v.Set("Arch", runtime.GOARCH) diff --git a/volumes.go b/volumes.go new file mode 100644 index 0000000000..a53ef722e7 --- /dev/null +++ b/volumes.go @@ -0,0 +1,332 @@ +package docker + +import ( + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "syscall" +) + +type BindMap struct { + SrcPath string + DstPath string + Mode string +} + +func prepareVolumesForContainer(container *Container) error { + if container.Volumes == nil || len(container.Volumes) == 0 { + container.Volumes = make(map[string]string) + container.VolumesRW = make(map[string]bool) + } + + if err := applyVolumesFrom(container); err != nil { + return err + } + if err := createVolumes(container); err != nil { + return err + } + return nil +} + +func mountVolumesForContainer(container *Container, envPath string) error { + // Setup the root fs as a bind mount of the base fs + var ( + root = container.RootfsPath() + runtime = container.runtime + ) + if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { + return nil + } + + // Create a bind mount of the base fs as a place where we can 
add mounts + // without affecting the ability to access the base fs + if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { + return err + } + + // Make sure the root fs is private so the mounts here don't propagate to basefs + if err := mount.ForceMount(root, root, "none", "private"); err != nil { + return err + } + + // Mount docker specific files into the containers root fs + if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil { + return err + } + + if container.HostnamePath != "" && container.HostsPath != "" { + if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil { + return err + } + } + + // Mount user specified volumes + for r, v := range container.Volumes { + mountAs := "ro" + if container.VolumesRW[r] { + mountAs = "rw" + } + + r = filepath.Join(root, r) + if p, err := utils.FollowSymlinkInScope(r, root); err != nil { + return err + } else { + r = p + } + + if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { + return err + } + } + return nil +} + +func unmountVolumesForContainer(container *Container) { + var ( + root = container.RootfsPath() + mounts = []string{ + root, + filepath.Join(root, "/.dockerinit"), + filepath.Join(root, "/.dockerenv"), + filepath.Join(root, "/etc/resolv.conf"), + } + ) + + if container.HostnamePath != "" && container.HostsPath != "" { + mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts")) + } + + for r := range container.Volumes { 
+ mounts = append(mounts, filepath.Join(root, r)) + } + + for i := len(mounts) - 1; i >= 0; i-- { + if lastError := mount.Unmount(mounts[i]); lastError != nil { + log.Printf("Failed to umount %v: %v", mounts[i], lastError) + } + } +} + +func applyVolumesFrom(container *Container) error { + if container.Config.VolumesFrom != "" { + for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") { + var ( + mountRW = true + specParts = strings.SplitN(containerSpec, ":", 2) + ) + + switch len(specParts) { + case 0: + return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom) + case 2: + switch specParts[1] { + case "ro": + mountRW = false + case "rw": // mountRW is already true + default: + return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec) + } + } + + c := container.runtime.Get(specParts[0]) + if c == nil { + return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID) + } + + for volPath, id := range c.Volumes { + if _, exists := container.Volumes[volPath]; exists { + continue + } + if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil { + return err + } + container.Volumes[volPath] = id + if isRW, exists := c.VolumesRW[volPath]; exists { + container.VolumesRW[volPath] = isRW && mountRW + } + } + + } + } + return nil +} + +func getBindMap(container *Container) (map[string]BindMap, error) { + var ( + // Create the requested bind mounts + binds = make(map[string]BindMap) + // Define illegal container destinations + illegalDsts = []string{"/", "."} + ) + + for _, bind := range container.hostConfig.Binds { + // FIXME: factorize bind parsing in parseBind + var ( + src, dst, mode string + arr = strings.Split(bind, ":") + ) + + if len(arr) == 2 { + src = arr[0] + dst = arr[1] + mode = "rw" + } else if len(arr) == 3 { + src = arr[0] + dst = arr[1] + mode = arr[2] + } else { + return nil, fmt.Errorf("Invalid bind specification: %s", bind) + } + 
+ // Bail if trying to mount to an illegal destination + for _, illegal := range illegalDsts { + if dst == illegal { + return nil, fmt.Errorf("Illegal bind destination: %s", dst) + } + } + + bindMap := BindMap{ + SrcPath: src, + DstPath: dst, + Mode: mode, + } + binds[filepath.Clean(dst)] = bindMap + } + return binds, nil +} + +func createVolumes(container *Container) error { + binds, err := getBindMap(container) + if err != nil { + return err + } + + volumesDriver := container.runtime.volumes.driver + // Create the requested volumes if they don't exist + for volPath := range container.Config.Volumes { + volPath = filepath.Clean(volPath) + volIsDir := true + // Skip existing volumes + if _, exists := container.Volumes[volPath]; exists { + continue + } + var srcPath string + var isBindMount bool + srcRW := false + // If an external bind is defined for this volume, use that as a source + if bindMap, exists := binds[volPath]; exists { + isBindMount = true + srcPath = bindMap.SrcPath + if strings.ToLower(bindMap.Mode) == "rw" { + srcRW = true + } + if stat, err := os.Stat(bindMap.SrcPath); err != nil { + return err + } else { + volIsDir = stat.IsDir() + } + // Otherwise create an directory in $ROOT/volumes/ and use that + } else { + + // Do not pass a container as the parameter for the volume creation. + // The graph driver using the container's information ( Image ) to + // create the parent. 
+ c, err := container.runtime.volumes.Create(nil, nil, "", "", nil) + if err != nil { + return err + } + srcPath, err = volumesDriver.Get(c.ID) + if err != nil { + return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) + } + srcRW = true // RW by default + } + + if p, err := filepath.EvalSymlinks(srcPath); err != nil { + return err + } else { + srcPath = p + } + + container.Volumes[volPath] = srcPath + container.VolumesRW[volPath] = srcRW + + // Create the mountpoint + volPath = filepath.Join(container.basefs, volPath) + rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) + if err != nil { + return err + } + + if _, err := os.Stat(rootVolPath); err != nil { + if os.IsNotExist(err) { + if volIsDir { + if err := os.MkdirAll(rootVolPath, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil { + return err + } + if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil { + return err + } else { + f.Close() + } + } + } + } + + // Do not copy or change permissions if we are mounting from the host + if srcRW && !isBindMount { + volList, err := ioutil.ReadDir(rootVolPath) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(srcPath) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty copy files from the root into the volume + if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { + return err + } + + var stat syscall.Stat_t + if err := syscall.Stat(rootVolPath, &stat); err != nil { + return err + } + var srcStat syscall.Stat_t + if err := syscall.Stat(srcPath, &srcStat); err != nil { + return err + } + // Change the source volume's ownership if it differs from the root + // files that were just copied + if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { + if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } 
+ } + } + } + } + return nil +}