commit
4c52272a2d
302 changed files with 9877 additions and 4804 deletions
2
.dockerignore
Normal file
2
.dockerignore
Normal file
|
@ -0,0 +1,2 @@
|
|||
bundles
|
||||
.gopath
|
1
.gitignore
vendored
1
.gitignore
vendored
|
@ -26,3 +26,4 @@ Vagrantfile
|
|||
docs/AWS_S3_BUCKET
|
||||
docs/GIT_BRANCH
|
||||
docs/VERSION
|
||||
docs/GITCOMMIT
|
||||
|
|
24
CHANGELOG.md
24
CHANGELOG.md
|
@ -1,5 +1,29 @@
|
|||
# Changelog
|
||||
|
||||
## 1.1.0 (2014-07-03)
|
||||
|
||||
#### Notable features since 1.0.1
|
||||
+ Add `.dockerignore` support
|
||||
+ Pause containers during `docker commit`
|
||||
+ Add `--tail` to `docker logs`
|
||||
|
||||
#### Builder
|
||||
+ Allow a tar file as context for `docker build`
|
||||
* Fix issue with white-spaces and multi-lines in `Dockerfiles`
|
||||
|
||||
#### Runtime
|
||||
* Overall performance improvements
|
||||
* Allow `/` as source of `docker run -v`
|
||||
* Fix port allocation
|
||||
* Fix bug in `docker save`
|
||||
* Add links information to `docker inspect`
|
||||
|
||||
#### Client
|
||||
* Improve command line parsing for `docker commit`
|
||||
|
||||
#### Remote API
|
||||
* Improve status code for the `start` and `stop` endpoints
|
||||
|
||||
## 1.0.1 (2014-06-19)
|
||||
|
||||
#### Notable features since 1.0.0
|
||||
|
|
|
@ -9,7 +9,7 @@ feels wrong or incomplete.
|
|||
When reporting [issues](https://github.com/dotcloud/docker/issues)
|
||||
on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc),
|
||||
the output of `uname -a` and the output of `docker version` along with
|
||||
the output of `docker info`. Please include the steps required to reproduce
|
||||
the output of `docker -D info`. Please include the steps required to reproduce
|
||||
the problem if possible and applicable.
|
||||
This information will help us review and fix your issue faster.
|
||||
|
||||
|
@ -17,7 +17,7 @@ This information will help us review and fix your issue faster.
|
|||
|
||||
For instructions on setting up your development environment, please
|
||||
see our dedicated [dev environment setup
|
||||
docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
|
||||
docs](http://docs.docker.com/contributing/devenvironment/).
|
||||
|
||||
## Contribution guidelines
|
||||
|
||||
|
@ -190,7 +190,7 @@ There are several exceptions to the signing requirement. Currently these are:
|
|||
* Your patch fixes Markdown formatting or syntax errors in the
|
||||
documentation contained in the `docs` directory.
|
||||
|
||||
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)
|
||||
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com)
|
||||
|
||||
### How can I become a maintainer?
|
||||
|
||||
|
|
|
@ -6,3 +6,4 @@ Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
|
|||
AUTHORS: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
.dockerignore: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
|
|
2
Makefile
2
Makefile
|
@ -6,6 +6,7 @@ BINDDIR := bundles
|
|||
DOCSPORT := 8000
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
|
||||
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
|
||||
|
@ -59,6 +60,7 @@ docs-build:
|
|||
cp ./VERSION docs/VERSION
|
||||
echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
|
||||
echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
|
||||
echo "$(GITCOMMIT)" > docs/GITCOMMIT
|
||||
docker build -t "$(DOCKER_DOCS_IMAGE)" docs
|
||||
|
||||
bundles:
|
||||
|
|
|
@ -160,7 +160,7 @@ Docker can be used to run short-lived commands, long-running daemons
|
|||
(app servers, databases etc.), interactive shell sessions, etc.
|
||||
|
||||
You can find a [list of real-world
|
||||
examples](http://docs.docker.io/en/latest/examples/) in the
|
||||
examples](http://docs.docker.com/examples/) in the
|
||||
documentation.
|
||||
|
||||
Under the hood
|
||||
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
|||
1.0.1
|
||||
1.1.0
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -36,6 +37,10 @@ import (
|
|||
"github.com/dotcloud/docker/utils/filters"
|
||||
)
|
||||
|
||||
const (
|
||||
tarHeaderSize = 512
|
||||
)
|
||||
|
||||
func (cli *DockerCli) CmdHelp(args ...string) error {
|
||||
if len(args) > 0 {
|
||||
method, exists := cli.getMethod(args[0])
|
||||
|
@ -51,7 +56,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
|
|||
{"attach", "Attach to a running container"},
|
||||
{"build", "Build an image from a Dockerfile"},
|
||||
{"commit", "Create a new image from a container's changes"},
|
||||
{"cp", "Copy files/folders from the containers filesystem to the host path"},
|
||||
{"cp", "Copy files/folders from a container's filesystem to the host path"},
|
||||
{"diff", "Inspect changes on a container's filesystem"},
|
||||
{"events", "Get real time events from the server"},
|
||||
{"export", "Stream the contents of a container as a tar archive"},
|
||||
|
@ -62,25 +67,25 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
|
|||
{"inspect", "Return low-level information on a container"},
|
||||
{"kill", "Kill a running container"},
|
||||
{"load", "Load an image from a tar archive"},
|
||||
{"login", "Register or Login to the docker registry server"},
|
||||
{"login", "Register or log in to the Docker registry server"},
|
||||
{"logs", "Fetch the logs of a container"},
|
||||
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
|
||||
{"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
|
||||
{"pause", "Pause all processes within a container"},
|
||||
{"ps", "List containers"},
|
||||
{"pull", "Pull an image or a repository from the docker registry server"},
|
||||
{"push", "Push an image or a repository to the docker registry server"},
|
||||
{"pull", "Pull an image or a repository from a Docker registry server"},
|
||||
{"push", "Push an image or a repository to a Docker registry server"},
|
||||
{"restart", "Restart a running container"},
|
||||
{"rm", "Remove one or more containers"},
|
||||
{"rmi", "Remove one or more images"},
|
||||
{"run", "Run a command in a new container"},
|
||||
{"save", "Save an image to a tar archive"},
|
||||
{"search", "Search for an image in the docker index"},
|
||||
{"search", "Search for an image on the Docker Hub"},
|
||||
{"start", "Start a stopped container"},
|
||||
{"stop", "Stop a running container"},
|
||||
{"tag", "Tag an image into a repository"},
|
||||
{"top", "Lookup the running processes of a container"},
|
||||
{"unpause", "Unpause a paused container"},
|
||||
{"version", "Show the docker version information"},
|
||||
{"version", "Show the Docker version information"},
|
||||
{"wait", "Block until a container stops, then print its exit code"},
|
||||
} {
|
||||
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
|
||||
|
@ -113,13 +118,22 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
|||
_, err = exec.LookPath("git")
|
||||
hasGit := err == nil
|
||||
if cmd.Arg(0) == "-" {
|
||||
// As a special case, 'docker build -' will build from an empty context with the
|
||||
// contents of stdin as a Dockerfile
|
||||
dockerfile, err := ioutil.ReadAll(cli.in)
|
||||
if err != nil {
|
||||
return err
|
||||
// As a special case, 'docker build -' will build from either an empty context with the
|
||||
// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
|
||||
buf := bufio.NewReader(cli.in)
|
||||
magic, err := buf.Peek(tarHeaderSize)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("failed to peek context header from STDIN: %v", err)
|
||||
}
|
||||
if !archive.IsArchive(magic) {
|
||||
dockerfile, err := ioutil.ReadAll(buf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
|
||||
}
|
||||
context, err = archive.Generate("Dockerfile", string(dockerfile))
|
||||
} else {
|
||||
context = ioutil.NopCloser(buf)
|
||||
}
|
||||
context, err = archive.Generate("Dockerfile", string(dockerfile))
|
||||
} else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) {
|
||||
isRemote = true
|
||||
} else {
|
||||
|
@ -150,7 +164,25 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
|||
if err = utils.ValidateContextDirectory(root); err != nil {
|
||||
return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
|
||||
}
|
||||
context, err = archive.Tar(root, archive.Uncompressed)
|
||||
options := &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
}
|
||||
if ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")); err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("Error reading .dockerignore: '%s'", err)
|
||||
} else if err == nil {
|
||||
for _, pattern := range strings.Split(string(ignore), "\n") {
|
||||
ok, err := filepath.Match(pattern, "Dockerfile")
|
||||
if err != nil {
|
||||
utils.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern)
|
||||
}
|
||||
options.Excludes = append(options.Excludes, pattern)
|
||||
}
|
||||
}
|
||||
context, err = archive.TarWithOptions(root, options)
|
||||
}
|
||||
var body io.Reader
|
||||
// Setup an upload progress bar
|
||||
|
@ -216,7 +248,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
|||
|
||||
// 'docker login': login / register a user to registry service.
|
||||
func (cli *DockerCli) CmdLogin(args ...string) error {
|
||||
cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
|
||||
cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or log in to a Docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
|
||||
|
||||
var username, password, email string
|
||||
|
||||
|
@ -342,7 +374,7 @@ func (cli *DockerCli) CmdWait(args ...string) error {
|
|||
|
||||
// 'docker version': show version information
|
||||
func (cli *DockerCli) CmdVersion(args ...string) error {
|
||||
cmd := cli.Subcmd("version", "", "Show the docker version information.")
|
||||
cmd := cli.Subcmd("version", "", "Show the Docker version information.")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -439,6 +471,9 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
|
|||
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
|
||||
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
|
||||
}
|
||||
if len(remoteInfo.GetList("Sockets")) != 0 {
|
||||
fmt.Fprintf(cli.out, "Sockets: %v\n", remoteInfo.GetList("Sockets"))
|
||||
}
|
||||
}
|
||||
|
||||
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
|
||||
|
@ -462,8 +497,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdStop(args ...string) error {
|
||||
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.")
|
||||
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a grace period")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -490,7 +525,7 @@ func (cli *DockerCli) CmdStop(args ...string) error {
|
|||
|
||||
func (cli *DockerCli) CmdRestart(args ...string) error {
|
||||
cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -547,8 +582,8 @@ func (cli *DockerCli) CmdStart(args ...string) error {
|
|||
tty bool
|
||||
|
||||
cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
|
||||
attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
|
||||
openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
|
||||
attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's STDOUT and STDERR and forward all signals to the process")
|
||||
openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
|
||||
)
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
|
@ -679,7 +714,7 @@ func (cli *DockerCli) CmdPause(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdInspect(args ...string) error {
|
||||
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
|
||||
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image")
|
||||
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
|
@ -759,7 +794,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdTop(args ...string) error {
|
||||
cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container")
|
||||
cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -794,7 +829,7 @@ func (cli *DockerCli) CmdTop(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdPort(args ...string) error {
|
||||
cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
|
||||
cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -842,7 +877,7 @@ func (cli *DockerCli) CmdPort(args ...string) error {
|
|||
func (cli *DockerCli) CmdRmi(args ...string) error {
|
||||
var (
|
||||
cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
|
||||
force = cmd.Bool([]string{"f", "-force"}, false, "Force")
|
||||
force = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image")
|
||||
noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
|
||||
)
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
|
@ -945,7 +980,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
|
|||
|
||||
func (cli *DockerCli) CmdRm(args ...string) error {
|
||||
cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
|
||||
v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container")
|
||||
v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
|
||||
link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
|
||||
force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container")
|
||||
|
||||
|
@ -982,7 +1017,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
|
|||
|
||||
// 'docker kill NAME' kills a running container
|
||||
func (cli *DockerCli) CmdKill(args ...string) error {
|
||||
cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)")
|
||||
cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal")
|
||||
signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
|
@ -1114,7 +1149,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
|
|||
|
||||
func (cli *DockerCli) CmdPull(args ...string) error {
|
||||
cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry")
|
||||
tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository")
|
||||
tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in a repository")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -1503,25 +1538,21 @@ func (cli *DockerCli) CmdPs(args ...string) error {
|
|||
|
||||
func (cli *DockerCli) CmdCommit(args ...string) error {
|
||||
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
|
||||
flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
|
||||
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
|
||||
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
|
||||
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
|
||||
// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
|
||||
flConfig := cmd.String([]string{"#run", "#-run"}, "", "this option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
|
||||
flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var name, repository, tag string
|
||||
|
||||
if cmd.NArg() == 3 {
|
||||
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
|
||||
name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
|
||||
} else {
|
||||
name = cmd.Arg(0)
|
||||
var (
|
||||
name = cmd.Arg(0)
|
||||
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
|
||||
}
|
||||
)
|
||||
|
||||
if name == "" {
|
||||
if name == "" || len(cmd.Args()) > 2 {
|
||||
cmd.Usage()
|
||||
return nil
|
||||
}
|
||||
|
@ -1539,6 +1570,11 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
|
|||
v.Set("tag", tag)
|
||||
v.Set("comment", *flComment)
|
||||
v.Set("author", *flAuthor)
|
||||
|
||||
if *flPause != true {
|
||||
v.Set("pause", "0")
|
||||
}
|
||||
|
||||
var (
|
||||
config *runconfig.Config
|
||||
env engine.Env
|
||||
|
@ -1657,6 +1693,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
|
|||
cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
|
||||
follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
|
||||
times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
|
||||
tail = cmd.String([]string{"-tail"}, "all", "Output the specified number of lines at the end of logs (defaults to all logs)")
|
||||
)
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
|
@ -1690,6 +1727,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
|
|||
if *follow {
|
||||
v.Set("follow", "1")
|
||||
}
|
||||
v.Set("tail", *tail)
|
||||
|
||||
return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil)
|
||||
}
|
||||
|
@ -1697,8 +1735,8 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
|
|||
func (cli *DockerCli) CmdAttach(args ...string) error {
|
||||
var (
|
||||
cmd = cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
|
||||
noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
|
||||
proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
|
||||
noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
|
||||
proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied.")
|
||||
)
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
|
@ -1769,11 +1807,11 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdSearch(args ...string) error {
|
||||
cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
|
||||
cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images")
|
||||
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
|
||||
trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
|
||||
automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
|
||||
stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars")
|
||||
stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -1829,21 +1867,15 @@ func (cli *DockerCli) CmdTag(args ...string) error {
|
|||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
if cmd.NArg() != 2 && cmd.NArg() != 3 {
|
||||
if cmd.NArg() != 2 {
|
||||
cmd.Usage()
|
||||
return nil
|
||||
}
|
||||
|
||||
var repository, tag string
|
||||
|
||||
if cmd.NArg() == 3 {
|
||||
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n")
|
||||
repository, tag = cmd.Arg(1), cmd.Arg(2)
|
||||
} else {
|
||||
var (
|
||||
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v = url.Values{}
|
||||
)
|
||||
|
||||
//Check if the given image name can be resolved
|
||||
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
|
||||
|
@ -1906,7 +1938,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
|
|||
}
|
||||
if cidFileInfo.Size() == 0 {
|
||||
if err := os.Remove(hostConfig.ContainerIDFile); err != nil {
|
||||
fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err)
|
||||
fmt.Printf("failed to remove Container ID file '%s': %s \n", hostConfig.ContainerIDFile, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -2156,7 +2188,7 @@ func (cli *DockerCli) CmdCp(args ...string) error {
|
|||
}
|
||||
|
||||
func (cli *DockerCli) CmdSave(args ...string) error {
|
||||
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)")
|
||||
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to STDOUT by default)")
|
||||
outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT")
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
APIVERSION version.Version = "1.12"
|
||||
APIVERSION version.Version = "1.13"
|
||||
DEFAULTHTTPHOST = "127.0.0.1"
|
||||
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
|
||||
)
|
||||
|
|
2
api/server/MAINTAINERS
Normal file
2
api/server/MAINTAINERS
Normal file
|
@ -0,0 +1,2 @@
|
|||
Victor Vieux <vieux@docker.com> (@vieux)
|
||||
Johan Euphrosine <proppy@google.com> (@proppy)
|
|
@ -370,13 +370,24 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo
|
|||
}
|
||||
|
||||
var (
|
||||
job = eng.Job("container_inspect", vars["name"])
|
||||
c, err = job.Stdout.AddEnv()
|
||||
inspectJob = eng.Job("container_inspect", vars["name"])
|
||||
logsJob = eng.Job("logs", vars["name"])
|
||||
c, err = inspectJob.Stdout.AddEnv()
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = job.Run(); err != nil {
|
||||
logsJob.Setenv("follow", r.Form.Get("follow"))
|
||||
logsJob.Setenv("tail", r.Form.Get("tail"))
|
||||
logsJob.Setenv("stdout", r.Form.Get("stdout"))
|
||||
logsJob.Setenv("stderr", r.Form.Get("stderr"))
|
||||
logsJob.Setenv("timestamps", r.Form.Get("timestamps"))
|
||||
// Validate args here, because we can't return not StatusOK after job.Run() call
|
||||
stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr")
|
||||
if !(stdout || stderr) {
|
||||
return fmt.Errorf("Bad parameters: you must choose at least one stream")
|
||||
}
|
||||
if err = inspectJob.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -390,14 +401,9 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo
|
|||
errStream = outStream
|
||||
}
|
||||
|
||||
job = eng.Job("logs", vars["name"])
|
||||
job.Setenv("follow", r.Form.Get("follow"))
|
||||
job.Setenv("stdout", r.Form.Get("stdout"))
|
||||
job.Setenv("stderr", r.Form.Get("stderr"))
|
||||
job.Setenv("timestamps", r.Form.Get("timestamps"))
|
||||
job.Stdout.Add(outStream)
|
||||
job.Stderr.Set(errStream)
|
||||
if err := job.Run(); err != nil {
|
||||
logsJob.Stdout.Add(outStream)
|
||||
logsJob.Stderr.Set(errStream)
|
||||
if err := logsJob.Run(); err != nil {
|
||||
fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
|
||||
}
|
||||
return nil
|
||||
|
@ -434,6 +440,12 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
|
|||
utils.Errorf("%s", err)
|
||||
}
|
||||
|
||||
if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
|
||||
job.Setenv("pause", "1")
|
||||
} else {
|
||||
job.Setenv("pause", r.FormValue("pause"))
|
||||
}
|
||||
|
||||
job.Setenv("repo", r.Form.Get("repo"))
|
||||
job.Setenv("tag", r.Form.Get("tag"))
|
||||
job.Setenv("author", r.Form.Get("author"))
|
||||
|
@ -688,8 +700,11 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
|
|||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
job := eng.Job("start", name)
|
||||
var (
|
||||
name = vars["name"]
|
||||
job = eng.Job("start", name)
|
||||
)
|
||||
|
||||
// allow a nil body for backwards compatibility
|
||||
if r.Body != nil {
|
||||
if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
|
||||
|
@ -699,6 +714,10 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
|
|||
}
|
||||
}
|
||||
if err := job.Run(); err != nil {
|
||||
if err.Error() == "Container already started" {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
@ -715,6 +734,10 @@ func postContainersStop(eng *engine.Engine, version version.Version, w http.Resp
|
|||
job := eng.Job("stop", vars["name"])
|
||||
job.Setenv("t", r.Form.Get("t"))
|
||||
if err := job.Run(); err != nil {
|
||||
if err.Error() == "Container already stopped" {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
@ -855,7 +878,7 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res
|
|||
}
|
||||
var job = eng.Job("container_inspect", vars["name"])
|
||||
if version.LessThan("1.12") {
|
||||
job.SetenvBool("dirty", true)
|
||||
job.SetenvBool("raw", true)
|
||||
}
|
||||
streamJSON(job, w, false)
|
||||
return job.Run()
|
||||
|
@ -867,7 +890,7 @@ func getImagesByName(eng *engine.Engine, version version.Version, w http.Respons
|
|||
}
|
||||
var job = eng.Job("image_inspect", vars["name"])
|
||||
if version.LessThan("1.12") {
|
||||
job.SetenvBool("dirty", true)
|
||||
job.SetenvBool("raw", true)
|
||||
}
|
||||
streamJSON(job, w, false)
|
||||
return job.Run()
|
||||
|
|
|
@ -4,12 +4,14 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/api"
|
||||
"github.com/dotcloud/docker/engine"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dotcloud/docker/api"
|
||||
"github.com/dotcloud/docker/engine"
|
||||
)
|
||||
|
||||
func TestGetBoolParam(t *testing.T) {
|
||||
|
@ -151,6 +153,172 @@ func TestGetContainersByName(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetEvents(t *testing.T) {
|
||||
eng := engine.New()
|
||||
var called bool
|
||||
eng.Register("events", func(job *engine.Job) engine.Status {
|
||||
called = true
|
||||
since := job.Getenv("since")
|
||||
if since != "1" {
|
||||
t.Fatalf("'since' should be 1, found %#v instead", since)
|
||||
}
|
||||
until := job.Getenv("until")
|
||||
if until != "0" {
|
||||
t.Fatalf("'until' should be 0, found %#v instead", until)
|
||||
}
|
||||
v := &engine.Env{}
|
||||
v.Set("since", since)
|
||||
v.Set("until", until)
|
||||
if _, err := v.WriteTo(job.Stdout); err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
return engine.StatusOK
|
||||
})
|
||||
r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
|
||||
if !called {
|
||||
t.Fatal("handler was not called")
|
||||
}
|
||||
if r.HeaderMap.Get("Content-Type") != "application/json" {
|
||||
t.Fatalf("%#v\n", r)
|
||||
}
|
||||
var stdout_json struct {
|
||||
Since int
|
||||
Until int
|
||||
}
|
||||
if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
|
||||
t.Fatalf("%#v", err)
|
||||
}
|
||||
if stdout_json.Since != 1 {
|
||||
t.Fatalf("since != 1: %#v", stdout_json.Since)
|
||||
}
|
||||
if stdout_json.Until != 0 {
|
||||
t.Fatalf("until != 0: %#v", stdout_json.Until)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogs(t *testing.T) {
|
||||
eng := engine.New()
|
||||
var inspect bool
|
||||
var logs bool
|
||||
eng.Register("container_inspect", func(job *engine.Job) engine.Status {
|
||||
inspect = true
|
||||
if len(job.Args) == 0 {
|
||||
t.Fatal("Job arguments is empty")
|
||||
}
|
||||
if job.Args[0] != "test" {
|
||||
t.Fatalf("Container name %s, must be test", job.Args[0])
|
||||
}
|
||||
return engine.StatusOK
|
||||
})
|
||||
expected := "logs"
|
||||
eng.Register("logs", func(job *engine.Job) engine.Status {
|
||||
logs = true
|
||||
if len(job.Args) == 0 {
|
||||
t.Fatal("Job arguments is empty")
|
||||
}
|
||||
if job.Args[0] != "test" {
|
||||
t.Fatalf("Container name %s, must be test", job.Args[0])
|
||||
}
|
||||
follow := job.Getenv("follow")
|
||||
if follow != "1" {
|
||||
t.Fatalf("follow: %s, must be 1", follow)
|
||||
}
|
||||
stdout := job.Getenv("stdout")
|
||||
if stdout != "1" {
|
||||
t.Fatalf("stdout %s, must be 1", stdout)
|
||||
}
|
||||
stderr := job.Getenv("stderr")
|
||||
if stderr != "" {
|
||||
t.Fatalf("stderr %s, must be empty", stderr)
|
||||
}
|
||||
timestamps := job.Getenv("timestamps")
|
||||
if timestamps != "1" {
|
||||
t.Fatalf("timestamps %s, must be 1", timestamps)
|
||||
}
|
||||
job.Stdout.Write([]byte(expected))
|
||||
return engine.StatusOK
|
||||
})
|
||||
r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1×tamps=1", nil, eng, t)
|
||||
if r.Code != http.StatusOK {
|
||||
t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
|
||||
}
|
||||
if !inspect {
|
||||
t.Fatal("container_inspect job was not called")
|
||||
}
|
||||
if !logs {
|
||||
t.Fatal("logs job was not called")
|
||||
}
|
||||
res := r.Body.String()
|
||||
if res != expected {
|
||||
t.Fatalf("Output %s, expected %s", res, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogsNoStreams(t *testing.T) {
|
||||
eng := engine.New()
|
||||
var inspect bool
|
||||
var logs bool
|
||||
eng.Register("container_inspect", func(job *engine.Job) engine.Status {
|
||||
inspect = true
|
||||
if len(job.Args) == 0 {
|
||||
t.Fatal("Job arguments is empty")
|
||||
}
|
||||
if job.Args[0] != "test" {
|
||||
t.Fatalf("Container name %s, must be test", job.Args[0])
|
||||
}
|
||||
return engine.StatusOK
|
||||
})
|
||||
eng.Register("logs", func(job *engine.Job) engine.Status {
|
||||
logs = true
|
||||
return engine.StatusOK
|
||||
})
|
||||
r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
|
||||
if r.Code != http.StatusBadRequest {
|
||||
t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
|
||||
}
|
||||
if inspect {
|
||||
t.Fatal("container_inspect job was called, but it shouldn't")
|
||||
}
|
||||
if logs {
|
||||
t.Fatal("logs job was called, but it shouldn't")
|
||||
}
|
||||
res := strings.TrimSpace(r.Body.String())
|
||||
expected := "Bad parameters: you must choose at least one stream"
|
||||
if !strings.Contains(res, expected) {
|
||||
t.Fatalf("Output %s, expected %s in it", res, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetImagesHistory(t *testing.T) {
|
||||
eng := engine.New()
|
||||
imageName := "docker-test-image"
|
||||
var called bool
|
||||
eng.Register("history", func(job *engine.Job) engine.Status {
|
||||
called = true
|
||||
if len(job.Args) == 0 {
|
||||
t.Fatal("Job arguments is empty")
|
||||
}
|
||||
if job.Args[0] != imageName {
|
||||
t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
|
||||
}
|
||||
v := &engine.Env{}
|
||||
if _, err := v.WriteTo(job.Stdout); err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
return engine.StatusOK
|
||||
})
|
||||
r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
|
||||
if !called {
|
||||
t.Fatalf("handler was not called")
|
||||
}
|
||||
if r.Code != http.StatusOK {
|
||||
t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
|
||||
}
|
||||
if r.HeaderMap.Get("Content-Type") != "application/json" {
|
||||
t.Fatalf("%#v\n", r)
|
||||
}
|
||||
}
|
||||
|
||||
func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
|
||||
r := httptest.NewRecorder()
|
||||
req, err := http.NewRequest(method, target, body)
|
||||
|
|
|
@ -27,6 +27,7 @@ type (
|
|||
Compression int
|
||||
TarOptions struct {
|
||||
Includes []string
|
||||
Excludes []string
|
||||
Compression Compression
|
||||
NoLchown bool
|
||||
}
|
||||
|
@ -43,6 +44,16 @@ const (
|
|||
Xz
|
||||
)
|
||||
|
||||
func IsArchive(header []byte) bool {
|
||||
compression := DetectCompression(header)
|
||||
if compression != Uncompressed {
|
||||
return true
|
||||
}
|
||||
r := tar.NewReader(bytes.NewBuffer(header))
|
||||
_, err := r.Next()
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func DetectCompression(source []byte) Compression {
|
||||
for compression, m := range map[Compression][]byte{
|
||||
Bzip2: {0x42, 0x5A, 0x68},
|
||||
|
@ -276,7 +287,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
|||
// Tar creates an archive from the directory at `path`, and returns it as a
|
||||
// stream of bytes.
|
||||
func Tar(path string, compression Compression) (io.ReadCloser, error) {
|
||||
return TarFilter(path, &TarOptions{Compression: compression})
|
||||
return TarWithOptions(path, &TarOptions{Compression: compression})
|
||||
}
|
||||
|
||||
func escapeName(name string) string {
|
||||
|
@ -295,12 +306,9 @@ func escapeName(name string) string {
|
|||
return string(escaped)
|
||||
}
|
||||
|
||||
// TarFilter creates an archive from the directory at `srcPath` with `options`, and returns it as a
|
||||
// stream of bytes.
|
||||
//
|
||||
// Files are included according to `options.Includes`, default to including all files.
|
||||
// Stream is compressed according to `options.Compression', default to Uncompressed.
|
||||
func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
|
||||
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
|
||||
// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`.
|
||||
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
|
||||
compressWriter, err := CompressStream(pipeWriter, options.Compression)
|
||||
|
@ -332,6 +340,21 @@ func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
for _, exclude := range options.Excludes {
|
||||
matched, err := filepath.Match(exclude, relFilePath)
|
||||
if err != nil {
|
||||
utils.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
|
||||
return err
|
||||
}
|
||||
if matched {
|
||||
utils.Debugf("Skipping excluded path: %s", relFilePath)
|
||||
if f.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := addTarFile(filePath, relFilePath, tw); err != nil {
|
||||
utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
|
||||
}
|
||||
|
@ -355,10 +378,13 @@ func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
|
|||
}
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
// and unpacks it into the directory at `path`.
|
||||
// and unpacks it into the directory at `dest`.
|
||||
// The archive may be compressed with one of the following algorithms:
|
||||
// identity (uncompressed), gzip, bzip2, xz.
|
||||
// FIXME: specify behavior when target path exists vs. doesn't exist.
|
||||
// If `dest` does not exist, it is created unless there are multiple entries in `archive`.
|
||||
// In the latter case, an error is returned.
|
||||
// If `dest` is an existing file, it gets overwritten.
|
||||
// If `dest` is an existing directory, its files get merged (with overwrite for conflicting files).
|
||||
func Untar(archive io.Reader, dest string, options *TarOptions) error {
|
||||
if archive == nil {
|
||||
return fmt.Errorf("Empty archive")
|
||||
|
@ -372,7 +398,22 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
|
|||
|
||||
tr := tar.NewReader(decompressedArchive)
|
||||
|
||||
var dirs []*tar.Header
|
||||
var (
|
||||
dirs []*tar.Header
|
||||
create bool
|
||||
multipleEntries bool
|
||||
)
|
||||
|
||||
if fi, err := os.Lstat(dest); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
// destination does not exist, so it is assumed it has to be created.
|
||||
create = true
|
||||
} else if !fi.IsDir() {
|
||||
// destination exists and is not a directory, so it will be overwritten.
|
||||
create = true
|
||||
}
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
|
@ -385,6 +426,11 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Return an error if destination needs to be created and there is more than 1 entry in the tar stream.
|
||||
if create && multipleEntries {
|
||||
return fmt.Errorf("Trying to untar an archive with multiple entries to an inexistant target `%s`: did you mean `%s` instead?", dest, filepath.Dir(dest))
|
||||
}
|
||||
|
||||
// Normalize name, for safety and for a simple is-root check
|
||||
hdr.Name = filepath.Clean(hdr.Name)
|
||||
|
||||
|
@ -400,7 +446,12 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
|
|||
}
|
||||
}
|
||||
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
var path string
|
||||
if create {
|
||||
path = dest // we are renaming hdr.Name to dest
|
||||
} else {
|
||||
path = filepath.Join(dest, hdr.Name)
|
||||
}
|
||||
|
||||
// If path exits we almost always just want to remove and replace it
|
||||
// The only exception is when it is a directory *and* the file from
|
||||
|
@ -416,10 +467,14 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Successfully added an entry. Predicting multiple entries for next iteration (not current one).
|
||||
multipleEntries = true
|
||||
|
||||
// Directory mtimes must be handled at the end to avoid further
|
||||
// file creation in them to modify the directory mtime
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
|
@ -443,7 +498,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
|
|||
// TarUntar aborts and returns the error.
|
||||
func TarUntar(src string, dst string) error {
|
||||
utils.Debugf("TarUntar(%s %s)", src, dst)
|
||||
archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed})
|
||||
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -63,8 +63,8 @@ func TestCmdStreamGood(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func tarUntar(t *testing.T, origin string, compression Compression) error {
|
||||
archive, err := Tar(origin, compression)
|
||||
func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
|
||||
archive, err := TarWithOptions(origin, options)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -72,37 +72,29 @@ func tarUntar(t *testing.T, origin string, compression Compression) error {
|
|||
|
||||
buf := make([]byte, 10)
|
||||
if _, err := archive.Read(buf); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
wrap := io.MultiReader(bytes.NewReader(buf), archive)
|
||||
|
||||
detectedCompression := DetectCompression(buf)
|
||||
compression := options.Compression
|
||||
if detectedCompression.Extension() != compression.Extension() {
|
||||
return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
|
||||
return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
|
||||
}
|
||||
|
||||
tmp, err := ioutil.TempDir("", "docker-test-untar")
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
if err := Untar(wrap, tmp, nil); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
if _, err := os.Stat(tmp); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes, err := ChangesDirs(origin, tmp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(changes) != 0 {
|
||||
t.Fatalf("Unexpected differences after tarUntar: %v", changes)
|
||||
}
|
||||
|
||||
return nil
|
||||
return ChangesDirs(origin, tmp)
|
||||
}
|
||||
|
||||
func TestTarUntar(t *testing.T) {
|
||||
|
@ -122,9 +114,90 @@ func TestTarUntar(t *testing.T) {
|
|||
Uncompressed,
|
||||
Gzip,
|
||||
} {
|
||||
if err := tarUntar(t, origin, c); err != nil {
|
||||
changes, err := tarUntar(t, origin, &TarOptions{
|
||||
Compression: c,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
|
||||
}
|
||||
|
||||
if len(changes) != 0 {
|
||||
t.Fatalf("Unexpected differences after tarUntar: %v", changes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTarWithOptions(t *testing.T) {
|
||||
origin, err := ioutil.TempDir("", "docker-test-untar-origin")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(origin)
|
||||
if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
opts *TarOptions
|
||||
numChanges int
|
||||
}{
|
||||
{&TarOptions{Includes: []string{"1"}}, 1},
|
||||
{&TarOptions{Excludes: []string{"2"}}, 1},
|
||||
}
|
||||
for _, testCase := range cases {
|
||||
changes, err := tarUntar(t, origin, testCase.opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
|
||||
}
|
||||
if len(changes) != testCase.numChanges {
|
||||
t.Errorf("Expected %d changes, got %d for %+v:",
|
||||
testCase.numChanges, len(changes), testCase.opts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTarUntarFile(t *testing.T) {
|
||||
origin, err := ioutil.TempDir("", "docker-test-untar-origin-file")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(origin)
|
||||
|
||||
if err := os.MkdirAll(path.Join(origin, "before"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(origin, "after"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(origin, "before", "file"), []byte("hello world"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(origin, "after", "file2"), []byte("please overwrite me"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tar, err := TarWithOptions(path.Join(origin, "before"), &TarOptions{Compression: Uncompressed, Includes: []string{"file"}})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := Untar(tar, path.Join(origin, "after", "file2"), nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
catCmd := exec.Command("cat", path.Join(origin, "after", "file2"))
|
||||
out, err := CmdStream(catCmd, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start command: %s", err)
|
||||
}
|
||||
if output, err := ioutil.ReadAll(out); err != nil {
|
||||
t.Error(err)
|
||||
} else if string(output) != "hello world" {
|
||||
t.Fatalf("Expected 'hello world', got '%s'", output)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -458,10 +458,21 @@ _docker_rm()
|
|||
{
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "-v --volumes -l --link" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "-f --force -l --link -v --volumes" -- "$cur" ) )
|
||||
return
|
||||
;;
|
||||
*)
|
||||
local force=
|
||||
for arg in "${COMP_WORDS[@]}"; do
|
||||
case "$arg" in
|
||||
-f|--force)
|
||||
__docker_containers_all
|
||||
return
|
||||
;;
|
||||
esac
|
||||
done
|
||||
__docker_containers_stopped
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
|
|
@ -79,13 +79,13 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d '
|
|||
|
||||
# commit
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. "John Hannibal Smith <hannibal@a-team.com>"'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>"'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
|
||||
|
||||
# cp
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from the containers filesystem to the host path'
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from a container's filesystem to the host path'
|
||||
|
||||
# diff
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#compdef docker
|
||||
#
|
||||
# zsh completion for docker (http://docker.io)
|
||||
# zsh completion for docker (http://docker.com)
|
||||
#
|
||||
# version: 0.2.2
|
||||
# author: Felix Riedel
|
||||
|
|
|
@ -11,6 +11,9 @@ DOCKER_OPTS=${DOCKER_OPTS:-}
|
|||
start() {
|
||||
checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE"
|
||||
|
||||
ulimit -n 1048576
|
||||
ulimit -u 1048576
|
||||
|
||||
ebegin "Starting docker daemon"
|
||||
start-stop-daemon --start --background \
|
||||
--exec "$DOCKER_BINARY" \
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[Unit]
|
||||
Description=Docker Application Container Engine
|
||||
Documentation=http://docs.docker.io
|
||||
Documentation=http://docs.docker.com
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[Unit]
|
||||
Description=Docker Application Container Engine
|
||||
Documentation=http://docs.docker.io
|
||||
Documentation=http://docs.docker.com
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
|
|
|
@ -22,7 +22,10 @@ BASE=$(basename $0)
|
|||
|
||||
# modify these in /etc/default/$BASE (/etc/default/docker)
|
||||
DOCKER=/usr/bin/$BASE
|
||||
# This is the pid file managed by docker itself
|
||||
DOCKER_PIDFILE=/var/run/$BASE.pid
|
||||
# This is the pid file created/managed by start-stop-daemon
|
||||
DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid
|
||||
DOCKER_LOGFILE=/var/log/$BASE.log
|
||||
DOCKER_OPTS=
|
||||
DOCKER_DESC="Docker"
|
||||
|
@ -85,11 +88,15 @@ case "$1" in
|
|||
touch "$DOCKER_LOGFILE"
|
||||
chgrp docker "$DOCKER_LOGFILE"
|
||||
|
||||
ulimit -n 1048576
|
||||
ulimit -u 1048576
|
||||
|
||||
log_begin_msg "Starting $DOCKER_DESC: $BASE"
|
||||
start-stop-daemon --start --background \
|
||||
--no-close \
|
||||
--exec "$DOCKER" \
|
||||
--pidfile "$DOCKER_PIDFILE" \
|
||||
--pidfile "$DOCKER_SSD_PIDFILE" \
|
||||
--make-pidfile \
|
||||
-- \
|
||||
-d -p "$DOCKER_PIDFILE" \
|
||||
$DOCKER_OPTS \
|
||||
|
@ -100,13 +107,13 @@ case "$1" in
|
|||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $DOCKER_DESC: $BASE"
|
||||
start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE"
|
||||
start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE"
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart)
|
||||
fail_unless_root
|
||||
docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null`
|
||||
docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
|
||||
[ -n "$docker_pid" ] \
|
||||
&& ps -p $docker_pid > /dev/null 2>&1 \
|
||||
&& $0 stop
|
||||
|
@ -119,7 +126,7 @@ case "$1" in
|
|||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker
|
||||
status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" docker
|
||||
;;
|
||||
|
||||
*)
|
||||
|
|
|
@ -2,10 +2,10 @@
|
|||
#
|
||||
# /etc/rc.d/init.d/docker
|
||||
#
|
||||
# Daemon for docker.io
|
||||
# Daemon for docker.com
|
||||
#
|
||||
# chkconfig: 2345 95 95
|
||||
# description: Daemon for docker.io
|
||||
# description: Daemon for docker.com
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: docker
|
||||
|
@ -16,7 +16,7 @@
|
|||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: start and stop docker
|
||||
# Description: Daemon for docker.io
|
||||
# Description: Daemon for docker.com
|
||||
### END INIT INFO
|
||||
|
||||
# Source function library.
|
||||
|
|
2
contrib/man/.gitignore
vendored
2
contrib/man/.gitignore
vendored
|
@ -1,2 +0,0 @@
|
|||
# these are generated by the md/md2man-all.sh script
|
||||
man*
|
|
@ -1,21 +0,0 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
# NAME
|
||||
docker-kill - Kill a running container (send SIGKILL, or specified signal)
|
||||
|
||||
# SYNOPSIS
|
||||
**docker kill** **--signal**[=*"KILL"*] CONTAINER [CONTAINER...]
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
The main process inside each container specified will be sent SIGKILL,
|
||||
or any signal specified with option --signal.
|
||||
|
||||
# OPTIONS
|
||||
**-s**, **--signal**=*"KILL"*
|
||||
Signal to send to the container
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
|
@ -1,15 +0,0 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
# NAME
|
||||
docker-port - Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
|
||||
|
||||
# SYNOPSIS
|
||||
**docker port** CONTAINER PRIVATE_PORT
|
||||
|
||||
# DESCRIPTION
|
||||
Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
|
@ -1,21 +0,0 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
# NAME
|
||||
docker-restart - Restart a running container
|
||||
|
||||
# SYNOPSIS
|
||||
**docker restart** [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...]
|
||||
|
||||
# DESCRIPTION
|
||||
Restart each container listed.
|
||||
|
||||
# OPTIONS
|
||||
**-t**, **--time**=NUM
|
||||
Number of seconds to try to stop for before killing the container. Once
|
||||
killed it will then be restarted. Default=10
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
# NAME
|
||||
docker-start - Restart a stopped container
|
||||
|
||||
# SYNOPSIS
|
||||
**docker start** [**a**|**--attach**[=*false*]] [**-i**|**--interactive**
|
||||
[=*true*] CONTAINER [CONTAINER...]
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
Start a stopped container.
|
||||
|
||||
# OPTION
|
||||
**-a**, **--attach**=*true*|*false*
|
||||
When true attach to container's stdout/stderr and forward all signals to
|
||||
the process
|
||||
|
||||
**-i**, **--interactive**=*true*|*false*
|
||||
When true attach to container's stdin
|
||||
|
||||
# NOTES
|
||||
If run on a started container, start takes no action and succeeds
|
||||
unconditionally.
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
|
@ -1,22 +0,0 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
# NAME
|
||||
docker-stop - Stop a running container
|
||||
grace period)
|
||||
|
||||
# SYNOPSIS
|
||||
**docker stop** [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...]
|
||||
|
||||
# DESCRIPTION
|
||||
Stop a running container (Send SIGTERM, and then SIGKILL after
|
||||
grace period)
|
||||
|
||||
# OPTIONS
|
||||
**-t**, **--time**=NUM
|
||||
Wait NUM number of seconds for the container to stop before killing it.
|
||||
The default is 10 seconds.
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
|
@ -1,56 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-attach.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-attach \- Attach to a running container
|
||||
.SH SYNOPSIS
|
||||
.B docker attach
|
||||
\fB--no-stdin\fR[=\fIfalse\fR]
|
||||
\fB--sig-proxy\fR[=\fItrue\fR]
|
||||
container
|
||||
.SH DESCRIPTION
|
||||
If you \fBdocker run\fR a container in detached mode (\fB-d\fR), you can reattach to the detached container with \fBdocker attach\fR using the container's ID or name.
|
||||
.sp
|
||||
You can detach from the container again (and leave it running) with CTRL-c (for a quiet exit) or CTRL-\ to get a stacktrace of the Docker client when it quits. When you detach from the container the exit code will be returned to the client.
|
||||
.SH "OPTIONS"
|
||||
.TP
|
||||
.B --no-stdin=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, do not attach to stdin. The default is \fIfalse\fR.
|
||||
.TP
|
||||
.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, proxify all received signal to the process (even in non-tty mode). The default is \fItrue\fR.
|
||||
.sp
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.PP
|
||||
.B Attaching to a container
|
||||
.TP
|
||||
In this example the top command is run inside a container, from an image called fedora, in detached mode. The ID from the container is passed into the \fBdocker attach\fR command:
|
||||
.sp
|
||||
.nf
|
||||
.RS
|
||||
# ID=$(sudo docker run -d fedora /usr/bin/top -b)
|
||||
# sudo docker attach $ID
|
||||
top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
|
||||
Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
|
||||
Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
|
||||
Mem: 373572k total, 355560k used, 18012k free, 27872k buffers
|
||||
Swap: 786428k total, 0k used, 786428k free, 221740k cached
|
||||
|
||||
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
|
||||
1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top
|
||||
|
||||
top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
|
||||
Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
|
||||
Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
|
||||
Mem: 373572k total, 355244k used, 18328k free, 27872k buffers
|
||||
Swap: 786428k total, 0k used, 786428k free, 221776k cached
|
||||
|
||||
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
|
||||
1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
|
||||
.RE
|
||||
.fi
|
||||
.sp
|
||||
.SH HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,65 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-build.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-build \- Build an image from a Dockerfile source at PATH
|
||||
.SH SYNOPSIS
|
||||
.B docker build
|
||||
[\fB--no-cache\fR[=\fIfalse\fR]]
|
||||
[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]]
|
||||
[\fB--rm\fR[=\fItrue\fR]]
|
||||
[\fB-t\fR|\fB--tag\fR=\fItag\fR]
|
||||
PATH | URL | -
|
||||
.SH DESCRIPTION
|
||||
This will read the Dockerfile from the directory specified in \fBPATH\fR. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory are used by ADD instructions found within the Dockerfile.
|
||||
Warning: this will send a lot of data to the Docker daemon if the current directory contains a lot of data.
|
||||
If the absolute path is provided instead of ‘.’, only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the Docker daemon.
|
||||
.sp
|
||||
When a single Dockerfile is given as URL, then no context is set. When a Git repository is set as URL, the repository is used as context.
|
||||
.SH "OPTIONS"
|
||||
.TP
|
||||
.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, suppress verbose build output. Default is \fIfalse\fR.
|
||||
.TP
|
||||
.B --rm=\fItrue\fR|\fIfalse\fR:
|
||||
When true, remove intermediate containers that are created during the build process. The default is true.
|
||||
.TP
|
||||
.B -t, --tag=\fItag\fR:
|
||||
Tag to be applied to the resulting image on successful completion of the build.
|
||||
.TP
|
||||
.B --no-cache=\fItrue\fR|\fIfalse\fR
|
||||
When set to true, do not use a cache when building the image. The default is \fIfalse\fR.
|
||||
.sp
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.sp
|
||||
.B Building an image from current directory
|
||||
.TP
|
||||
Using a Dockerfile, Docker images are built with the build command:
|
||||
.sp
|
||||
.RS
|
||||
docker build .
|
||||
.RE
|
||||
.sp
|
||||
If, for some reason, you do not want to remove the intermediate containers created during the build, you must set --rm=false.
|
||||
.sp
|
||||
.RS
|
||||
docker build --rm=false .
|
||||
.sp
|
||||
.RE
|
||||
.sp
|
||||
A good practice is to make a subdirectory with a related name and create the Dockerfile in that directory. E.g. a directory called mongo may contain a Dockerfile for a MongoDB image, or a directory called httpd may contain a Dockerfile for an Apache web server.
|
||||
.sp
|
||||
It is also good practice to add the files required for the image to the subdirectory. These files are then referenced with the `ADD` instruction in the Dockerfile. Note: if you include a tar file, which is good practice, then Docker will automatically extract the contents of the tar file specified in the `ADD` instruction into the specified target.
|
||||
.sp
|
||||
.B Building an image using a URL
|
||||
.TP
|
||||
This will clone the GitHub repository and use it as the context. The Dockerfile at the root of the repository is used as the Dockerfile. This only works if the GitHub repository is a dedicated repository. Note that you can specify an arbitrary Git repository by using the ‘git://’ scheme.
|
||||
.sp
|
||||
.RS
|
||||
docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
|
||||
.RE
|
||||
.sp
|
||||
.SH HISTORY
|
||||
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,84 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-images.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "April 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-images \- List the images in the local repository
|
||||
.SH SYNOPSIS
|
||||
.B docker images
|
||||
[\fB-a\fR|\fB--all\fR=\fIfalse\fR]
|
||||
[\fB--no-trunc\fR[=\fIfalse\fR]]
|
||||
[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]]
|
||||
[\fB-t\fR|\fB--tree\fR=\fIfalse\fR]
|
||||
[\fB-v\fR|\fB--viz\fR=\fIfalse\fR]
|
||||
[NAME]
|
||||
.SH DESCRIPTION
|
||||
This command lists the images stored in the local Docker repository.
|
||||
.sp
|
||||
By default, intermediate images used during builds are not listed. Some of the output, e.g. the image ID, is truncated for space reasons. However, the truncated image ID, and often just its first few characters, is enough to be used in other Docker commands that take an image ID. The output includes the repository, tag, image ID, date created, and virtual size.
|
||||
.sp
|
||||
The REPOSITORY title for the first column may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name.
|
||||
.SH "OPTIONS"
|
||||
.TP
|
||||
.B -a, --all=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, also include all intermediate images in the list. The default is false.
|
||||
.TP
|
||||
.B --no-trunc=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, list the full image ID and not the truncated ID. The default is false.
|
||||
.TP
|
||||
.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, list only the numeric image IDs. The default is false.
|
||||
.TP
|
||||
.B -t, --tree=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, list the images in a dependency tree (hierarchy) format. The default is false.
|
||||
.TP
|
||||
.B -v, --viz=\fItrue\fR|\fIfalse\fR
|
||||
When set to true, list the graph in graphviz format. The default is \fIfalse\fR.
|
||||
.sp
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.B Listing the images
|
||||
.TP
|
||||
To list the images in a local repository (not the registry) run:
|
||||
.sp
|
||||
.RS
|
||||
docker images
|
||||
.RE
|
||||
.sp
|
||||
The list will contain the image repository name, a tag for the image, an image ID, when it was created, and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and VIRTUAL SIZE.
|
||||
.sp
|
||||
To get a verbose list of images which contains all the intermediate images used in builds use \fB-a\fR:
|
||||
.sp
|
||||
.RS
|
||||
docker images -a
|
||||
.RE
|
||||
.sp
|
||||
.B List images dependency tree hierarchy
|
||||
.TP
|
||||
To list the images in the local repository (not the registry) in a dependency tree format then use the \fB-t\fR|\fB--tree=true\fR option.
|
||||
.sp
|
||||
.RS
|
||||
docker images -t
|
||||
.RE
|
||||
.sp
|
||||
This displays a staggered hierarchy tree where the less indented image is the oldest with dependent image layers branching inward (to the right) on subsequent lines. The newest or top level image layer is listed last in any tree branch.
|
||||
.sp
|
||||
.B List images in GraphViz format
|
||||
.TP
|
||||
To display the list in a format consumable by GraphViz tools, run with \fB-v\fR|\fB--viz=true\fR. For example, to produce a .png graph file of the hierarchy, use:
|
||||
.sp
|
||||
.RS
|
||||
docker images --viz | dot -Tpng -o docker.png
|
||||
.sp
|
||||
.RE
|
||||
.sp
|
||||
.B Listing only the shortened image IDs
|
||||
.TP
|
||||
Listing just the shortened image IDs. This can be useful for some automated tools.
|
||||
.sp
|
||||
.RS
|
||||
docker images -q
|
||||
.RE
|
||||
.sp
|
||||
.SH HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,39 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-info.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-info \- Display system wide information
|
||||
.SH SYNOPSIS
|
||||
.B docker info
|
||||
.SH DESCRIPTION
|
||||
This command displays system wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used, total metadata space, execution driver, and the kernel version.
|
||||
.sp
|
||||
The data file is where the images are stored and the metadata file is where the metadata regarding those images is stored. When run for the first time, Docker allocates a certain amount of data space and metadata space from the space available on the volume where /var/lib/docker is mounted.
|
||||
.SH "OPTIONS"
|
||||
There are no available options.
|
||||
.sp
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.B Display Docker system information
|
||||
.TP
|
||||
Here is a sample output:
|
||||
.sp
|
||||
.RS
|
||||
# docker info
|
||||
Containers: 18
|
||||
Images: 95
|
||||
Storage Driver: devicemapper
|
||||
Pool Name: docker-8:1-170408448-pool
|
||||
Data file: /var/lib/docker/devicemapper/devicemapper/data
|
||||
Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata
|
||||
Data Space Used: 9946.3 Mb
|
||||
Data Space Total: 102400.0 Mb
|
||||
Metadata Space Used: 9.9 Mb
|
||||
Metadata Space Total: 2048.0 Mb
|
||||
Execution Driver: native-0.1
|
||||
Kernel Version: 3.10.0-116.el7.x86_64
|
||||
.RE
|
||||
.sp
|
||||
.SH HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,237 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-inspect.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-inspect \- Return low-level information on a container/image
|
||||
.SH SYNOPSIS
|
||||
.B docker inspect
|
||||
[\fB-f\fR|\fB--format\fR=""]
|
||||
CONTAINER|IMAGE [CONTAINER|IMAGE...]
|
||||
.SH DESCRIPTION
|
||||
This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result.
|
||||
.SH "OPTIONS"
|
||||
.TP
|
||||
.B -f, --format="":
|
||||
The text/template package of Go describes all the details of the format. See the EXAMPLES section.
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.PP
|
||||
.B Getting information on a container
|
||||
.TP
|
||||
To get information on a container, use its ID or instance name:
|
||||
.sp
|
||||
.fi
|
||||
.RS
|
||||
# docker inspect 1eb5fabf5a03
|
||||
|
||||
[{
|
||||
"ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b",
|
||||
"Created": "2014-04-04T21:33:52.02361335Z",
|
||||
"Path": "/usr/sbin/nginx",
|
||||
"Args": [],
|
||||
"Config": {
|
||||
"Hostname": "1eb5fabf5a03",
|
||||
"Domainname": "",
|
||||
"User": "",
|
||||
"Memory": 0,
|
||||
"MemorySwap": 0,
|
||||
"CpuShares": 0,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"AttachStderr": false,
|
||||
"PortSpecs": null,
|
||||
"ExposedPorts": {
|
||||
"80/tcp": {}
|
||||
},
|
||||
"Tty": true,
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Env": [
|
||||
"HOME=/",
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"Cmd": [
|
||||
"/usr/sbin/nginx"
|
||||
],
|
||||
"Dns": null,
|
||||
"DnsSearch": null,
|
||||
"Image": "summit/nginx",
|
||||
"Volumes": null,
|
||||
"VolumesFrom": "",
|
||||
"WorkingDir": "",
|
||||
"Entrypoint": null,
|
||||
"NetworkDisabled": false,
|
||||
"OnBuild": null,
|
||||
"Context": {
|
||||
"mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650",
|
||||
"process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650"
|
||||
}
|
||||
},
|
||||
"State": {
|
||||
"Running": true,
|
||||
"Pid": 858,
|
||||
"ExitCode": 0,
|
||||
"StartedAt": "2014-04-04T21:33:54.16259207Z",
|
||||
"FinishedAt": "0001-01-01T00:00:00Z",
|
||||
"Ghost": false
|
||||
},
|
||||
"Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6",
|
||||
"NetworkSettings": {
|
||||
"IPAddress": "172.17.0.2",
|
||||
"IPPrefixLen": 16,
|
||||
"Gateway": "172.17.42.1",
|
||||
"Bridge": "docker0",
|
||||
"PortMapping": null,
|
||||
"Ports": {
|
||||
"80/tcp": [
|
||||
{
|
||||
"HostIp": "0.0.0.0",
|
||||
"HostPort": "80"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"ResolvConfPath": "/etc/resolv.conf",
|
||||
"HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname",
|
||||
"HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts",
|
||||
"Name": "/ecstatic_ptolemy",
|
||||
"Driver": "devicemapper",
|
||||
"ExecDriver": "native-0.1",
|
||||
"Volumes": {},
|
||||
"VolumesRW": {},
|
||||
"HostConfig": {
|
||||
"Binds": null,
|
||||
"ContainerIDFile": "",
|
||||
"LxcConf": [],
|
||||
"Privileged": false,
|
||||
"PortBindings": {
|
||||
"80/tcp": [
|
||||
{
|
||||
"HostIp": "0.0.0.0",
|
||||
"HostPort": "80"
|
||||
}
|
||||
]
|
||||
},
|
||||
"Links": null,
|
||||
"PublishAllPorts": false,
|
||||
"DriverOptions": {
|
||||
"lxc": null
|
||||
},
|
||||
"CliAddress": ""
|
||||
}
|
||||
.RE
|
||||
.nf
|
||||
.sp
|
||||
.B Getting the IP address of a container instance
|
||||
.TP
|
||||
To get the IP address of a container use:
|
||||
.sp
|
||||
.fi
|
||||
.RS
|
||||
# docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03
|
||||
|
||||
172.17.0.2
|
||||
.RE
|
||||
.nf
|
||||
.sp
|
||||
.B Listing all port bindings
|
||||
.TP
|
||||
One can loop over arrays and maps in the results to produce simple text output:
|
||||
.sp
|
||||
.fi
|
||||
.RS
|
||||
# docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03
|
||||
|
||||
80/tcp -> 80
|
||||
.RE
|
||||
.nf
|
||||
.sp
|
||||
.B Getting information on an image
|
||||
.TP
|
||||
Use an image's ID or name (e.g. repository/name[:tag]) to get information on it.
|
||||
.sp
|
||||
.fi
|
||||
.RS
|
||||
docker inspect 58394af37342
|
||||
[{
|
||||
"id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9",
|
||||
"parent": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
|
||||
"created": "2014-02-03T16:10:40.500814677Z",
|
||||
"container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5",
|
||||
"container_config": {
|
||||
"Hostname": "88807319f25e",
|
||||
"Domainname": "",
|
||||
"User": "",
|
||||
"Memory": 0,
|
||||
"MemorySwap": 0,
|
||||
"CpuShares": 0,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"AttachStderr": false,
|
||||
"PortSpecs": null,
|
||||
"ExposedPorts": null,
|
||||
"Tty": false,
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Env": [
|
||||
"HOME=/",
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"Cmd": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"#(nop) ADD fedora-20-medium.tar.xz in /"
|
||||
],
|
||||
"Dns": null,
|
||||
"DnsSearch": null,
|
||||
"Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
|
||||
"Volumes": null,
|
||||
"VolumesFrom": "",
|
||||
"WorkingDir": "",
|
||||
"Entrypoint": null,
|
||||
"NetworkDisabled": false,
|
||||
"OnBuild": null,
|
||||
"Context": null
|
||||
},
|
||||
"docker_version": "0.6.3",
|
||||
"author": "Lokesh Mandvekar \u003clsm5@redhat.com\u003e - ./buildcontainers.sh",
|
||||
"config": {
|
||||
"Hostname": "88807319f25e",
|
||||
"Domainname": "",
|
||||
"User": "",
|
||||
"Memory": 0,
|
||||
"MemorySwap": 0,
|
||||
"CpuShares": 0,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"AttachStderr": false,
|
||||
"PortSpecs": null,
|
||||
"ExposedPorts": null,
|
||||
"Tty": false,
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Env": [
|
||||
"HOME=/",
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"Cmd": null,
|
||||
"Dns": null,
|
||||
"DnsSearch": null,
|
||||
"Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
|
||||
"Volumes": null,
|
||||
"VolumesFrom": "",
|
||||
"WorkingDir": "",
|
||||
"Entrypoint": null,
|
||||
"NetworkDisabled": false,
|
||||
"OnBuild": null,
|
||||
"Context": null
|
||||
},
|
||||
"architecture": "x86_64",
|
||||
"Size": 385520098
|
||||
}]
|
||||
.RE
|
||||
.nf
|
||||
.sp
|
||||
.SH HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,45 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-rm.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-rm \- Remove one or more containers.
|
||||
.SH SYNOPSIS
|
||||
.B docker rm
|
||||
[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]]
|
||||
[\fB-l\fR|\fB--link\fR[=\fIfalse\fR]]
|
||||
[\fB-v\fR|\fB--volumes\fR[=\fIfalse\fR]]
|
||||
CONTAINER [CONTAINER...]
|
||||
.SH DESCRIPTION
|
||||
This will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the \fBdocker ps -a\fR command.
|
||||
.SH "OPTIONS"
|
||||
.TP
|
||||
.B -f, --force=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, force the removal of the container. The default is \fIfalse\fR.
|
||||
.TP
|
||||
.B -l, --link=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, remove the specified link and not the underlying container. The default is \fIfalse\fR.
|
||||
.TP
|
||||
.B -v, --volumes=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, remove the volumes associated to the container. The default is \fIfalse\fR.
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.PP
|
||||
.B Removing a container using its ID
|
||||
.TP
|
||||
To remove a container using its ID, find the ID either from a \fBdocker ps -a\fR command, use the ID returned from the \fBdocker run\fR command, or retrieve it from a file used to store it via \fBdocker run --cidfile\fR:
|
||||
.sp
|
||||
.RS
|
||||
docker rm abebf7571666
|
||||
.RE
|
||||
.sp
|
||||
.B Removing a container using the container name:
|
||||
.TP
|
||||
The name of the container can be found using the \fBdocker ps -a\fR command. Then use that name as follows:
|
||||
.sp
|
||||
.RS
|
||||
docker rm hopeful_morse
|
||||
.RE
|
||||
.sp
|
||||
.SH HISTORY
|
||||
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,50 +0,0 @@
|
|||
DOCKER "1" "APRIL 2014" "0.1" "Docker"
|
||||
=======================================
|
||||
|
||||
NAME
|
||||
----
|
||||
|
||||
docker-rm - Remove one or more containers.
|
||||
|
||||
SYNOPSIS
|
||||
--------
|
||||
|
||||
`docker rm` [`-f`|`--force`[=*false*]] [`-l`|`--link`[=*false*]] [`-v`|`--volumes`[=*false*]]
|
||||
CONTAINER [CONTAINER...]
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
|
||||
`docker rm` will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the `-f` option. To see all containers on a host use the `docker ps -a` command.
|
||||
|
||||
OPTIONS
|
||||
-------
|
||||
|
||||
`-f`, `--force`=*true*|*false*:
|
||||
When set to true, force the removal of the container. The default is *false*.
|
||||
|
||||
`-l`, `--link`=*true*|*false*:
|
||||
When set to true, remove the specified link and not the underlying container. The default is *false*.
|
||||
|
||||
`-v`, `--volumes`=*true*|*false*:
|
||||
When set to true, remove the volumes associated to the container. The default is *false*.
|
||||
|
||||
EXAMPLES
|
||||
--------
|
||||
|
||||
##Removing a container using its ID##
|
||||
|
||||
To remove a container using its ID, find the ID either from a `docker ps -a` command, use the ID returned from the `docker run` command, or retrieve it from a file used to store it via `docker run --cidfile`:
|
||||
|
||||
docker rm abebf7571666
|
||||
|
||||
##Removing a container using the container name##
|
||||
|
||||
The name of the container can be found using the `docker ps -a` command. Then use that name as follows:
|
||||
|
||||
docker rm hopeful_morse
|
||||
|
||||
HISTORY
|
||||
-------
|
||||
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,29 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-run.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-rmi \- Remove one or more images.
|
||||
.SH SYNOPSIS
|
||||
.B docker rmi
|
||||
[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]]
|
||||
IMAGE [IMAGE...]
|
||||
.SH DESCRIPTION
|
||||
This will remove one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the \fB-f\fR option. To see all images on a host use the \fBdocker images\fR command.
|
||||
.SH "OPTIONS"
|
||||
.TP
|
||||
.B -f, --force=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, force the removal of the image. The default is \fIfalse\fR.
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.PP
|
||||
.B Removing an image
|
||||
.TP
|
||||
Here is an example of removing an image:
|
||||
.sp
|
||||
.RS
|
||||
docker rmi fedora/httpd
|
||||
.RE
|
||||
.sp
|
||||
.SH HISTORY
|
||||
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,277 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-run.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-run \- Run a process in an isolated container
|
||||
.SH SYNOPSIS
|
||||
.B docker run
|
||||
[\fB-a\fR|\fB--attach\fR[=]] [\fB-c\fR|\fB--cpu-shares\fR[=\fI0\fR]] [\fB-m\fR|\fB--memory\fR=\fImemory-limit\fR]
|
||||
[\fB--cidfile\fR=\fIfile\fR] [\fB-d\fR|\fB--detach\fR[=\fIfalse\fR]] [\fB--dns\fR=\fIIP-address\fR]
|
||||
[\fB--name\fR=\fIname\fR] [\fB-u\fR|\fB--user\fR=\fIusername\fR|\fIuid\fR]
|
||||
[\fB--link\fR=\fIname\fR:\fIalias\fR]
|
||||
[\fB-e\fR|\fB--env\fR=\fIenvironment\fR] [\fB--entrypoint\fR=\fIcommand\fR]
|
||||
[\fB--expose\fR=\fIport\fR] [\fB-P\fR|\fB--publish-all\fR[=\fIfalse\fR]]
|
||||
[\fB-p\fR|\fB--publish\fR=\fIport-mapping\fR] [\fB-h\fR|\fB--hostname\fR=\fIhostname\fR]
|
||||
[\fB--rm\fR[=\fIfalse\fR]] [\fB--privileged\fR[=\fIfalse\fR]]
|
||||
[\fB-i\fR|\fB--interactive\fR[=\fIfalse\fR]]
|
||||
[\fB-t\fR|\fB--tty\fR[=\fIfalse\fR]] [\fB--lxc-conf\fR=\fIoptions\fR]
|
||||
[\fB-n\fR|\fB--networking\fR[=\fItrue\fR]]
|
||||
[\fB-v\fR|\fB--volume\fR=\fIvolume\fR] [\fB--volumes-from\fR=\fIcontainer-id\fR]
|
||||
[\fB-w\fR|\fB--workdir\fR=\fIdirectory\fR] [\fB--sig-proxy\fR[=\fItrue\fR]]
|
||||
IMAGE [COMMAND] [ARG...]
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
Run a process in a new container. \fBdocker run\fR starts a process with its own file system, its own networking, and its own isolated process tree. The \fIIMAGE\fR which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but \fBdocker run\fR gives final control to the operator or administrator who starts the container from the image. For that reason \fBdocker run\fR has more options than any other docker command.
|
||||
|
||||
If the \fIIMAGE\fR is not already loaded then \fBdocker run\fR will pull the \fIIMAGE\fR, and all image dependencies, from the repository in the same way as running \fBdocker pull\fR \fIIMAGE\fR would, before it starts the container from that image.
|
||||
|
||||
|
||||
.SH "OPTIONS"
|
||||
|
||||
.TP
|
||||
.B -a, --attach=\fIstdin\fR|\fIstdout\fR|\fIstderr\fR:
|
||||
Attach to stdin, stdout or stderr. In foreground mode (the default when -d is not specified), \fBdocker run\fR can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The \fB-a\fR option can be set for each of stdin, stdout, and stderr.
|
||||
|
||||
.TP
|
||||
.B -c, --cpu-shares=0:
|
||||
CPU shares in relative weight. You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via \fBdocker run\fR.
|
||||
|
||||
.TP
|
||||
.B -m, --memory=\fImemory-limit\fR:
|
||||
Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. If a limit of 0 is specified, the container's memory is not limited. The memory limit format: <number><optional unit>, where unit = b, k, m or g.
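.sp
As a minimal sketch of the unit format (the image and limit shown here are only illustrative), the following would cap the container at 512 megabytes:
.sp
.RS
docker run -m 512m fedora /bin/bash
.RE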
|
||||
|
||||
.TP
|
||||
.B --cidfile=\fIfile\fR:
|
||||
Write the container ID to the file specified.
|
||||
|
||||
.TP
|
||||
.B -d, --detach=\fItrue\fR|\fIfalse\fR:
|
||||
Detached mode. This runs the container in the background. It outputs the new container's ID and any error messages. At any time you can run \fBdocker ps\fR in another shell to view a list of the running containers. You can reattach to a detached container with \fBdocker attach\fR. If you choose to run a container in detached mode, then you cannot use the \fB--rm\fR option.
|
||||
|
||||
.TP
|
||||
.B --dns=\fIIP-address\fR:
|
||||
Set custom DNS servers. This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case, the \fB--dns\fR flag is necessary for every run.
|
||||
|
||||
.TP
|
||||
.B -e, --env=\fIenvironment\fR:
|
||||
Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container.
|
||||
|
||||
.TP
|
||||
.B --entrypoint=\fIcommand\fR:
|
||||
This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a \fB--entrypoint\fR and a string to specify the new ENTRYPOINT.
|
||||
|
||||
.TP
|
||||
.B --expose=\fIport\fR:
|
||||
Expose a port from the container without publishing it to your host. A container's port can be exposed to other containers in three ways: 1) the developer can expose the port using the EXPOSE instruction in the Dockerfile, 2) the operator can use the \fB--expose\fR option with \fBdocker run\fR, or 3) the container can be started with the \fB--link\fR option.
|
||||
|
||||
.TP
|
||||
.B -P, --publish-all=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. To find the map between the host ports and the exposed ports, use \fBdocker port\fR.
|
||||
|
||||
.TP
|
||||
.B -p, --publish=[]:
|
||||
Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)
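.sp
A brief sketch of the three formats (the addresses, ports, and image are only illustrative):
.sp
.RS
.nf
docker run -d -p 127.0.0.1:8080:80 fedora/httpd
docker run -d -p 127.0.0.1::80 fedora/httpd
docker run -d -p 8080:80 fedora/httpd
.fi
.RE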
|
||||
|
||||
.TP
|
||||
.B -h , --hostname=\fIhostname\fR:
|
||||
Sets the container host name that is available inside the container.
|
||||
|
||||
.TP
|
||||
.B -i , --interactive=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, keep stdin open even if not attached. The default is false.
|
||||
|
||||
.TP
|
||||
.B --link=\fIname\fR:\fIalias\fR:
|
||||
Add link to another container. The format is name:alias. If the operator uses \fB--link\fR when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use.
|
||||
|
||||
.TP
|
||||
.B -n, --networking=\fItrue\fR|\fIfalse\fR:
|
||||
By default, all containers have networking enabled (true) and can make outgoing connections. The operator can disable networking by setting \fB--networking\fR to false. This disables all incoming and outgoing networking. In cases like this, I/O can only be performed through files or by using STDIN/STDOUT.
|
||||
|
||||
Also by default, the container will use the same DNS servers as the host, but the operator may override this with \fB--dns\fR.
|
||||
|
||||
.TP
|
||||
.B --name=\fIname\fR:
|
||||
Assign a name to the container. The operator can identify a container in three ways:
|
||||
.sp
|
||||
.nf
|
||||
UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
|
||||
UUID short identifier (“f78375b1c487”)
|
||||
Name (“jonah”)
|
||||
.fi
|
||||
.sp
|
||||
The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with \fB--name\fR then the daemon will also generate a random string name. The name is useful when defining links (see \fB--link\fR) (or any other place you need to identify a container). This works for both background and foreground Docker containers.
|
||||
|
||||
.TP
|
||||
.B --privileged=\fItrue\fR|\fIfalse\fR:
|
||||
Give extended privileges to this container. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices.
|
||||
|
||||
When the operator executes \fBdocker run --privileged\fR, Docker will enable access to all devices on the host as well as set some configuration in AppArmor (\fB???\fR) to allow the container nearly all the same access to the host as processes running outside of a container on the host.
|
||||
|
||||
.TP
|
||||
.B --rm=\fItrue\fR|\fIfalse\fR:
|
||||
If set to \fItrue\fR the container is automatically removed when it exits. The default is \fIfalse\fR. This option is incompatible with \fB-d\fR.
|
||||
|
||||
.TP
|
||||
.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, proxy all received signals to the process (even in non-tty mode). The default is true.
|
||||
|
||||
.TP
|
||||
.B -t, --tty=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default value is false.
|
||||
|
||||
.TP
|
||||
.B -u, --user=\fIusername\fR|\fIuid\fR:
|
||||
Set a username or UID for the container.
|
||||
|
||||
.TP
|
||||
.B -v, --volume=\fIvolume\fR:
|
||||
Bind mount a volume to the container. The \fB-v\fR option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the \fB--volumes-from\fR option. See examples.
|
||||
|
||||
.TP
|
||||
.B --volumes-from=\fIcontainer-id\fR:
|
||||
Will mount volumes from the specified container identified by container-id. Once a volume is mounted in one container, it can be shared with other containers using the \fB--volumes-from\fR option when running those other containers. The volumes can be shared even if the original container with the mount is not running.
|
||||
|
||||
.TP
|
||||
.B -w, --workdir=\fIdirectory\fR:
|
||||
Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the \fB-w\fR option.
|
||||
|
||||
.TP
|
||||
.B IMAGE:
|
||||
The image name or ID.
|
||||
|
||||
.TP
|
||||
.B COMMAND:
|
||||
The command or program to run inside the image.
|
||||
|
||||
.TP
|
||||
.B ARG:
|
||||
The arguments for the command to be run in the container.
|
||||
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.sp
|
||||
.B Exposing log messages from the container to the host's log
|
||||
.TP
|
||||
If you want messages that are logged in your container to show up in the host's syslog/journal, then you should bind mount the /dev/log socket as follows.
|
||||
.sp
|
||||
.RS
|
||||
docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
|
||||
.RE
|
||||
.sp
|
||||
From inside the container you can test this by sending a message to the log.
|
||||
.sp
|
||||
.RS
|
||||
logger "Hello from my container"
|
||||
.sp
|
||||
.RE
|
||||
Then exit and check the journal.
|
||||
.RS
|
||||
.sp
|
||||
exit
|
||||
.sp
|
||||
journalctl -b | grep Hello
|
||||
.RE
|
||||
.sp
|
||||
This should list the message sent to logger.
|
||||
.sp
|
||||
.B Attaching to one or more of STDIN, STDOUT, STDERR
|
||||
.TP
|
||||
If you do not specify -a then Docker will attach everything (stdin,stdout,stderr). You can specify to which of the three standard streams (stdin, stdout, stderr) you’d like to connect instead, as in:
|
||||
.sp
|
||||
.RS
|
||||
docker run -a stdin -a stdout -i -t fedora /bin/bash
|
||||
.RE
|
||||
.sp
|
||||
.B Linking Containers
|
||||
.TP
|
||||
The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows:
|
||||
.sp
|
||||
.RS
|
||||
docker run --name=link-test -d -i -t fedora/httpd
|
||||
.RE
|
||||
.sp
|
||||
.TP
|
||||
A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the \fB--link=<name>:<alias>\fR
|
||||
.sp
|
||||
.RS
|
||||
docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
|
||||
.RE
|
||||
.sp
|
||||
.TP
|
||||
Now the container linker is linked to container link-test with the alias lt. Running the \fBenv\fR command in the linker container shows environment variables with the LT (alias) context (\fBLT_\fR)
|
||||
.sp
|
||||
.nf
|
||||
.RS
|
||||
# env
|
||||
HOSTNAME=668231cb0978
|
||||
TERM=xterm
|
||||
LT_PORT_80_TCP=tcp://172.17.0.3:80
|
||||
LT_PORT_80_TCP_PORT=80
|
||||
LT_PORT_80_TCP_PROTO=tcp
|
||||
LT_PORT=tcp://172.17.0.3:80
|
||||
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
PWD=/
|
||||
LT_NAME=/linker/lt
|
||||
SHLVL=1
|
||||
HOME=/
|
||||
LT_PORT_80_TCP_ADDR=172.17.0.3
|
||||
_=/usr/bin/env
|
||||
.RE
|
||||
.fi
|
||||
.sp
|
||||
.TP
|
||||
When linking two containers Docker will use the exposed ports of the container to create a secure tunnel for the parent to access.
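.sp
As a minimal sketch grounded in the environment variables shown above (and assuming a client such as curl is present in the image), the linker container could reach the linked httpd service with:
.sp
.RS
curl http://$LT_PORT_80_TCP_ADDR:$LT_PORT_80_TCP_PORT/
.RE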
|
||||
.TP
|
||||
.sp
|
||||
.B Mapping Ports for External Usage
|
||||
.TP
|
||||
The exposed port of an application can be mapped to a host port using the \fB-p\fR flag. For example a httpd port 80 can be mapped to the host port 8080 using the following:
|
||||
.sp
|
||||
.RS
|
||||
docker run -p 8080:80 -d -i -t fedora/httpd
|
||||
.RE
|
||||
.sp
|
||||
.TP
|
||||
.B Creating and Mounting a Data Volume Container
|
||||
.TP
|
||||
Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2. The image will need to contain these directories so a couple of RUN mkdir instructions might be required for your fedora-data image:
|
||||
.sp
|
||||
.RS
|
||||
docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
|
||||
.sp
|
||||
docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
|
||||
.RE
|
||||
.sp
|
||||
.TP
|
||||
Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. It is also possible to mount the volumes that came from the data container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data:
|
||||
.sp
|
||||
.RS
|
||||
docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
|
||||
.RE
|
||||
.TP
|
||||
.sp
|
||||
.B Mounting External Volumes
|
||||
.TP
|
||||
To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon:
|
||||
.sp
|
||||
.RS
|
||||
docker run -v /var/db:/data1 -i -t fedora bash
|
||||
.RE
|
||||
.sp
|
||||
.TP
|
||||
When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the /var/db directory is not writable by the container. A "Permission Denied" message will occur, along with an avc: message in the host's syslog.
|
||||
.sp
|
||||
.TP
|
||||
To work around this, at time of writing this man page, the following command needs to be run in order for the proper SELinux policy type label to be attached to the host directory:
|
||||
.sp
|
||||
.RS
|
||||
chcon -Rt svirt_sandbox_file_t /var/db
|
||||
.RE
|
||||
.sp
|
||||
.TP
|
||||
Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db.
|
||||
.sp
|
||||
.SH HISTORY
|
||||
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,49 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker-tag.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker-tag \- Tag an image in the repository
|
||||
.SH SYNOPSIS
|
||||
.B docker tag
|
||||
[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]]
|
||||
\fBIMAGE\fR [REGISTRYHOST/][USERNAME/]NAME[:TAG]
|
||||
.SH DESCRIPTION
|
||||
This will tag an image in the repository.
|
||||
.SH "OPTIONS"
|
||||
.TP
|
||||
.B -f, --force=\fItrue\fR|\fIfalse\fR:
|
||||
When set to true, force the tag name. The default is \fIfalse\fR.
|
||||
.TP
|
||||
.B REGISTRYHOST:
|
||||
The hostname of the registry if required. This may also include the port separated by a ':'
|
||||
.TP
|
||||
.B USERNAME:
|
||||
The username or other qualifying identifier for the image.
|
||||
.TP
|
||||
.B NAME:
|
||||
The image name.
|
||||
.TP
|
||||
.B TAG:
|
||||
The tag you are assigning to the image.
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
.PP
|
||||
.B Tagging an image
|
||||
.TP
|
||||
Here is an example where an image is tagged with the tag 'Version-1.0' :
|
||||
.sp
|
||||
.RS
|
||||
docker tag 0e5574283393 fedora/httpd:Version-1.0
|
||||
.RE
|
||||
.sp
|
||||
.B Tagging an image for an internal repository
|
||||
.TP
|
||||
To push an image to an internal registry, and not the default docker.io-based registry, you must tag it with the registry hostname and port (if needed).
|
||||
.sp
|
||||
.RS
|
||||
docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
|
||||
.RE
|
||||
.sp
|
||||
.SH HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -1,172 +0,0 @@
|
|||
.\" Process this file with
|
||||
.\" nroff -man -Tascii docker.1
|
||||
.\"
|
||||
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
|
||||
.SH NAME
|
||||
docker \- Docker image and container command line interface
|
||||
.SH SYNOPSIS
|
||||
.B docker [OPTIONS] [COMMAND] [arg...]
|
||||
.SH DESCRIPTION
|
||||
\fBdocker\fR has two distinct functions. It is used to start the Docker daemon and to run the CLI (i.e., to command the daemon to manage images, containers, etc.). So \fBdocker\fR is both a server, as a daemon, and a client to the daemon, through the CLI.
|
||||
.sp
|
||||
To run the Docker daemon you do not specify any of the commands listed below but must specify the \fB-d\fR option. The other options listed below are for the daemon only.
|
||||
.sp
|
||||
The Docker CLI has over 30 commands. The commands are listed below and each has its own man page which explains usage and arguments.
|
||||
.sp
|
||||
To see the man page for a command run \fBman docker <command>\fR.
|
||||
.SH "OPTIONS"
|
||||
.B \-D=false:
|
||||
Enable debug mode
|
||||
.TP
|
||||
.B \-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use.
|
||||
When host=[0.0.0.0], port=[2375] or path
|
||||
=[/var/run/docker.sock] is omitted, default values are used.
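.sp
A short sketch of both forms (the TCP address here is only illustrative); the same flag selects the daemon's listening socket and the client's target:
.sp
.RS
.nf
docker -d -H tcp://0.0.0.0:2375
docker -H tcp://127.0.0.1:2375 info
.fi
.RE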
|
||||
.TP
|
||||
.B \-\-api-enable-cors=false
|
||||
Enable CORS headers in the remote API
|
||||
.TP
|
||||
.B \-b=""
|
||||
Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
|
||||
.TP
|
||||
.B \-\-bip=""
|
||||
Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b
|
||||
.TP
|
||||
.B \-d=false
|
||||
Enable daemon mode
|
||||
.TP
|
||||
.B \-\-dns=""
|
||||
Force Docker to use specific DNS servers
|
||||
.TP
|
||||
.B \-g="/var/lib/docker"
|
||||
Path to use as the root of the Docker runtime
|
||||
.TP
|
||||
.B \-\-icc=true
|
||||
Enable inter\-container communication
|
||||
.TP
|
||||
.B \-\-ip="0.0.0.0"
|
||||
Default IP address to use when binding container ports
|
||||
.TP
|
||||
.B \-\-iptables=true
|
||||
Enable Docker's addition of iptables rules
|
||||
.TP
|
||||
.B \-\-mtu=1500
|
||||
Set the containers' network MTU
|
||||
.TP
|
||||
.B \-p="/var/run/docker.pid"
|
||||
Path to use for daemon PID file
|
||||
.TP
|
||||
.B \-r=true
|
||||
Restart previously running containers
|
||||
.TP
|
||||
.B \-s=""
|
||||
Force the Docker runtime to use a specific storage driver
|
||||
.TP
|
||||
.B \-v=false
|
||||
Print version information and quit
|
||||
.SH "COMMANDS"
|
||||
.TP
|
||||
.B attach
|
||||
Attach to a running container
|
||||
.TP
|
||||
.B build
|
||||
Build an image from a Dockerfile
|
||||
.TP
|
||||
.B commit
|
||||
Create a new image from a container's changes
|
||||
.TP
|
||||
.B cp
|
||||
Copy files/folders from a container's filesystem to the host path
|
||||
.TP
|
||||
.B diff
|
||||
Inspect changes on a container's filesystem
|
||||
|
||||
.TP
|
||||
.B events
|
||||
Get real time events from the server
|
||||
.TP
|
||||
.B export
|
||||
Stream the contents of a container as a tar archive
|
||||
.TP
|
||||
.B history
|
||||
Show the history of an image
|
||||
.TP
|
||||
.B images
|
||||
List images
|
||||
.TP
|
||||
.B import
|
||||
Create a new filesystem image from the contents of a tarball
|
||||
.TP
|
||||
.B info
|
||||
Display system-wide information
|
||||
.TP
|
||||
.B insert
|
||||
Insert a file in an image
|
||||
.TP
|
||||
.B inspect
|
||||
Return low-level information on a container
|
||||
.TP
|
||||
.B kill
|
||||
Kill a running container (which includes the wrapper process and everything inside it)
|
||||
.TP
|
||||
.B load
|
||||
Load an image from a tar archive
|
||||
.TP
|
||||
.B login
|
||||
Register or log in to a Docker registry server
|
||||
.TP
|
||||
.B logs
|
||||
Fetch the logs of a container
|
||||
.TP
|
||||
.B port
|
||||
Look up the public-facing port which is NAT-ed to PRIVATE_PORT
|
||||
.TP
|
||||
.B ps
|
||||
List containers
|
||||
.TP
|
||||
.B pull
|
||||
Pull an image or a repository from a Docker registry server
|
||||
.TP
|
||||
.B push
|
||||
Push an image or a repository to a Docker registry server
|
||||
.TP
|
||||
.B restart
|
||||
Restart a running container
|
||||
.TP
|
||||
.B rm
|
||||
Remove one or more containers
|
||||
.TP
|
||||
.B rmi
|
||||
Remove one or more images
|
||||
.TP
|
||||
.B run
|
||||
Run a command in a new container
|
||||
.TP
|
||||
.B save
|
||||
Save an image to a tar archive
|
||||
.TP
|
||||
.B search
|
||||
Search for an image in the Docker index
|
||||
.TP
|
||||
.B start
|
||||
Start a stopped container
|
||||
.TP
|
||||
.B stop
|
||||
Stop a running container
|
||||
.TP
|
||||
.B tag
|
||||
Tag an image into a repository
|
||||
.TP
|
||||
.B top
|
||||
Look up the running processes of a container
|
||||
.TP
|
||||
.B version
|
||||
Show the Docker version information
|
||||
.TP
|
||||
.B wait
|
||||
Block until a container stops, then print its exit code
|
||||
.SH EXAMPLES
|
||||
.sp
|
||||
For specific examples please see the man page for the specific Docker command.
|
||||
.sp
|
||||
.SH HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
|
|
@ -13,8 +13,8 @@ usage() {
|
|||
}
|
||||
|
||||
tmp() {
|
||||
TMP=$(mktemp -d /tmp/alpine-docker-XXXXXXXXXX)
|
||||
ROOTFS=$(mktemp -d /tmp/alpine-docker-rootfs-XXXXXXXXXX)
|
||||
TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX)
|
||||
ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX)
|
||||
trap "rm -rf $TMP $ROOTFS" EXIT TERM INT
|
||||
}
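# Illustrative sketch (not part of this script) of the ${TMPDIR:-/var/tmp}
# fallback used above: /var/tmp is chosen only when TMPDIR is unset or empty.
( unset TMPDIR; echo "${TMPDIR:-/var/tmp}" )          # prints /var/tmp
( TMPDIR=/mnt/scratch; echo "${TMPDIR:-/var/tmp}" )   # prints /mnt/scratch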
|
||||
|
||||
|
|
|
@ -9,31 +9,13 @@ hash pacstrap &>/dev/null || {
|
|||
exit 1
|
||||
}
|
||||
|
||||
hash expect &>/dev/null || {
|
||||
echo "Could not find expect. Run pacman -S expect"
|
||||
exit 1
|
||||
}
|
||||
|
||||
ROOTFS=$(mktemp -d /tmp/rootfs-archlinux-XXXXXXXXXX)
|
||||
ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX)
|
||||
chmod 755 $ROOTFS
|
||||
|
||||
# packages to ignore for space savings
|
||||
PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
|
||||
|
||||
expect <<EOF
|
||||
set timeout 60
|
||||
set send_slow {1 1}
|
||||
spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
|
||||
expect {
|
||||
"Install anyway?" { send n\r; exp_continue }
|
||||
"(default=all)" { send \r; exp_continue }
|
||||
"Proceed with installation?" { send "\r"; exp_continue }
|
||||
"skip the above package" {send "y\r"; exp_continue }
|
||||
"checking" { exp_continue }
|
||||
"loading" { exp_continue }
|
||||
"installing" { exp_continue }
|
||||
}
|
||||
EOF
|
||||
pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
|
||||
|
||||
arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
|
||||
arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
|
||||
|
|
|
@ -14,7 +14,7 @@ BUSYBOX=$(which busybox)
|
|||
}
|
||||
|
||||
set -e
|
||||
ROOTFS=/tmp/rootfs-busybox-$$-$RANDOM
|
||||
ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM
|
||||
mkdir $ROOTFS
|
||||
cd $ROOTFS
|
||||
|
||||
|
|
|
@ -14,9 +14,9 @@ die () {
|
|||
|
||||
ISO=${1}
|
||||
|
||||
ROOTFS=$(mktemp -d /tmp/rootfs-crux-XXXXXXXXXX)
|
||||
CRUX=$(mktemp -d /tmp/crux-XXXXXXXXXX)
|
||||
TMP=$(mktemp -d /tmp/XXXXXXXXXX)
|
||||
ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX)
|
||||
CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX)
|
||||
TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX)
|
||||
|
||||
VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/')
|
||||
|
||||
|
|
|
@ -118,7 +118,7 @@ fi
|
|||
# will be filled in later, if [ -z "$skipDetection" ]
|
||||
lsbDist=''
|
||||
|
||||
target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
|
||||
target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
|
||||
|
||||
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
|
||||
returnTo="$(pwd -P)"
|
||||
|
|
|
@ -39,7 +39,7 @@ if [ ! "$repo" ] || [ ! "$distro" ]; then
|
|||
exit 1
|
||||
fi
|
||||
|
||||
target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"
|
||||
target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM"
|
||||
|
||||
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
|
||||
returnTo="$(pwd -P)"
|
||||
|
|
|
@ -15,7 +15,7 @@ SOCAT=$(which socat)
|
|||
|
||||
shopt -s extglob
|
||||
set -ex
|
||||
ROOTFS=`mktemp -d /tmp/rootfs-busybox.XXXXXXXXXX`
|
||||
ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX`
|
||||
trap "rm -rf $ROOTFS" INT QUIT TERM
|
||||
cd $ROOTFS
|
||||
|
||||
|
|
|
@ -6,9 +6,11 @@ mkimg="$(basename "$0")"
|
|||
usage() {
|
||||
echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]"
|
||||
echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie"
|
||||
echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal trusty"
|
||||
echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components main,universe trusty"
|
||||
echo >&2 " $mkimg -t someuser/busybox busybox-static"
|
||||
echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5"
|
||||
echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4"
|
||||
echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/"
|
||||
exit 1
|
||||
}
|
||||
|
||||
|
@ -48,7 +50,7 @@ fi
|
|||
|
||||
delDir=
|
||||
if [ -z "$dir" ]; then
|
||||
dir="$(mktemp -d ${TMPDIR:-/tmp}/docker-mkimage.XXXXXXXXXX)"
|
||||
dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)"
|
||||
delDir=1
|
||||
fi
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ shift
|
|||
# docs
|
||||
rm -rf usr/share/{man,doc,info,gnome/help}
|
||||
# cracklib
|
||||
#rm -rf usr/share/cracklib
|
||||
rm -rf usr/share/cracklib
|
||||
# i18n
|
||||
rm -rf usr/share/i18n
|
||||
# yum cache
|
||||
|
|
|
@ -23,9 +23,14 @@ shift
|
|||
# now for some Docker-specific tweaks
|
||||
|
||||
# prevent init scripts from running during install/update
|
||||
echo >&2 "+ cat > '$rootfsDir/usr/sbin/policy-rc.d'"
|
||||
echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'"
|
||||
cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF'
|
||||
#!/bin/sh
|
||||
|
||||
# For most Docker users, "apt-get install" only happens during "docker build",
|
||||
# where starting services doesn't work and often fails in humorous ways. This
|
||||
# prevents those failures by stopping the services from attempting to start.
|
||||
|
||||
exit 101
|
||||
EOF
|
||||
chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
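# Illustrative check (not part of the script): with the policy file above in place,
# running it inside the chroot exits 101, the "action forbidden by policy" status
# that invoke-rc.d honours, so service starts are refused during the build.
chroot "$rootfsDir" sh -c '/usr/sbin/policy-rc.d; echo "policy-rc.d exit status: $?"'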
|
||||
|
@ -34,17 +39,25 @@ chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
|
|||
(
|
||||
set -x
|
||||
chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl
|
||||
ln -sf /bin/true "$rootfsDir/sbin/initctl"
|
||||
cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl"
|
||||
sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl"
|
||||
)
|
||||
|
||||
# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
|
||||
# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB)
|
||||
( set -x; chroot "$rootfsDir" apt-get clean )
|
||||
|
||||
# Ubuntu 10.04 sucks... :)
|
||||
if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then
|
||||
# force dpkg not to call sync() after package extraction (speeding up installs)
|
||||
echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'"
|
||||
echo 'force-unsafe-io' > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup"
|
||||
cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF'
|
||||
# For most Docker users, package installs happen during "docker build", which
|
||||
# doesn't survive power loss and gets restarted clean afterwards anyhow, so
|
||||
# this minor tweak gives us a nice speedup (much nicer on spinning disks,
|
||||
# obviously).
|
||||
|
||||
force-unsafe-io
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
|
||||
|
@ -52,16 +65,36 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
|
|||
aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
|
||||
echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'"
|
||||
cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF
|
||||
# Since for most Docker users, package installs happen in "docker build" steps,
|
||||
# they essentially become individual layers due to the way Docker handles
|
||||
# layering, especially using CoW filesystems. What this means for us is that
|
||||
# the caches that APT keeps end up just wasting space in those layers, making
|
||||
# our layers unnecessarily large (especially since we'll normally never use
|
||||
# these caches again and will instead just "docker build" again and make a brand
|
||||
# new image).
|
||||
|
||||
# Ideally, these would just be invoking "apt-get clean", but in our testing,
|
||||
# that ended up being cyclic and we got stuck on APT's lock, so we get this fun
|
||||
# creation that's essentially just "apt-get clean".
|
||||
DPkg::Post-Invoke { ${aptGetClean} };
|
||||
APT::Update::Post-Invoke { ${aptGetClean} };
|
||||
|
||||
Dir::Cache::pkgcache "";
|
||||
Dir::Cache::srcpkgcache "";
|
||||
|
||||
# Note that we do realize this isn't the ideal way to do this, and are always
|
||||
# open to better suggestions (https://github.com/dotcloud/docker/issues).
|
||||
EOF
|
||||
|
||||
# remove apt-cache translations for fast "apt-get update"
|
||||
echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
|
||||
echo 'Acquire::Languages "none";' > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages"
|
||||
echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
|
||||
cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF'
|
||||
# In Docker, we don't often need the "Translations" files, so we're just wasting
|
||||
# time and space by downloading them, and this inhibits that. For users that do
|
||||
# need them, it's a simple matter to delete this file and "apt-get update". :)
|
||||
|
||||
Acquire::Languages "none";
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
|
||||
|
@ -76,39 +109,53 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
|
|||
if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then
|
||||
lsbDist='Debian'
|
||||
fi
|
||||
# normalize to lowercase for easier matching
|
||||
lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')"
|
||||
case "$lsbDist" in
|
||||
debian|Debian)
|
||||
debian)
|
||||
# updates and security!
|
||||
if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then
|
||||
(
|
||||
set -x
|
||||
sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list"
|
||||
sed -i "
|
||||
p;
|
||||
s/ $suite / ${suite}-updates /
|
||||
" "$rootfsDir/etc/apt/sources.list"
|
||||
echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
|
||||
# LTS
|
||||
if [ "$suite" = 'squeeze' ]; then
|
||||
head -1 "$rootfsDir/etc/apt/sources.list" \
|
||||
| sed "s/ $suite / ${suite}-lts /" \
|
||||
>> "$rootfsDir/etc/apt/sources.list"
|
||||
fi
|
||||
)
|
||||
fi
|
||||
;;
|
||||
ubuntu|Ubuntu)
|
||||
# add the universe, updates, and security repositories
|
||||
ubuntu)
|
||||
# add the updates and security repositories
|
||||
(
|
||||
set -x
|
||||
sed -i "
|
||||
s/ $suite main$/ $suite main universe/; p;
|
||||
s/ $suite main/ ${suite}-updates main/; p;
|
||||
s/ $suite-updates main/ ${suite}-security main/
|
||||
p;
|
||||
s/ $suite / ${suite}-updates /; p;
|
||||
s/ $suite-updates / ${suite}-security /
|
||||
" "$rootfsDir/etc/apt/sources.list"
|
||||
)
|
||||
;;
|
||||
tanglu|Tanglu)
|
||||
tanglu)
|
||||
# add the updates repository
|
||||
if [ "$suite" != 'devel' ]; then
|
||||
(
|
||||
set -x
|
||||
sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list"
|
||||
sed -i "
|
||||
p;
|
||||
s/ $suite / ${suite}-updates /
|
||||
" "$rootfsDir/etc/apt/sources.list"
|
||||
)
|
||||
fi
|
||||
;;
|
||||
steamos|SteamOS)
|
||||
# add contrib and non-free
|
||||
steamos)
|
||||
# add contrib and non-free if "main" is the only component
|
||||
(
|
||||
set -x
|
||||
sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list"
|
||||
|
@ -117,9 +164,13 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
|
|||
esac
|
||||
fi
|
||||
|
||||
# make sure we're fully up-to-date, too
|
||||
(
|
||||
set -x
|
||||
chroot "$rootfsDir" apt-get update
|
||||
chroot "$rootfsDir" apt-get dist-upgrade -y
|
||||
|
||||
# make sure we're fully up-to-date
|
||||
chroot "$rootfsDir" bash -c 'apt-get update && apt-get dist-upgrade -y'
|
||||
|
||||
# delete all the apt list files since they're big and get stale quickly
|
||||
rm -rf "$rootfsDir/var/lib/apt/lists"/*
|
||||
# this forces "apt-get update" in dependent images, which is also good
|
||||
)
|
||||
|
|
61
contrib/mkimage/mageia-urpmi
Executable file
|
@ -0,0 +1,61 @@
|
|||
#!/usr/bin/env bash
|
||||
#
|
||||
# Needs to be run from Mageia 4 or greater for kernel support for docker.
|
||||
#
|
||||
# Mageia 4 does not have docker available in official repos, so please
|
||||
# install and run the docker binary manually.
|
||||
#
|
||||
# Tested working versions are for Mageia 2 onwards (inc. cauldron).
|
||||
#
|
||||
set -e
|
||||
|
||||
rootfsDir="$1"
|
||||
shift
|
||||
|
||||
optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@")
|
||||
eval set -- "$optTemp"
|
||||
unset optTemp
|
||||
|
||||
installversion=
|
||||
mirror=
|
||||
while true; do
|
||||
case "$1" in
|
||||
-v|--version) installversion="$2" ; shift 2 ;;
|
||||
-m|--mirror) mirror="$2" ; shift 2 ;;
|
||||
--) shift ; break ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -z "$installversion" ]; then
|
||||
# Attempt to match host version
|
||||
if [ -r /etc/mageia-release ]; then
|
||||
installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)"
|
||||
else
|
||||
echo "Error: no version supplied and unable to detect host mageia version"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$mirror" ]; then
|
||||
# No mirror provided, default to mirrorlist
|
||||
mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list"
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
urpmi.addmedia --distrib \
|
||||
$mirror \
|
||||
--urpmi-root "$rootfsDir"
|
||||
urpmi basesystem-minimal urpmi \
|
||||
--auto \
|
||||
--no-suggests \
|
||||
--urpmi-root "$rootfsDir" \
|
||||
--root "$rootfsDir"
|
||||
)
|
||||
|
||||
"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"
|
||||
|
||||
if [ -d "$rootfsDir/etc/sysconfig" ]; then
|
||||
# allow networking init scripts inside the container to work without extra steps
|
||||
echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
|
||||
fi
|
|
@ -53,7 +53,7 @@ type Container struct {
|
|||
Args []string
|
||||
|
||||
Config *runconfig.Config
|
||||
State State
|
||||
State *State
|
||||
Image string
|
||||
|
||||
NetworkSettings *NetworkSettings
|
||||
|
@ -74,8 +74,7 @@ type Container struct {
|
|||
daemon *Daemon
|
||||
MountLabel, ProcessLabel string
|
||||
|
||||
waitLock chan struct{}
|
||||
Volumes map[string]string
|
||||
Volumes map[string]string
|
||||
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
|
||||
// Easier than migrating older container configs :)
|
||||
VolumesRW map[string]bool
|
||||
|
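Changing State from an embedded value to a shared *State and dropping the waitLock channel means stop/wait synchronization now lives on the state object itself, which is why later hunks replace container.Wait() with container.State.WaitStop(...). The real daemon/state.go is not part of this diff, so the following is only a minimal sketch of a channel-backed state with the WaitStop behaviour the rest of the changeset relies on; the package name, field layout and timeout wording are assumptions, and restart re-arming is deliberately left out.

package statesketch

import (
	"fmt"
	"sync"
	"time"
)

// State is a sketch of a channel-backed stop notification, not the real
// daemon State type. SetStopped closes the channel, which wakes every
// caller currently blocked in WaitStop.
type State struct {
	sync.Mutex
	Running  bool
	ExitCode int
	stopped  chan struct{}
}

func NewState() *State {
	return &State{stopped: make(chan struct{})}
}

func (s *State) SetStopped(exitCode int) {
	s.Lock()
	defer s.Unlock()
	s.Running = false
	s.ExitCode = exitCode
	close(s.stopped) // unblocks every pending WaitStop
}

// WaitStop blocks until the container stops; a negative timeout waits
// forever, matching the WaitStop(-1 * time.Second) calls in this commit.
func (s *State) WaitStop(timeout time.Duration) (int, error) {
	if timeout < 0 {
		<-s.stopped
		return s.ExitCode, nil
	}
	select {
	case <-s.stopped:
		return s.ExitCode, nil
	case <-time.After(timeout):
		return -1, fmt.Errorf("wait for container stop timed out after %s", timeout)
	}
}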
@ -284,7 +283,6 @@ func (container *Container) Start() (err error) {
|
|||
if err := container.startLoggingToDisk(); err != nil {
|
||||
return err
|
||||
}
|
||||
container.waitLock = make(chan struct{})
|
||||
|
||||
return container.waitForStart()
|
||||
}
|
||||
|
@ -293,7 +291,7 @@ func (container *Container) Run() error {
|
|||
if err := container.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
container.Wait()
|
||||
container.State.WaitStop(-1 * time.Second)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -307,7 +305,7 @@ func (container *Container) Output() (output []byte, err error) {
|
|||
return nil, err
|
||||
}
|
||||
output, err = ioutil.ReadAll(pipe)
|
||||
container.Wait()
|
||||
container.State.WaitStop(-1 * time.Second)
|
||||
return output, err
|
||||
}
|
||||
|
||||
|
@ -467,6 +465,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
|
|||
if err != nil {
|
||||
utils.Errorf("Error running container: %s", err)
|
||||
}
|
||||
container.State.SetStopped(exitCode)
|
||||
|
||||
// Cleanup
|
||||
container.cleanup()
|
||||
|
@ -475,28 +474,17 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
|
|||
if container.Config.OpenStdin {
|
||||
container.stdin, container.stdinPipe = io.Pipe()
|
||||
}
|
||||
|
||||
if container.daemon != nil && container.daemon.srv != nil {
|
||||
container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
|
||||
}
|
||||
|
||||
close(container.waitLock)
|
||||
|
||||
if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
|
||||
container.State.SetStopped(exitCode)
|
||||
|
||||
// FIXME: there is a race condition here which causes this to fail during the unit tests.
|
||||
// If another goroutine was waiting for Wait() to return before removing the container's root
|
||||
// from the filesystem... At this point it may already have done so.
|
||||
// This is because State.setStopped() has already been called, and has caused Wait()
|
||||
// to return.
|
||||
// FIXME: why are we serializing running state to disk in the first place?
|
||||
//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
|
||||
// FIXME: here is a race condition between two RUN instructions in Dockerfile
|
||||
// because they share same runconfig and change image. Must be fixed
|
||||
// in server/buildfile.go
|
||||
if err := container.ToDisk(); err != nil {
|
||||
utils.Errorf("Error dumping container state to disk: %s\n", err)
|
||||
utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -532,6 +520,7 @@ func (container *Container) cleanup() {
|
|||
}
|
||||
|
||||
func (container *Container) KillSig(sig int) error {
|
||||
utils.Debugf("Sending %d to %s", sig, container.ID)
|
||||
container.Lock()
|
||||
defer container.Unlock()
|
||||
|
||||
|
@ -577,9 +566,9 @@ func (container *Container) Kill() error {
|
|||
}
|
||||
|
||||
// 2. Wait for the process to die, in last resort, try to kill the process directly
|
||||
if err := container.WaitTimeout(10 * time.Second); err != nil {
|
||||
if _, err := container.State.WaitStop(10 * time.Second); err != nil {
|
||||
// Ensure that we don't kill ourselves
|
||||
if pid := container.State.Pid; pid != 0 {
|
||||
if pid := container.State.GetPid(); pid != 0 {
|
||||
log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
|
||||
if err := syscall.Kill(pid, 9); err != nil {
|
||||
return err
|
||||
|
@ -587,7 +576,7 @@ func (container *Container) Kill() error {
|
|||
}
|
||||
}
|
||||
|
||||
container.Wait()
|
||||
container.State.WaitStop(-1 * time.Second)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -605,11 +594,11 @@ func (container *Container) Stop(seconds int) error {
|
|||
}
|
||||
|
||||
// 2. Wait for the process to exit on its own
|
||||
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
|
||||
if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
|
||||
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
|
||||
// 3. If it doesn't, then send SIGKILL
|
||||
if err := container.Kill(); err != nil {
|
||||
container.Wait()
|
||||
container.State.WaitStop(-1 * time.Second)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -630,12 +619,6 @@ func (container *Container) Restart(seconds int) error {
|
|||
return container.Start()
|
||||
}
|
||||
|
||||
// Wait blocks until the container stops running, then returns its exit code.
|
||||
func (container *Container) Wait() int {
|
||||
<-container.waitLock
|
||||
return container.State.GetExitCode()
|
||||
}
|
||||
|
||||
func (container *Container) Resize(h, w int) error {
|
||||
return container.command.Terminal.Resize(h, w)
|
||||
}
|
||||
|
@ -678,21 +661,6 @@ func (container *Container) Export() (archive.Archive, error) {
|
|||
nil
|
||||
}
|
||||
|
||||
func (container *Container) WaitTimeout(timeout time.Duration) error {
|
||||
done := make(chan bool, 1)
|
||||
go func() {
|
||||
container.Wait()
|
||||
done <- true
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
return fmt.Errorf("Timed Out")
|
||||
case <-done:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (container *Container) Mount() error {
|
||||
return container.daemon.Mount(container)
|
||||
}
|
||||
|
@ -813,7 +781,7 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
|
|||
basePath = path.Dir(basePath)
|
||||
}
|
||||
|
||||
archive, err := archive.TarFilter(basePath, &archive.TarOptions{
|
||||
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
Includes: filter,
|
||||
})
|
||||
|
@ -1103,9 +1071,7 @@ func (container *Container) startLoggingToDisk() error {
|
|||
}
|
||||
|
||||
func (container *Container) waitForStart() error {
|
||||
callbackLock := make(chan struct{})
|
||||
callback := func(command *execdriver.Command) {
|
||||
container.State.SetRunning(command.Pid())
|
||||
if command.Tty {
|
||||
// The callback is called after the process Start()
|
||||
// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
|
||||
|
@ -1117,16 +1083,23 @@ func (container *Container) waitForStart() error {
|
|||
if err := container.ToDisk(); err != nil {
|
||||
utils.Debugf("%s", err)
|
||||
}
|
||||
close(callbackLock)
|
||||
container.State.SetRunning(command.Pid())
|
||||
}
|
||||
|
||||
// We use a callback here instead of a goroutine and a chan for
|
||||
// synchronization purposes
|
||||
cErr := utils.Go(func() error { return container.monitor(callback) })
|
||||
|
||||
waitStart := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
container.State.WaitRunning(-1 * time.Second)
|
||||
close(waitStart)
|
||||
}()
|
||||
|
||||
// Start should not return until the process is actually running
|
||||
select {
|
||||
case <-callbackLock:
|
||||
case <-waitStart:
|
||||
case err := <-cErr:
|
||||
return err
|
||||
}
|
||||
|
|
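waitForStart now blocks on whichever happens first: the state flipping to running (a helper goroutine closes waitStart once WaitRunning returns) or the monitor goroutine failing. A reduced sketch of that select-on-two-channels shape follows; the toy start and monitor channels are stand-ins, not the real execdriver callback plumbing.

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForStart returns nil once the process reports running, or the
// monitor error if the monitor fails before start completes.
func waitForStart(started <-chan struct{}, monitorErr <-chan error) error {
	select {
	case <-started:
		return nil
	case err := <-monitorErr:
		return err
	}
}

func main() {
	// Simulate a successful start.
	started := make(chan struct{})
	monitorErr := make(chan error, 1)
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(started)
	}()
	fmt.Println("start ok:", waitForStart(started, monitorErr))

	// Simulate a monitor failure before the process ever runs.
	started2 := make(chan struct{})
	monitorErr2 := make(chan error, 1)
	monitorErr2 <- errors.New("exec driver failed")
	fmt.Println("start failed:", waitForStart(started2, monitorErr2))
}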
|
@ -31,6 +31,7 @@ import (
|
|||
"github.com/dotcloud/docker/pkg/namesgenerator"
|
||||
"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
|
||||
"github.com/dotcloud/docker/pkg/sysinfo"
|
||||
"github.com/dotcloud/docker/pkg/truncindex"
|
||||
"github.com/dotcloud/docker/runconfig"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
)
|
||||
|
@ -87,7 +88,7 @@ type Daemon struct {
|
|||
containers *contStore
|
||||
graph *graph.Graph
|
||||
repositories *graph.TagStore
|
||||
idIndex *utils.TruncIndex
|
||||
idIndex *truncindex.TruncIndex
|
||||
sysInfo *sysinfo.SysInfo
|
||||
volumes *graph.Graph
|
||||
srv Server
|
||||
|
@ -96,6 +97,7 @@ type Daemon struct {
|
|||
containerGraph *graphdb.Database
|
||||
driver graphdriver.Driver
|
||||
execDriver execdriver.Driver
|
||||
Sockets []string
|
||||
}
|
||||
|
||||
// Install installs daemon capabilities to eng.
|
||||
|
@ -136,7 +138,7 @@ func (daemon *Daemon) containerRoot(id string) string {
|
|||
// Load reads the contents of a container from disk
|
||||
// This is typically done at startup.
|
||||
func (daemon *Daemon) load(id string) (*Container, error) {
|
||||
container := &Container{root: daemon.containerRoot(id)}
|
||||
container := &Container{root: daemon.containerRoot(id), State: NewState()}
|
||||
if err := container.FromDisk(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -180,11 +182,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
|
|||
|
||||
// don't update the Suffixarray if we're starting up
|
||||
// we'll waste time if we update it for every container
|
||||
if updateSuffixarray {
|
||||
daemon.idIndex.Add(container.ID)
|
||||
} else {
|
||||
daemon.idIndex.AddWithoutSuffixarrayUpdate(container.ID)
|
||||
}
|
||||
daemon.idIndex.Add(container.ID)
|
||||
|
||||
// FIXME: if the container is supposed to be running but is not, auto restart it?
|
||||
// if so, then we need to restart monitor and init a new lock
|
||||
|
@ -238,12 +236,6 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
|
|||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// When the container is not running, we still initialize the waitLock
|
||||
// chan and close it. Receiving on nil chan blocks whereas receiving on a
|
||||
// closed chan does not. In this case we do not want to block.
|
||||
container.waitLock = make(chan struct{})
|
||||
close(container.waitLock)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
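The deleted else branch leaned on a Go channel property worth spelling out: receiving from a nil channel blocks forever, while receiving from a closed channel returns immediately, which is why the old code created and closed waitLock for stopped containers. A tiny self-contained demonstration of that behaviour:

package main

import "fmt"

func main() {
	closed := make(chan struct{})
	close(closed)
	<-closed // receive on a closed channel never blocks
	fmt.Println("closed channel: did not block")

	var nilCh chan struct{}
	select {
	case <-nilCh: // a nil channel is never ready
		fmt.Println("unreachable")
	default:
		fmt.Println("nil channel: would block forever")
	}
}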
@ -375,8 +367,6 @@ func (daemon *Daemon) restore() error {
|
|||
}
|
||||
}
|
||||
|
||||
daemon.idIndex.UpdateSuffixarray()
|
||||
|
||||
for _, container := range containersToStart {
|
||||
utils.Debugf("Starting container %d", container.ID)
|
||||
if err := container.Start(); err != nil {
|
||||
|
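The idIndex move from utils.TruncIndex to the new pkg/truncindex, together with the dropped suffixarray bookkeeping above, suggests prefix resolution no longer needs a separately maintained index during startup. As a rough illustration of the contract a truncated-ID index provides (resolve an unambiguous ID prefix to a full container ID, reject ambiguous ones), here is a naive map-based sketch; it is not the real pkg/truncindex, which this diff does not show.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// naiveIndex resolves unambiguous ID prefixes by linear scan; the real
// truncindex package is more efficient, this only illustrates the contract.
type naiveIndex struct{ ids map[string]struct{} }

func newNaiveIndex() *naiveIndex { return &naiveIndex{ids: map[string]struct{}{}} }

func (n *naiveIndex) Add(id string) { n.ids[id] = struct{}{} }

func (n *naiveIndex) Get(prefix string) (string, error) {
	var match string
	for id := range n.ids {
		if strings.HasPrefix(id, prefix) {
			if match != "" {
				return "", errors.New("ambiguous prefix: " + prefix)
			}
			match = id
		}
	}
	if match == "" {
		return "", errors.New("no such id: " + prefix)
	}
	return match, nil
}

func main() {
	idx := newNaiveIndex()
	idx.Add("4c52272a2d69")
	idx.Add("4c99aa1b0000")
	fmt.Println(idx.Get("4c52")) // unambiguous -> full ID
	fmt.Println(idx.Get("4c"))   // ambiguous -> error
}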
@ -592,6 +582,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
|
|||
Name: name,
|
||||
Driver: daemon.driver.String(),
|
||||
ExecDriver: daemon.execDriver.Name(),
|
||||
State: NewState(),
|
||||
}
|
||||
container.root = daemon.containerRoot(container.ID)
|
||||
|
||||
|
@ -629,8 +620,12 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error
|
|||
|
||||
// Commit creates a new filesystem image from the current state of a container.
|
||||
// The image can optionally be tagged into a repository
|
||||
func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
|
||||
// FIXME: freeze the container before copying it to avoid data corruption?
|
||||
func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
|
||||
if pause {
|
||||
container.Pause()
|
||||
defer container.Unpause()
|
||||
}
|
||||
|
||||
if err := container.Mount(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -841,7 +836,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
|
|||
localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
|
||||
sysInitPath := utils.DockerInitPath(localCopy)
|
||||
if sysInitPath == "" {
|
||||
return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.")
|
||||
return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.")
|
||||
}
|
||||
|
||||
if sysInitPath != localCopy {
|
||||
|
@ -869,7 +864,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
|
|||
containers: &contStore{s: make(map[string]*Container)},
|
||||
graph: g,
|
||||
repositories: repositories,
|
||||
idIndex: utils.NewTruncIndex([]string{}),
|
||||
idIndex: truncindex.NewTruncIndex([]string{}),
|
||||
sysInfo: sysInfo,
|
||||
volumes: volumes,
|
||||
config: config,
|
||||
|
@ -878,6 +873,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
|
|||
sysInitPath: sysInitPath,
|
||||
execDriver: ed,
|
||||
eng: eng,
|
||||
Sockets: config.Sockets,
|
||||
}
|
||||
|
||||
if err := daemon.checkLocaldns(); err != nil {
|
||||
|
@ -903,7 +899,7 @@ func (daemon *Daemon) shutdown() error {
|
|||
if err := c.KillSig(15); err != nil {
|
||||
utils.Debugf("kill 15 error for %s - %s", c.ID, err)
|
||||
}
|
||||
c.Wait()
|
||||
c.State.WaitStop(-1 * time.Second)
|
||||
utils.Debugf("container stopped %s", c.ID)
|
||||
}()
|
||||
}
|
||||
|
|
|
@ -1 +1,2 @@
|
|||
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
|
||||
Victor Vieux <vieux@docker.com> (@vieux)
|
||||
|
|
|
@ -29,7 +29,7 @@ func finalizeNamespace(args *execdriver.InitArgs) error {
|
|||
|
||||
if !args.Privileged {
|
||||
// drop capabilities in bounding set before changing user
|
||||
if err := capabilities.DropBoundingSet(container); err != nil {
|
||||
if err := capabilities.DropBoundingSet(container.Capabilities); err != nil {
|
||||
return fmt.Errorf("drop bounding set %s", err)
|
||||
}
|
||||
|
||||
|
@ -49,7 +49,7 @@ func finalizeNamespace(args *execdriver.InitArgs) error {
|
|||
}
|
||||
|
||||
// drop all other capabilities
|
||||
if err := capabilities.DropCapabilities(container); err != nil {
|
||||
if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
|
||||
return fmt.Errorf("drop capabilities %s", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
"github.com/dotcloud/docker/pkg/units"
|
||||
)
|
||||
|
||||
type Action func(*libcontainer.Container, interface{}, string) error
|
||||
type Action func(*libcontainer.Config, interface{}, string) error
|
||||
|
||||
var actions = map[string]Action{
|
||||
"cap.add": addCap, // add a cap
|
||||
|
@ -35,7 +35,7 @@ var actions = map[string]Action{
|
|||
"fs.readonly": readonlyFs, // make the rootfs of the container read only
|
||||
}
|
||||
|
||||
func cpusetCpus(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func cpusetCpus(container *libcontainer.Config, context interface{}, value string) error {
|
||||
if container.Cgroups == nil {
|
||||
return fmt.Errorf("cannot set cgroups when they are disabled")
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ func cpusetCpus(container *libcontainer.Container, context interface{}, value st
|
|||
return nil
|
||||
}
|
||||
|
||||
func systemdSlice(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func systemdSlice(container *libcontainer.Config, context interface{}, value string) error {
|
||||
if container.Cgroups == nil {
|
||||
return fmt.Errorf("cannot set slice when cgroups are disabled")
|
||||
}
|
||||
|
@ -53,12 +53,12 @@ func systemdSlice(container *libcontainer.Container, context interface{}, value
|
|||
return nil
|
||||
}
|
||||
|
||||
func apparmorProfile(container *libcontainer.Container, context interface{}, value string) error {
|
||||
container.Context["apparmor_profile"] = value
|
||||
func apparmorProfile(container *libcontainer.Config, context interface{}, value string) error {
|
||||
container.AppArmorProfile = value
|
||||
return nil
|
||||
}
|
||||
|
||||
func cpuShares(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func cpuShares(container *libcontainer.Config, context interface{}, value string) error {
|
||||
if container.Cgroups == nil {
|
||||
return fmt.Errorf("cannot set cgroups when they are disabled")
|
||||
}
|
||||
|
@ -70,7 +70,7 @@ func cpuShares(container *libcontainer.Container, context interface{}, value str
|
|||
return nil
|
||||
}
|
||||
|
||||
func memory(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func memory(container *libcontainer.Config, context interface{}, value string) error {
|
||||
if container.Cgroups == nil {
|
||||
return fmt.Errorf("cannot set cgroups when they are disabled")
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ func memory(container *libcontainer.Container, context interface{}, value string
|
|||
return nil
|
||||
}
|
||||
|
||||
func memoryReservation(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func memoryReservation(container *libcontainer.Config, context interface{}, value string) error {
|
||||
if container.Cgroups == nil {
|
||||
return fmt.Errorf("cannot set cgroups when they are disabled")
|
||||
}
|
||||
|
@ -96,7 +96,7 @@ func memoryReservation(container *libcontainer.Container, context interface{}, v
|
|||
return nil
|
||||
}
|
||||
|
||||
func memorySwap(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func memorySwap(container *libcontainer.Config, context interface{}, value string) error {
|
||||
if container.Cgroups == nil {
|
||||
return fmt.Errorf("cannot set cgroups when they are disabled")
|
||||
}
|
||||
|
@ -108,12 +108,12 @@ func memorySwap(container *libcontainer.Container, context interface{}, value st
|
|||
return nil
|
||||
}
|
||||
|
||||
func addCap(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func addCap(container *libcontainer.Config, context interface{}, value string) error {
|
||||
container.Capabilities = append(container.Capabilities, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func dropCap(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func dropCap(container *libcontainer.Config, context interface{}, value string) error {
|
||||
// If the capability is specified multiple times, remove all instances.
|
||||
for i, capability := range container.Capabilities {
|
||||
if capability == value {
|
||||
|
@ -125,27 +125,27 @@ func dropCap(container *libcontainer.Container, context interface{}, value strin
|
|||
return nil
|
||||
}
|
||||
|
||||
func addNamespace(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func addNamespace(container *libcontainer.Config, context interface{}, value string) error {
|
||||
container.Namespaces[value] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func dropNamespace(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func dropNamespace(container *libcontainer.Config, context interface{}, value string) error {
|
||||
container.Namespaces[value] = false
|
||||
return nil
|
||||
}
|
||||
|
||||
func readonlyFs(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func readonlyFs(container *libcontainer.Config, context interface{}, value string) error {
|
||||
switch value {
|
||||
case "1", "true":
|
||||
container.ReadonlyFs = true
|
||||
container.MountConfig.ReadonlyFs = true
|
||||
default:
|
||||
container.ReadonlyFs = false
|
||||
container.MountConfig.ReadonlyFs = false
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func joinNetNamespace(container *libcontainer.Container, context interface{}, value string) error {
|
||||
func joinNetNamespace(container *libcontainer.Config, context interface{}, value string) error {
|
||||
var (
|
||||
running = context.(map[string]*exec.Cmd)
|
||||
cmd = running[value]
|
||||
|
@ -154,28 +154,13 @@ func joinNetNamespace(container *libcontainer.Container, context interface{}, va
|
|||
if cmd == nil || cmd.Process == nil {
|
||||
return fmt.Errorf("%s is not a valid running container to join", value)
|
||||
}
|
||||
|
||||
nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
|
||||
container.Networks = append(container.Networks, &libcontainer.Network{
|
||||
Type: "netns",
|
||||
Context: libcontainer.Context{
|
||||
"nspath": nspath,
|
||||
},
|
||||
Type: "netns",
|
||||
NsPath: nspath,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func vethMacAddress(container *libcontainer.Container, context interface{}, value string) error {
|
||||
var veth *libcontainer.Network
|
||||
for _, network := range container.Networks {
|
||||
if network.Type == "veth" {
|
||||
veth = network
|
||||
break
|
||||
}
|
||||
}
|
||||
if veth == nil {
|
||||
return fmt.Errorf("not veth configured for container")
|
||||
}
|
||||
veth.Context["mac"] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -183,7 +168,7 @@ func vethMacAddress(container *libcontainer.Container, context interface{}, valu
|
|||
// container's default configuration.
|
||||
//
|
||||
// TODO: this can be moved to a general utils or parser in pkg
|
||||
func ParseConfiguration(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error {
|
||||
func ParseConfiguration(container *libcontainer.Config, running map[string]*exec.Cmd, opts []string) error {
|
||||
for _, opt := range opts {
|
||||
kv := strings.SplitN(opt, "=", 2)
|
||||
if len(kv) < 2 {
|
||||
|
|
|
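The Action signature change means every handler in the actions map now mutates a *libcontainer.Config directly, while ParseConfiguration stays a thin dispatcher over key=value strings. A minimal sketch of that dispatch pattern follows, with a toy config type so it stays self-contained; the toy type and option names are assumptions, not the real libcontainer fields.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// toyConfig stands in for *libcontainer.Config in this sketch.
type toyConfig struct {
	CpuShares  int64
	ReadonlyFs bool
}

type action func(cfg *toyConfig, value string) error

var actions = map[string]action{
	"cgroups.cpu_shares": func(cfg *toyConfig, v string) error {
		n, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return err
		}
		cfg.CpuShares = n
		return nil
	},
	"fs.readonly": func(cfg *toyConfig, v string) error {
		cfg.ReadonlyFs = v == "1" || v == "true"
		return nil
	},
}

// parseConfiguration splits each option at the first '=' and hands the
// value to the handler registered for the key, mirroring the diff.
func parseConfiguration(cfg *toyConfig, opts []string) error {
	for _, opt := range opts {
		kv := strings.SplitN(opt, "=", 2)
		if len(kv) < 2 {
			return fmt.Errorf("invalid option %q, expected key=value", opt)
		}
		handler, ok := actions[kv[0]]
		if !ok {
			return fmt.Errorf("unknown option %q", kv[0])
		}
		if err := handler(cfg, kv[1]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	cfg := &toyConfig{}
	if err := parseConfiguration(cfg, []string{"cgroups.cpu_shares=512", "fs.readonly=true"}); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}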
@ -3,7 +3,7 @@ package configuration
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/libcontainer"
|
||||
"github.com/docker/libcontainer/security/capabilities"
|
||||
"github.com/dotcloud/docker/daemon/execdriver/native/template"
|
||||
)
|
||||
|
||||
|
@ -25,14 +25,14 @@ func TestSetReadonlyRootFs(t *testing.T) {
|
|||
}
|
||||
)
|
||||
|
||||
if container.ReadonlyFs {
|
||||
if container.MountConfig.ReadonlyFs {
|
||||
t.Fatal("container should not have a readonly rootfs by default")
|
||||
}
|
||||
if err := ParseConfiguration(container, nil, opts); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !container.ReadonlyFs {
|
||||
if !container.MountConfig.ReadonlyFs {
|
||||
t.Fatal("container should have a readonly rootfs")
|
||||
}
|
||||
}
|
||||
|
@ -84,8 +84,9 @@ func TestAppArmorProfile(t *testing.T) {
|
|||
if err := ParseConfiguration(container, nil, opts); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if expected := "koye-the-protector"; container.Context["apparmor_profile"] != expected {
|
||||
t.Fatalf("expected profile %s got %s", expected, container.Context["apparmor_profile"])
|
||||
|
||||
if expected := "koye-the-protector"; container.AppArmorProfile != expected {
|
||||
t.Fatalf("expected profile %s got %s", expected, container.AppArmorProfile)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -165,7 +166,7 @@ func TestDropCap(t *testing.T) {
|
|||
}
|
||||
)
|
||||
// enabled all caps like in privileged mode
|
||||
container.Capabilities = libcontainer.GetAllCapabilities()
|
||||
container.Capabilities = capabilities.GetAllCapabilities()
|
||||
if err := ParseConfiguration(container, nil, opts); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -9,6 +9,8 @@ import (
|
|||
"github.com/docker/libcontainer"
|
||||
"github.com/docker/libcontainer/apparmor"
|
||||
"github.com/docker/libcontainer/devices"
|
||||
"github.com/docker/libcontainer/mount"
|
||||
"github.com/docker/libcontainer/security/capabilities"
|
||||
"github.com/dotcloud/docker/daemon/execdriver"
|
||||
"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
|
||||
"github.com/dotcloud/docker/daemon/execdriver/native/template"
|
||||
|
@ -16,7 +18,7 @@ import (
|
|||
|
||||
// createContainer populates and configures the container type with the
|
||||
// data provided by the execdriver.Command
|
||||
func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container, error) {
|
||||
func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
|
||||
container := template.New()
|
||||
|
||||
container.Hostname = getEnv("HOSTNAME", c.Env)
|
||||
|
@ -26,65 +28,71 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
|
|||
container.Env = c.Env
|
||||
container.Cgroups.Name = c.ID
|
||||
container.Cgroups.AllowedDevices = c.AllowedDevices
|
||||
container.DeviceNodes = c.AutoCreatedDevices
|
||||
container.MountConfig.DeviceNodes = c.AutoCreatedDevices
|
||||
|
||||
// check to see if we are running in ramdisk to disable pivot root
|
||||
container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
|
||||
container.Context["restrictions"] = "true"
|
||||
container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
|
||||
container.RestrictSys = true
|
||||
|
||||
if err := d.createNetwork(container, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if c.Privileged {
|
||||
if err := d.setPrivileged(container); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := d.setupCgroups(container, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := d.setupMounts(container, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := d.setupLabels(container, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmds := make(map[string]*exec.Cmd)
|
||||
d.Lock()
|
||||
for k, v := range d.activeContainers {
|
||||
cmds[k] = v.cmd
|
||||
}
|
||||
d.Unlock()
|
||||
|
||||
if err := configuration.ParseConfiguration(container, cmds, c.Config["native"]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return container, nil
|
||||
}
|
||||
|
||||
func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error {
|
||||
func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Command) error {
|
||||
if c.Network.HostNetworking {
|
||||
container.Namespaces["NEWNET"] = false
|
||||
return nil
|
||||
}
|
||||
|
||||
container.Networks = []*libcontainer.Network{
|
||||
{
|
||||
Mtu: c.Network.Mtu,
|
||||
Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0),
|
||||
Gateway: "localhost",
|
||||
Type: "loopback",
|
||||
Context: libcontainer.Context{},
|
||||
},
|
||||
}
|
||||
|
||||
if c.Network.Interface != nil {
|
||||
vethNetwork := libcontainer.Network{
|
||||
Mtu: c.Network.Mtu,
|
||||
Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
|
||||
Gateway: c.Network.Interface.Gateway,
|
||||
Type: "veth",
|
||||
Context: libcontainer.Context{
|
||||
"prefix": "veth",
|
||||
"bridge": c.Network.Interface.Bridge,
|
||||
},
|
||||
Mtu: c.Network.Mtu,
|
||||
Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
|
||||
Gateway: c.Network.Interface.Gateway,
|
||||
Type: "veth",
|
||||
Bridge: c.Network.Interface.Bridge,
|
||||
VethPrefix: "veth",
|
||||
}
|
||||
container.Networks = append(container.Networks, &vethNetwork)
|
||||
}
|
||||
|
@ -93,6 +101,7 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.
|
|||
d.Lock()
|
||||
active := d.activeContainers[c.Network.ContainerID]
|
||||
d.Unlock()
|
||||
|
||||
if active == nil || active.cmd.Process == nil {
|
||||
return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
|
||||
}
|
||||
|
@ -100,34 +109,34 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.
|
|||
|
||||
nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
|
||||
container.Networks = append(container.Networks, &libcontainer.Network{
|
||||
Type: "netns",
|
||||
Context: libcontainer.Context{
|
||||
"nspath": nspath,
|
||||
},
|
||||
Type: "netns",
|
||||
NsPath: nspath,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) setPrivileged(container *libcontainer.Container) (err error) {
|
||||
container.Capabilities = libcontainer.GetAllCapabilities()
|
||||
func (d *driver) setPrivileged(container *libcontainer.Config) (err error) {
|
||||
container.Capabilities = capabilities.GetAllCapabilities()
|
||||
container.Cgroups.AllowAllDevices = true
|
||||
|
||||
hostDeviceNodes, err := devices.GetHostDeviceNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
container.DeviceNodes = hostDeviceNodes
|
||||
container.MountConfig.DeviceNodes = hostDeviceNodes
|
||||
|
||||
delete(container.Context, "restrictions")
|
||||
container.RestrictSys = false
|
||||
|
||||
if apparmor.IsEnabled() {
|
||||
container.Context["apparmor_profile"] = "unconfined"
|
||||
container.AppArmorProfile = "unconfined"
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.Command) error {
|
||||
func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error {
|
||||
if c.Resources != nil {
|
||||
container.Cgroups.CpuShares = c.Resources.CpuShares
|
||||
container.Cgroups.Memory = c.Resources.Memory
|
||||
|
@ -135,12 +144,13 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C
|
|||
container.Cgroups.MemorySwap = c.Resources.MemorySwap
|
||||
container.Cgroups.CpusetCpus = c.Resources.Cpuset
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error {
|
||||
func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Command) error {
|
||||
for _, m := range c.Mounts {
|
||||
container.Mounts = append(container.Mounts, libcontainer.Mount{
|
||||
container.MountConfig.Mounts = append(container.MountConfig.Mounts, mount.Mount{
|
||||
Type: "bind",
|
||||
Source: m.Source,
|
||||
Destination: m.Destination,
|
||||
|
@ -148,11 +158,13 @@ func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Co
|
|||
Private: m.Private,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) setupLabels(container *libcontainer.Container, c *execdriver.Command) error {
|
||||
container.Context["process_label"] = c.Config["process_label"][0]
|
||||
container.Context["mount_label"] = c.Config["mount_label"][0]
|
||||
func (d *driver) setupLabels(container *libcontainer.Config, c *execdriver.Command) error {
|
||||
container.ProcessLabel = c.Config["process_label"][0]
|
||||
container.MountConfig.MountLabel = c.Config["mount_label"][0]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ const (
|
|||
|
||||
func init() {
|
||||
execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
|
||||
var container *libcontainer.Container
|
||||
var container *libcontainer.Config
|
||||
f, err := os.Open(filepath.Join(args.Root, "container.json"))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -54,7 +54,7 @@ func init() {
|
|||
}
|
||||
|
||||
type activeContainer struct {
|
||||
container *libcontainer.Container
|
||||
container *libcontainer.Config
|
||||
cmd *exec.Cmd
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ func NewDriver(root, initPath string) (*driver, error) {
|
|||
}
|
||||
|
||||
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
|
||||
// take the Command and populate the libcontainer.Container from it
|
||||
// take the Command and populate the libcontainer.Config from it
|
||||
container, err := d.createContainer(c)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
|
@ -110,7 +110,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
|
|||
|
||||
term := getTerminal(c, pipes)
|
||||
|
||||
return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
|
||||
return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
|
||||
// we need to join the rootfs because namespaces will setup the rootfs and chroot
|
||||
initPath := filepath.Join(c.Rootfs, c.InitPath)
|
||||
|
||||
|
@ -171,21 +171,30 @@ func (d *driver) Unpause(c *execdriver.Command) error {
|
|||
|
||||
func (d *driver) Terminate(p *execdriver.Command) error {
|
||||
// lets check the start time for the process
|
||||
started, err := d.readStartTime(p)
|
||||
state, err := libcontainer.GetState(filepath.Join(d.root, p.ID))
|
||||
if err != nil {
|
||||
// if we don't have the data on disk then we can assume the process is gone
|
||||
// because this is only removed after we know the process has stopped
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
// TODO: Remove this part for version 1.2.0
|
||||
// This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
|
||||
data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start"))
|
||||
if err != nil {
|
||||
// if we don't have the data on disk then we can assume the process is gone
|
||||
// because this is only removed after we know the process has stopped
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
state = &libcontainer.State{InitStartTime: string(data)}
|
||||
}
|
||||
|
||||
currentStartTime, err := system.GetProcessStartTime(p.Process.Pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if started == currentStartTime {
|
||||
if state.InitStartTime == currentStartTime {
|
||||
err = syscall.Kill(p.Process.Pid, 9)
|
||||
syscall.Wait4(p.Process.Pid, nil, 0, nil)
|
||||
}
|
||||
|
@ -194,14 +203,6 @@ func (d *driver) Terminate(p *execdriver.Command) error {
|
|||
|
||||
}
|
||||
|
||||
func (d *driver) readStartTime(p *execdriver.Command) (string, error) {
|
||||
data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func (d *driver) Info(id string) execdriver.Info {
|
||||
return &info{
|
||||
ID: id,
|
||||
|
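The Terminate rewrite compares the start time recorded for the container's init process with the current start time of that PID before sending SIGKILL, so a recycled PID is never killed by mistake. The check depends on the kernel exposing a per-process start time; the sketch below shows one way to read it (field 22 of /proc/<pid>/stat on Linux). The parsing here is an assumption for illustration only; the daemon actually calls libcontainer's system.GetProcessStartTime.

package main

import (
	"fmt"
	"os"
	"strings"
)

// processStartTime returns field 22 of /proc/<pid>/stat (the process start
// time in clock ticks). Linux-only; errors are returned to the caller.
func processStartTime(pid int) (string, error) {
	data, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
	if err != nil {
		return "", err
	}
	// Fields 1-2 (pid and comm) sit before the closing ')'; comm may contain
	// spaces, so split only after the paren. The remainder starts at field 3,
	// which puts field 22 at index 19.
	rest := data[strings.LastIndexByte(string(data), ')')+2:]
	fields := strings.Fields(string(rest))
	return fields[19], nil
}

func main() {
	pid := os.Getpid()
	recorded, _ := processStartTime(pid) // errors dropped for brevity
	current, _ := processStartTime(pid)
	// Only safe to kill if the PID still refers to the same process.
	fmt.Println("same process:", recorded == current)
}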
@ -229,7 +230,7 @@ func (d *driver) GetPidsForContainer(id string) ([]int, error) {
|
|||
return fs.GetPids(c)
|
||||
}
|
||||
|
||||
func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error {
|
||||
func (d *driver) writeContainerFile(container *libcontainer.Config, id string) error {
|
||||
data, err := json.Marshal(container)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -3,6 +3,8 @@ package native
|
|||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/libcontainer"
|
||||
)
|
||||
|
||||
type info struct {
|
||||
|
@ -14,6 +16,11 @@ type info struct {
|
|||
// pid file for a container. If the file exists then the
|
||||
// container is currently running
|
||||
func (i *info) IsRunning() bool {
|
||||
if _, err := libcontainer.GetState(filepath.Join(i.driver.root, i.ID)); err == nil {
|
||||
return true
|
||||
}
|
||||
// TODO: Remove this part for version 1.2.0
|
||||
// This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
|
||||
if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil {
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -7,8 +7,8 @@ import (
|
|||
)
|
||||
|
||||
// New returns the docker default configuration for libcontainer
|
||||
func New() *libcontainer.Container {
|
||||
container := &libcontainer.Container{
|
||||
func New() *libcontainer.Config {
|
||||
container := &libcontainer.Config{
|
||||
Capabilities: []string{
|
||||
"CHOWN",
|
||||
"DAC_OVERRIDE",
|
||||
|
@ -34,10 +34,12 @@ func New() *libcontainer.Container {
|
|||
Parent: "docker",
|
||||
AllowAllDevices: false,
|
||||
},
|
||||
Context: libcontainer.Context{},
|
||||
MountConfig: &libcontainer.MountConfig{},
|
||||
}
|
||||
|
||||
if apparmor.IsEnabled() {
|
||||
container.Context["apparmor_profile"] = "docker-default"
|
||||
container.AppArmorProfile = "docker-default"
|
||||
}
|
||||
|
||||
return container
|
||||
}
|
||||
|
|
|
@ -295,7 +295,7 @@ func (a *Driver) Put(id string) {
|
|||
|
||||
// Returns an archive of the contents for the id
|
||||
func (a *Driver) Diff(id string) (archive.Archive, error) {
|
||||
return archive.TarFilter(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
|
||||
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package vfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/daemon/graphdriver"
|
||||
"os"
|
||||
|
@ -35,8 +36,24 @@ func (d *Driver) Cleanup() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func isGNUcoreutils() bool {
|
||||
if stdout, err := exec.Command("cp", "--version").Output(); err == nil {
|
||||
return bytes.Contains(stdout, []byte("GNU coreutils"))
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func copyDir(src, dst string) error {
|
||||
if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil {
|
||||
argv := make([]string, 0, 4)
|
||||
|
||||
if isGNUcoreutils() {
|
||||
argv = append(argv, "-aT", "--reflink=auto", src, dst)
|
||||
} else {
|
||||
argv = append(argv, "-a", src+"/.", dst+"/.")
|
||||
}
|
||||
|
||||
if output, err := exec.Command("cp", argv...).CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output)
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -2,6 +2,7 @@ package daemon
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/dotcloud/docker/engine"
|
||||
"github.com/dotcloud/docker/runconfig"
|
||||
|
@ -15,7 +16,7 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
|
|||
if container := daemon.Get(name); container != nil {
|
||||
container.Lock()
|
||||
defer container.Unlock()
|
||||
if job.GetenvBool("dirty") {
|
||||
if job.GetenvBool("raw") {
|
||||
b, err := json.Marshal(&struct {
|
||||
*Container
|
||||
HostConfig *runconfig.HostConfig
|
||||
|
@ -46,7 +47,16 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
|
|||
out.Set("ProcessLabel", container.ProcessLabel)
|
||||
out.SetJson("Volumes", container.Volumes)
|
||||
out.SetJson("VolumesRW", container.VolumesRW)
|
||||
|
||||
if children, err := daemon.Children(container.Name); err == nil {
|
||||
for linkAlias, child := range children {
|
||||
container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
|
||||
}
|
||||
}
|
||||
|
||||
out.SetJson("HostConfig", container.hostConfig)
|
||||
|
||||
container.hostConfig.Links = nil
|
||||
if _, err := out.WriteTo(job.Stdout); err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
|
|
|
@ -20,7 +20,8 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
DefaultNetworkBridge = "docker0"
|
||||
DefaultNetworkBridge = "docker0"
|
||||
MaxAllocatedPortAttempts = 10
|
||||
)
|
||||
|
||||
// Network interface represents the networking stack of a container
|
||||
|
@ -354,9 +355,6 @@ func Release(job *engine.Job) engine.Status {
|
|||
var (
|
||||
id = job.Args[0]
|
||||
containerInterface = currentInterfaces.Get(id)
|
||||
ip net.IP
|
||||
port int
|
||||
proto string
|
||||
)
|
||||
|
||||
if containerInterface == nil {
|
||||
|
@ -367,22 +365,6 @@ func Release(job *engine.Job) engine.Status {
|
|||
if err := portmapper.Unmap(nat); err != nil {
|
||||
log.Printf("Unable to unmap port %s: %s", nat, err)
|
||||
}
|
||||
|
||||
// this is host mappings
|
||||
switch a := nat.(type) {
|
||||
case *net.TCPAddr:
|
||||
proto = "tcp"
|
||||
ip = a.IP
|
||||
port = a.Port
|
||||
case *net.UDPAddr:
|
||||
proto = "udp"
|
||||
ip = a.IP
|
||||
port = a.Port
|
||||
}
|
||||
|
||||
if err := portallocator.ReleasePort(ip, proto, port); err != nil {
|
||||
log.Printf("Unable to release port %s", nat)
|
||||
}
|
||||
}
|
||||
|
||||
if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
|
||||
|
@ -399,7 +381,7 @@ func AllocatePort(job *engine.Job) engine.Status {
|
|||
ip = defaultBindingIP
|
||||
id = job.Args[0]
|
||||
hostIP = job.Getenv("HostIP")
|
||||
origHostPort = job.GetenvInt("HostPort")
|
||||
hostPort = job.GetenvInt("HostPort")
|
||||
containerPort = job.GetenvInt("ContainerPort")
|
||||
proto = job.Getenv("Proto")
|
||||
network = currentInterfaces.Get(id)
|
||||
|
@ -409,39 +391,46 @@ func AllocatePort(job *engine.Job) engine.Status {
|
|||
ip = net.ParseIP(hostIP)
|
||||
}
|
||||
|
||||
var (
|
||||
hostPort int
|
||||
container net.Addr
|
||||
host net.Addr
|
||||
)
|
||||
// host ip, proto, and host port
|
||||
var container net.Addr
|
||||
switch proto {
|
||||
case "tcp":
|
||||
container = &net.TCPAddr{IP: network.IP, Port: containerPort}
|
||||
case "udp":
|
||||
container = &net.UDPAddr{IP: network.IP, Port: containerPort}
|
||||
default:
|
||||
return job.Errorf("unsupported address type %s", proto)
|
||||
}
|
||||
|
||||
/*
|
||||
Try up to 10 times to get a port that's not already allocated.
|
||||
//
|
||||
// Try up to 10 times to get a port that's not already allocated.
|
||||
//
|
||||
// In the event of failure to bind, return the error that portmapper.Map
|
||||
// yields.
|
||||
//
|
||||
|
||||
In the event of failure to bind, return the error that portmapper.Map
|
||||
yields.
|
||||
*/
|
||||
for i := 0; i < 10; i++ {
|
||||
// host ip, proto, and host port
|
||||
hostPort, err = portallocator.RequestPort(ip, proto, origHostPort)
|
||||
|
||||
if err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
|
||||
if proto == "tcp" {
|
||||
host = &net.TCPAddr{IP: ip, Port: hostPort}
|
||||
container = &net.TCPAddr{IP: network.IP, Port: containerPort}
|
||||
} else {
|
||||
host = &net.UDPAddr{IP: ip, Port: hostPort}
|
||||
container = &net.UDPAddr{IP: network.IP, Port: containerPort}
|
||||
}
|
||||
|
||||
if err = portmapper.Map(container, ip, hostPort); err == nil {
|
||||
var host net.Addr
|
||||
for i := 0; i < MaxAllocatedPortAttempts; i++ {
|
||||
if host, err = portmapper.Map(container, ip, hostPort); err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
job.Logf("Failed to bind %s:%d for container address %s:%d. Trying another port.", ip.String(), hostPort, network.IP.String(), containerPort)
|
||||
switch allocerr := err.(type) {
|
||||
case portallocator.ErrPortAlreadyAllocated:
|
||||
// There is no point in immediately retrying to map an explicitly
|
||||
// chosen port.
|
||||
if hostPort != 0 {
|
||||
job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error())
|
||||
break
|
||||
}
|
||||
|
||||
// Automatically chosen 'free' port failed to bind: move on the next.
|
||||
job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
|
||||
default:
|
||||
// some other error during mapping
|
||||
job.Logf("Received an unexpected error during port allocation: %s", err.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
@ -451,12 +440,18 @@ func AllocatePort(job *engine.Job) engine.Status {
|
|||
network.PortMappings = append(network.PortMappings, host)
|
||||
|
||||
out := engine.Env{}
|
||||
out.Set("HostIP", ip.String())
|
||||
out.SetInt("HostPort", hostPort)
|
||||
|
||||
switch netAddr := host.(type) {
|
||||
case *net.TCPAddr:
|
||||
out.Set("HostIP", netAddr.IP.String())
|
||||
out.SetInt("HostPort", netAddr.Port)
|
||||
case *net.UDPAddr:
|
||||
out.Set("HostIP", netAddr.IP.String())
|
||||
out.SetInt("HostPort", netAddr.Port)
|
||||
}
|
||||
if _, err := out.WriteTo(job.Stdout); err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
|
||||
return engine.StatusOK
|
||||
}
|
||||
|
||||
|
|
106
daemon/networkdriver/bridge/driver_test.go
Normal file
|
@ -0,0 +1,106 @@
|
|||
package bridge
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/dotcloud/docker/engine"
|
||||
)
|
||||
|
||||
func findFreePort(t *testing.T) int {
|
||||
l, err := net.Listen("tcp", ":0")
|
||||
if err != nil {
|
||||
t.Fatal("Failed to find a free port")
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
result, err := net.ResolveTCPAddr("tcp", l.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatal("Failed to resolve address to identify free port")
|
||||
}
|
||||
return result.Port
|
||||
}
|
||||
|
||||
func newPortAllocationJob(eng *engine.Engine, port int) (job *engine.Job) {
|
||||
strPort := strconv.Itoa(port)
|
||||
|
||||
job = eng.Job("allocate_port", "container_id")
|
||||
job.Setenv("HostIP", "127.0.0.1")
|
||||
job.Setenv("HostPort", strPort)
|
||||
job.Setenv("Proto", "tcp")
|
||||
job.Setenv("ContainerPort", strPort)
|
||||
return
|
||||
}
|
||||
|
||||
func TestAllocatePortDetection(t *testing.T) {
|
||||
eng := engine.New()
|
||||
eng.Logging = false
|
||||
|
||||
freePort := findFreePort(t)
|
||||
|
||||
// Init driver
|
||||
job := eng.Job("initdriver")
|
||||
if res := InitDriver(job); res != engine.StatusOK {
|
||||
t.Fatal("Failed to initialize network driver")
|
||||
}
|
||||
|
||||
// Allocate interface
|
||||
job = eng.Job("allocate_interface", "container_id")
|
||||
if res := Allocate(job); res != engine.StatusOK {
|
||||
t.Fatal("Failed to allocate network interface")
|
||||
}
|
||||
|
||||
// Allocate same port twice, expect failure on second call
|
||||
job = newPortAllocationJob(eng, freePort)
|
||||
if res := AllocatePort(job); res != engine.StatusOK {
|
||||
t.Fatal("Failed to find a free port to allocate")
|
||||
}
|
||||
if res := AllocatePort(job); res == engine.StatusOK {
|
||||
t.Fatal("Duplicate port allocation granted by AllocatePort")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocatePortReclaim(t *testing.T) {
|
||||
eng := engine.New()
|
||||
eng.Logging = false
|
||||
|
||||
freePort := findFreePort(t)
|
||||
|
||||
// Init driver
|
||||
job := eng.Job("initdriver")
|
||||
if res := InitDriver(job); res != engine.StatusOK {
|
||||
t.Fatal("Failed to initialize network driver")
|
||||
}
|
||||
|
||||
// Allocate interface
|
||||
job = eng.Job("allocate_interface", "container_id")
|
||||
if res := Allocate(job); res != engine.StatusOK {
|
||||
t.Fatal("Failed to allocate network interface")
|
||||
}
|
||||
|
||||
// Occupy port
|
||||
listenAddr := fmt.Sprintf(":%d", freePort)
|
||||
tcpListenAddr, err := net.ResolveTCPAddr("tcp", listenAddr)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to resolve TCP address '%s'", listenAddr)
|
||||
}
|
||||
|
||||
l, err := net.ListenTCP("tcp", tcpListenAddr)
|
||||
if err != nil {
|
||||
t.Fatalf("Fail to listen on port %d", freePort)
|
||||
}
|
||||
|
||||
// Allocate port, expect failure
|
||||
job = newPortAllocationJob(eng, freePort)
|
||||
if res := AllocatePort(job); res == engine.StatusOK {
|
||||
t.Fatal("Successfully allocated currently used port")
|
||||
}
|
||||
|
||||
// Reclaim port, retry allocation
|
||||
l.Close()
|
||||
if res := AllocatePort(job); res != engine.StatusOK {
|
||||
t.Fatal("Failed to allocate previously reclaimed port")
|
||||
}
|
||||
}
|
|
@ -2,13 +2,18 @@ package portallocator
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type portMap struct {
|
||||
p map[int]struct{}
|
||||
last int
|
||||
}
|
||||
|
||||
type (
|
||||
portMap map[int]bool
|
||||
protocolMap map[string]portMap
|
||||
protocolMap map[string]*portMap
|
||||
ipMapping map[string]protocolMap
|
||||
)
|
||||
|
||||
|
@ -18,9 +23,8 @@ const (
|
|||
)
|
||||
|
||||
var (
|
||||
ErrAllPortsAllocated = errors.New("all ports are allocated")
|
||||
ErrPortAlreadyAllocated = errors.New("port has already been allocated")
|
||||
ErrUnknownProtocol = errors.New("unknown protocol")
|
||||
ErrAllPortsAllocated = errors.New("all ports are allocated")
|
||||
ErrUnknownProtocol = errors.New("unknown protocol")
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -30,6 +34,34 @@ var (
|
|||
globalMap = ipMapping{}
|
||||
)
|
||||
|
||||
type ErrPortAlreadyAllocated struct {
|
||||
ip string
|
||||
port int
|
||||
}
|
||||
|
||||
func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
|
||||
return ErrPortAlreadyAllocated{
|
||||
ip: ip,
|
||||
port: port,
|
||||
}
|
||||
}
|
||||
|
||||
func (e ErrPortAlreadyAllocated) IP() string {
|
||||
return e.ip
|
||||
}
|
||||
|
||||
func (e ErrPortAlreadyAllocated) Port() int {
|
||||
return e.port
|
||||
}
|
||||
|
||||
func (e ErrPortAlreadyAllocated) IPPort() string {
|
||||
return fmt.Sprintf("%s:%d", e.ip, e.port)
|
||||
}
|
||||
|
||||
func (e ErrPortAlreadyAllocated) Error() string {
|
||||
return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
|
||||
}
|
||||
|
||||
func RequestPort(ip net.IP, proto string, port int) (int, error) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
|
@ -43,11 +75,11 @@ func RequestPort(ip net.IP, proto string, port int) (int, error) {
|
|||
mapping := getOrCreate(ip)
|
||||
|
||||
if port > 0 {
|
||||
if !mapping[proto][port] {
|
||||
mapping[proto][port] = true
|
||||
if _, ok := mapping[proto].p[port]; !ok {
|
||||
mapping[proto].p[port] = struct{}{}
|
||||
return port, nil
|
||||
} else {
|
||||
return 0, ErrPortAlreadyAllocated
|
||||
return 0, NewErrPortAlreadyAllocated(ip.String(), port)
|
||||
}
|
||||
} else {
|
||||
port, err := findPort(ip, proto)
|
||||
|
@ -66,8 +98,8 @@ func ReleasePort(ip net.IP, proto string, port int) error {
|
|||
|
||||
ip = getDefault(ip)
|
||||
|
||||
mapping := getOrCreate(ip)
|
||||
delete(mapping[proto], port)
|
||||
mapping := getOrCreate(ip)[proto]
|
||||
delete(mapping.p, port)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -86,8 +118,8 @@ func getOrCreate(ip net.IP) protocolMap {
|
|||
|
||||
if _, ok := globalMap[ipstr]; !ok {
|
||||
globalMap[ipstr] = protocolMap{
|
||||
"tcp": portMap{},
|
||||
"udp": portMap{},
|
||||
"tcp": &portMap{p: map[int]struct{}{}, last: 0},
|
||||
"udp": &portMap{p: map[int]struct{}{}, last: 0},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -95,21 +127,28 @@ func getOrCreate(ip net.IP) protocolMap {
|
|||
}
|
||||
|
||||
func findPort(ip net.IP, proto string) (int, error) {
|
||||
port := BeginPortRange
|
||||
mapping := getOrCreate(ip)[proto]
|
||||
|
||||
mapping := getOrCreate(ip)
|
||||
|
||||
for mapping[proto][port] {
|
||||
port++
|
||||
|
||||
if port > EndPortRange {
|
||||
return 0, ErrAllPortsAllocated
|
||||
}
|
||||
if mapping.last == 0 {
|
||||
mapping.p[BeginPortRange] = struct{}{}
|
||||
mapping.last = BeginPortRange
|
||||
return BeginPortRange, nil
|
||||
}
|
||||
|
||||
mapping[proto][port] = true
|
||||
for port := mapping.last + 1; port != mapping.last; port++ {
|
||||
if port > EndPortRange {
|
||||
port = BeginPortRange
|
||||
}
|
||||
|
||||
return port, nil
|
||||
if _, ok := mapping.p[port]; !ok {
|
||||
mapping.p[port] = struct{}{}
|
||||
mapping.last = port
|
||||
return port, nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return 0, ErrAllPortsAllocated
|
||||
}
|
||||
|
||||
func getDefault(ip net.IP) net.IP {
|
||||
|
|
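findPort now remembers the last dynamically allocated port per ip/proto and scans forward from it, wrapping at EndPortRange, instead of always restarting the scan at BeginPortRange. A quick illustration of the observable effect through the public API; port 0 requests a dynamic port, errors are dropped for brevity, and the concrete numbers assume the package's default 49153 starting range, which this hunk does not show.

package main

import (
	"fmt"
	"net"

	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
)

func main() {
	ip := net.ParseIP("127.0.0.1")

	// Port 0 asks the allocator for a dynamic port.
	first, _ := portallocator.RequestPort(ip, "tcp", 0)
	second, _ := portallocator.RequestPort(ip, "tcp", 0)

	// Releasing the first port does not hand it straight back out:
	// the allocator keeps moving forward from the last allocated port.
	portallocator.ReleasePort(ip, "tcp", first)
	third, _ := portallocator.RequestPort(ip, "tcp", 0)

	fmt.Println(first, second, third) // e.g. 49153 49154 49155
}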
|
@ -83,8 +83,11 @@ func TestReleaseUnreadledPort(t *testing.T) {
|
|||
}
|
||||
|
||||
port, err = RequestPort(defaultIP, "tcp", 5000)
|
||||
if err != ErrPortAlreadyAllocated {
|
||||
t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err)
|
||||
|
||||
switch err.(type) {
|
||||
case ErrPortAlreadyAllocated:
|
||||
default:
|
||||
t.Fatalf("Expected port allocation error got %s", err)
|
||||
}
|
||||
}
|
||||
|
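Replacing the ErrPortAlreadyAllocated sentinel value with a struct type lets callers recover the offending ip:port, which is what the bridge driver's retry loop and the updated test above rely on. A short usage sketch against the API as it appears in this diff:

package main

import (
	"fmt"
	"net"

	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
)

func main() {
	ip := net.ParseIP("127.0.0.1")

	if _, err := portallocator.RequestPort(ip, "tcp", 5000); err != nil {
		panic(err)
	}

	// Requesting the same explicit port again now yields a typed error
	// carrying the ip:port that clashed.
	_, err := portallocator.RequestPort(ip, "tcp", 5000)
	switch allocerr := err.(type) {
	case portallocator.ErrPortAlreadyAllocated:
		fmt.Println("already allocated:", allocerr.IPPort())
	default:
		fmt.Println("unexpected result:", err)
	}

	// Releasing frees the port for the next request.
	portallocator.ReleasePort(ip, "tcp", 5000)
}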
||||
|
|
|
@ -3,10 +3,12 @@ package portmapper
import (
	"errors"
	"fmt"
	"github.com/dotcloud/docker/pkg/iptables"
	"github.com/dotcloud/docker/pkg/proxy"
	"net"
	"sync"

	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
	"github.com/dotcloud/docker/pkg/iptables"
	"github.com/dotcloud/docker/pkg/proxy"
)

type mapping struct {

@ -35,43 +37,66 @@ func SetIptablesChain(c *iptables.Chain) {
	chain = c
}

func Map(container net.Addr, hostIP net.IP, hostPort int) error {
func Map(container net.Addr, hostIP net.IP, hostPort int) (net.Addr, error) {
	lock.Lock()
	defer lock.Unlock()

	var m *mapping
	var (
		m                 *mapping
		err               error
		proto             string
		allocatedHostPort int
	)

	// release the port on any error during return.
	defer func() {
		if err != nil {
			portallocator.ReleasePort(hostIP, proto, allocatedHostPort)
		}
	}()

	switch container.(type) {
	case *net.TCPAddr:
		proto = "tcp"
		if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
			return nil, err
		}
		m = &mapping{
			proto:     "tcp",
			host:      &net.TCPAddr{IP: hostIP, Port: hostPort},
			proto:     proto,
			host:      &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
			container: container,
		}
	case *net.UDPAddr:
		proto = "udp"
		if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
			return nil, err
		}
		m = &mapping{
			proto:     "udp",
			host:      &net.UDPAddr{IP: hostIP, Port: hostPort},
			proto:     proto,
			host:      &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
			container: container,
		}
	default:
		return ErrUnknownBackendAddressType
		err = ErrUnknownBackendAddressType
		return nil, err
	}

	key := getKey(m.host)
	if _, exists := currentMappings[key]; exists {
		return ErrPortMappedForIP
		err = ErrPortMappedForIP
		return nil, err
	}

	containerIP, containerPort := getIPAndPort(m.container)
	if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
		return err
	if err := forward(iptables.Add, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
		return nil, err
	}

	p, err := newProxy(m.host, m.container)
	if err != nil {
		// need to undo the iptables rules before we reutrn
		forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort)
		return err
		// need to undo the iptables rules before we return
		forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
		return nil, err
	}

	m.userlandProxy = p

@ -79,7 +104,7 @@ func Map(container net.Addr, hostIP net.IP, hostPort int) error {

	go p.Run()

	return nil
	return m.host, nil
}

func Unmap(host net.Addr) error {

@ -100,6 +125,18 @@ func Unmap(host net.Addr) error {
	if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
		return err
	}

	switch a := host.(type) {
	case *net.TCPAddr:
		if err := portallocator.ReleasePort(a.IP, "tcp", a.Port); err != nil {
			return err
		}
	case *net.UDPAddr:
		if err := portallocator.ReleasePort(a.IP, "udp", a.Port); err != nil {
			return err
		}
	}

	return nil
}

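With Map now requesting the host port itself and returning the bound address, every early exit has to hand the port back; the hunk above does this with a deferred cleanup that inspects a shared err variable, which is why the later failure paths assign to err before returning. Below is a minimal sketch of that release-on-error pattern, with acquire and release as hypothetical stand-ins for the portallocator calls; it is an illustration of the pattern, not the portmapper code itself.

    package main

    import (
        "errors"
        "fmt"
        "net"
    )

    var errAlreadyBound = errors.New("port already mapped for this address")

    // acquire and release are hypothetical stand-ins for
    // portallocator.RequestPort and portallocator.ReleasePort.
    func acquire() (int, error) { return 49153, nil }

    func release(port int) { fmt.Println("released port", port) }

    // bind mirrors the shape of the new Map: allocate first, register a
    // deferred cleanup that fires only when err is non-nil, and make sure
    // every failure path assigns to that same err before returning.
    func bind(alreadyBound bool) (net.Addr, error) {
        var (
            err           error
            allocatedPort int
        )

        // release the port on any error during return.
        defer func() {
            if err != nil {
                release(allocatedPort)
            }
        }()

        if allocatedPort, err = acquire(); err != nil {
            return nil, err
        }
        if alreadyBound {
            err = errAlreadyBound // assigning (not shadowing) err lets the defer see it
            return nil, err
        }
        return &net.TCPAddr{IP: net.IPv4zero, Port: allocatedPort}, nil
    }

    func main() {
        addr, err := bind(false)
        fmt.Println(addr, err) // 0.0.0.0:49153 <nil>
        if _, err := bind(true); err != nil {
            fmt.Println(err) // the deferred cleanup has already released the port
        }
    }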
@ -1,6 +1,7 @@
|
|||
package portmapper
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
|
||||
"github.com/dotcloud/docker/pkg/iptables"
|
||||
"github.com/dotcloud/docker/pkg/proxy"
|
||||
"net"
|
||||
|
@ -44,19 +45,26 @@ func TestMapPorts(t *testing.T) {
|
|||
srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
|
||||
srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
|
||||
|
||||
if err := Map(srcAddr1, dstIp1, 80); err != nil {
|
||||
addrEqual := func(addr1, addr2 net.Addr) bool {
|
||||
return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
|
||||
}
|
||||
|
||||
if host, err := Map(srcAddr1, dstIp1, 80); err != nil {
|
||||
t.Fatalf("Failed to allocate port: %s", err)
|
||||
} else if !addrEqual(dstAddr1, host) {
|
||||
t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
|
||||
dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
|
||||
}
|
||||
|
||||
if Map(srcAddr1, dstIp1, 80) == nil {
|
||||
if _, err := Map(srcAddr1, dstIp1, 80); err == nil {
|
||||
t.Fatalf("Port is in use - mapping should have failed")
|
||||
}
|
||||
|
||||
if Map(srcAddr2, dstIp1, 80) == nil {
|
||||
if _, err := Map(srcAddr2, dstIp1, 80); err == nil {
|
||||
t.Fatalf("Port is in use - mapping should have failed")
|
||||
}
|
||||
|
||||
if err := Map(srcAddr2, dstIp2, 80); err != nil {
|
||||
if _, err := Map(srcAddr2, dstIp2, 80); err != nil {
|
||||
t.Fatalf("Failed to allocate port: %s", err)
|
||||
}
|
||||
|
||||
|
@ -105,3 +113,40 @@ func TestGetUDPIPAndPort(t *testing.T) {
|
|||
t.Fatalf("expected port %d got %d", ep, port)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapAllPortsSingleInterface(t *testing.T) {
|
||||
dstIp1 := net.ParseIP("0.0.0.0")
|
||||
srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
|
||||
|
||||
hosts := []net.Addr{}
|
||||
var host net.Addr
|
||||
var err error
|
||||
|
||||
defer func() {
|
||||
for _, val := range hosts {
|
||||
Unmap(val)
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := portallocator.BeginPortRange; i < portallocator.EndPortRange; i++ {
|
||||
if host, err = Map(srcAddr1, dstIp1, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hosts = append(hosts, host)
|
||||
}
|
||||
|
||||
if _, err := Map(srcAddr1, dstIp1, portallocator.BeginPortRange); err == nil {
|
||||
t.Fatalf("Port %d should be bound but is not", portallocator.BeginPortRange)
|
||||
}
|
||||
|
||||
for _, val := range hosts {
|
||||
if err := Unmap(val); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
hosts = []net.Addr{}
|
||||
}
|
||||
}
|
||||
|
|
115 daemon/state.go
|
@ -16,6 +16,13 @@ type State struct {
|
|||
ExitCode int
|
||||
StartedAt time.Time
|
||||
FinishedAt time.Time
|
||||
waitChan chan struct{}
|
||||
}
|
||||
|
||||
func NewState() *State {
|
||||
return &State{
|
||||
waitChan: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a human-readable description of the state
|
||||
|
@ -35,56 +42,118 @@ func (s *State) String() string {
|
|||
return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
|
||||
}
|
||||
|
||||
func wait(waitChan <-chan struct{}, timeout time.Duration) error {
|
||||
if timeout < 0 {
|
||||
<-waitChan
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
return fmt.Errorf("Timed out: %v", timeout)
|
||||
case <-waitChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WaitRunning waits until state is running. If state already running it returns
|
||||
// immediately. If you want to wait forever you must supply a negative timeout.
|
||||
// Returns pid, that was passed to SetRunning
|
||||
func (s *State) WaitRunning(timeout time.Duration) (int, error) {
|
||||
s.RLock()
|
||||
if s.IsRunning() {
|
||||
pid := s.Pid
|
||||
s.RUnlock()
|
||||
return pid, nil
|
||||
}
|
||||
waitChan := s.waitChan
|
||||
s.RUnlock()
|
||||
if err := wait(waitChan, timeout); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return s.GetPid(), nil
|
||||
}
|
||||
|
||||
// WaitStop waits until state is stopped. If state already stopped it returns
|
||||
// immediately. If you want to wait forever you must supply a negative timeout.
|
||||
// Returns exit code, that was passed to SetStopped
|
||||
func (s *State) WaitStop(timeout time.Duration) (int, error) {
|
||||
s.RLock()
|
||||
if !s.Running {
|
||||
exitCode := s.ExitCode
|
||||
s.RUnlock()
|
||||
return exitCode, nil
|
||||
}
|
||||
waitChan := s.waitChan
|
||||
s.RUnlock()
|
||||
if err := wait(waitChan, timeout); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return s.GetExitCode(), nil
|
||||
}
|
||||
|
||||
func (s *State) IsRunning() bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
res := s.Running
|
||||
s.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
||||
return s.Running
|
||||
func (s *State) GetPid() int {
|
||||
s.RLock()
|
||||
res := s.Pid
|
||||
s.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *State) GetExitCode() int {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
return s.ExitCode
|
||||
res := s.ExitCode
|
||||
s.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *State) SetRunning(pid int) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.Running = true
|
||||
s.Paused = false
|
||||
s.ExitCode = 0
|
||||
s.Pid = pid
|
||||
s.StartedAt = time.Now().UTC()
|
||||
if !s.Running {
|
||||
s.Running = true
|
||||
s.Paused = false
|
||||
s.ExitCode = 0
|
||||
s.Pid = pid
|
||||
s.StartedAt = time.Now().UTC()
|
||||
close(s.waitChan) // fire waiters for start
|
||||
s.waitChan = make(chan struct{})
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func (s *State) SetStopped(exitCode int) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.Running = false
|
||||
s.Pid = 0
|
||||
s.FinishedAt = time.Now().UTC()
|
||||
s.ExitCode = exitCode
|
||||
if s.Running {
|
||||
s.Running = false
|
||||
s.Pid = 0
|
||||
s.FinishedAt = time.Now().UTC()
|
||||
s.ExitCode = exitCode
|
||||
close(s.waitChan) // fire waiters for stop
|
||||
s.waitChan = make(chan struct{})
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func (s *State) SetPaused() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.Paused = true
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func (s *State) SetUnpaused() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.Paused = false
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func (s *State) IsPaused() bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
return s.Paused
|
||||
res := s.Paused
|
||||
s.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
|
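The daemon/state.go hunks above swap polling for a broadcast channel: callers block on a snapshot of waitChan, and SetRunning/SetStopped close that channel to wake every waiter, then install a fresh one for the next transition. Below is a minimal sketch of the same wait/broadcast pattern, assuming a cut-down state type (miniState) with only the fields the example needs; it is an illustration, not the daemon's State.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // miniState keeps just enough of the State idea to show waitChan.
    type miniState struct {
        sync.RWMutex
        running  bool
        pid      int
        waitChan chan struct{}
    }

    func newMiniState() *miniState {
        return &miniState{waitChan: make(chan struct{})}
    }

    // SetRunning closes the current waitChan to wake every waiter, then
    // replaces it so future waiters block until the next transition.
    func (s *miniState) SetRunning(pid int) {
        s.Lock()
        if !s.running {
            s.running = true
            s.pid = pid
            close(s.waitChan) // fire waiters
            s.waitChan = make(chan struct{})
        }
        s.Unlock()
    }

    // WaitRunning returns immediately if the state is already running,
    // otherwise it blocks on the snapshot of waitChan or times out.
    func (s *miniState) WaitRunning(timeout time.Duration) (int, error) {
        s.RLock()
        if s.running {
            pid := s.pid
            s.RUnlock()
            return pid, nil
        }
        waitChan := s.waitChan
        s.RUnlock()

        select {
        case <-waitChan:
            s.RLock()
            pid := s.pid
            s.RUnlock()
            return pid, nil
        case <-time.After(timeout):
            return -1, fmt.Errorf("timed out after %v", timeout)
        }
    }

    func main() {
        s := newMiniState()
        go func() {
            time.Sleep(10 * time.Millisecond)
            s.SetRunning(42)
        }()
        fmt.Println(s.WaitRunning(time.Second)) // 42 <nil>
    }

Snapshotting waitChan under the read lock before blocking is what makes the swap in SetRunning safe: a waiter always waits on the channel that belonged to the transition it observed.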
102 daemon/state_test.go (new file)
|
@ -0,0 +1,102 @@
|
|||
package daemon
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestStateRunStop(t *testing.T) {
|
||||
s := NewState()
|
||||
for i := 1; i < 3; i++ { // full lifecycle two times
|
||||
started := make(chan struct{})
|
||||
var pid int64
|
||||
go func() {
|
||||
runPid, _ := s.WaitRunning(-1 * time.Second)
|
||||
atomic.StoreInt64(&pid, int64(runPid))
|
||||
close(started)
|
||||
}()
|
||||
s.SetRunning(i + 100)
|
||||
if !s.IsRunning() {
|
||||
t.Fatal("State not running")
|
||||
}
|
||||
if s.Pid != i+100 {
|
||||
t.Fatalf("Pid %v, expected %v", s.Pid, i+100)
|
||||
}
|
||||
if s.ExitCode != 0 {
|
||||
t.Fatalf("ExitCode %v, expected 0", s.ExitCode)
|
||||
}
|
||||
select {
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
||||
case <-started:
|
||||
t.Log("Start callback fired")
|
||||
}
|
||||
runPid := int(atomic.LoadInt64(&pid))
|
||||
if runPid != i+100 {
|
||||
t.Fatalf("Pid %v, expected %v", runPid, i+100)
|
||||
}
|
||||
if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 {
|
||||
t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
|
||||
}
|
||||
|
||||
stopped := make(chan struct{})
|
||||
var exit int64
|
||||
go func() {
|
||||
exitCode, _ := s.WaitStop(-1 * time.Second)
|
||||
atomic.StoreInt64(&exit, int64(exitCode))
|
||||
close(stopped)
|
||||
}()
|
||||
s.SetStopped(i)
|
||||
if s.IsRunning() {
|
||||
t.Fatal("State is running")
|
||||
}
|
||||
if s.ExitCode != i {
|
||||
t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i)
|
||||
}
|
||||
if s.Pid != 0 {
|
||||
t.Fatalf("Pid %v, expected 0", s.Pid)
|
||||
}
|
||||
select {
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("Stop callback doesn't fire in 100 milliseconds")
|
||||
case <-stopped:
|
||||
t.Log("Stop callback fired")
|
||||
}
|
||||
exitCode := int(atomic.LoadInt64(&exit))
|
||||
if exitCode != i {
|
||||
t.Fatalf("ExitCode %v, expected %v", exitCode, i)
|
||||
}
|
||||
if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i {
|
||||
t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateTimeoutWait(t *testing.T) {
|
||||
s := NewState()
|
||||
started := make(chan struct{})
|
||||
go func() {
|
||||
s.WaitRunning(100 * time.Millisecond)
|
||||
close(started)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(200 * time.Millisecond):
|
||||
t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
||||
case <-started:
|
||||
t.Log("Start callback fired")
|
||||
}
|
||||
s.SetRunning(42)
|
||||
stopped := make(chan struct{})
|
||||
go func() {
|
||||
s.WaitRunning(100 * time.Millisecond)
|
||||
close(stopped)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(200 * time.Millisecond):
|
||||
t.Fatal("Start callback doesn't fire in 100 milliseconds")
|
||||
case <-stopped:
|
||||
t.Log("Start callback fired")
|
||||
}
|
||||
|
||||
}
|
|
@ -31,6 +31,7 @@ type Config struct {
|
|||
DisableNetwork bool
|
||||
EnableSelinuxSupport bool
|
||||
Context map[string][]string
|
||||
Sockets []string
|
||||
}
|
||||
|
||||
// ConfigFromJob creates and returns a new DaemonConfig object
|
||||
|
@ -66,6 +67,9 @@ func ConfigFromJob(job *engine.Job) *Config {
|
|||
config.Mtu = GetDefaultNetworkMtu()
|
||||
}
|
||||
config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
|
||||
if sockets := job.GetenvList("Sockets"); sockets != nil {
|
||||
config.Sockets = sockets
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
@ -47,7 +48,7 @@ func main() {
|
|||
bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
|
||||
bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
|
||||
pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
|
||||
flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime")
|
||||
flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
|
||||
flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
|
||||
flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
|
||||
flDns = opts.NewListOpts(opts.ValidateIp4Address)
|
||||
|
@ -56,8 +57,8 @@ func main() {
|
|||
flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
|
||||
flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
|
||||
flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
|
||||
flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver")
|
||||
flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver")
|
||||
flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
|
||||
flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
|
||||
flHosts = opts.NewListOpts(api.ValidateHost)
|
||||
flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
|
||||
flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
|
||||
|
@ -67,7 +68,7 @@ func main() {
|
|||
flKey = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file")
|
||||
flSelinuxEnabled = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support")
|
||||
)
|
||||
flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers")
|
||||
flag.Var(&flDns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
|
||||
flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
|
||||
flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
|
||||
flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options")
|
||||
|
@ -95,6 +96,14 @@ func main() {
|
|||
log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
|
||||
}
|
||||
|
||||
if !*flEnableIptables && !*flInterContainerComm {
|
||||
log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
|
||||
}
|
||||
|
||||
if net.ParseIP(*flDefaultIp) == nil {
|
||||
log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
|
||||
}
|
||||
|
||||
if *flDebug {
|
||||
os.Setenv("DEBUG", "1")
|
||||
}
|
||||
|
@ -162,6 +171,7 @@ func main() {
|
|||
job.Setenv("ExecDriver", *flExecDriver)
|
||||
job.SetenvInt("Mtu", *flMtu)
|
||||
job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
|
||||
job.SetenvList("Sockets", flHosts.GetAll())
|
||||
if err := job.Run(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
@ -259,7 +269,7 @@ func showVersion() {
|
|||
func checkKernelAndArch() error {
|
||||
// Check for unsupported architectures
|
||||
if runtime.GOARCH != "amd64" {
|
||||
return fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
|
||||
return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
|
||||
}
|
||||
// Check for unsupported kernel versions
|
||||
// FIXME: it would be cleaner to not test for specific versions, but rather
|
||||
|
|
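The daemon startup hunk above adds an up-front check that the --ip value parses as an IP address before any other work is done. A small sketch of that kind of flag validation using only the standard library; the flag name and messages here are illustrative, not the daemon's real flag set.

    package main

    import (
        "flag"
        "log"
        "net"
    )

    func main() {
        // defaultIP mirrors the --ip flag validated in the hunk above.
        defaultIP := flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports")
        flag.Parse()

        // net.ParseIP returns nil for anything that is not a valid IP
        // literal, which is exactly the check added in main().
        if net.ParseIP(*defaultIP) == nil {
            log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *defaultIP)
        }
        log.Printf("binding container ports on %s", *defaultIP)
    }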
5 docs/.gitignore (vendored, new file)
|
@ -0,0 +1,5 @@
|
|||
# generated by man/man/md2man-all.sh
|
||||
man1/
|
||||
man5/
|
||||
# avoid committing the awsconfig file used for releases
|
||||
awsconfig
|
|
@ -28,8 +28,12 @@ WORKDIR /docs
|
|||
|
||||
RUN VERSION=$(cat /docs/VERSION) &&\
|
||||
GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\
|
||||
GITCOMMIT=$(cat /docs/GITCOMMIT) &&\
|
||||
AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\
|
||||
echo "{% set docker_version = \"${VERSION}\" %}{% set docker_branch = \"${GIT_BRANCH}\" %}{% set aws_bucket = \"${AWS_S3_BUCKET}\" %}{% include \"beta_warning.html\" %}" > /docs/theme/mkdocs/version.html
|
||||
sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html &&\
|
||||
sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html &&\
|
||||
sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html &&\
|
||||
sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html
|
||||
|
||||
# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
|
||||
EXPOSE 8000
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
James Turnbull <james@lovedthanlost.net> (@jamtur01)
|
||||
Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)
|
||||
O.S. Tezer <ostezer@gmail.com> (@OSTezer)
|
||||
Fred Lifton <fred.lifton@docker.com> (@fredlf)
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
The source for Docker documentation is here under `sources/` and uses extended
|
||||
Markdown, as implemented by [MkDocs](http://mkdocs.org).
|
||||
|
||||
The HTML files are built and hosted on `https://docs.docker.io`, and update
|
||||
The HTML files are built and hosted on `https://docs.docker.com`, and update
|
||||
automatically after each change to the master or release branch of [Docker on
|
||||
GitHub](https://github.com/dotcloud/docker) thanks to post-commit hooks. The
|
||||
`docs` branch maps to the "latest" documentation and the `master` (unreleased
|
||||
|
@ -21,14 +21,14 @@ In the rare case where your change is not forward-compatible, you may need to
|
|||
base your changes on the `docs` branch.
|
||||
|
||||
Also, now that we have a `docs` branch, we can keep the
|
||||
[http://docs.docker.io](http://docs.docker.io) docs up to date with any bugs
|
||||
[http://docs.docker.com](http://docs.docker.com) docs up to date with any bugs
|
||||
found between Docker code releases.
|
||||
|
||||
**Warning**: When *reading* the docs, the
|
||||
[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may
|
||||
[http://docs-stage.docker.com](http://docs-stage.docker.com) documentation may
|
||||
include features not yet part of any official Docker release. The `beta-docs`
|
||||
site should be used only for understanding bleeding-edge development and
|
||||
`docs.docker.io` (which points to the `docs` branch`) should be used for the
|
||||
`docs.docker.com` (which points to the `docs` branch`) should be used for the
|
||||
latest official release.
|
||||
|
||||
## Contributing
|
||||
|
@ -70,7 +70,7 @@ in their shell:
|
|||
|
||||
### Images
|
||||
|
||||
When you need to add images, try to make them as small as possible (e.g. as
|
||||
When you need to add images, try to make them as small as possible (e.g., as
|
||||
gifs). Usually images should go in the same directory as the `.md` file which
|
||||
references them, or in a subdirectory if one already exists.
|
||||
|
||||
|
|
214 docs/docs-update.py (new executable file)
|
@ -0,0 +1,214 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
#
|
||||
# Sven's quick hack script to update the documentation
|
||||
#
|
||||
# call with:
|
||||
# ./docs/update.py /usr/bin/docker
|
||||
#
|
||||
|
||||
import re
|
||||
from sys import argv
|
||||
import subprocess
|
||||
import os
|
||||
import os.path
|
||||
|
||||
script, docker_cmd = argv
|
||||
|
||||
def print_usage(outtext, docker_cmd, command):
|
||||
help = ""
|
||||
try:
|
||||
#print "RUN ", "".join((docker_cmd, " ", command, " --help"))
|
||||
help = subprocess.check_output("".join((docker_cmd, " ", command, " --help")), stderr=subprocess.STDOUT, shell=True)
|
||||
except subprocess.CalledProcessError, e:
|
||||
help = e.output
|
||||
for l in str(help).strip().split("\n"):
|
||||
l = l.rstrip()
|
||||
if l == '':
|
||||
outtext.write("\n")
|
||||
else:
|
||||
# `docker --help` tells the user the path they called it with
|
||||
l = re.sub(docker_cmd, "docker", l)
|
||||
outtext.write(" "+l+"\n")
|
||||
outtext.write("\n")
|
||||
|
||||
# TODO: look for and complain about any missing commands
|
||||
def update_cli_reference():
|
||||
originalFile = "docs/sources/reference/commandline/cli.md"
|
||||
os.rename(originalFile, originalFile+".bak")
|
||||
|
||||
intext = open(originalFile+".bak", "r")
|
||||
outtext = open(originalFile, "w")
|
||||
|
||||
mode = 'p'
|
||||
space = " "
|
||||
command = ""
|
||||
# 2 mode line-by line parser
|
||||
for line in intext:
|
||||
if mode=='p':
|
||||
# Prose
|
||||
match = re.match("( \s*)Usage: docker ([a-z]+)", line)
|
||||
if match:
|
||||
# the beginning of a Docker command usage block
|
||||
space = match.group(1)
|
||||
command = match.group(2)
|
||||
mode = 'c'
|
||||
else:
|
||||
match = re.match("( \s*)Usage of .*docker.*:", line)
|
||||
if match:
|
||||
# the beginning of the Docker --help usage block
|
||||
space = match.group(1)
|
||||
command = ""
|
||||
mode = 'c'
|
||||
else:
|
||||
outtext.write(line)
|
||||
else:
|
||||
# command usage block
|
||||
match = re.match("("+space+")(.*)|^$", line)
|
||||
#print "CMD ", command
|
||||
if not match:
|
||||
# The end of the current usage block - Shell out to run docker to see the new output
|
||||
print_usage(outtext, docker_cmd, command)
|
||||
outtext.write(line)
|
||||
mode = 'p'
|
||||
if mode == 'c':
|
||||
print_usage(outtext, docker_cmd, command)
|
||||
|
||||
def update_man_pages():
|
||||
cmds = []
|
||||
try:
|
||||
help = subprocess.check_output("".join((docker_cmd)), stderr=subprocess.STDOUT, shell=True)
|
||||
except subprocess.CalledProcessError, e:
|
||||
help = e.output
|
||||
for l in str(help).strip().split("\n"):
|
||||
l = l.rstrip()
|
||||
if l != "":
|
||||
match = re.match(" (.*?) .*", l)
|
||||
if match:
|
||||
cmds.append(match.group(1))
|
||||
|
||||
desc_re = re.compile(r".*# DESCRIPTION(.*?)# (OPTIONS|EXAMPLES?).*", re.MULTILINE|re.DOTALL)
|
||||
example_re = re.compile(r".*# EXAMPLES?(.*)# HISTORY.*", re.MULTILINE|re.DOTALL)
|
||||
history_re = re.compile(r".*# HISTORY(.*)", re.MULTILINE|re.DOTALL)
|
||||
|
||||
for command in cmds:
|
||||
print "COMMAND: "+command
|
||||
history = ""
|
||||
description = ""
|
||||
examples = ""
|
||||
if os.path.isfile("docs/man/docker-"+command+".1.md"):
|
||||
intext = open("docs/man/docker-"+command+".1.md", "r")
|
||||
txt = intext.read()
|
||||
intext.close()
|
||||
match = desc_re.match(txt)
|
||||
if match:
|
||||
description = match.group(1)
|
||||
match = example_re.match(txt)
|
||||
if match:
|
||||
examples = match.group(1)
|
||||
match = history_re.match(txt)
|
||||
if match:
|
||||
history = match.group(1).strip()
|
||||
|
||||
usage = ""
|
||||
usage_description = ""
|
||||
params = {}
|
||||
key_params = {}
|
||||
|
||||
help = ""
|
||||
try:
|
||||
help = subprocess.check_output("".join((docker_cmd, " ", command, " --help")), stderr=subprocess.STDOUT, shell=True)
|
||||
except subprocess.CalledProcessError, e:
|
||||
help = e.output
|
||||
last_key = ""
|
||||
for l in str(help).split("\n"):
|
||||
l = l.rstrip()
|
||||
if l != "":
|
||||
match = re.match("Usage: docker "+command+"(.*)", l)
|
||||
if match:
|
||||
usage = match.group(1).strip()
|
||||
else:
|
||||
#print ">>>>"+l
|
||||
match = re.match(" (-+)(.*) \s+(.*)", l)
|
||||
if match:
|
||||
last_key = match.group(2).rstrip()
|
||||
#print " found "+match.group(1)
|
||||
key_params[last_key] = match.group(1)+last_key
|
||||
params[last_key] = match.group(3)
|
||||
else:
|
||||
if last_key != "":
|
||||
params[last_key] = params[last_key] + "\n" + l
|
||||
else:
|
||||
if usage_description != "":
|
||||
usage_description = usage_description + "\n"
|
||||
usage_description = usage_description + l
|
||||
|
||||
# replace [OPTIONS] with the list of params
|
||||
options = ""
|
||||
match = re.match("\[OPTIONS\](.*)", usage)
|
||||
if match:
|
||||
usage = match.group(1)
|
||||
|
||||
new_usage = ""
|
||||
# TODO: sort without the `-`'s
|
||||
for key in sorted(params.keys(), key=lambda s: s.lower()):
|
||||
# split on commas, remove --?.*=.*, put in *'s mumble
|
||||
ps = []
|
||||
opts = []
|
||||
for k in key_params[key].split(","):
|
||||
#print "......"+k
|
||||
match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip())
|
||||
if match:
|
||||
p = "**"+match.group(1)+match.group(2)+"**"
|
||||
o = "**"+match.group(1)+match.group(2)+"**"
|
||||
if match.group(3):
|
||||
# if ="" then use UPPERCASE(group(2))"
|
||||
val = match.group(3)
|
||||
if val == "\"\"":
|
||||
val = match.group(2).upper()
|
||||
p = p+"[=*"+val+"*]"
|
||||
val = match.group(3)
|
||||
if val in ("true", "false"):
|
||||
params[key] = params[key].rstrip()
|
||||
if not params[key].endswith('.'):
|
||||
params[key] = params[key]+ "."
|
||||
params[key] = params[key] + " The default is *"+val+"*."
|
||||
val = "*true*|*false*"
|
||||
o = o+"="+val
|
||||
ps.append(p)
|
||||
opts.append(o)
|
||||
else:
|
||||
print "nomatch:"+k
|
||||
new_usage = new_usage+ "\n["+"|".join(ps)+"]"
|
||||
options = options + ", ".join(opts) + "\n "+ params[key]+"\n\n"
|
||||
if new_usage != "":
|
||||
new_usage = new_usage.strip() + "\n"
|
||||
usage = new_usage + usage
|
||||
|
||||
|
||||
outtext = open("docs/man/docker-"+command+".1.md", "w")
|
||||
outtext.write("""% DOCKER(1) Docker User Manuals
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
""")
|
||||
outtext.write("docker-"+command+" - "+usage_description+"\n\n")
|
||||
outtext.write("# SYNOPSIS\n**docker "+command+"**\n"+usage+"\n\n")
|
||||
if description != "":
|
||||
outtext.write("# DESCRIPTION"+description)
|
||||
if options == "":
|
||||
options = "There are no available options.\n\n"
|
||||
outtext.write("# OPTIONS\n"+options)
|
||||
if examples != "":
|
||||
outtext.write("# EXAMPLES"+examples)
|
||||
outtext.write("# HISTORY\n")
|
||||
if history != "":
|
||||
outtext.write(history+"\n")
|
||||
recent_history_re = re.compile(".*June 2014.*", re.MULTILINE|re.DOTALL)
|
||||
if not recent_history_re.match(history):
|
||||
outtext.write("June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>\n")
|
||||
outtext.close()
|
||||
|
||||
# main
|
||||
update_cli_reference()
|
||||
update_man_pages()
|
|
@ -93,7 +93,7 @@ or
|
|||
they omit the executable, an ENTRYPOINT must be specified.
|
||||
When used in the shell or exec formats, the CMD instruction sets the command to
|
||||
be executed when running the image.
|
||||
If you use the shell form of of the CMD, the <command> executes in /bin/sh -c:
|
||||
If you use the shell form of the CMD, the <command> executes in /bin/sh -c:
|
||||
**FROM ubuntu**
|
||||
**CMD echo "This is a test." | wc -**
|
||||
If you run <command> without a shell, then you must express the command as a
|
||||
|
@ -203,4 +203,4 @@ or
|
|||
run later, during the next build stage.
|
||||
|
||||
# HISTORY
|
||||
*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.io Dockerfile documentation.
|
||||
*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation.
|
|
@ -51,7 +51,7 @@ saving you from dealing with Pandoc and dependencies on your own computer.
|
|||
|
||||
## Building the Fedora / Pandoc image
|
||||
|
||||
There is a Dockerfile provided in the `docker/contrib/man/md` directory.
|
||||
There is a Dockerfile provided in the `docker/docs/man` directory.
|
||||
|
||||
Using this Dockerfile, create a Docker image tagged `fedora/pandoc`:
|
||||
|
||||
|
@ -61,11 +61,11 @@ Using this Dockerfile, create a Docker image tagged `fedora/pandoc`:
|
|||
|
||||
Once the image is built, run a container using the image with *volumes*:
|
||||
|
||||
docker run -v /<path-to-git-dir>/docker/contrib/man:/pandoc:rw \
|
||||
-w /pandoc -i fedora/pandoc /pandoc/md/md2man-all.sh
|
||||
docker run -v /<path-to-git-dir>/docker/docs/man:/pandoc:rw \
|
||||
-w /pandoc -i fedora/pandoc /pandoc/md2man-all.sh
|
||||
|
||||
The Pandoc Docker container will process the Markdown files and generate
|
||||
the man pages inside the `docker/contrib/man/man1` directory using
|
||||
the man pages inside the `docker/docs/man/man1` directory using
|
||||
Docker volumes. For more information on Docker volumes see the man page for
|
||||
`docker run` and also look at the article [Sharing Directories via Volumes]
|
||||
(http://docs.docker.io/use/working_with_volumes/).
|
||||
(http://docs.docker.com/use/working_with_volumes/).
|
|
@ -1,11 +1,14 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-attach - Attach to a running container
|
||||
|
||||
# SYNOPSIS
|
||||
**docker attach** **--no-stdin**[=*false*] **--sig-proxy**[=*true*] CONTAINER
|
||||
**docker attach**
|
||||
[**--no-stdin**[=*false*]]
|
||||
[**--sig-proxy**[=*true*]]
|
||||
CONTAINER
|
||||
|
||||
# DESCRIPTION
|
||||
If you **docker run** a container in detached mode (**-d**), you can reattach to
|
||||
|
@ -19,11 +22,10 @@ the client.
|
|||
|
||||
# OPTIONS
|
||||
**--no-stdin**=*true*|*false*
|
||||
When set to true, do not attach to stdin. The default is *false*.
|
||||
Do not attach STDIN. The default is *false*.
|
||||
|
||||
**--sig-proxy**=*true*|*false*:
|
||||
When set to true, proxify all received signal to the process (even in non-tty
|
||||
mode). The default is *true*.
|
||||
**--sig-proxy**=*true*|*false*
|
||||
Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied. The default is *true*.
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
|
@ -55,4 +57,5 @@ attach** command:
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,12 +1,17 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-build - Build an image from a Dockerfile source at PATH
|
||||
docker-build - Build a new image from the source code at PATH
|
||||
|
||||
# SYNOPSIS
|
||||
**docker build** [**--no-cache**[=*false*]] [**-q**|**--quiet**[=*false*]]
|
||||
[**--rm**] [**-t**|**--tag**=TAG] PATH | URL | -
|
||||
**docker build**
|
||||
[**--force-rm**[=*false*]]
|
||||
[**--no-cache**[=*false*]]
|
||||
[**-q**|**--quiet**[=*false*]]
|
||||
[**--rm**[=*true*]]
|
||||
[**-t**|**--tag**[=*TAG*]]
|
||||
PATH | URL | -
|
||||
|
||||
# DESCRIPTION
|
||||
This will read the Dockerfile from the directory specified in **PATH**.
|
||||
|
@ -25,22 +30,20 @@ When a Git repository is set as the **URL**, the repository is used
|
|||
as context.
|
||||
|
||||
# OPTIONS
|
||||
|
||||
**-q**, **--quiet**=*true*|*false*
|
||||
When set to true, suppress verbose build output. Default is *false*.
|
||||
|
||||
**--rm**=*true*|*false*
|
||||
When true, remove intermediate containers that are created during the
|
||||
build process. The default is true.
|
||||
|
||||
**-t**, **--tag**=*tag*
|
||||
The name to be applied to the resulting image on successful completion of
|
||||
the build. `tag` in this context means the entire image name including the
|
||||
optional TAG after the ':'.
|
||||
**--force-rm**=*true*|*false*
|
||||
Always remove intermediate containers, even after unsuccessful builds. The default is *false*.
|
||||
|
||||
**--no-cache**=*true*|*false*
|
||||
When set to true, do not use a cache when building the image. The
|
||||
default is *false*.
|
||||
Do not use cache when building the image. The default is *false*.
|
||||
|
||||
**-q**, **--quiet**=*true*|*false*
|
||||
Suppress the verbose output generated by the containers. The default is *false*.
|
||||
|
||||
**--rm**=*true*|*false*
|
||||
Remove intermediate containers after a successful build. The default is *true*.
|
||||
|
||||
**-t**, **--tag**=""
|
||||
Repository name (and optionally a tag) to be applied to the resulting image in case of success
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
|
@ -114,4 +117,5 @@ Note: You can set an arbitrary Git repository via the `git://` schema.
|
|||
|
||||
# HISTORY
|
||||
March 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,24 +1,28 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-commit - Create a new image from the changes to an existing
|
||||
container
|
||||
docker-commit - Create a new image from a container's changes
|
||||
|
||||
# SYNOPSIS
|
||||
**docker commit** **-a**|**--author**[=""] **-m**|**--message**[=""]
|
||||
CONTAINER [REPOSITORY[:TAG]]
|
||||
**docker commit**
|
||||
[**-a**|**--author**[=*AUTHOR*]]
|
||||
[**-m**|**--message**[=*MESSAGE*]]
|
||||
CONTAINER [REPOSITORY[:TAG]]
|
||||
|
||||
# DESCRIPTION
|
||||
Using an existing container's name or ID you can create a new image.
|
||||
|
||||
# OPTIONS
|
||||
**-a, --author**=""
|
||||
Author name. (eg. "John Hannibal Smith <hannibal@a-team.com>"
|
||||
**-a**, **--author**=""
|
||||
Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
|
||||
|
||||
**-m, --message**=""
|
||||
**-m**, **--message**=""
|
||||
Commit message
|
||||
|
||||
**-p, --pause**=true
|
||||
Pause container during commit
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
## Creating a new image from an existing container
|
||||
|
@ -31,4 +35,5 @@ create a new image run docker ps to find the container's ID and then run:
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and in
|
||||
based on docker.com source material and in
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,18 +1,22 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-cp - Copy files/folders from the PATH to the HOSTPATH
|
||||
|
||||
# SYNOPSIS
|
||||
**docker cp** CONTAINER:PATH HOSTPATH
|
||||
**docker cp**
|
||||
CONTAINER:PATH HOSTPATH
|
||||
|
||||
# DESCRIPTION
|
||||
Copy files/folders from the containers filesystem to the host
|
||||
Copy files/folders from a container's filesystem to the host
|
||||
path. Paths are relative to the root of the filesystem. Files
|
||||
can be copied from a running or stopped container.
|
||||
|
||||
# EXAMPLE
|
||||
# OPTIONS
|
||||
There are no available options.
|
||||
|
||||
# EXAMPLES
|
||||
An important shell script file, created in a bash shell, is copied from
|
||||
the exited container to the current dir on the host:
|
||||
|
||||
|
@ -20,5 +24,5 @@ the exited container to the current dir on the host:
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,18 +1,22 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-diff - Inspect changes on a container's filesystem
|
||||
|
||||
# SYNOPSIS
|
||||
**docker diff** CONTAINER
|
||||
**docker diff**
|
||||
CONTAINER
|
||||
|
||||
# DESCRIPTION
|
||||
Inspect changes on a container's filesystem. You can use the full or
|
||||
shortened container ID or the container name set using
|
||||
**docker run --name** option.
|
||||
|
||||
# EXAMPLE
|
||||
# OPTIONS
|
||||
There are no available options.
|
||||
|
||||
# EXAMPLES
|
||||
Inspect the changes on an nginx container:
|
||||
|
||||
# docker diff 1fdfd1f54c1b
|
||||
|
@ -39,6 +43,5 @@ Inspect the changes to on a nginx container:
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
|
||||
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,10 +1,14 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-events - Get real time events from the server
|
||||
|
||||
**docker events** **--since**=""|*epoch-time*
|
||||
# SYNOPSIS
|
||||
**docker events**
|
||||
[**--since**[=*SINCE*]]
|
||||
[**--until**[=*UNTIL*]]
|
||||
|
||||
|
||||
# DESCRIPTION
|
||||
Get event information from the Docker daemon. Information can include historical
|
||||
|
@ -12,8 +16,10 @@ information and real-time information.
|
|||
|
||||
# OPTIONS
|
||||
**--since**=""
|
||||
Show previously created events and then stream. This can be in either
|
||||
seconds since epoch, or date string.
|
||||
Show all events created since timestamp
|
||||
|
||||
**--until**=""
|
||||
Stream events until this timestamp
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
|
@ -43,4 +49,5 @@ Again the output container IDs have been shortened for the purposes of this docu
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,19 +1,22 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-export - Export the contents of a filesystem as a tar archive to
|
||||
STDOUT.
|
||||
docker-export - Export the contents of a filesystem as a tar archive to STDOUT
|
||||
|
||||
# SYNOPSIS
|
||||
**docker export** CONTAINER
|
||||
**docker export**
|
||||
CONTAINER
|
||||
|
||||
# DESCRIPTION
|
||||
Export the contents of a container's filesystem using the full or shortened
|
||||
container ID or container name. The output is exported to STDOUT and can be
|
||||
redirected to a tar file.
|
||||
|
||||
# EXAMPLE
|
||||
# OPTIONS
|
||||
There are no available options.
|
||||
|
||||
# EXAMPLES
|
||||
Export the contents of the container called angry_bell to a tar file
|
||||
called test.tar:
|
||||
|
||||
|
@ -23,4 +26,5 @@ called test.tar:
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,11 +1,13 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-history - Show the history of an image
|
||||
|
||||
# SYNOPSIS
|
||||
**docker history** **--no-trunc**[=*false*] [**-q**|**--quiet**[=*false*]]
|
||||
**docker history**
|
||||
[**--no-trunc**[=*false*]]
|
||||
[**-q**|**--quiet**[=*false*]]
|
||||
IMAGE
|
||||
|
||||
# DESCRIPTION
|
||||
|
@ -13,14 +15,13 @@ docker-history - Show the history of an image
|
|||
Show the history of when and how an image was created.
|
||||
|
||||
# OPTIONS
|
||||
|
||||
**--no-trunc**=*true*|*false*
|
||||
When true don't truncate output. Default is false
|
||||
Don't truncate output. The default is *false*.
|
||||
|
||||
**-q**, **--quiet=*true*|*false*
|
||||
When true only show numeric IDs. Default is false.
|
||||
**-q**, **--quiet**=*true*|*false*
|
||||
Only show numeric IDs. The default is *false*.
|
||||
|
||||
# EXAMPLE
|
||||
# EXAMPLES
|
||||
$ sudo docker history fedora
|
||||
IMAGE CREATED CREATED BY SIZE
|
||||
105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB
|
||||
|
@ -29,4 +30,5 @@ Show the history of when and how an image was created.
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,23 +1,22 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-images - List the images in the local repository
|
||||
docker-images - List images
|
||||
|
||||
# SYNOPSIS
|
||||
**docker images**
|
||||
[**-a**|**--all**=*false*]
|
||||
[**--no-trunc**[=*false*]
|
||||
[**-q**|**--quiet**[=*false*]
|
||||
[**-t**|**--tree**=*false*]
|
||||
[**-v**|**--viz**=*false*]
|
||||
[NAME]
|
||||
[**-a**|**--all**[=*false*]]
|
||||
[**-f**|**--filter**[=*[]*]]
|
||||
[**--no-trunc**[=*false*]]
|
||||
[**-q**|**--quiet**[=*false*]]
|
||||
[NAME]
|
||||
|
||||
# DESCRIPTION
|
||||
This command lists the images stored in the local Docker repository.
|
||||
|
||||
By default, intermediate images, used during builds, are not listed. Some of the
|
||||
output, e.g. image ID, is truncated, for space reasons. However the truncated
|
||||
output, e.g., image ID, is truncated, for space reasons. However the truncated
|
||||
image ID, and often the first few characters, are enough to be used in other
|
||||
Docker commands that use the image ID. The output includes repository, tag, image
|
||||
ID, date created and the virtual size.
|
||||
|
@ -30,26 +29,17 @@ called fedora. It may be tagged with 18, 19, or 20, etc. to manage different
|
|||
versions.
|
||||
|
||||
# OPTIONS
|
||||
|
||||
**-a**, **--all**=*true*|*false*
|
||||
When set to true, also include all intermediate images in the list. The
|
||||
default is false.
|
||||
Show all images (by default filter out the intermediate image layers). The default is *false*.
|
||||
|
||||
**-f**, **--filter**=[]
|
||||
Provide filter values (i.e. 'dangling=true')
|
||||
|
||||
**--no-trunc**=*true*|*false*
|
||||
When set to true, list the full image ID and not the truncated ID. The
|
||||
default is false.
|
||||
Don't truncate output. The default is *false*.
|
||||
|
||||
**-q**, **--quiet**=*true*|*false*
|
||||
When set to true, list the complete image ID as part of the output. The
|
||||
default is false.
|
||||
|
||||
**-t**, **--tree**=*true*|*false*
|
||||
When set to true, list the images in a tree dependency tree (hierarchy)
|
||||
format. The default is false.
|
||||
|
||||
**-v**, **--viz**=*true*|*false*
|
||||
When set to true, list the graph in graphviz format. The default is
|
||||
*false*.
|
||||
Only show numeric IDs. The default is *false*.
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
|
@ -96,4 +86,5 @@ tools.
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,16 +1,19 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-import - Create an empty filesystem image and import the contents
|
||||
of the tarball into it.
|
||||
docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
|
||||
|
||||
# SYNOPSIS
|
||||
**docker import** URL|- [REPOSITORY[:TAG]]
|
||||
**docker import**
|
||||
URL|- [REPOSITORY[:TAG]]
|
||||
|
||||
# DESCRIPTION
|
||||
Create a new filesystem image from the contents of a tarball (.tar,
|
||||
.tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
|
||||
Create a new filesystem image from the contents of a tarball (`.tar`,
|
||||
`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it.
|
||||
|
||||
# OPTIONS
|
||||
There are no available options.
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
|
@ -36,4 +39,5 @@ Import to docker via pipe and stdin:
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,12 +1,13 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-info - Display system wide information
|
||||
docker-info - Display system-wide information
|
||||
|
||||
# SYNOPSIS
|
||||
**docker info**
|
||||
|
||||
|
||||
# DESCRIPTION
|
||||
This command displays system wide information regarding the Docker installation.
|
||||
Information displayed includes the number of containers and images, pool name,
|
||||
|
@ -43,4 +44,5 @@ Here is a sample output:
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,12 +1,13 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-inspect - Return low-level information on a container/image
|
||||
docker-inspect - Return low-level information on a container or image
|
||||
|
||||
# SYNOPSIS
|
||||
**docker inspect** [**-f**|**--format**="" CONTAINER|IMAGE
|
||||
[CONTAINER|IMAGE...]
|
||||
**docker inspect**
|
||||
[**-f**|**--format**[=*FORMAT*]]
|
||||
CONTAINER|IMAGE [CONTAINER|IMAGE...]
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
|
@ -17,8 +18,7 @@ each result.
|
|||
|
||||
# OPTIONS
|
||||
**-f**, **--format**=""
|
||||
The text/template package of Go describes all the details of the
|
||||
format. See examples section
|
||||
Format the output using the given go template.
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
|
@ -142,7 +142,7 @@ output:
|
|||
|
||||
## Getting information on an image
|
||||
|
||||
Use an image's ID or name (e.g. repository/name[:tag]) to get information
|
||||
Use an image's ID or name (e.g., repository/name[:tag]) to get information
|
||||
on it.
|
||||
|
||||
# docker inspect 58394af37342
|
||||
|
@ -224,6 +224,6 @@ Use an image's ID or name (e.g. repository/name[:tag]) to get information
|
|||
}]
|
||||
|
||||
# HISTORY
|
||||
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
24 docs/man/docker-kill.1.md (new file)
|
@ -0,0 +1,24 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-kill - Kill a running container using SIGKILL or a specified signal
|
||||
|
||||
# SYNOPSIS
|
||||
**docker kill**
|
||||
[**-s**|**--signal**[=*"KILL"*]]
|
||||
CONTAINER [CONTAINER...]
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
The main process inside each container specified will be sent SIGKILL,
|
||||
or any signal specified with option --signal.
|
||||
|
||||
# OPTIONS
|
||||
**-s**, **--signal**="KILL"
|
||||
Signal to send to the container
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,11 +1,13 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-load - Load an image from a tar archive on STDIN
|
||||
|
||||
# SYNOPSIS
|
||||
**docker load** **--input**=""
|
||||
**docker load**
|
||||
[**-i**|**--input**[=*INPUT*]]
|
||||
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
|
@ -13,11 +15,10 @@ Loads a tarred repository from a file or the standard input stream.
|
|||
Restores both images and tags.
|
||||
|
||||
# OPTIONS
|
||||
|
||||
**-i**, **--input**=""
|
||||
Read from a tar archive file, instead of STDIN
|
||||
|
||||
# EXAMPLE
|
||||
# EXAMPLES
|
||||
|
||||
$ sudo docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
|
||||
|
@ -33,4 +34,5 @@ Restores both images and tags.
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,12 +1,15 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-login - Register or Login to a docker registry server.
|
||||
docker-login - Register or log in to a Docker registry server, if no server is specified "https://index.docker.io/v1/" is the default.
|
||||
|
||||
# SYNOPSIS
|
||||
**docker login** [**-e**|**-email**=""] [**-p**|**--password**=""]
|
||||
[**-u**|**--username**=""] [SERVER]
|
||||
**docker login**
|
||||
[**-e**|**--email**[=*EMAIL*]]
|
||||
[**-p**|**--password**[=*PASSWORD*]]
|
||||
[**-u**|**--username**[=*USERNAME*]]
|
||||
[SERVER]
|
||||
|
||||
# DESCRIPTION
|
||||
Register or Login to a docker registry server, if no server is
|
||||
|
@ -15,7 +18,7 @@ login to a private registry you can specify this by adding the server name.
|
|||
|
||||
# OPTIONS
|
||||
**-e**, **--email**=""
|
||||
Email address
|
||||
Email
|
||||
|
||||
**-p**, **--password**=""
|
||||
Password
|
||||
|
@ -23,7 +26,7 @@ login to a private registry you can specify this by adding the server name.
|
|||
**-u**, **--username**=""
|
||||
Username
|
||||
|
||||
# EXAMPLE
|
||||
# EXAMPLES
|
||||
|
||||
## Login to a local registry
|
||||
|
||||
|
@ -31,5 +34,5 @@ login to a private registry you can specify this by adding the server name.
|
|||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
|
@ -1,11 +1,14 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% William Henry
|
||||
% APRIL 2014
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-logs - Fetch the logs of a container
|
||||
|
||||
# SYNOPSIS
|
||||
**docker logs** **--follow**[=*false*] CONTAINER
|
||||
**docker logs**
|
||||
[**-f**|**--follow**[=*false*]]
|
||||
[**-t**|**--timestamps**[=*false*]]
|
||||
CONTAINER
|
||||
|
||||
# DESCRIPTION
|
||||
The **docker logs** command batch-retrieves whatever logs are present for
|
||||
|
@ -18,9 +21,13 @@ The **docker logs --follow** command combines commands **docker logs** and
|
|||
then continue streaming new output from the container’s stdout and stderr.
|
||||
|
||||
# OPTIONS
|
||||
**-f, --follow**=*true*|*false*
|
||||
When *true*, follow log output. The default is false.
|
||||
**-f**, **--follow**=*true*|*false*
|
||||
Follow log output. The default is *false*.
|
||||
|
||||
**-t**, **--timestamps**=*true*|*false*
|
||||
Show timestamps. The default is *false*.
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
based on docker.io source material and internal work.
|
||||
based on docker.com source material and internal work.
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
15 docs/man/docker-pause.1.md (new file)
|
@ -0,0 +1,15 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-pause - Pause all processes within a container
|
||||
|
||||
# SYNOPSIS
|
||||
**docker pause**
|
||||
CONTAINER
|
||||
|
||||
# OPTIONS
|
||||
There are no available options.
|
||||
|
||||
# HISTORY
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
16 docs/man/docker-port.1.md (new file)
|
@ -0,0 +1,16 @@
|
|||
% DOCKER(1) Docker User Manuals
|
||||
% Docker Community
|
||||
% JUNE 2014
|
||||
# NAME
|
||||
docker-port - Lookup the public-facing port that is NAT-ed to PRIVATE_PORT
|
||||
|
||||
# SYNOPSIS
|
||||
**docker port**
|
||||
CONTAINER PRIVATE_PORT
|
||||
|
||||
# OPTIONS
|
||||
There are no available options.
|
||||
|
||||
# HISTORY
|
||||
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
|
||||
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
|
Some files were not shown because too many files have changed in this diff.