Merge remote-tracking branch 'upstream/master'
commit 7cd64f0344
395 changed files with 10506 additions and 55189 deletions
3 .gitignore (vendored)
@@ -23,3 +23,6 @@ bundles/
 vendor/pkg/
 pyenv
 Vagrantfile
+docs/AWS_S3_BUCKET
+docs/GIT_BRANCH
+docs/VERSION
.travis.yml
@@ -10,11 +10,9 @@ install: true

 before_script:
 - env | sort
-- sudo pip install -r docs/requirements.txt

 script:
 - hack/make.sh validate-dco
 - hack/make.sh validate-gofmt
-- make -sC docs SPHINXOPTS=-qW docs man

 # vim:set sw=2 ts=2:
2 AUTHORS
@@ -20,6 +20,7 @@ Andrew Munsell <andrew@wizardapps.net>
 Andrews Medina <andrewsmedina@gmail.com>
+Andy Chambers <anchambers@paypal.com>
 andy diller <dillera@gmail.com>
 Andy Goldstein <agoldste@redhat.com>
 Andy Rothfusz <github@metaliveblog.com>
 Andy Smith <github@anarkystic.com>
 Anthony Bishopric <git@anthonybishopric.com>
@@ -44,6 +45,7 @@ Brian Olsen <brian@maven-group.org>
 Brian Shumate <brian@couchbase.com>
+Briehan Lombaard <briehan.lombaard@gmail.com>
 Bruno Bigras <bigras.bruno@gmail.com>
 Bryan Matsuo <bryan.matsuo@gmail.com>
 Caleb Spare <cespare@gmail.com>
 Calen Pennington <cale@edx.org>
 Carl X. Su <bcbcarl@gmail.com>
12 CHANGELOG.md
@@ -1,5 +1,17 @@
 # Changelog

+## 0.11.0 (2014-05-07)
+
+#### Notable features since 0.10.0
+
+* SELinux support for mount and process labels
+* Linked containers can be accessed by hostname
+* Use the new `--net` flag to allow advanced network configuration such as host networking, so that containers can use the host's network interfaces
+* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon
+* Logs can now be returned with an optional timestamp
+* Docker now works with registries that support SHA-512
+* Multiple registry endpoints are supported to allow registry mirrors
+
 ## 0.10.0 (2014-04-08)

 #### Builder
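The ping endpoint called out above is wired up later in this diff (see the api/server/server.go hunks: a `ping` handler that writes "OK", registered at `GET /_ping`). A minimal healthcheck sketch in Go, assuming the daemon is listening on the usual default unix socket path:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	// Dial the daemon's unix socket directly; the URL host below is a
	// dummy since the custom Dial ignores it. The socket path is an
	// assumption about your setup.
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
	resp, err := client.Get("http://docker/_ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%d %s\n", resp.StatusCode, body) // expect: 200 OK
}
```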
CONTRIBUTING.md
@@ -82,7 +82,7 @@ editors have plugins that do this automatically, and there's also a git
 pre-commit hook:

 ```
-curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
+curl -o .git/hooks/pre-commit https://raw.githubusercontent.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
 ```

 Pull request descriptions should be as clear as possible and include a
@@ -90,6 +90,10 @@ reference to all the issues that they address.

 Pull requests must not contain commits from other users or branches.

+Commit messages must start with a capitalized and short summary (max. 50
+chars) written in the imperative, followed by an optional, more detailed
+explanatory text which is separated from the summary by an empty line.
+
 Code review comments may be added to your pull request. Discuss, then make the
 suggested modifications and push additional commits to your feature branch. Be
 sure to post a comment after pushing. The new commits will show up in the pull
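For illustration, an invented commit message that follows the rules added above (imperative summary under 50 characters, blank line, explanatory body):

```
Add ping endpoint to the remote API

Expose GET /_ping so that monitoring tools can check that the
daemon is up without having to list containers.
```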
12 Makefile
@@ -1,4 +1,4 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli validate
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate

 # to allow `make BINDDIR=. shell` or `make BINDDIR= test`
 BINDDIR := bundles
@@ -10,7 +10,7 @@ DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")

-DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
+DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
 # to allow `make DOCSDIR=docs docs-shell`
 DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET
@@ -35,7 +35,10 @@ docs-release: docs-build
	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh

 test: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-unit test-integration test-integration-cli

+test-unit: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
+
 test-integration: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
@@ -53,6 +56,9 @@ build: bundles
	docker build -t "$(DOCKER_IMAGE)" .

 docs-build:
+	cp ./VERSION docs/VERSION
+	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
+	echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
	docker build -t "$(DOCKER_DOCS_IMAGE)" docs

 bundles:
README.md
@@ -18,7 +18,7 @@ It benefits directly from the experience accumulated over several years
 of large-scale operation and support of hundreds of thousands of
 applications and databases.

-
+

 ## Better than VMs
2 VERSION
@@ -1 +1 @@
-0.10.0-dev
+0.11.0-dev
api/client/cli.go
@@ -65,8 +65,13 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC
	var (
		isTerminal = false
		terminalFd uintptr
+		scheme     = "http"
	)

+	if tlsConfig != nil {
+		scheme = "https"
+	}
+
	if in != nil {
		if file, ok := in.(*os.File); ok {
			terminalFd = file.Fd()
@@ -86,6 +91,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC
		isTerminal: isTerminal,
		terminalFd: terminalFd,
		tlsConfig:  tlsConfig,
+		scheme:     scheme,
	}
 }

@@ -99,4 +105,5 @@ type DockerCli struct {
	isTerminal bool
	terminalFd uintptr
	tlsConfig  *tls.Config
+	scheme     string
 }
api/client/commands.go
@@ -1583,6 +1583,7 @@ func (cli *DockerCli) CmdDiff(args ...string) error {
 func (cli *DockerCli) CmdLogs(args ...string) error {
	cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
	follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
+	times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
@@ -1603,14 +1604,16 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
	}

	v := url.Values{}
-	v.Set("logs", "1")
	v.Set("stdout", "1")
	v.Set("stderr", "1")
+	if *times {
+		v.Set("timestamps", "1")
+	}
	if *follow && container.State.Running {
-		v.Set("stream", "1")
+		v.Set("follow", "1")
	}

-	if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
+	if err := cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
		return err
	}
	return nil
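After this change `docker logs` no longer hijacks the attach endpoint; it issues a plain GET against a dedicated logs route. A sketch of the query it builds, with an invented container name:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("stdout", "1")
	v.Set("stderr", "1")
	v.Set("timestamps", "1") // equivalent of `docker logs -t`
	v.Set("follow", "1")     // equivalent of `docker logs -f` on a running container

	// "web" is a made-up container name.
	fmt.Println("GET /containers/web/logs?" + v.Encode())
}
```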
133 api/client/hijack.go (new file)
@@ -0,0 +1,133 @@
+package client
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/dotcloud/docker/api"
+	"github.com/dotcloud/docker/dockerversion"
+	"github.com/dotcloud/docker/pkg/term"
+	"github.com/dotcloud/docker/utils"
+)
+
+func (cli *DockerCli) dial() (net.Conn, error) {
+	if cli.tlsConfig != nil && cli.proto != "unix" {
+		return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
+	}
+	return net.Dial(cli.proto, cli.addr)
+}
+
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
+	defer func() {
+		if started != nil {
+			close(started)
+		}
+	}()
+
+	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+	req.Header.Set("Content-Type", "plain/text")
+	req.Host = cli.addr
+
+	dial, err := cli.dial()
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+		}
+		return err
+	}
+	clientconn := httputil.NewClientConn(dial, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	clientconn.Do(req)
+
+	rwc, br := clientconn.Hijack()
+	defer rwc.Close()
+
+	if started != nil {
+		started <- rwc
+	}
+
+	var receiveStdout chan error
+
+	var oldState *term.State
+
+	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
+		oldState, err = term.SetRawTerminal(cli.terminalFd)
+		if err != nil {
+			return err
+		}
+		defer term.RestoreTerminal(cli.terminalFd, oldState)
+	}
+
+	if stdout != nil || stderr != nil {
+		receiveStdout = utils.Go(func() (err error) {
+			defer func() {
+				if in != nil {
+					if setRawTerminal && cli.isTerminal {
+						term.RestoreTerminal(cli.terminalFd, oldState)
+					}
+					// For some reason this Close call blocks on darwin..
+					// As the client exits right after, simply discard the close
+					// until we find a better solution.
+					if runtime.GOOS != "darwin" {
+						in.Close()
+					}
+				}
+			}()
+
+			// When TTY is ON, use regular copy
+			if setRawTerminal {
+				_, err = io.Copy(stdout, br)
+			} else {
+				_, err = utils.StdCopy(stdout, stderr, br)
+			}
+			utils.Debugf("[hijack] End of stdout")
+			return err
+		})
+	}
+
+	sendStdin := utils.Go(func() error {
+		if in != nil {
+			io.Copy(rwc, in)
+			utils.Debugf("[hijack] End of stdin")
+		}
+		if tcpc, ok := rwc.(*net.TCPConn); ok {
+			if err := tcpc.CloseWrite(); err != nil {
+				utils.Debugf("Couldn't send EOF: %s\n", err)
+			}
+		} else if unixc, ok := rwc.(*net.UnixConn); ok {
+			if err := unixc.CloseWrite(); err != nil {
+				utils.Debugf("Couldn't send EOF: %s\n", err)
+			}
+		}
+		// Discard errors due to pipe interruption
+		return nil
+	})
+
+	if stdout != nil || stderr != nil {
+		if err := <-receiveStdout; err != nil {
+			utils.Debugf("Error receiveStdout: %s", err)
+			return err
+		}
+	}
+
+	if !cli.isTerminal {
+		if err := <-sendStdin; err != nil {
+			utils.Debugf("Error sendStdin: %s", err)
+			return err
+		}
+	}
+	return nil
+}
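When no TTY is allocated, hijack (and streamHelper below) hands the stream to utils.StdCopy, which demultiplexes stdout and stderr from a single connection. A sketch of a demultiplexer for that framing, assuming the header layout docker's stdcopy used at the time (1 stream byte, 3 padding bytes, 4-byte big-endian payload length):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// demux splits a multiplexed stream into stdout and stderr frames.
// Header layout is an assumption documented above, not taken from this diff.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	hdr := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, hdr); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		n := binary.BigEndian.Uint32(hdr[4:8])
		dst := stdout
		if hdr[0] == 2 { // 2 marks stderr frames
			dst = stderr
		}
		if _, err := io.CopyN(dst, r, int64(n)); err != nil {
			return err
		}
	}
}

func main() {
	// Build two frames by hand: one stdout ("hi\n"), one stderr ("oops\n").
	var buf bytes.Buffer
	for _, f := range []struct {
		stream byte
		data   string
	}{{1, "hi\n"}, {2, "oops\n"}} {
		hdr := make([]byte, 8)
		hdr[0] = f.stream
		binary.BigEndian.PutUint32(hdr[4:8], uint32(len(f.data)))
		buf.Write(hdr)
		buf.WriteString(f.data)
	}
	if err := demux(&buf, os.Stdout, os.Stderr); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```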
api/client/utils.go
@@ -2,7 +2,6 @@ package client

 import (
	"bytes"
-	"crypto/tls"
	"encoding/base64"
	"encoding/json"
	"errors"
@@ -11,12 +10,9 @@ import (
	"io/ioutil"
	"net"
	"net/http"
-	"net/http/httputil"
	"net/url"
-	"os"
	gosignal "os/signal"
	"regexp"
-	goruntime "runtime"
	"strconv"
	"strings"
	"syscall"
@@ -33,11 +29,14 @@ var (
	ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
 )

-func (cli *DockerCli) dial() (net.Conn, error) {
-	if cli.tlsConfig != nil && cli.proto != "unix" {
-		return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
-	}
-	return net.Dial(cli.proto, cli.addr)
-}
+func (cli *DockerCli) HTTPClient() *http.Client {
+	tr := &http.Transport{
+		TLSClientConfig: cli.tlsConfig,
+		Dial: func(network, addr string) (net.Conn, error) {
+			return net.Dial(cli.proto, cli.addr)
+		},
+	}
+	return &http.Client{Transport: tr}
+}

 func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
@@ -57,9 +56,6 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
		}
	}
 }
-	// fixme: refactor client to support redirect
-	re := regexp.MustCompile("/+")
-	path = re.ReplaceAllString(path, "/")

	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
	if err != nil {
@@ -86,28 +82,20 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
		}
	}
	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Host = cli.addr
+	req.URL.Host = cli.addr
+	req.URL.Scheme = cli.scheme
	if data != nil {
		req.Header.Set("Content-Type", "application/json")
	} else if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}
-	dial, err := cli.dial()
+	resp, err := cli.HTTPClient().Do(req)
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return nil, -1, ErrConnectionRefused
		}
		return nil, -1, err
	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	resp, err := clientconn.Do(req)
-	if err != nil {
-		clientconn.Close()
-		if strings.Contains(err.Error(), "connection refused") {
-			return nil, -1, ErrConnectionRefused
-		}
-		return nil, -1, err
-	}

	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		body, err := ioutil.ReadAll(resp.Body)
@@ -119,31 +107,25 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
		}
		return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
	}

-	wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
-		if resp != nil && resp.Body != nil {
-			resp.Body.Close()
-		}
-		return clientconn.Close()
-	})
-	return wrapper, resp.StatusCode, nil
+	return resp.Body, resp.StatusCode, nil
 }

 func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
+	return cli.streamHelper(method, path, true, in, out, nil, headers)
+}
+
+func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
	if (method == "POST" || method == "PUT") && in == nil {
		in = bytes.NewReader([]byte{})
	}

	// fixme: refactor client to support redirect
	re := regexp.MustCompile("/+")
	path = re.ReplaceAllString(path, "/")

-	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in)
+	req, err := http.NewRequest(method, fmt.Sprintf("http://v%s%s", api.APIVERSION, path), in)
	if err != nil {
		return err
	}
	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Host = cli.addr
+	req.URL.Host = cli.addr
+	req.URL.Scheme = cli.scheme
	if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}
@@ -153,17 +135,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h
			req.Header[k] = v
		}
	}

-	dial, err := cli.dial()
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	resp, err := clientconn.Do(req)
-	defer clientconn.Close()
+	resp, err := cli.HTTPClient().Do(req)
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
@@ -184,126 +156,21 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h
	}

	if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
-		return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
+		return utils.DisplayJSONMessagesStream(resp.Body, stdout, cli.terminalFd, cli.isTerminal)
	}
-	if _, err := io.Copy(out, resp.Body); err != nil {
+	if stdout != nil || stderr != nil {
+		// When TTY is ON, use regular copy
+		if setRawTerminal {
+			_, err = io.Copy(stdout, resp.Body)
+		} else {
+			_, err = utils.StdCopy(stdout, stderr, resp.Body)
+		}
+		utils.Debugf("[stream] End of stdout")
		return err
	}
	return nil
 }

-func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
-	defer func() {
-		if started != nil {
-			close(started)
-		}
-	}()
-	// fixme: refactor client to support redirect
-	re := regexp.MustCompile("/+")
-	path = re.ReplaceAllString(path, "/")
-
-	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
-	if err != nil {
-		return err
-	}
-	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Header.Set("Content-Type", "plain/text")
-	req.Host = cli.addr
-
-	dial, err := cli.dial()
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	defer clientconn.Close()
-
-	// Server hijacks the connection, error 'connection closed' expected
-	clientconn.Do(req)
-
-	rwc, br := clientconn.Hijack()
-	defer rwc.Close()
-
-	if started != nil {
-		started <- rwc
-	}
-
-	var receiveStdout chan error
-
-	var oldState *term.State
-
-	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
-		oldState, err = term.SetRawTerminal(cli.terminalFd)
-		if err != nil {
-			return err
-		}
-		defer term.RestoreTerminal(cli.terminalFd, oldState)
-	}
-
-	if stdout != nil || stderr != nil {
-		receiveStdout = utils.Go(func() (err error) {
-			defer func() {
-				if in != nil {
-					if setRawTerminal && cli.isTerminal {
-						term.RestoreTerminal(cli.terminalFd, oldState)
-					}
-					// For some reason this Close call blocks on darwin..
-					// As the client exits right after, simply discard the close
-					// until we find a better solution.
-					if goruntime.GOOS != "darwin" {
-						in.Close()
-					}
-				}
-			}()
-
-			// When TTY is ON, use regular copy
-			if setRawTerminal {
-				_, err = io.Copy(stdout, br)
-			} else {
-				_, err = utils.StdCopy(stdout, stderr, br)
-			}
-			utils.Debugf("[hijack] End of stdout")
-			return err
-		})
-	}
-
-	sendStdin := utils.Go(func() error {
-		if in != nil {
-			io.Copy(rwc, in)
-			utils.Debugf("[hijack] End of stdin")
-		}
-		if tcpc, ok := rwc.(*net.TCPConn); ok {
-			if err := tcpc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
-			}
-		} else if unixc, ok := rwc.(*net.UnixConn); ok {
-			if err := unixc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
-			}
-		}
-		// Discard errors due to pipe interruption
-		return nil
-	})
-
-	if stdout != nil || stderr != nil {
-		if err := <-receiveStdout; err != nil {
-			utils.Debugf("Error receiveStdout: %s", err)
-			return err
-		}
-	}
-
-	if !cli.isTerminal {
-		if err := <-sendStdin; err != nil {
-			utils.Debugf("Error sendStdin: %s", err)
-			return err
-		}
-	}
-	return nil
-}

 func (cli *DockerCli) resizeTty(id string) {
	height, width := cli.getTtySize()
	if height == 0 && width == 0 {
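The new HTTPClient() helper above replaces the hand-rolled httputil.ClientConn plumbing: a custom Dial in the Transport ignores the URL's host and always connects to the daemon's proto/addr pair, so one client works for tcp, unix, and (with tlsConfig set) TLS endpoints alike. A standalone sketch of the same idea, with illustrative parameters:

```go
package main

import (
	"net"
	"net/http"
)

// newClient mirrors the HTTPClient() pattern introduced in this diff.
// proto and addr are illustrative parameters, not part of the real API.
func newClient(proto, addr string) *http.Client {
	tr := &http.Transport{
		// The dialer discards the URL's network/addr and always
		// reaches the configured daemon endpoint.
		Dial: func(network, a string) (net.Conn, error) {
			return net.Dial(proto, addr)
		},
	}
	return &http.Client{Transport: tr}
}

func main() {
	_ = newClient("unix", "/var/run/docker.sock")
}
```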
api/server/server.go
@@ -3,7 +3,6 @@ package server
 import (
	"bufio"
	"bytes"
-	"code.google.com/p/go.net/websocket"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
@@ -21,6 +20,8 @@ import (
	"strings"
	"syscall"

+	"code.google.com/p/go.net/websocket"
+
	"github.com/dotcloud/docker/api"
	"github.com/dotcloud/docker/engine"
	"github.com/dotcloud/docker/pkg/listenbuffer"
@@ -328,6 +329,48 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo
	return nil
 }

+func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	var (
+		job    = eng.Job("inspect", vars["name"], "container")
+		c, err = job.Stdout.AddEnv()
+	)
+	if err != nil {
+		return err
+	}
+	if err = job.Run(); err != nil {
+		return err
+	}
+
+	var outStream, errStream io.Writer
+	outStream = utils.NewWriteFlusher(w)
+
+	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
+		errStream = utils.NewStdWriter(outStream, utils.Stderr)
+		outStream = utils.NewStdWriter(outStream, utils.Stdout)
+	} else {
+		errStream = outStream
+	}
+
+	job = eng.Job("logs", vars["name"])
+	job.Setenv("follow", r.Form.Get("follow"))
+	job.Setenv("stdout", r.Form.Get("stdout"))
+	job.Setenv("stderr", r.Form.Get("stderr"))
+	job.Setenv("timestamps", r.Form.Get("timestamps"))
+	job.Stdout.Add(outStream)
+	job.Stderr.Set(errStream)
+	if err := job.Run(); err != nil {
+		fmt.Fprintf(outStream, "Error: %s\n", err)
+	}
+	return nil
+}
+
 func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
@@ -934,6 +977,11 @@ func writeCorsHeaders(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
 }

+func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	_, err := w.Write([]byte{'O', 'K'})
+	return err
+}
+
 func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// log the request
@@ -1002,6 +1050,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
	}
	m := map[string]map[string]HttpApiFunc{
		"GET": {
+			"/_ping":   ping,
			"/events":  getEvents,
			"/info":    getInfo,
			"/version": getVersion,
@@ -1017,6 +1066,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
			"/containers/{name:.*}/changes":   getContainersChanges,
			"/containers/{name:.*}/json":      getContainersByName,
			"/containers/{name:.*}/top":       getContainersTop,
+			"/containers/{name:.*}/logs":      getContainersLogs,
			"/containers/{name:.*}/attach/ws": wsContainersAttach,
		},
		"POST": {
@@ -1224,6 +1274,9 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
 // ServeApi loops through all of the protocols sent in to docker and spawns
 // off a go routine to setup a serving http.Server for each.
 func ServeApi(job *engine.Job) engine.Status {
+	if len(job.Args) == 0 {
+		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
@@ -1236,6 +1289,9 @@ func ServeApi(job *engine.Job) engine.Status {

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
+		if len(protoAddrParts) != 2 {
+			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+		}
		go func() {
			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
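The ping handler added above is trivial to exercise with net/http/httptest, in the same spirit as the unit tests in the next file. A sketch (the test name and inline handler are invented for illustration):

```go
package server

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestPingSketch(t *testing.T) {
	// Stand-in for the real ping handler: it just writes "OK".
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte{'O', 'K'})
	}
	rec := httptest.NewRecorder()
	req, err := http.NewRequest("GET", "/_ping", nil)
	if err != nil {
		t.Fatal(err)
	}
	handler(rec, req)
	if rec.Code != http.StatusOK || rec.Body.String() != "OK" {
		t.Fatalf("unexpected ping response: %d %q", rec.Code, rec.Body.String())
	}
}
```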
@@ -6,11 +6,9 @@ import (
	"fmt"
	"github.com/dotcloud/docker/api"
	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/utils"
	"io"
	"net/http"
	"net/http/httptest"
-	"os"
	"testing"
 )

@@ -59,8 +57,7 @@ func TesthttpError(t *testing.T) {
 }

 func TestGetVersion(t *testing.T) {
-	eng := tmpEngine(t)
-	defer rmEngine(eng)
+	eng := engine.New()
	var called bool
	eng.Register("version", func(job *engine.Job) engine.Status {
		called = true
@@ -89,8 +86,7 @@ func TestGetVersion(t *testing.T) {
 }

 func TestGetInfo(t *testing.T) {
-	eng := tmpEngine(t)
-	defer rmEngine(eng)
+	eng := engine.New()
	var called bool
	eng.Register("info", func(job *engine.Job) engine.Status {
		called = true
@@ -130,22 +126,6 @@ func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *
	return r
 }

-func tmpEngine(t *testing.T) *engine.Engine {
-	tmp, err := utils.TestDirectory("")
-	if err != nil {
-		t.Fatal(err)
-	}
-	eng, err := engine.New(tmp)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return eng
-}
-
-func rmEngine(eng *engine.Engine) {
-	os.RemoveAll(eng.Root())
-}
-
 func readEnv(src io.Reader, t *testing.T) *engine.Env {
	out := engine.NewOutput()
	v, err := out.AddEnv()
builtins/builtins.go
@@ -4,17 +4,23 @@ import (
	api "github.com/dotcloud/docker/api/server"
	"github.com/dotcloud/docker/daemon/networkdriver/bridge"
	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/registry"
	"github.com/dotcloud/docker/server"
 )

-func Register(eng *engine.Engine) {
-	daemon(eng)
-	remote(eng)
+func Register(eng *engine.Engine) error {
+	if err := daemon(eng); err != nil {
+		return err
+	}
+	if err := remote(eng); err != nil {
+		return err
+	}
+	return registry.NewService().Install(eng)
 }

 // remote: a RESTful api for cross-docker communication
-func remote(eng *engine.Engine) {
-	eng.Register("serveapi", api.ServeApi)
+func remote(eng *engine.Engine) error {
+	return eng.Register("serveapi", api.ServeApi)
 }

 // daemon: a default execution and storage backend for Docker on Linux,
@@ -32,7 +38,9 @@ func remote(eng *engine.Engine) {
 //
 // These components should be broken off into plugins of their own.
 //
-func daemon(eng *engine.Engine) {
-	eng.Register("initserver", server.InitServer)
-	eng.Register("init_networkdriver", bridge.InitDriver)
+func daemon(eng *engine.Engine) error {
+	if err := eng.Register("initserver", server.InitServer); err != nil {
+		return err
+	}
+	return eng.Register("init_networkdriver", bridge.InitDriver)
 }
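With Register now returning an error instead of swallowing registration failures, a caller can fail fast. A hypothetical caller sketch (the real daemon wiring lives elsewhere; the fatal exit is illustrative):

```go
package main

import (
	"log"

	"github.com/dotcloud/docker/builtins"
	"github.com/dotcloud/docker/engine"
)

func main() {
	// engine.New() takes no arguments after this merge
	// (see the unit-test hunks above).
	eng := engine.New()
	if err := builtins.Register(eng); err != nil {
		log.Fatal(err)
	}
}
```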
contrib/check-config.sh
@@ -4,7 +4,13 @@ set -e
 # bits of this were adapted from lxc-checkconfig
 # see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in

-: ${CONFIG:=/proc/config.gz}
+possibleConfigs=(
+	'/proc/config.gz'
+	"/boot/config-$(uname -r)"
+	"/usr/src/linux-$(uname -r)/.config"
+	'/usr/src/linux/.config'
+)
+: ${CONFIG:="${possibleConfigs[0]}"}

 if ! command -v zgrep &> /dev/null; then
	zgrep() {
@@ -74,11 +80,7 @@ check_flags() {

 if [ ! -e "$CONFIG" ]; then
	wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
-	for tryConfig in \
-		'/proc/config.gz' \
-		"/boot/config-$(uname -r)" \
-		'/usr/src/linux/.config' \
-	; do
+	for tryConfig in "${possibleConfigs[@]}"; do
		if [ -e "$tryConfig" ]; then
			CONFIG="$tryConfig"
			break
docs/man/docker-build.1.md
@@ -5,8 +5,8 @@
 docker-build - Build a container image from a Dockerfile source at PATH

 # SYNOPSIS
-**docker build** [**--no-cache**[=*false*] [**-q**|**--quiet**[=*false*]
-[**-rm**] [**-t**|**--tag**=*tag*] PATH | URL | -
+**docker build** [**--no-cache**[=*false*]] [**-q**|**--quiet**[=*false*]]
+[**--rm**] [**-t**|**--tag**=TAG] PATH | URL | -

 # DESCRIPTION
 This will read the Dockerfile from the directory specified in **PATH**.
docs/man/docker.1.md
@@ -23,7 +23,7 @@ its own man page which explains usage and arguments.
 To see the man page for a command run **man docker <command>**.

 # OPTIONS
-**-D**=*ture*|*false*
+**-D**=*true*|*false*
   Enable debug mode. Default is false.

 **-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or
@@ -73,6 +73,9 @@ port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used.
 **-v**=*true*|*false*
   Print version information and quit. Default is false.

+**--selinux-enabled**=*true*|*false*
+  Enable SELinux support. Default is false.
+
 # COMMANDS
 **docker-attach(1)**
   Attach to a running container
contrib/mkimage-debootstrap.sh
@@ -43,7 +43,7 @@ usage() {
 debianStable=wheezy
 debianUnstable=sid
 # this should match the name found at http://releases.ubuntu.com/
-ubuntuLatestLTS=precise
+ubuntuLatestLTS=trusty
 # this should match the name found at http://releases.tanglu.org/
 tangluLatest=aequorea
@@ -1 +0,0 @@
-Gurjeet Singh <gurjeet@singh.im> (gurjeet.singh.im)
@@ -1,23 +0,0 @@
-# ZFS Storage Driver
-
-This is a placeholder to declare the presence and status of ZFS storage driver
-for containers.
-
-The current development is done in Gurjeet Singh's fork of Docker, under the
-branch named [zfs_driver].
-
-[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver
-
-
-# Status
-
-Alpha: The code is now capable of creating, running and destroying containers
-and images.
-
-The code is under development. Contributions in the form of suggestions,
-code-reviews, and patches are welcome.
-
-Please send the communication to gurjeet@singh.im and CC at least one Docker
-mailing list.
153 daemon/attach.go (new file)
@@ -0,0 +1,153 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/dotcloud/docker/utils"
+)
+
+func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
+	var (
+		cStdout, cStderr io.ReadCloser
+		nJobs            int
+		errors           = make(chan error, 3)
+	)
+
+	if stdin != nil && container.Config.OpenStdin {
+		nJobs += 1
+		if cStdin, err := container.StdinPipe(); err != nil {
+			errors <- err
+		} else {
+			go func() {
+				utils.Debugf("attach: stdin: begin")
+				defer utils.Debugf("attach: stdin: end")
+				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
+				if container.Config.StdinOnce && !container.Config.Tty {
+					defer cStdin.Close()
+				} else {
+					defer func() {
+						if cStdout != nil {
+							cStdout.Close()
+						}
+						if cStderr != nil {
+							cStderr.Close()
+						}
+					}()
+				}
+				if container.Config.Tty {
+					_, err = utils.CopyEscapable(cStdin, stdin)
+				} else {
+					_, err = io.Copy(cStdin, stdin)
+				}
+				if err == io.ErrClosedPipe {
+					err = nil
+				}
+				if err != nil {
+					utils.Errorf("attach: stdin: %s", err)
+				}
+				errors <- err
+			}()
+		}
+	}
+	if stdout != nil {
+		nJobs += 1
+		if p, err := container.StdoutPipe(); err != nil {
+			errors <- err
+		} else {
+			cStdout = p
+			go func() {
+				utils.Debugf("attach: stdout: begin")
+				defer utils.Debugf("attach: stdout: end")
+				// If we are in StdinOnce mode, then close stdin
+				if container.Config.StdinOnce && stdin != nil {
+					defer stdin.Close()
+				}
+				if stdinCloser != nil {
+					defer stdinCloser.Close()
+				}
+				_, err := io.Copy(stdout, cStdout)
+				if err == io.ErrClosedPipe {
+					err = nil
+				}
+				if err != nil {
+					utils.Errorf("attach: stdout: %s", err)
+				}
+				errors <- err
+			}()
+		}
+	} else {
+		go func() {
+			if stdinCloser != nil {
+				defer stdinCloser.Close()
+			}
+			if cStdout, err := container.StdoutPipe(); err != nil {
+				utils.Errorf("attach: stdout pipe: %s", err)
+			} else {
+				io.Copy(&utils.NopWriter{}, cStdout)
+			}
+		}()
+	}
+	if stderr != nil {
+		nJobs += 1
+		if p, err := container.StderrPipe(); err != nil {
+			errors <- err
+		} else {
+			cStderr = p
+			go func() {
+				utils.Debugf("attach: stderr: begin")
+				defer utils.Debugf("attach: stderr: end")
+				// If we are in StdinOnce mode, then close stdin
+				if container.Config.StdinOnce && stdin != nil {
+					defer stdin.Close()
+				}
+				if stdinCloser != nil {
+					defer stdinCloser.Close()
+				}
+				_, err := io.Copy(stderr, cStderr)
+				if err == io.ErrClosedPipe {
+					err = nil
+				}
+				if err != nil {
+					utils.Errorf("attach: stderr: %s", err)
+				}
+				errors <- err
+			}()
+		}
+	} else {
+		go func() {
+			if stdinCloser != nil {
+				defer stdinCloser.Close()
+			}
+
+			if cStderr, err := container.StderrPipe(); err != nil {
+				utils.Errorf("attach: stdout pipe: %s", err)
+			} else {
+				io.Copy(&utils.NopWriter{}, cStderr)
+			}
+		}()
+	}
+
+	return utils.Go(func() error {
+		defer func() {
+			if cStdout != nil {
+				cStdout.Close()
+			}
+			if cStderr != nil {
+				cStderr.Close()
+			}
+		}()
+
+		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
+		// of closing the passed stdin? Add an intermediary io.Pipe?
+		for i := 0; i < nJobs; i += 1 {
+			utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
+			if err := <-errors; err != nil {
+				utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
+				return err
+			}
+			utils.Debugf("attach: job %d completed successfully", i+1)
+		}
+		utils.Debugf("attach: all jobs completed successfully")
+		return nil
+	})
+}
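Attach fans in up to three stream-copying jobs through utils.Go, receiving one error per job from a channel. A sketch of the pattern that helper relies on, judging by how it is used here (this is not the actual utils implementation):

```go
package main

import (
	"errors"
	"fmt"
)

// Go runs f in its own goroutine and hands back a buffered channel
// that will receive f's error, so callers can fan in several jobs
// with plain channel receives.
func Go(f func() error) chan error {
	ch := make(chan error, 1)
	go func() {
		ch <- f()
	}()
	return ch
}

func main() {
	done := Go(func() error { return errors.New("boom") })
	fmt.Println(<-done) // boom
}
```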
daemon/container.go
@@ -4,15 +4,6 @@ import (
	"encoding/json"
	"errors"
	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/links"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
	"io"
	"io/ioutil"
	"log"
@@ -22,6 +13,19 @@ import (
	"sync"
	"syscall"
	"time"
+
+	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/image"
+	"github.com/dotcloud/docker/links"
+	"github.com/dotcloud/docker/nat"
+	"github.com/dotcloud/docker/pkg/label"
+	"github.com/dotcloud/docker/pkg/networkfs/etchosts"
+	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
+	"github.com/dotcloud/docker/runconfig"
+	"github.com/dotcloud/docker/utils"
 )

 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -64,7 +68,8 @@ type Container struct {
	stdin     io.ReadCloser
	stdinPipe io.WriteCloser

-	daemon *Daemon
+	daemon                   *Daemon
+	MountLabel, ProcessLabel string

	waitLock chan struct{}
	Volumes  map[string]string
@@ -122,6 +127,10 @@ func (container *Container) FromDisk() error {
	if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
		return err
	}
+
+	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
+		return err
+	}
	return container.readHostConfig()
 }

@@ -161,186 +170,46 @@ func (container *Container) WriteHostConfig() (err error) {
	return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
 }

-func (container *Container) generateEnvConfig(env []string) error {
-	data, err := json.Marshal(env)
-	if err != nil {
-		return err
-	}
-	p, err := container.EnvConfigPath()
-	if err != nil {
-		return err
-	}
-	ioutil.WriteFile(p, data, 0600)
-	return nil
-}
-
-func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
-	var cStdout, cStderr io.ReadCloser
-
-	var nJobs int
-	errors := make(chan error, 3)
-	if stdin != nil && container.Config.OpenStdin {
-		nJobs += 1
-		if cStdin, err := container.StdinPipe(); err != nil {
-			errors <- err
-		} else {
-			go func() {
-				utils.Debugf("attach: stdin: begin")
-				defer utils.Debugf("attach: stdin: end")
-				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
-				if container.Config.StdinOnce && !container.Config.Tty {
-					defer cStdin.Close()
-				} else {
-					defer func() {
-						if cStdout != nil {
-							cStdout.Close()
-						}
-						if cStderr != nil {
-							cStderr.Close()
-						}
-					}()
-				}
-				if container.Config.Tty {
-					_, err = utils.CopyEscapable(cStdin, stdin)
-				} else {
-					_, err = io.Copy(cStdin, stdin)
-				}
-				if err == io.ErrClosedPipe {
-					err = nil
-				}
-				if err != nil {
-					utils.Errorf("attach: stdin: %s", err)
-				}
-				errors <- err
-			}()
-		}
-	}
-	if stdout != nil {
-		nJobs += 1
-		if p, err := container.StdoutPipe(); err != nil {
-			errors <- err
-		} else {
-			cStdout = p
-			go func() {
-				utils.Debugf("attach: stdout: begin")
-				defer utils.Debugf("attach: stdout: end")
-				// If we are in StdinOnce mode, then close stdin
-				if container.Config.StdinOnce && stdin != nil {
-					defer stdin.Close()
-				}
-				if stdinCloser != nil {
-					defer stdinCloser.Close()
-				}
-				_, err := io.Copy(stdout, cStdout)
-				if err == io.ErrClosedPipe {
-					err = nil
-				}
-				if err != nil {
-					utils.Errorf("attach: stdout: %s", err)
-				}
-				errors <- err
-			}()
-		}
-	} else {
-		go func() {
-			if stdinCloser != nil {
-				defer stdinCloser.Close()
-			}
-			if cStdout, err := container.StdoutPipe(); err != nil {
-				utils.Errorf("attach: stdout pipe: %s", err)
-			} else {
-				io.Copy(&utils.NopWriter{}, cStdout)
-			}
-		}()
-	}
-	if stderr != nil {
-		nJobs += 1
-		if p, err := container.StderrPipe(); err != nil {
-			errors <- err
-		} else {
-			cStderr = p
-			go func() {
-				utils.Debugf("attach: stderr: begin")
-				defer utils.Debugf("attach: stderr: end")
-				// If we are in StdinOnce mode, then close stdin
-				if container.Config.StdinOnce && stdin != nil {
-					defer stdin.Close()
-				}
-				if stdinCloser != nil {
-					defer stdinCloser.Close()
-				}
-				_, err := io.Copy(stderr, cStderr)
-				if err == io.ErrClosedPipe {
-					err = nil
-				}
-				if err != nil {
-					utils.Errorf("attach: stderr: %s", err)
-				}
-				errors <- err
-			}()
-		}
-	} else {
-		go func() {
-			if stdinCloser != nil {
-				defer stdinCloser.Close()
-			}
-
-			if cStderr, err := container.StderrPipe(); err != nil {
-				utils.Errorf("attach: stdout pipe: %s", err)
-			} else {
-				io.Copy(&utils.NopWriter{}, cStderr)
-			}
-		}()
-	}
-
-	return utils.Go(func() error {
-		defer func() {
-			if cStdout != nil {
-				cStdout.Close()
-			}
-			if cStderr != nil {
-				cStderr.Close()
-			}
-		}()
-
-		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
-		// of closing the passed stdin? Add an intermediary io.Pipe?
-		for i := 0; i < nJobs; i += 1 {
-			utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
-			if err := <-errors; err != nil {
-				utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
-				return err
-			}
-			utils.Debugf("attach: job %d completed successfully", i+1)
-		}
-		utils.Debugf("attach: all jobs completed successfully")
-		return nil
-	})
-}
-
-func populateCommand(c *Container, env []string) {
+func populateCommand(c *Container, env []string) error {
	var (
-		en           *execdriver.Network
-		driverConfig = make(map[string][]string)
+		en      *execdriver.Network
+		context = make(map[string][]string)
	)
+	context["process_label"] = []string{c.GetProcessLabel()}
+	context["mount_label"] = []string{c.GetMountLabel()}

	en = &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

-	if !c.Config.NetworkDisabled {
-		network := c.NetworkSettings
-		en.Interface = &execdriver.NetworkInterface{
-			Gateway:     network.Gateway,
-			Bridge:      network.Bridge,
-			IPAddress:   network.IPAddress,
-			IPPrefixLen: network.IPPrefixLen,
-		}
-	}
+	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "none":
+	case "host":
+		en.HostNetworking = true
+	case "bridge", "": // empty string to support existing containers
+		if !c.Config.NetworkDisabled {
+			network := c.NetworkSettings
+			en.Interface = &execdriver.NetworkInterface{
+				Gateway:     network.Gateway,
+				Bridge:      network.Bridge,
+				IPAddress:   network.IPAddress,
+				IPPrefixLen: network.IPPrefixLen,
+			}
+		}
+	case "container":
+		nc, err := c.getNetworkedContainer()
+		if err != nil {
+			return err
+		}
+		en.ContainerID = nc.ID
+	default:
+		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
+	}

	// TODO: this can be removed after lxc-conf is fully deprecated
-	mergeLxcConfIntoOptions(c.hostConfig, driverConfig)
+	mergeLxcConfIntoOptions(c.hostConfig, context)

	resources := &execdriver.Resources{
		Memory: c.Config.Memory,
@@ -358,11 +227,12 @@ func populateCommand(c *Container, env []string) {
		Network:   en,
		Tty:       c.Config.Tty,
		User:      c.Config.User,
-		Config:    driverConfig,
+		Config:    context,
		Resources: resources,
	}
	c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	c.command.Env = env
+	return nil
 }

 func (container *Container) Start() (err error) {
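The switch in populateCommand keys off the prefix of the `--net` value, splitting once on ":". A standalone sketch of the same dispatch; "container:web" is an invented example value:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, mode := range []string{"none", "host", "bridge", "", "container:web"} {
		parts := strings.SplitN(mode, ":", 2)
		switch parts[0] {
		case "host":
			fmt.Println(mode, "-> share the host's network interfaces")
		case "container":
			fmt.Println(mode, "-> join the network of container", parts[1])
		case "bridge", "": // empty string supports pre-existing containers
			fmt.Println(mode, "-> default bridge networking")
		case "none":
			fmt.Println(mode, "-> no networking")
		}
	}
}
```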
@@ -397,16 +267,13 @@ func (container *Container) Start() (err error) {
	if err != nil {
		return err
	}
-	env := container.createDaemonEnvironment(linkedEnv)
-	// TODO: This is only needed for lxc so we should look for a way to
-	// remove this dep
-	if err := container.generateEnvConfig(env); err != nil {
-		return err
-	}
	if err := container.setupWorkingDirectory(); err != nil {
		return err
	}
-	populateCommand(container, env)
+	env := container.createDaemonEnvironment(linkedEnv)
+	if err := populateCommand(container, env); err != nil {
+		return err
+	}
	if err := setupMountsForContainer(container); err != nil {
		return err
	}
@@ -464,32 +331,51 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) {
	return utils.NewBufReader(reader), nil
 }

-func (container *Container) buildHostnameAndHostsFiles(IP string) {
-	container.HostnamePath = path.Join(container.root, "hostname")
-	ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+func (container *Container) StdoutLogPipe() io.ReadCloser {
+	reader, writer := io.Pipe()
+	container.stdout.AddWriter(writer, "stdout")
+	return utils.NewBufReader(reader)
+}

-	hostsContent := []byte(`
-127.0.0.1	localhost
-::1		localhost ip6-localhost ip6-loopback
-fe00::0		ip6-localnet
-ff00::0		ip6-mcastprefix
-ff02::1		ip6-allnodes
-ff02::2		ip6-allrouters
-`)
+func (container *Container) StderrLogPipe() io.ReadCloser {
+	reader, writer := io.Pipe()
+	container.stderr.AddWriter(writer, "stderr")
+	return utils.NewBufReader(reader)
+}
+
+func (container *Container) buildHostnameFile() error {
+	container.HostnamePath = path.Join(container.root, "hostname")
+	if container.Config.Domainname != "" {
+		return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
+	}
+	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+}
+
+func (container *Container) buildHostnameAndHostsFiles(IP string) error {
+	if err := container.buildHostnameFile(); err != nil {
+		return err
+	}

	container.HostsPath = path.Join(container.root, "hosts")

-	if container.Config.Domainname != "" {
-		hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
-	} else if !container.Config.NetworkDisabled {
-		hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
+	extraContent := make(map[string]string)
+
+	children, err := container.daemon.Children(container.Name)
+	if err != nil {
+		return err
	}

-	ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
+	for linkAlias, child := range children {
+		_, alias := path.Split(linkAlias)
+		extraContent[alias] = child.NetworkSettings.IPAddress
+	}
+
+	return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, &extraContent)
 }

 func (container *Container) allocateNetwork() error {
-	if container.Config.NetworkDisabled {
+	mode := container.hostConfig.NetworkMode
+	if container.Config.NetworkDisabled || mode.IsContainer() || mode.IsHost() {
		return nil
	}

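For reference, the file etchosts.Build now produces carries the same boilerplate the old hostsContent literal embedded, plus one line per linked child. With IP 172.17.0.2, hostname `web`, and a linked container `db` at 172.17.0.3 (all invented values), the result would look roughly like:

```
172.17.0.2	web
127.0.0.1	localhost
::1	localhost ip6-localhost ip6-loopback
fe00::0	ip6-localnet
ff00::0	ip6-mcastprefix
ff02::1	ip6-allnodes
ff02::2	ip6-allrouters
172.17.0.3	db
```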
@@ -654,9 +540,12 @@ func (container *Container) Kill() error {

	// 2. Wait for the process to die, in last resort, try to kill the process directly
	if err := container.WaitTimeout(10 * time.Second); err != nil {
-		log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
-		if err := syscall.Kill(container.State.Pid, 9); err != nil {
-			return err
+		// Ensure that we don't kill ourselves
+		if pid := container.State.Pid; pid != 0 {
+			log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
+			if err := syscall.Kill(pid, 9); err != nil {
+				return err
+			}
		}
	}

@@ -800,22 +689,6 @@ func (container *Container) jsonPath() string {
	return path.Join(container.root, "config.json")
 }

-func (container *Container) EnvConfigPath() (string, error) {
-	p := path.Join(container.root, "config.env")
-	if _, err := os.Stat(p); err != nil {
-		if os.IsNotExist(err) {
-			f, err := os.Create(p)
-			if err != nil {
-				return "", err
-			}
-			f.Close()
-		} else {
-			return "", err
-		}
-	}
-	return p, nil
-}
-
 // This method must be exported to be used from the lxc template
 // This directory is only usable when the container is running
 func (container *Container) RootfsPath() string {
@@ -939,19 +812,27 @@ func (container *Container) setupContainerDns() error {
	if container.ResolvConfPath != "" {
		return nil
	}

	var (
		config = container.hostConfig
		daemon = container.daemon
	)
-	resolvConf, err := utils.GetResolvConf()

+	if config.NetworkMode == "host" {
+		container.ResolvConfPath = "/etc/resolv.conf"
+		return nil
+	}
+
+	resolvConf, err := resolvconf.Get()
	if err != nil {
		return err
	}

	// If custom dns exists, then create a resolv.conf for the container
	if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
		var (
-			dns       = utils.GetNameservers(resolvConf)
-			dnsSearch = utils.GetSearchDomains(resolvConf)
+			dns       = resolvconf.GetNameservers(resolvConf)
+			dnsSearch = resolvconf.GetSearchDomains(resolvConf)
		)
		if len(config.Dns) > 0 {
			dns = config.Dns
@@ -964,21 +845,7 @@ func (container *Container) setupContainerDns() error {
			dnsSearch = daemon.config.DnsSearch
		}
		container.ResolvConfPath = path.Join(container.root, "resolv.conf")
-		f, err := os.Create(container.ResolvConfPath)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-		for _, dns := range dns {
-			if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
-				return err
-			}
-		}
-		if len(dnsSearch) > 0 {
-			if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil {
-				return err
-			}
-		}
+		return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
	} else {
		container.ResolvConfPath = "/etc/resolv.conf"
	}
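The removed inline loop shows exactly what resolvconf.Build is now called for: one "nameserver" line per server, then a single "search" line. A self-contained sketch of that behavior, with invented path and values:

```go
package main

import (
	"bytes"
	"io/ioutil"
	"strings"
)

// build mirrors the removed inline code: write nameserver lines,
// then an optional search line. Path and values are illustrative.
func build(path string, dns, dnsSearch []string) error {
	var buf bytes.Buffer
	for _, d := range dns {
		buf.WriteString("nameserver " + d + "\n")
	}
	if len(dnsSearch) > 0 {
		buf.WriteString("search " + strings.Join(dnsSearch, " ") + "\n")
	}
	return ioutil.WriteFile(path, buf.Bytes(), 0644)
}

func main() {
	_ = build("/tmp/resolv.conf", []string{"8.8.8.8", "8.8.4.4"}, []string{"example.com"})
}
```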
@@ -986,14 +853,39 @@ func (container *Container) setupContainerDns() error {
 }

 func (container *Container) initializeNetworking() error {
-	if container.daemon.config.DisableNetwork {
+	var err error
+	if container.hostConfig.NetworkMode.IsHost() {
+		container.Config.Hostname, err = os.Hostname()
+		if err != nil {
+			return err
+		}
+
+		parts := strings.SplitN(container.Config.Hostname, ".", 2)
+		if len(parts) > 1 {
+			container.Config.Hostname = parts[0]
+			container.Config.Domainname = parts[1]
+		}
+		container.HostsPath = "/etc/hosts"
+
+		return container.buildHostnameFile()
+	} else if container.hostConfig.NetworkMode.IsContainer() {
+		// we need to get the hosts files from the container to join
+		nc, err := container.getNetworkedContainer()
+		if err != nil {
+			return err
+		}
+		container.HostsPath = nc.HostsPath
+		container.ResolvConfPath = nc.ResolvConfPath
+		container.Config.Hostname = nc.Config.Hostname
+		container.Config.Domainname = nc.Config.Domainname
+	} else if container.daemon.config.DisableNetwork {
		container.Config.NetworkDisabled = true
-		container.buildHostnameAndHostsFiles("127.0.1.1")
+		return container.buildHostnameAndHostsFiles("127.0.1.1")
	} else {
		if err := container.allocateNetwork(); err != nil {
			return err
		}
-		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
+		return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
	}
	return nil
 }
@@ -1179,3 +1071,36 @@ func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bind
	bindings[port] = binding
	return nil
 }
+
+func (container *Container) GetProcessLabel() string {
+	// even if we have a process label return "" if we are running
+	// in privileged mode
+	if container.hostConfig.Privileged {
+		return ""
+	}
+	return container.ProcessLabel
+}
+
+func (container *Container) GetMountLabel() string {
+	if container.hostConfig.Privileged {
+		return ""
+	}
+	return container.MountLabel
+}
+
+func (container *Container) getNetworkedContainer() (*Container, error) {
+	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "container":
+		nc := container.daemon.Get(parts[1])
+		if nc == nil {
+			return nil, fmt.Errorf("no such container to join network: %s", parts[1])
+		}
+		if !nc.State.IsRunning() {
+			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
+		}
+		return nc, nil
+	default:
+		return nil, fmt.Errorf("network mode not set to container")
+	}
+}
daemon/daemon.go

@@ -3,6 +3,16 @@ package daemon
 import (
 	"container/list"
 	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/daemon/execdriver"
 	"github.com/dotcloud/docker/daemon/execdriver/execdrivers"
@@ -17,20 +27,13 @@ import (
 	"github.com/dotcloud/docker/graph"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/pkg/graphdb"
+	"github.com/dotcloud/docker/pkg/label"
 	"github.com/dotcloud/docker/pkg/mount"
+	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
+	"github.com/dotcloud/docker/pkg/selinux"
 	"github.com/dotcloud/docker/pkg/sysinfo"
 	"github.com/dotcloud/docker/runconfig"
 	"github.com/dotcloud/docker/utils"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"path"
-	"regexp"
-	"strings"
-	"sync"
-	"time"
 )
 
 // Set the max depth to the aufs default that most
@@ -134,9 +137,6 @@ func (daemon *Daemon) load(id string) (*Container, error) {
 	if container.ID != id {
 		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
 	}
-	if container.State.IsRunning() {
-		container.State.SetGhost(true)
-	}
 	return container, nil
 }
 
@@ -171,35 +171,32 @@ func (daemon *Daemon) Register(container *Container) error {
 	// if so, then we need to restart monitor and init a new lock
 	// If the container is supposed to be running, make sure of it
 	if container.State.IsRunning() {
-		if container.State.IsGhost() {
-			utils.Debugf("killing ghost %s", container.ID)
+		utils.Debugf("killing old running container %s", container.ID)
 
-			existingPid := container.State.Pid
-			container.State.SetGhost(false)
-			container.State.SetStopped(0)
+		existingPid := container.State.Pid
+		container.State.SetStopped(0)
 
-			// We only have to handle this for lxc because the other drivers will ensure that
-			// no ghost processes are left when docker dies
-			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
-				lxc.KillLxc(container.ID, 9)
-			} else {
-				// use the current driver and ensure that the container is dead x.x
-				cmd := &execdriver.Command{
-					ID: container.ID,
-				}
-				var err error
-				cmd.Process, err = os.FindProcess(existingPid)
-				if err != nil {
-					utils.Debugf("cannot find existing process for %d", existingPid)
-				}
-				daemon.execDriver.Terminate(cmd)
-			}
-			if err := container.Unmount(); err != nil {
-				utils.Debugf("ghost unmount error %s", err)
-			}
-			if err := container.ToDisk(); err != nil {
-				utils.Debugf("saving ghost state to disk %s", err)
+		// We only have to handle this for lxc because the other drivers will ensure that
+		// no processes are left when docker dies
+		if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
+			lxc.KillLxc(container.ID, 9)
+		} else {
+			// use the current driver and ensure that the container is dead x.x
+			cmd := &execdriver.Command{
+				ID: container.ID,
 			}
+			var err error
+			cmd.Process, err = os.FindProcess(existingPid)
+			if err != nil {
+				utils.Debugf("cannot find existing process for %d", existingPid)
+			}
+			daemon.execDriver.Terminate(cmd)
+		}
+		if err := container.Unmount(); err != nil {
+			utils.Debugf("unmount error %s", err)
+		}
+		if err := container.ToDisk(); err != nil {
+			utils.Debugf("saving stopped state to disk %s", err)
 		}
 
 		info := daemon.execDriver.Info(container.ID)
@@ -211,8 +208,6 @@ func (daemon *Daemon) Register(container *Container) error {
 			utils.Debugf("restart unmount error %s", err)
 		}
 
-		container.State.SetGhost(false)
-		container.State.SetStopped(0)
 		if err := container.Start(); err != nil {
 			return err
 		}
@@ -278,6 +273,10 @@ func (daemon *Daemon) Destroy(container *Container) error {
 		return err
 	}
 
+	// Deregister the container before removing its directory, to avoid race conditions
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Remove(element)
+
 	if err := daemon.driver.Remove(container.ID); err != nil {
 		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
 	}
@@ -291,12 +290,11 @@ func (daemon *Daemon) Destroy(container *Container) error {
 		utils.Debugf("Unable to remove container from link graph: %s", err)
 	}
 
-	// Deregister the container before removing its directory, to avoid race conditions
-	daemon.idIndex.Delete(container.ID)
-	daemon.containers.Remove(element)
 	if err := os.RemoveAll(container.root); err != nil {
 		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
 	}
+	selinux.FreeLxcContexts(container.ProcessLabel)
+
 	return nil
 }
 
@@ -541,6 +539,10 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
 		ExecDriver: daemon.execDriver.Name(),
 	}
 	container.root = daemon.containerRoot(container.ID)
+
+	if container.ProcessLabel, container.MountLabel, err = label.GenLabels(""); err != nil {
+		return nil, err
+	}
 	return container, nil
 }
 
@@ -551,10 +553,10 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error
 		return err
 	}
 	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := daemon.driver.Create(initID, img.ID, ""); err != nil {
+	if err := daemon.driver.Create(initID, img.ID); err != nil {
 		return err
 	}
-	initPath, err := daemon.driver.Get(initID)
+	initPath, err := daemon.driver.Get(initID, "")
 	if err != nil {
 		return err
 	}
@@ -564,7 +566,7 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error
 		return err
 	}
 
-	if err := daemon.driver.Create(container.ID, initID, ""); err != nil {
+	if err := daemon.driver.Create(container.ID, initID); err != nil {
 		return err
 	}
 	return nil
@@ -678,7 +680,6 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
 	if !config.EnableSelinuxSupport {
 		selinux.SetDisabled()
 	}
-
 	// Set the default driver
 	graphdriver.DefaultDriver = config.GraphDriver
 
@@ -848,7 +849,7 @@ func (daemon *Daemon) Close() error {
 }
 
 func (daemon *Daemon) Mount(container *Container) error {
-	dir, err := daemon.driver.Get(container.ID)
+	dir, err := daemon.driver.Get(container.ID, container.GetMountLabel())
 	if err != nil {
 		return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
 	}
@@ -870,12 +871,12 @@ func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) {
 	if differ, ok := daemon.driver.(graphdriver.Differ); ok {
 		return differ.Changes(container.ID)
 	}
-	cDir, err := daemon.driver.Get(container.ID)
+	cDir, err := daemon.driver.Get(container.ID, "")
 	if err != nil {
 		return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 	}
 	defer daemon.driver.Put(container.ID)
-	initDir, err := daemon.driver.Get(container.ID + "-init")
+	initDir, err := daemon.driver.Get(container.ID+"-init", "")
 	if err != nil {
 		return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 	}
@@ -893,7 +894,7 @@ func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) {
 		return nil, err
 	}
 
-	cDir, err := daemon.driver.Get(container.ID)
+	cDir, err := daemon.driver.Get(container.ID, "")
 	if err != nil {
 		return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 	}
@@ -981,7 +982,7 @@ func (daemon *Daemon) SetServer(server Server) {
 }
 
 func (daemon *Daemon) checkLocaldns() error {
-	resolvConf, err := utils.GetResolvConf()
+	resolvConf, err := resolvconf.Get()
 	if err != nil {
 		return err
 	}
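Note: the daemon-side changes above track a signature change across the graph drivers: the mount label moves from Create to Get, so the SELinux context is applied when a layer is mounted rather than when it is created (see the aufs, btrfs, and devmapper hunks later in this diff). Roughly, the driver contract becomes the following; this is a sketch inferred from the call sites, not the verbatim interface:

```go
// Abridged sketch of the graphdriver contract implied by this diff.
type Driver interface {
	Create(id, parent string) error            // no mount label at create time anymore
	Get(id, mountLabel string) (string, error) // label applied when the layer is mounted
	Put(id string)
	Remove(id string) error
	Exists(id string) bool
}
```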
daemon/execdriver/driver.go

@@ -89,8 +89,10 @@ type Driver interface {
 
 // Network settings of the container
 type Network struct {
-	Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled
-	Mtu       int               `json:"mtu"`
+	Interface      *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled
+	Mtu            int               `json:"mtu"`
+	ContainerID    string            `json:"container_id"` // id of the container to join network.
+	HostNetworking bool              `json:"host_networking"`
 }
 
 type NetworkInterface struct {
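Note: the two new fields select the non-default network modes. A toy illustration of how a caller might populate the struct (local type copies and hypothetical values, for illustration only):

```go
package main

import "fmt"

// Local copies of the fields added above, for illustration only.
type NetworkInterface struct{ Bridge string }
type Network struct {
	Interface      *NetworkInterface
	Mtu            int
	ContainerID    string // set when joining another container's namespace
	HostNetworking bool   // set for host-networking configuration
}

func main() {
	hostNet := Network{Mtu: 1500, HostNetworking: true}    // use the host's interfaces
	joinNet := Network{Mtu: 1500, ContainerID: "4afd8a8d"} // join another container
	fmt.Println(hostNet.HostNetworking, joinNet.ContainerID)
}
```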
daemon/execdriver/lxc/driver.go

@@ -1,11 +1,8 @@
 package lxc
 
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/pkg/cgroups"
-	"github.com/dotcloud/docker/pkg/label"
-	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"log"
 	"os"
@@ -16,6 +13,12 @@ import (
 	"strings"
 	"syscall"
 	"time"
+
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/pkg/cgroups"
+	"github.com/dotcloud/docker/pkg/label"
+	"github.com/dotcloud/docker/pkg/system"
+	"github.com/dotcloud/docker/utils"
 )
 
 const DriverName = "lxc"
@@ -25,23 +28,21 @@ func init() {
 		if err := setupEnv(args); err != nil {
 			return err
 		}
-
 		if err := setupHostname(args); err != nil {
 			return err
 		}
-
 		if err := setupNetworking(args); err != nil {
 			return err
 		}
-
 		if err := setupCapabilities(args); err != nil {
 			return err
 		}
-
 		if err := setupWorkingDirectory(args); err != nil {
 			return err
 		}
-
+		if err := system.CloseFdsFrom(3); err != nil {
+			return err
+		}
 		if err := changeUser(args); err != nil {
 			return err
 		}
@@ -85,6 +86,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	if err := execdriver.SetTerminal(c, pipes); err != nil {
 		return -1, err
 	}
+	if err := d.generateEnvConfig(c); err != nil {
+		return -1, err
+	}
 	configPath, err := d.generateLXCConfig(c)
 	if err != nil {
 		return -1, err
@@ -416,3 +420,14 @@ func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
 	}
 	return root, nil
 }
+
+func (d *driver) generateEnvConfig(c *execdriver.Command) error {
+	data, err := json.Marshal(c.Env)
+	if err != nil {
+		return err
+	}
+	p := path.Join(d.root, "containers", c.ID, "config.env")
+	c.Mounts = append(c.Mounts, execdriver.Mount{p, "/.dockerenv", false, true})
+
+	return ioutil.WriteFile(p, data, 0600)
+}
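Note: generateEnvConfig serializes the container's environment to JSON and registers a bind mount so the file shows up at /.dockerenv inside the container. A toy illustration of the payload shape it writes (inferred from the json.Marshal call above; hypothetical values):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	env := []string{"PATH=/usr/bin", "HOME=/root"}
	data, err := json.Marshal(env)
	if err != nil {
		panic(err)
	}
	// This is the shape of the config.env file that lands at /.dockerenv.
	fmt.Println(string(data)) // ["PATH=/usr/bin","HOME=/root"]
}
```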
daemon/execdriver/lxc/init.go

@@ -3,15 +3,16 @@ package lxc
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/pkg/netlink"
-	"github.com/dotcloud/docker/pkg/user"
-	"github.com/syndtr/gocapability/capability"
 	"io/ioutil"
 	"net"
 	"os"
 	"strings"
 	"syscall"
+
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/pkg/netlink"
+	"github.com/dotcloud/docker/pkg/user"
+	"github.com/syndtr/gocapability/capability"
 )
 
 // Clear environment pollution introduced by lxc-start
@@ -149,6 +150,7 @@ func setupCapabilities(args *execdriver.InitArgs) error {
 		capability.CAP_MAC_OVERRIDE,
 		capability.CAP_MAC_ADMIN,
 		capability.CAP_NET_ADMIN,
+		capability.CAP_SYSLOG,
 	}
 
 	c, err := capability.NewPid(os.Getpid())
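Note: the hunk above extends the drop list with CAP_SYSLOG. For context, a sketch of how such a list is typically applied with the gocapability package used here, inside setupCapabilities (method names assumed from that era's API; treat as illustrative, not verbatim):

```go
// Inside setupCapabilities, after building the drop list:
drop := []capability.Cap{
	capability.CAP_MAC_OVERRIDE,
	capability.CAP_MAC_ADMIN,
	capability.CAP_NET_ADMIN,
	capability.CAP_SYSLOG, // newly added above
}

c, err := capability.NewPid(os.Getpid())
if err != nil {
	return err
}
// Unset the capabilities in the normal sets and the bounding set,
// then apply the result to the current process.
c.Unset(capability.CAPS|capability.BOUNDS, drop...)
if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil {
	return err
}
```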
daemon/execdriver/lxc/lxc_template.go

@@ -1,10 +1,11 @@
 package lxc
 
 import (
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/pkg/label"
 	"strings"
 	"text/template"
+
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/pkg/label"
 )
 
 const LxcTemplate = `
@@ -13,12 +14,13 @@ const LxcTemplate = `
 lxc.network.type = veth
 lxc.network.link = {{.Network.Interface.Bridge}}
 lxc.network.name = eth0
-{{else}}
+lxc.network.mtu = {{.Network.Mtu}}
+{{else if not .Network.HostNetworking}}
 # network is disabled (-n=false)
 lxc.network.type = empty
 lxc.network.flags = up
-{{end}}
 lxc.network.mtu = {{.Network.Mtu}}
+{{end}}
 
 # root filesystem
 {{$ROOTFS := .Rootfs}}
@@ -82,12 +84,11 @@ lxc.pivotdir = lxc_putold
 
 # NOTICE: These mounts must be applied within the namespace
 
-# WARNING: procfs is a known attack vector and should probably be disabled
-# if your userspace allows it. eg. see http://blog.zx2c4.com/749
+# WARNING: mounting procfs and/or sysfs read-write is a known attack vector.
+# See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ
+# We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only.
+# We cannot mount them directly read-only, because that would prevent loading AppArmor profiles.
 lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
-
-# WARNING: sysfs is a known attack vector and should probably be disabled
-# if your userspace allows it. eg. see http://bit.ly/T9CkqJ
 lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
 
 {{if .Tty}}
@@ -109,7 +110,7 @@ lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabS
 {{if .AppArmor}}
 lxc.aa_profile = unconfined
 {{else}}
-#lxc.aa_profile = unconfined
+# Let AppArmor normal confinement take place (i.e., not unconfined)
 {{end}}
 {{end}}
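Note: the template now distinguishes three cases: a bridged veth interface, host networking (emit no network stanza at all), and disabled networking. A minimal text/template demo of the new {{else if}} branching (standalone toy, not the real template data):

```go
package main

import (
	"os"
	"text/template"
)

type Net struct {
	Interface      *struct{ Bridge string }
	HostNetworking bool
}

const tmpl = `{{if .Interface}}veth
{{else if not .HostNetworking}}empty
{{end}}`

func main() {
	t := template.Must(template.New("lxc").Parse(tmpl))
	t.Execute(os.Stdout, Net{HostNetworking: true}) // prints nothing: host networking
	t.Execute(os.Stdout, Net{})                     // prints "empty": networking disabled
}
```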
daemon/execdriver/native/configuration/parse.go

@@ -2,12 +2,13 @@ package configuration
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/pkg/libcontainer"
-	"github.com/dotcloud/docker/utils"
 	"os/exec"
 	"path/filepath"
 	"strconv"
 	"strings"
+
+	"github.com/dotcloud/docker/pkg/libcontainer"
+	"github.com/dotcloud/docker/utils"
 )
 
 type Action func(*libcontainer.Container, interface{}, string) error
@@ -21,10 +22,13 @@ var actions = map[string]Action{
 
 	"net.join": joinNetNamespace, // join another containers net namespace
 
-	"cgroups.cpu_shares":  cpuShares,  // set the cpu shares
-	"cgroups.memory":      memory,     // set the memory limit
-	"cgroups.memory_swap": memorySwap, // set the memory swap limit
-	"cgroups.cpuset.cpus": cpusetCpus, // set the cpus used
+	"cgroups.cpu_shares":         cpuShares,         // set the cpu shares
+	"cgroups.memory":             memory,            // set the memory limit
+	"cgroups.memory_reservation": memoryReservation, // set the memory reservation
+	"cgroups.memory_swap":        memorySwap,        // set the memory swap limit
+	"cgroups.cpuset.cpus":        cpusetCpus,        // set the cpus used
+
+	"systemd.slice": systemdSlice, // set parent Slice used for systemd unit
 
 	"apparmor_profile": apparmorProfile, // set the apparmor profile to apply
 
@@ -40,6 +44,15 @@ func cpusetCpus(container *libcontainer.Container, context interface{}, value st
 	return nil
 }
 
+func systemdSlice(container *libcontainer.Container, context interface{}, value string) error {
+	if container.Cgroups == nil {
+		return fmt.Errorf("cannot set slice when cgroups are disabled")
+	}
+	container.Cgroups.Slice = value
+
+	return nil
+}
+
 func apparmorProfile(container *libcontainer.Container, context interface{}, value string) error {
 	container.Context["apparmor_profile"] = value
 	return nil
@@ -70,6 +83,19 @@ func memory(container *libcontainer.Container, context interface{}, value string
 	return nil
 }
 
+func memoryReservation(container *libcontainer.Container, context interface{}, value string) error {
+	if container.Cgroups == nil {
+		return fmt.Errorf("cannot set cgroups when they are disabled")
+	}
+
+	v, err := utils.RAMInBytes(value)
+	if err != nil {
+		return err
+	}
+	container.Cgroups.MemoryReservation = v
+	return nil
+}
+
 func memorySwap(container *libcontainer.Container, context interface{}, value string) error {
 	if container.Cgroups == nil {
 		return fmt.Errorf("cannot set cgroups when they are disabled")
@@ -83,38 +109,22 @@ func memorySwap(container *libcontainer.Container, context interface{}, value st
 	return nil
 }
 
 func addCap(container *libcontainer.Container, context interface{}, value string) error {
-	c := container.CapabilitiesMask.Get(value)
-	if c == nil {
-		return fmt.Errorf("%s is not a valid capability", value)
-	}
-	c.Enabled = true
+	container.CapabilitiesMask[value] = true
 	return nil
 }
 
 func dropCap(container *libcontainer.Container, context interface{}, value string) error {
-	c := container.CapabilitiesMask.Get(value)
-	if c == nil {
-		return fmt.Errorf("%s is not a valid capability", value)
-	}
-	c.Enabled = false
+	container.CapabilitiesMask[value] = false
 	return nil
 }
 
 func addNamespace(container *libcontainer.Container, context interface{}, value string) error {
-	ns := container.Namespaces.Get(value)
-	if ns == nil {
-		return fmt.Errorf("%s is not a valid namespace", value[1:])
-	}
-	ns.Enabled = true
+	container.Namespaces[value] = true
 	return nil
 }
 
 func dropNamespace(container *libcontainer.Container, context interface{}, value string) error {
-	ns := container.Namespaces.Get(value)
-	if ns == nil {
-		return fmt.Errorf("%s is not a valid namespace", value[1:])
-	}
-	ns.Enabled = false
+	container.Namespaces[value] = false
 	return nil
 }
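Note: ParseConfiguration walks key=value option strings and dispatches on the key via the actions map above. A stripped-down sketch of that dispatch loop (hypothetical standalone code mirroring the map shown in this hunk; the real converter is utils.RAMInBytes):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type Container struct{ MemoryReservation int64 }

type Action func(c *Container, value string) error

var actions = map[string]Action{
	"cgroups.memory_reservation": func(c *Container, v string) error {
		// Toy converter: only handles an "m" (mebibyte) suffix.
		n, err := strconv.ParseInt(strings.TrimSuffix(v, "m"), 10, 64)
		if err != nil {
			return err
		}
		c.MemoryReservation = n * 1024 * 1024
		return nil
	},
}

func parse(c *Container, opts []string) error {
	for _, opt := range opts {
		kv := strings.SplitN(opt, "=", 2)
		action, ok := actions[kv[0]]
		if !ok {
			return fmt.Errorf("%s is not a valid option", kv[0])
		}
		if err := action(c, kv[1]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var c Container
	if err := parse(&c, []string{"cgroups.memory_reservation=500m"}); err != nil {
		panic(err)
	}
	fmt.Println(c.MemoryReservation) // 524288000
}
```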
daemon/execdriver/native/configuration/parse_test.go

@@ -1,8 +1,9 @@
 package configuration
 
 import (
-	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 	"testing"
+
+	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 )
 
 func TestSetReadonlyRootFs(t *testing.T) {
@@ -38,10 +39,10 @@ func TestConfigurationsDoNotConflict(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if !container1.CapabilitiesMask.Get("NET_ADMIN").Enabled {
+	if !container1.CapabilitiesMask["NET_ADMIN"] {
 		t.Fatal("container one should have NET_ADMIN enabled")
 	}
-	if container2.CapabilitiesMask.Get("NET_ADMIN").Enabled {
+	if container2.CapabilitiesMask["NET_ADMIN"] {
 		t.Fatal("container two should not have NET_ADMIN enabled")
 	}
 }
@@ -93,7 +94,7 @@ func TestCpuShares(t *testing.T) {
 	}
 }
 
-func TestCgroupMemory(t *testing.T) {
+func TestMemory(t *testing.T) {
 	var (
 		container = template.New()
 		opts      = []string{
@@ -109,6 +110,22 @@ func TestCgroupMemory(t *testing.T) {
 	}
 }
 
+func TestMemoryReservation(t *testing.T) {
+	var (
+		container = template.New()
+		opts      = []string{
+			"cgroups.memory_reservation=500m",
+		}
+	)
+	if err := ParseConfiguration(container, nil, opts); err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := int64(500 * 1024 * 1024); container.Cgroups.MemoryReservation != expected {
+		t.Fatalf("expected memory reservation %d got %d", expected, container.Cgroups.MemoryReservation)
+	}
+}
+
 func TestAddCap(t *testing.T) {
 	var (
 		container = template.New()
@@ -121,10 +138,10 @@ func TestAddCap(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if !container.CapabilitiesMask.Get("MKNOD").Enabled {
+	if !container.CapabilitiesMask["MKNOD"] {
 		t.Fatal("container should have MKNOD enabled")
 	}
-	if !container.CapabilitiesMask.Get("SYS_ADMIN").Enabled {
+	if !container.CapabilitiesMask["SYS_ADMIN"] {
 		t.Fatal("container should have SYS_ADMIN enabled")
 	}
 }
@@ -137,14 +154,14 @@ func TestDropCap(t *testing.T) {
 		}
 	)
 	// enabled all caps like in privileged mode
-	for _, c := range container.CapabilitiesMask {
-		c.Enabled = true
+	for key := range container.CapabilitiesMask {
+		container.CapabilitiesMask[key] = true
 	}
 	if err := ParseConfiguration(container, nil, opts); err != nil {
 		t.Fatal(err)
 	}
 
-	if container.CapabilitiesMask.Get("MKNOD").Enabled {
+	if container.CapabilitiesMask["MKNOD"] {
 		t.Fatal("container should not have MKNOD enabled")
 	}
 }
@@ -160,7 +177,7 @@ func TestDropNamespace(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if container.Namespaces.Get("NEWNET").Enabled {
+	if container.Namespaces["NEWNET"] {
 		t.Fatal("container should not have NEWNET enabled")
 	}
 }
daemon/execdriver/native/create.go

@@ -3,12 +3,12 @@ package native
 import (
 	"fmt"
 	"os"
+	"path/filepath"
 
 	"github.com/dotcloud/docker/daemon/execdriver"
 	"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
 	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 	"github.com/dotcloud/docker/pkg/apparmor"
-	"github.com/dotcloud/docker/pkg/label"
 	"github.com/dotcloud/docker/pkg/libcontainer"
 )
 
@@ -25,6 +25,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
 	container.Cgroups.Name = c.ID
 	// check to see if we are running in ramdisk to disable pivot root
 	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+	container.Context["restrictions"] = "true"
 
 	if err := d.createNetwork(container, c); err != nil {
 		return nil, err
@@ -33,6 +34,8 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
 		if err := d.setPrivileged(container); err != nil {
 			return nil, err
 		}
+	} else {
+		container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "devtmpfs"})
 	}
 	if err := d.setupCgroups(container, c); err != nil {
 		return nil, err
@@ -50,6 +53,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
 }
 
 func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error {
+	if c.Network.HostNetworking {
+		container.Namespaces["NEWNET"] = false
+		return nil
+	}
 	container.Networks = []*libcontainer.Network{
 		{
 			Mtu: c.Network.Mtu,
@@ -73,14 +80,31 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.
 		}
 		container.Networks = append(container.Networks, &vethNetwork)
 	}
+
+	if c.Network.ContainerID != "" {
+		cmd := d.activeContainers[c.Network.ContainerID]
+		if cmd == nil || cmd.Process == nil {
+			return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
+		}
+		nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
+		container.Networks = append(container.Networks, &libcontainer.Network{
+			Type: "netns",
+			Context: libcontainer.Context{
+				"nspath": nspath,
+			},
+		})
+	}
 	return nil
 }
 
 func (d *driver) setPrivileged(container *libcontainer.Container) error {
-	for _, c := range container.CapabilitiesMask {
-		c.Enabled = true
+	for key := range container.CapabilitiesMask {
+		container.CapabilitiesMask[key] = true
 	}
 	container.Cgroups.DeviceAccess = true
+
+	delete(container.Context, "restrictions")
+
 	if apparmor.IsEnabled() {
 		container.Context["apparmor_profile"] = "unconfined"
 	}
@@ -91,6 +115,7 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C
 	if c.Resources != nil {
 		container.Cgroups.CpuShares = c.Resources.CpuShares
 		container.Cgroups.Memory = c.Resources.Memory
+		container.Cgroups.MemoryReservation = c.Resources.Memory
 		container.Cgroups.MemorySwap = c.Resources.MemorySwap
 	}
 	return nil
@@ -98,20 +123,19 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C
 
 func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error {
 	for _, m := range c.Mounts {
-		container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private})
+		container.Mounts = append(container.Mounts, libcontainer.Mount{
+			Type:        "bind",
+			Source:      m.Source,
+			Destination: m.Destination,
+			Writable:    m.Writable,
+			Private:     m.Private,
+		})
 	}
 	return nil
 }
 
 func (d *driver) setupLabels(container *libcontainer.Container, c *execdriver.Command) error {
-	labels := c.Config["label"]
-	if len(labels) > 0 {
-		process, mount, err := label.GenLabels(labels[0])
-		if err != nil {
-			return err
-		}
-		container.Context["mount_label"] = mount
-		container.Context["process_label"] = process
-	}
+	container.Context["process_label"] = c.Config["process_label"][0]
+	container.Context["mount_label"] = c.Config["mount_label"][0]
 	return nil
 }
daemon/execdriver/native/driver.go

@@ -3,9 +3,7 @@ package native
 import (
 	"encoding/json"
 	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -23,16 +21,13 @@ import (
 
 const (
 	DriverName                = "native"
-	Version                   = "0.1"
+	Version                   = "0.2"
 	BackupApparmorProfilePath = "apparmor/docker.back" // relative to docker root
 )
 
 func init() {
 	execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
-		var (
-			container *libcontainer.Container
-			ns        = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, createLogger(""))
-		)
+		var container *libcontainer.Container
 		f, err := os.Open(filepath.Join(args.Root, "container.json"))
 		if err != nil {
 			return err
@@ -43,7 +38,7 @@ func init() {
 		}
 		f.Close()
 
-		cwd, err := os.Getwd()
+		rootfs, err := os.Getwd()
 		if err != nil {
 			return err
 		}
@@ -51,7 +46,7 @@ func init() {
 		if err != nil {
 			return err
 		}
-		if err := ns.Init(container, cwd, args.Console, syncPipe, args.Args); err != nil {
+		if err := nsinit.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil {
 			return err
 		}
 		return nil
@@ -88,35 +83,49 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	d.activeContainers[c.ID] = &c.Cmd
 
 	var (
-		term        nsinit.Terminal
-		factory     = &dockerCommandFactory{c: c, driver: d}
-		stateWriter = &dockerStateWriter{
-			callback: startCallback,
-			c:        c,
-			dsw:      &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)},
-		}
-		ns   = nsinit.NewNsInit(factory, stateWriter, createLogger(os.Getenv("DEBUG")))
-		args = append([]string{c.Entrypoint}, c.Arguments...)
+		dataPath = filepath.Join(d.root, c.ID)
+		args     = append([]string{c.Entrypoint}, c.Arguments...)
 	)
 	if err := d.createContainerRoot(c.ID); err != nil {
 		return -1, err
 	}
 	defer d.removeContainerRoot(c.ID)
 
-	if c.Tty {
-		term = &dockerTtyTerm{
-			pipes: pipes,
-		}
-	} else {
-		term = &dockerStdTerm{
-			pipes: pipes,
-		}
-	}
-	c.Terminal = term
 	if err := d.writeContainerFile(container, c.ID); err != nil {
 		return -1, err
 	}
-	return ns.Exec(container, term, args)
+
+	term := getTerminal(c, pipes)
+
+	return nsinit.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
+		// we need to join the rootfs because nsinit will setup the rootfs and chroot
+		initPath := filepath.Join(c.Rootfs, c.InitPath)
+
+		c.Path = d.initPath
+		c.Args = append([]string{
+			initPath,
+			"-driver", DriverName,
+			"-console", console,
+			"-pipe", "3",
+			"-root", filepath.Join(d.root, c.ID),
+			"--",
+		}, args...)
+
+		// set this to nil so that when we set the clone flags anything else is reset
+		c.SysProcAttr = nil
+		system.SetCloneFlags(&c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces)))
+		c.ExtraFiles = []*os.File{child}
+
+		c.Env = container.Env
+		c.Dir = c.Rootfs
+
+		return &c.Cmd
+	}, func() {
+		if startCallback != nil {
+			c.ContainerPid = c.Process.Pid
+			startCallback(c)
+		}
+	})
}
 
 func (d *driver) Kill(p *execdriver.Command, sig int) error {
@@ -229,65 +238,17 @@ func getEnv(key string, env []string) string {
 	return ""
 }
 
-type dockerCommandFactory struct {
-	c      *execdriver.Command
-	driver *driver
-}
-
-// createCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces
-// defined on the container's configuration and use the current binary as the init with the
-// args provided
-func (d *dockerCommandFactory) Create(container *libcontainer.Container, console string, syncFile *os.File, args []string) *exec.Cmd {
-	// we need to join the rootfs because nsinit will setup the rootfs and chroot
-	initPath := filepath.Join(d.c.Rootfs, d.c.InitPath)
-
-	d.c.Path = d.driver.initPath
-	d.c.Args = append([]string{
-		initPath,
-		"-driver", DriverName,
-		"-console", console,
-		"-pipe", "3",
-		"-root", filepath.Join(d.driver.root, d.c.ID),
-		"--",
-	}, args...)
-
-	// set this to nil so that when we set the clone flags anything else is reset
-	d.c.SysProcAttr = nil
-	system.SetCloneFlags(&d.c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces)))
-	d.c.ExtraFiles = []*os.File{syncFile}
-
-	d.c.Env = container.Env
-	d.c.Dir = d.c.Rootfs
-
-	return &d.c.Cmd
-}
-
-type dockerStateWriter struct {
-	dsw      nsinit.StateWriter
-	c        *execdriver.Command
-	callback execdriver.StartCallback
-}
-
-func (d *dockerStateWriter) WritePid(pid int, started string) error {
-	d.c.ContainerPid = pid
-	err := d.dsw.WritePid(pid, started)
-	if d.callback != nil {
-		d.callback(d.c)
-	}
-	return err
-}
-
-func (d *dockerStateWriter) DeletePid() error {
-	return d.dsw.DeletePid()
-}
-
-func createLogger(debug string) *log.Logger {
-	var w io.Writer
-	// if we are in debug mode set the logger to stderr
-	if debug != "" {
-		w = os.Stderr
+func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) nsinit.Terminal {
+	var term nsinit.Terminal
+	if c.Tty {
+		term = &dockerTtyTerm{
+			pipes: pipes,
+		}
 	} else {
-		w = ioutil.Discard
+		term = &dockerStdTerm{
+			pipes: pipes,
+		}
 	}
-	return log.New(w, "[libcontainer] ", log.LstdFlags)
+	c.Terminal = term
+	return term
 }
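Note: the Run rework above replaces the dockerCommandFactory/dockerStateWriter types with two closures passed straight into nsinit.Exec: one builds the exec.Cmd, the other fires the start callback. A minimal sketch of that inversion-of-control shape (generic toy, not the nsinit API):

```go
package main

import "fmt"

// exec runs a "container", asking the caller to build the command and
// notifying it once started - the same shape as the new nsinit.Exec call.
func exec(createCommand func(args []string) string, started func()) int {
	cmd := createCommand([]string{"/bin/sh"})
	fmt.Println("running:", cmd)
	started()
	return 0
}

func main() {
	exitCode := exec(
		func(args []string) string { return "dockerinit -- " + args[0] },
		func() { fmt.Println("start callback fired") },
	)
	fmt.Println("exit:", exitCode)
}
```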
daemon/execdriver/native/template/default_template.go

@@ -9,29 +9,30 @@ import (
 // New returns the docker default configuration for libcontainer
 func New() *libcontainer.Container {
 	container := &libcontainer.Container{
-		CapabilitiesMask: libcontainer.Capabilities{
-			libcontainer.GetCapability("SETPCAP"),
-			libcontainer.GetCapability("SYS_MODULE"),
-			libcontainer.GetCapability("SYS_RAWIO"),
-			libcontainer.GetCapability("SYS_PACCT"),
-			libcontainer.GetCapability("SYS_ADMIN"),
-			libcontainer.GetCapability("SYS_NICE"),
-			libcontainer.GetCapability("SYS_RESOURCE"),
-			libcontainer.GetCapability("SYS_TIME"),
-			libcontainer.GetCapability("SYS_TTY_CONFIG"),
-			libcontainer.GetCapability("AUDIT_WRITE"),
-			libcontainer.GetCapability("AUDIT_CONTROL"),
-			libcontainer.GetCapability("MAC_OVERRIDE"),
-			libcontainer.GetCapability("MAC_ADMIN"),
-			libcontainer.GetCapability("NET_ADMIN"),
-			libcontainer.GetCapability("MKNOD"),
+		CapabilitiesMask: map[string]bool{
+			"SETPCAP":        false,
+			"SYS_MODULE":     false,
+			"SYS_RAWIO":      false,
+			"SYS_PACCT":      false,
+			"SYS_ADMIN":      false,
+			"SYS_NICE":       false,
+			"SYS_RESOURCE":   false,
+			"SYS_TIME":       false,
+			"SYS_TTY_CONFIG": false,
+			"AUDIT_WRITE":    false,
+			"AUDIT_CONTROL":  false,
+			"MAC_OVERRIDE":   false,
+			"MAC_ADMIN":      false,
+			"NET_ADMIN":      false,
+			"MKNOD":          true,
+			"SYSLOG":         false,
 		},
-		Namespaces: libcontainer.Namespaces{
-			libcontainer.GetNamespace("NEWNS"),
-			libcontainer.GetNamespace("NEWUTS"),
-			libcontainer.GetNamespace("NEWIPC"),
-			libcontainer.GetNamespace("NEWPID"),
-			libcontainer.GetNamespace("NEWNET"),
+		Namespaces: map[string]bool{
+			"NEWNS":  true,
+			"NEWUTS": true,
+			"NEWIPC": true,
+			"NEWPID": true,
+			"NEWNET": true,
 		},
 		Cgroups: &cgroups.Cgroup{
 			Parent: "docker",
@@ -39,7 +40,6 @@ func New() *libcontainer.Container {
 		},
 		Context: libcontainer.Context{},
 	}
-	container.CapabilitiesMask.Get("MKNOD").Enabled = true
 	if apparmor.IsEnabled() {
 		container.Context["apparmor_profile"] = "docker-default"
 	}
daemon/graphdriver/aufs/aufs.go

@@ -25,6 +25,7 @@ import (
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/dotcloud/docker/pkg/label"
 	mountpk "github.com/dotcloud/docker/pkg/mount"
 	"github.com/dotcloud/docker/utils"
 	"os"
@@ -134,7 +135,7 @@ func (a Driver) Exists(id string) bool {
 
 // Three folders are created for each id
 // mnt, layers, and diff
-func (a *Driver) Create(id, parent string, mountLabel string) error {
+func (a *Driver) Create(id, parent string) error {
 	if err := a.createDirsFor(id); err != nil {
 		return err
 	}
@@ -218,7 +219,7 @@ func (a *Driver) Remove(id string) error {
 
 // Return the rootfs path for the id
 // This will mount the dir at it's given path
-func (a *Driver) Get(id string) (string, error) {
+func (a *Driver) Get(id, mountLabel string) (string, error) {
 	ids, err := getParentIds(a.rootPath(), id)
 	if err != nil {
 		if !os.IsNotExist(err) {
@@ -240,7 +241,7 @@ func (a *Driver) Get(id string) (string, error) {
 		out = path.Join(a.rootPath(), "mnt", id)
 
 		if count == 0 {
-			if err := a.mount(id); err != nil {
+			if err := a.mount(id, mountLabel); err != nil {
 				return "", err
 			}
 		}
@@ -309,7 +310,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
 	return layers, nil
 }
 
-func (a *Driver) mount(id string) error {
+func (a *Driver) mount(id, mountLabel string) error {
 	// If the id is mounted or we get an error return
 	if mounted, err := a.mounted(id); err != nil || mounted {
 		return err
@@ -325,7 +326,7 @@ func (a *Driver) mount(id string) error {
 		return err
 	}
 
-	if err := a.aufsMount(layers, rw, target); err != nil {
+	if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
 		return err
 	}
 	return nil
@@ -358,21 +359,21 @@ func (a *Driver) Cleanup() error {
 	return nil
 }
 
-func (a *Driver) aufsMount(ro []string, rw, target string) (err error) {
+func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
 	defer func() {
 		if err != nil {
 			Unmount(target)
 		}
 	}()
 
-	if err = a.tryMount(ro, rw, target); err != nil {
-		if err = a.mountRw(rw, target); err != nil {
+	if err = a.tryMount(ro, rw, target, mountLabel); err != nil {
+		if err = a.mountRw(rw, target, mountLabel); err != nil {
 			return
 		}
 
 		for _, layer := range ro {
-			branch := fmt.Sprintf("append:%s=ro+wh", layer)
-			if err = mount("none", target, "aufs", MsRemount, branch); err != nil {
+			data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel)
+			if err = mount("none", target, "aufs", MsRemount, data); err != nil {
 				return
 			}
 		}
@@ -382,16 +383,18 @@ func (a *Driver) aufsMount(ro []string, rw, target string) (err error) {
 
 // Try to mount using the aufs fast path, if this fails then
 // append ro layers.
-func (a *Driver) tryMount(ro []string, rw, target string) (err error) {
+func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) {
 	var (
 		rwBranch   = fmt.Sprintf("%s=rw", rw)
 		roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:"))
+		data       = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel)
 	)
-	return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches))
+	return mount("none", target, "aufs", 0, data)
 }
 
-func (a *Driver) mountRw(rw, target string) error {
-	return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw))
+func (a *Driver) mountRw(rw, target, mountLabel string) error {
+	data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel)
	return mount("none", target, "aufs", 0, data)
 }
 
 func rollbackMount(target string, err error) {
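Note: each aufs mount now routes its data string through label.FormatMountLabel so an SELinux context option can be appended. A sketch of what that helper plausibly produces (assumed output format; the real implementation lives in pkg/label):

```go
package main

import "fmt"

// formatMountLabel mimics pkg/label.FormatMountLabel: append an SELinux
// context mount option to existing mount data when a label is set.
func formatMountLabel(src, mountLabel string) string {
	if mountLabel == "" {
		return src
	}
	if src == "" {
		return fmt.Sprintf("context=%q", mountLabel)
	}
	return fmt.Sprintf("%s,context=%q", src, mountLabel)
}

func main() {
	fmt.Println(formatMountLabel("br:/rw=rw", "system_u:object_r:svirt_sandbox_file_t:s0"))
	// br:/rw=rw,context="system_u:object_r:svirt_sandbox_file_t:s0"
}
```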
daemon/graphdriver/aufs/aufs_test.go

@@ -90,7 +90,7 @@ func TestCreateNewDir(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -99,7 +99,7 @@ func TestCreateNewDirStructure(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
@@ -120,7 +120,7 @@ func TestRemoveImage(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
@@ -145,11 +145,11 @@ func TestGetWithoutParent(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1")
+	diffPath, err := d.Get("1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -172,7 +172,7 @@ func TestCleanupWithDir(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
@@ -185,7 +185,7 @@ func TestMountedFalseResponse(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
@@ -204,14 +204,14 @@ func TestMountedTrueReponse(t *testing.T) {
 	defer os.RemoveAll(tmp)
 	defer d.Cleanup()
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
-	if err := d.Create("2", "1", ""); err != nil {
+	if err := d.Create("2", "1"); err != nil {
 		t.Fatal(err)
 	}
 
-	_, err := d.Get("2")
+	_, err := d.Get("2", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -230,10 +230,10 @@ func TestMountWithParent(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
-	if err := d.Create("2", "1", ""); err != nil {
+	if err := d.Create("2", "1"); err != nil {
 		t.Fatal(err)
 	}
 
@@ -243,7 +243,7 @@ func TestMountWithParent(t *testing.T) {
 		}
 	}()
 
-	mntPath, err := d.Get("2")
+	mntPath, err := d.Get("2", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -261,10 +261,10 @@ func TestRemoveMountedDir(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
-	if err := d.Create("2", "1", ""); err != nil {
+	if err := d.Create("2", "1"); err != nil {
 		t.Fatal(err)
 	}
 
@@ -274,7 +274,7 @@ func TestRemoveMountedDir(t *testing.T) {
 		}
 	}()
 
-	mntPath, err := d.Get("2")
+	mntPath, err := d.Get("2", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -300,7 +300,7 @@ func TestCreateWithInvalidParent(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "docker", ""); err == nil {
+	if err := d.Create("1", "docker"); err == nil {
 		t.Fatalf("Error should not be nil with parent does not exist")
 	}
 }
@@ -309,11 +309,11 @@ func TestGetDiff(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1")
+	diffPath, err := d.Get("1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -343,10 +343,10 @@ func TestChanges(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
-	if err := d.Create("2", "1", ""); err != nil {
+	if err := d.Create("2", "1"); err != nil {
 		t.Fatal(err)
 	}
 
@@ -356,7 +356,7 @@ func TestChanges(t *testing.T) {
 		}
 	}()
 
-	mntPoint, err := d.Get("2")
+	mntPoint, err := d.Get("2", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -392,10 +392,10 @@ func TestChanges(t *testing.T) {
 		t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind)
 	}
 
-	if err := d.Create("3", "2", ""); err != nil {
+	if err := d.Create("3", "2"); err != nil {
 		t.Fatal(err)
 	}
-	mntPoint, err = d.Get("3")
+	mntPoint, err = d.Get("3", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -437,11 +437,11 @@ func TestDiffSize(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1")
+	diffPath, err := d.Get("1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -479,11 +479,11 @@ func TestChildDiffSize(t *testing.T) {
 	defer os.RemoveAll(tmp)
 	defer d.Cleanup()
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1")
+	diffPath, err := d.Get("1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -515,7 +515,7 @@ func TestChildDiffSize(t *testing.T) {
 		t.Fatalf("Expected size to be %d got %d", size, diffSize)
 	}
 
-	if err := d.Create("2", "1", ""); err != nil {
+	if err := d.Create("2", "1"); err != nil {
 		t.Fatal(err)
 	}
 
@@ -534,7 +534,7 @@ func TestExists(t *testing.T) {
 	defer os.RemoveAll(tmp)
 	defer d.Cleanup()
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
@@ -552,7 +552,7 @@ func TestStatus(t *testing.T) {
 	defer os.RemoveAll(tmp)
 	defer d.Cleanup()
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
@@ -581,11 +581,11 @@ func TestApplyDiff(t *testing.T) {
 	defer os.RemoveAll(tmp)
 	defer d.Cleanup()
 
-	if err := d.Create("1", "", ""); err != nil {
+	if err := d.Create("1", ""); err != nil {
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1")
+	diffPath, err := d.Get("1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -607,10 +607,10 @@ func TestApplyDiff(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if err := d.Create("2", "", ""); err != nil {
+	if err := d.Create("2", ""); err != nil {
 		t.Fatal(err)
 	}
-	if err := d.Create("3", "2", ""); err != nil {
+	if err := d.Create("3", "2"); err != nil {
 		t.Fatal(err)
 	}
 
@@ -620,7 +620,7 @@ func TestApplyDiff(t *testing.T) {
 
 	// Ensure that the file is in the mount point for id 3
 
-	mountPoint, err := d.Get("3")
+	mountPoint, err := d.Get("3", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -656,11 +656,11 @@ func TestMountMoreThan42Layers(t *testing.T) {
 		}
 		current = hash(current)
 
-		if err := d.Create(current, parent, ""); err != nil {
+		if err := d.Create(current, parent); err != nil {
 			t.Logf("Current layer %d", i)
 			t.Fatal(err)
 		}
-		point, err := d.Get(current)
+		point, err := d.Get(current, "")
 		if err != nil {
 			t.Logf("Current layer %d", i)
 			t.Fatal(err)
@@ -683,7 +683,7 @@ func TestMountMoreThan42Layers(t *testing.T) {
 	}
 
 	// Perform the actual mount for the top most image
-	point, err := d.Get(last)
+	point, err := d.Get(last, "")
 	if err != nil {
 		t.Fatal(err)
 	}
daemon/graphdriver/aufs/migrate.go

@@ -77,11 +77,11 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e
 	}
 
 	initID := fmt.Sprintf("%s-init", id)
-	if err := a.Create(initID, metadata.Image, ""); err != nil {
+	if err := a.Create(initID, metadata.Image); err != nil {
 		return err
 	}
 
-	initPath, err := a.Get(initID)
+	initPath, err := a.Get(initID, "")
 	if err != nil {
 		return err
 	}
@@ -90,7 +90,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e
 		return err
 	}
 
-	if err := a.Create(id, initID, ""); err != nil {
+	if err := a.Create(id, initID); err != nil {
 		return err
 	}
 }
@@ -144,7 +144,7 @@ func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool)
 		return err
 	}
 	if !a.Exists(m.ID) {
-		if err := a.Create(m.ID, m.ParentID, ""); err != nil {
+		if err := a.Create(m.ID, m.ParentID); err != nil {
 			return err
 		}
 	}
daemon/graphdriver/btrfs/MAINTAINERS (new file)

@@ -0,0 +1 @@
+Alexander Larsson <alexl@redhat.com> (@alexlarsson)
daemon/graphdriver/btrfs/btrfs.go

@@ -80,7 +80,7 @@ func getDirFd(dir *C.DIR) uintptr {
 	return uintptr(C.dirfd(dir))
 }
 
-func subvolCreate(path, name string, mountLabel string) error {
+func subvolCreate(path, name string) error {
 	dir, err := openDir(path)
 	if err != nil {
 		return err
@@ -155,17 +155,17 @@ func (d *Driver) subvolumesDirId(id string) string {
 	return path.Join(d.subvolumesDir(), id)
 }
 
-func (d *Driver) Create(id string, parent string, mountLabel string) error {
+func (d *Driver) Create(id string, parent string) error {
 	subvolumes := path.Join(d.home, "subvolumes")
 	if err := os.MkdirAll(subvolumes, 0700); err != nil {
 		return err
 	}
 	if parent == "" {
-		if err := subvolCreate(subvolumes, id, mountLabel); err != nil {
+		if err := subvolCreate(subvolumes, id); err != nil {
 			return err
 		}
 	} else {
-		parentDir, err := d.Get(parent)
+		parentDir, err := d.Get(parent, "")
 		if err != nil {
 			return err
 		}
@@ -187,7 +187,7 @@ func (d *Driver) Remove(id string) error {
 	return os.RemoveAll(dir)
 }
 
-func (d *Driver) Get(id string) (string, error) {
+func (d *Driver) Get(id, mountLabel string) (string, error) {
 	dir := d.subvolumesDirId(id)
 	st, err := os.Stat(dir)
 	if err != nil {
daemon/graphdriver/devmapper/MAINTAINERS (new file)

@@ -0,0 +1 @@
+Alexander Larsson <alexl@redhat.com> (@alexlarsson)
daemon/graphdriver/devmapper/deviceset.go

@@ -6,8 +6,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/dotcloud/docker/pkg/label"
-	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
 	"path"
@@ -17,6 +15,9 @@ import (
 	"sync"
 	"syscall"
 	"time"
+
+	"github.com/dotcloud/docker/pkg/label"
+	"github.com/dotcloud/docker/utils"
 )
 
 var (
@@ -35,12 +36,6 @@ type DevInfo struct {
 
 	mountCount int    `json:"-"`
 	mountPath  string `json:"-"`
-	// A floating mount means one reference is not owned and
-	// will be stolen by the next mount. This allows us to
-	// avoid unmounting directly after creation before the
-	// first get (since we need to mount to set up the device
-	// a bit first).
-	floating bool `json:"-"`
 
 	// The global DeviceSet lock guarantees that we serialize all
 	// the calls to libdevmapper (which is not threadsafe), but we
@@ -94,14 +89,6 @@ type DevStatus struct {
 	HighestMappedSector uint64
 }
 
-type UnmountMode int
-
-const (
-	UnmountRegular UnmountMode = iota
-	UnmountFloat
-	UnmountSink
-)
-
 func getDevName(name string) string {
 	return "/dev/mapper/" + name
 }
@@ -859,7 +846,7 @@ func (devices *DeviceSet) Shutdown() error {
 	return nil
 }
 
-func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) error {
+func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 	info, err := devices.lookupDevice(hash)
 	if err != nil {
 		return err
@@ -876,12 +863,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro
 		return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path)
 	}
 
-	if info.floating {
-		// Steal floating ref
-		info.floating = false
-	} else {
-		info.mountCount++
-	}
+	info.mountCount++
 	return nil
 }
 
@@ -894,7 +876,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro
 	mountOptions := label.FormatMountLabel("discard", mountLabel)
 	err = sysMount(info.DevName(), path, "ext4", flags, mountOptions)
 	if err != nil && err == sysEInval {
-		mountOptions = label.FormatMountLabel(mountLabel, "")
+		mountOptions = label.FormatMountLabel("", mountLabel)
 		err = sysMount(info.DevName(), path, "ext4", flags, mountOptions)
 	}
 	if err != nil {
@@ -903,13 +885,12 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro
 
 	info.mountCount = 1
 	info.mountPath = path
-	info.floating = false
 
 	return devices.setInitialized(info)
 }
 
-func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error {
-	utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode)
+func (devices *DeviceSet) UnmountDevice(hash string) error {
+	utils.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
 	defer utils.Debugf("[devmapper] UnmountDevice END")
 
 	info, err := devices.lookupDevice(hash)
@@ -923,24 +904,6 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error {
 	devices.Lock()
 	defer devices.Unlock()
 
-	if mode == UnmountFloat {
-		if info.floating {
-			return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash)
-		}
-
-		// Leave this reference floating
-		info.floating = true
-		return nil
-	}
-
-	if mode == UnmountSink {
-		if !info.floating {
-			// Someone already sunk this
-			return nil
-		}
-		// Otherwise, treat this as a regular unmount
-	}
-
 	if info.mountCount == 0 {
 		return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash)
 	}
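Note: with the floating-reference machinery gone, MountDevice/UnmountDevice reduce to plain reference counting. A compact sketch of that invariant (illustrative only, no devmapper calls):

```go
package main

import "fmt"

type device struct {
	mountCount int
	mountPath  string
}

// mount increments the refcount; only the first call really mounts.
func (d *device) mount(path string) {
	if d.mountCount > 0 {
		d.mountCount++
		return
	}
	d.mountPath = path // the real code calls sysMount here
	d.mountCount = 1
}

// unmount decrements; only the last call really unmounts.
func (d *device) unmount() {
	d.mountCount--
	if d.mountCount == 0 {
		d.mountPath = "" // the real code calls sysUnmount here
	}
}

func main() {
	var d device
	d.mount("/mnt/1")
	d.mount("/mnt/1")
	d.unmount()
	fmt.Println(d.mountCount, d.mountPath) // 1 /mnt/1
	d.unmount()
	fmt.Println(d.mountCount, d.mountPath) // 0
}
```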
@@ -4,11 +4,12 @@ package devmapper

import (
	"fmt"
	"github.com/dotcloud/docker/daemon/graphdriver"
	"github.com/dotcloud/docker/utils"
	"io/ioutil"
	"os"
	"path"

	"github.com/dotcloud/docker/daemon/graphdriver"
	"github.com/dotcloud/docker/utils"
)

func init() {

@@ -60,30 +61,10 @@ func (d *Driver) Cleanup() error {
	return d.DeviceSet.Shutdown()
}

func (d *Driver) Create(id, parent string, mountLabel string) error {
func (d *Driver) Create(id, parent string) error {
	if err := d.DeviceSet.AddDevice(id, parent); err != nil {
		return err
	}
	mp := path.Join(d.home, "mnt", id)
	if err := d.mount(id, mp); err != nil {
		return err
	}

	if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) {
		return err
	}

	// Create an "id" file with the container/image id in it to help reconscruct this in case
	// of later problems
	if err := ioutil.WriteFile(path.Join(mp, "id"), []byte(id), 0600); err != nil {
		return err
	}

	// We float this reference so that the next Get call can
	// steal it, so we don't have to unmount
	if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil {
		return err
	}

	return nil
}

@@ -96,10 +77,6 @@ func (d *Driver) Remove(id string) error {
	return nil
}

	// Sink the float from create in case no Get() call was made
	if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil {
		return err
	}
	// This assumes the device has been properly Get/Put:ed and thus is unmounted
	if err := d.DeviceSet.DeleteDevice(id); err != nil {
		return err

@@ -113,30 +90,44 @@ func (d *Driver) Remove(id string) error {
	return nil
}

func (d *Driver) Get(id string) (string, error) {
func (d *Driver) Get(id, mountLabel string) (string, error) {
	mp := path.Join(d.home, "mnt", id)
	if err := d.mount(id, mp); err != nil {

	// Create the target directories if they don't exist
	if err := osMkdirAll(mp, 0755); err != nil && !osIsExist(err) {
		return "", err
	}

	return path.Join(mp, "rootfs"), nil
	// Mount the device
	if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
		return "", err
	}

	rootFs := path.Join(mp, "rootfs")
	if err := osMkdirAll(rootFs, 0755); err != nil && !osIsExist(err) {
		d.DeviceSet.UnmountDevice(id)
		return "", err
	}

	idFile := path.Join(mp, "id")
	if _, err := osStat(idFile); err != nil && osIsNotExist(err) {
		// Create an "id" file with the container/image id in it to help reconscruct this in case
		// of later problems
		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
			d.DeviceSet.UnmountDevice(id)
			return "", err
		}
	}

	return rootFs, nil
}

func (d *Driver) Put(id string) {
	if err := d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil {
	if err := d.DeviceSet.UnmountDevice(id); err != nil {
		utils.Errorf("Warning: error unmounting device %s: %s\n", id, err)
	}
}

func (d *Driver) mount(id, mountPoint string) error {
	// Create the target directories if they don't exist
	if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) {
		return err
	}
	// Mount the device
	return d.DeviceSet.MountDevice(id, mountPoint, "")
}

func (d *Driver) Exists(id string) bool {
	return d.Devices[id] != nil
	return d.DeviceSet.HasDevice(id)
}

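Editor's note: the mountLabel that Create used to accept (and never use at mount time) now rides along with Get, where MountDevice actually mounts the device. A minimal sketch of where such a label typically ends up — encoded as an SELinux "context=" mount option. The fstype and flags below are illustrative assumptions, not what devmapper's MountDevice necessarily uses:

```go
package main

import (
	"fmt"
	"syscall"
)

// mountWithLabel sketches how a mountLabel usually reaches the kernel: as a
// "context=" SELinux mount option appended to the mount data string.
func mountWithLabel(device, target, fstype, mountLabel string) error {
	data := ""
	if mountLabel != "" {
		data = fmt.Sprintf("context=%q", mountLabel) // assumed encoding
	}
	return syscall.Mount(device, target, fstype, syscall.MS_MGC_VAL, data)
}

func main() {}
```
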
@@ -436,6 +436,12 @@ func TestDriverCreate(t *testing.T) {
		return nil
	}

	sysUnmount = func(target string, flag int) error {
		//calls["sysUnmount"] = true

		return nil
	}

	Mounted = func(mnt string) (bool, error) {
		calls["Mounted"] = true
		if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") {

@@ -494,21 +500,16 @@ func TestDriverCreate(t *testing.T) {
		"?ioctl.loopctlgetfree",
	)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}
	calls.Assert(t,
		"DmTaskCreate",
		"DmTaskGetInfo",
		"sysMount",
		"DmTaskRun",
		"DmTaskSetTarget",
		"DmTaskSetSector",
		"DmTaskSetCookie",
		"DmUdevWait",
		"DmTaskSetName",
		"DmTaskSetMessage",
		"DmTaskSetAddNode",
	)

	}()

@@ -547,7 +548,6 @@ func TestDriverRemove(t *testing.T) {
		return nil
	}
	sysUnmount = func(target string, flags int) (err error) {
		calls["sysUnmount"] = true
		// FIXME: compare the exact source and target strings (inodes + devname)
		if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
			t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)

@@ -612,22 +612,17 @@ func TestDriverRemove(t *testing.T) {
		"?ioctl.loopctlgetfree",
	)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	calls.Assert(t,
		"DmTaskCreate",
		"DmTaskGetInfo",
		"sysMount",
		"DmTaskRun",
		"DmTaskSetTarget",
		"DmTaskSetSector",
		"DmTaskSetCookie",
		"DmUdevWait",
		"DmTaskSetName",
		"DmTaskSetMessage",
		"DmTaskSetAddNode",
	)

	Mounted = func(mnt string) (bool, error) {

@@ -650,7 +645,6 @@ func TestDriverRemove(t *testing.T) {
		"DmTaskSetTarget",
		"DmTaskSetAddNode",
		"DmUdevWait",
		"sysUnmount",
	)
	}()
	runtime.GC()

@@ -668,21 +662,21 @@ func TestCleanup(t *testing.T) {

	mountPoints := make([]string, 2)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}
	// Mount the id
	p, err := d.Get("1")
	p, err := d.Get("1", "")
	if err != nil {
		t.Fatal(err)
	}
	mountPoints[0] = p

	if err := d.Create("2", "1", ""); err != nil {
	if err := d.Create("2", "1"); err != nil {
		t.Fatal(err)
	}

	p, err = d.Get("2")
	p, err = d.Get("2", "")
	if err != nil {
		t.Fatal(err)
	}

@@ -731,7 +725,7 @@ func TestNotMounted(t *testing.T) {
	d := newDriver(t)
	defer cleanup(d)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

@@ -749,10 +743,10 @@ func TestMounted(t *testing.T) {
	d := newDriver(t)
	defer cleanup(d)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}
	if _, err := d.Get("1"); err != nil {
	if _, err := d.Get("1", ""); err != nil {
		t.Fatal(err)
	}

@@ -769,10 +763,10 @@ func TestInitCleanedDriver(t *testing.T) {
	t.Skip("FIXME: not a unit test")
	d := newDriver(t)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}
	if _, err := d.Get("1"); err != nil {
	if _, err := d.Get("1", ""); err != nil {
		t.Fatal(err)
	}

@@ -787,7 +781,7 @@ func TestInitCleanedDriver(t *testing.T) {
	d = driver.(*Driver)
	defer cleanup(d)

	if _, err := d.Get("1"); err != nil {
	if _, err := d.Get("1", ""); err != nil {
		t.Fatal(err)
	}
	}

@@ -797,16 +791,16 @@ func TestMountMountedDriver(t *testing.T) {
	d := newDriver(t)
	defer cleanup(d)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	// Perform get on same id to ensure that it will
	// not be mounted twice
	if _, err := d.Get("1"); err != nil {
	if _, err := d.Get("1", ""); err != nil {
		t.Fatal(err)
	}
	if _, err := d.Get("1"); err != nil {
	if _, err := d.Get("1", ""); err != nil {
		t.Fatal(err)
	}
	}

@@ -816,7 +810,7 @@ func TestGetReturnsValidDevice(t *testing.T) {
	d := newDriver(t)
	defer cleanup(d)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

@@ -824,7 +818,7 @@ func TestGetReturnsValidDevice(t *testing.T) {
		t.Fatalf("Expected id 1 to be in device set")
	}

	if _, err := d.Get("1"); err != nil {
	if _, err := d.Get("1", ""); err != nil {
		t.Fatal(err)
	}

@@ -844,11 +838,11 @@ func TestDriverGetSize(t *testing.T) {
	d := newDriver(t)
	defer cleanup(d)

	if err := d.Create("1", "", ""); err != nil {
	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	mountPoint, err := d.Get("1")
	mountPoint, err := d.Get("1", "")
	if err != nil {
		t.Fatal(err)
	}

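Editor's note: the tests above all share one mocking idiom: each stubbed syscall or libdevmapper entry point records its name in a calls map, and calls.Assert then checks the recorded set against the expected one. The helper type itself is not shown in this diff, so the standalone sketch below is an assumption about its shape:

```go
package devmapper

import "testing"

// Calls records which stubbed entry points were hit, mirroring the
// calls["Mounted"] = true / calls.Assert(t, ...) pattern used above.
type Calls map[string]bool

// Assert fails the test unless every expected call was recorded and the
// counts line up. Judging by entries like "?ioctl.loopctlgetfree", the real
// helper appears to also treat "?"-prefixed names as optional; that detail
// is omitted here.
func (c Calls) Assert(t *testing.T, expected ...string) {
	for _, name := range expected {
		if !c[name] {
			t.Fatalf("expected call %q was not made", name)
		}
	}
	if len(c) != len(expected) {
		t.Fatalf("%d calls recorded, %d expected", len(c), len(expected))
	}
}
```
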
@@ -13,10 +13,10 @@ type InitFunc func(root string) (Driver, error)
type Driver interface {
	String() string

	Create(id, parent string, mountLabel string) error
	Create(id, parent string) error
	Remove(id string) error

	Get(id string) (dir string, err error)
	Get(id, mountLabel string) (dir string, err error)
	Put(id string)
	Exists(id string) bool

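Editor's note: with mountLabel moved from Create to Get across the whole Driver interface, callers now decide the label at mount time rather than at creation time. A minimal consumer sketch against the interface above — the helper name and control flow are illustrative, not code from the repo:

```go
package graphdriver

// withRootfs drives the updated contract: Create no longer mounts anything,
// so every filesystem access is bracketed by a Get/Put pair carrying the
// container's mount label.
func withRootfs(d Driver, id, parent, mountLabel string, fn func(dir string) error) error {
	if !d.Exists(id) {
		if err := d.Create(id, parent); err != nil {
			return err
		}
	}
	dir, err := d.Get(id, mountLabel)
	if err != nil {
		return err
	}
	defer d.Put(id) // release/unmount once the caller is done
	return fn(dir)
}
```
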
@@ -42,7 +42,7 @@ func copyDir(src, dst string) error {
	return nil
}

func (d *Driver) Create(id string, parent string, mountLabel string) error {
func (d *Driver) Create(id, parent string) error {
	dir := d.dir(id)
	if err := os.MkdirAll(path.Dir(dir), 0700); err != nil {
		return err

@@ -53,7 +53,7 @@ func (d *Driver) Create(id string, parent string, mountLabel string) error {
	if parent == "" {
		return nil
	}
	parentDir, err := d.Get(parent)
	parentDir, err := d.Get(parent, "")
	if err != nil {
		return fmt.Errorf("%s: %s", parent, err)
	}

@@ -74,7 +74,7 @@ func (d *Driver) Remove(id string) error {
	return os.RemoveAll(d.dir(id))
}

func (d *Driver) Get(id string) (string, error) {
func (d *Driver) Get(id, mountLabel string) (string, error) {
	dir := d.dir(id)
	if st, err := os.Stat(dir); err != nil {
		return "", err

@@ -2,6 +2,11 @@ package bridge

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"strings"

	"github.com/dotcloud/docker/daemon/networkdriver"
	"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"

@@ -9,11 +14,8 @@ import (
	"github.com/dotcloud/docker/engine"
	"github.com/dotcloud/docker/pkg/iptables"
	"github.com/dotcloud/docker/pkg/netlink"
	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
	"github.com/dotcloud/docker/utils"
	"io/ioutil"
	"log"
	"net"
	"strings"
)

const (

@@ -97,8 +99,12 @@ func InitDriver(job *engine.Job) engine.Status {
		network = addr.(*net.IPNet)
		// validate that the bridge ip matches the ip specified by BridgeIP
		if bridgeIP != "" {
			if !network.IP.Equal(net.ParseIP(bridgeIP)) {
				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bridgeIP)
			bip, _, err := net.ParseCIDR(bridgeIP)
			if err != nil {
				return job.Error(err)
			}
			if !network.IP.Equal(bip) {
				return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip)
			}
		}
	}

@@ -218,13 +224,13 @@ func setupIPTables(addr net.Addr, icc bool) error {
// If it can't find an address which doesn't conflict, it will return an error.
func createBridge(bridgeIP string) error {
	nameservers := []string{}
	resolvConf, _ := utils.GetResolvConf()
	resolvConf, _ := resolvconf.Get()
	// we don't check for an error here, because we don't really care
	// if we can't read /etc/resolv.conf. So instead we skip the append
	// if resolvConf is nil. It either doesn't exist, or we can't read it
	// for some reason.
	if resolvConf != nil {
		nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...)
		nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...)
	}

	var ifaceAddr string

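Editor's note: the validation fix above matters because --bip takes a CIDR string: net.ParseIP cannot parse an address with a mask suffix and returns nil, so the old equality check could never succeed against an existing bridge. A small sketch of the difference (the sample address is illustrative):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	bridgeIP := "10.0.42.1/16" // illustrative --bip value

	// Old check: ParseIP chokes on the /16 suffix and yields nil,
	// so network.IP.Equal(...) against it was always false.
	fmt.Println(net.ParseIP(bridgeIP)) // <nil>

	// New check: split the host address out of the CIDR first.
	bip, ipnet, err := net.ParseCIDR(bridgeIP)
	if err != nil {
		panic(err)
	}
	fmt.Println(bip, ipnet) // 10.0.42.1 10.0.0.0/16
}
```
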
@@ -1,25 +0,0 @@
package daemon

import "sort"

type containerSorter struct {
	containers []*Container
	by func(i, j *Container) bool
}

func (s *containerSorter) Len() int {
	return len(s.containers)
}

func (s *containerSorter) Swap(i, j int) {
	s.containers[i], s.containers[j] = s.containers[j], s.containers[i]
}

func (s *containerSorter) Less(i, j int) bool {
	return s.by(s.containers[i], s.containers[j])
}

func sortContainers(containers []*Container, predicate func(i, j *Container) bool) {
	s := &containerSorter{containers, predicate}
	sort.Sort(s)
}
@@ -14,7 +14,6 @@ type State struct {
	ExitCode   int
	StartedAt  time.Time
	FinishedAt time.Time
	Ghost      bool
}

// String returns a human-readable description of the state

@@ -23,9 +22,6 @@ func (s *State) String() string {
	defer s.RUnlock()

	if s.Running {
		if s.Ghost {
			return fmt.Sprintf("Ghost")
		}
		return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
	}
	if s.FinishedAt.IsZero() {

@@ -41,13 +37,6 @@ func (s *State) IsRunning() bool {
	return s.Running
}

func (s *State) IsGhost() bool {
	s.RLock()
	defer s.RUnlock()

	return s.Ghost
}

func (s *State) GetExitCode() int {
	s.RLock()
	defer s.RUnlock()

@@ -55,19 +44,11 @@ func (s *State) GetExitCode() int {
	return s.ExitCode
}

func (s *State) SetGhost(val bool) {
	s.Lock()
	defer s.Unlock()

	s.Ghost = val
}

func (s *State) SetRunning(pid int) {
	s.Lock()
	defer s.Unlock()

	s.Running = true
	s.Ghost = false
	s.ExitCode = 0
	s.Pid = pid
	s.StartedAt = time.Now().UTC()

|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/archive"
|
||||
"github.com/dotcloud/docker/daemon/execdriver"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/dotcloud/docker/archive"
|
||||
"github.com/dotcloud/docker/daemon/execdriver"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
)
|
||||
|
||||
type BindMap struct {
|
||||
|
@ -34,14 +35,8 @@ func prepareVolumesForContainer(container *Container) error {
|
|||
}
|
||||
|
||||
func setupMountsForContainer(container *Container) error {
|
||||
envPath, err := container.EnvConfigPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mounts := []execdriver.Mount{
|
||||
{container.daemon.sysInitPath, "/.dockerinit", false, true},
|
||||
{envPath, "/.dockerenv", false, true},
|
||||
{container.ResolvConfPath, "/etc/resolv.conf", false, true},
|
||||
}
|
||||
|
||||
|
@ -204,7 +199,7 @@ func createVolumes(container *Container) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcPath, err = volumesDriver.Get(c.ID)
|
||||
srcPath, err = volumesDriver.Get(c.ID, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
|
||||
}
|
||||
|
@ -217,15 +212,26 @@ func createVolumes(container *Container) error {
|
|||
srcPath = p
|
||||
}
|
||||
|
||||
container.Volumes[volPath] = srcPath
|
||||
container.VolumesRW[volPath] = srcRW
|
||||
|
||||
// Create the mountpoint
|
||||
volPath = filepath.Join(container.basefs, volPath)
|
||||
rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs)
|
||||
rootVolPath, err := utils.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newVolPath, err := filepath.Rel(container.basefs, rootVolPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newVolPath = "/" + newVolPath
|
||||
|
||||
if volPath != newVolPath {
|
||||
delete(container.Volumes, volPath)
|
||||
delete(container.VolumesRW, volPath)
|
||||
}
|
||||
|
||||
container.Volumes[newVolPath] = srcPath
|
||||
container.VolumesRW[newVolPath] = srcRW
|
||||
|
||||
if err := createIfNotExists(rootVolPath, volIsDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -246,22 +252,22 @@ func createVolumes(container *Container) error {
|
|||
if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(rootVolPath, &stat); err != nil {
|
||||
return err
|
||||
}
|
||||
var srcStat syscall.Stat_t
|
||||
if err := syscall.Stat(srcPath, &srcStat); err != nil {
|
||||
return err
|
||||
}
|
||||
// Change the source volume's ownership if it differs from the root
|
||||
// files that were just copied
|
||||
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
|
||||
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(rootVolPath, &stat); err != nil {
|
||||
return err
|
||||
}
|
||||
var srcStat syscall.Stat_t
|
||||
if err := syscall.Stat(srcPath, &srcStat); err != nil {
|
||||
return err
|
||||
}
|
||||
// Change the source volume's ownership if it differs from the root
|
||||
// files that were just copied
|
||||
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
|
||||
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
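Editor's note: the reworked block above registers a volume under its canonical in-container path: the declared path is resolved inside the container root and then rebuilt relative to it, so a volume declared through a symlink no longer leaves a stale map entry. A sketch of the same shape, with filepath.EvalSymlinks standing in for utils.FollowSymlinkInScope (which, unlike this stand-in, also confines the resolution to basefs):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// canonicalVolPath resolves volPath inside basefs and re-derives the
// container-relative path, mirroring the FollowSymlinkInScope + filepath.Rel
// sequence above. EvalSymlinks is only an approximation: it does not stop a
// symlink from escaping basefs the way the real helper does.
func canonicalVolPath(basefs, volPath string) (string, error) {
	resolved, err := filepath.EvalSymlinks(filepath.Join(basefs, volPath))
	if err != nil {
		return "", err
	}
	rel, err := filepath.Rel(basefs, resolved)
	if err != nil {
		return "", err
	}
	return "/" + rel, nil
}

func main() {
	// Illustrative paths only.
	p, err := canonicalVolPath("/var/lib/docker/containers/abc/rootfs", "/data")
	fmt.Println(p, err) // e.g. "/data" when nothing is symlinked
}
```
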
@@ -29,6 +29,7 @@ type Config struct {
	Mtu                  int
	DisableNetwork       bool
	EnableSelinuxSupport bool
	Context              map[string][]string
}

// ConfigFromJob creates and returns a new DaemonConfig object

@@ -46,7 +47,7 @@ func ConfigFromJob(job *engine.Job) *Config {
		InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
		GraphDriver:                 job.Getenv("GraphDriver"),
		ExecDriver:                  job.Getenv("ExecDriver"),
		EnableSelinuxSupport:        false, // FIXME: hardcoded default to disable selinux for .10 release
		EnableSelinuxSupport:        job.GetenvBool("EnableSelinuxSupport"),
	}
	if dns := job.GetenvList("Dns"); dns != nil {
		config.Dns = dns

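Editor's note: EnableSelinuxSupport now flows from the --selinux-enabled flag through the engine job environment instead of being hardcoded off. A toy sketch of that round trip — the env type below is a stand-in for engine.Job, whose real storage and method set differ:

```go
package main

import (
	"fmt"
	"strconv"
)

// env stands in for the engine job environment; SetenvBool/GetenvBool in the
// diff round-trip booleans the same way in spirit, if not in representation.
type env map[string]string

func (e env) SetenvBool(key string, v bool) { e[key] = strconv.FormatBool(v) }
func (e env) GetenvBool(key string) bool    { v, _ := strconv.ParseBool(e[key]); return v }

func main() {
	e := env{}
	e.SetenvBool("EnableSelinuxSupport", true)        // set in docker.go from -selinux-enabled
	fmt.Println(e.GetenvBool("EnableSelinuxSupport")) // read back in ConfigFromJob
}
```
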
@@ -7,6 +7,7 @@ import (
	"io/ioutil"
	"log"
	"os"
	"runtime"
	"strings"

	"github.com/dotcloud/docker/api"

@@ -42,11 +43,11 @@ func main() {
		flDaemon      = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
		flDebug       = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
		flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers")
		bridgeName    = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
		bridgeName    = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
		bridgeIp      = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
		pidfile       = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
		flRoot        = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime")
		flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group")
		flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
		flEnableCors  = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
		flDns         = opts.NewListOpts(opts.ValidateIp4Address)
		flDnsSearch   = opts.NewListOpts(opts.ValidateDomain)

@@ -57,16 +58,17 @@ func main() {
		flGraphDriver    = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver")
		flExecDriver     = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver")
		flHosts          = opts.NewListOpts(api.ValidateHost)
		flMtu            = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available")
		flMtu            = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
		flTls            = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
		flTlsVerify      = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
		flCa             = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here")
		flCert           = flag.String([]string{"-tlscert"}, dockerConfDir+defaultCertFile, "Path to TLS certificate file")
		flKey            = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file")
		flSelinuxEnabled = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support")
	)
	flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers")
	flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
	flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified")
	flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")

	flag.Parse()

@@ -96,6 +98,10 @@ func main() {
	}

	if *flDaemon {
		if os.Geteuid() != 0 {
			log.Fatalf("The Docker daemon needs to be run as root")
		}

		if flag.NArg() != 0 {
			flag.Usage()
			return

@@ -120,13 +126,15 @@ func main() {
			log.Fatalf("Unable to get the full path to root (%s): %s", root, err)
		}
	}

	eng, err := engine.New(realRoot)
	if err != nil {
	if err := checkKernelAndArch(); err != nil {
		log.Fatal(err)
	}

	eng := engine.New()
	// Load builtins
	builtins.Register(eng)
	if err := builtins.Register(eng); err != nil {
		log.Fatal(err)
	}
	// load the daemon in the background so we can immediately start
	// the http api so that connections don't fail while the daemon
	// is booting

@@ -147,6 +155,7 @@ func main() {
		job.Setenv("GraphDriver", *flGraphDriver)
		job.Setenv("ExecDriver", *flExecDriver)
		job.SetenvInt("Mtu", *flMtu)
		job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
		if err := job.Run(); err != nil {
			log.Fatal(err)
		}

@@ -239,3 +248,27 @@ func main() {
func showVersion() {
	fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
}

func checkKernelAndArch() error {
	// Check for unsupported architectures
	if runtime.GOARCH != "amd64" {
		return fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
	}
	// Check for unsupported kernel versions
	// FIXME: it would be cleaner to not test for specific versions, but rather
	// test for specific functionalities.
	// Unfortunately we can't test for the feature "does not cause a kernel panic"
	// without actually causing a kernel panic, so we need this workaround until
	// the circumstances of pre-3.8 crashes are clearer.
	// For details see http://github.com/dotcloud/docker/issues/407
	if k, err := utils.GetKernelVersion(); err != nil {
		log.Printf("WARNING: %s\n", err)
	} else {
		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
				log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
			}
		}
	}
	return nil
}

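Editor's note: checkKernelAndArch gates the daemon on amd64 and warns below kernel 3.8.0 via utils.CompareKernelVersion. A trimmed-down sketch of that comparison, with a local struct standing in for utils.KernelVersionInfo (whose exact definition beyond the Kernel/Major/Minor fields used above is assumed):

```go
package main

import "fmt"

// kernelVersion is a stand-in for utils.KernelVersionInfo with just the
// fields the comparison needs.
type kernelVersion struct{ Kernel, Major, Minor int }

// compare returns <0, 0 or >0, matching how CompareKernelVersion is used above.
func compare(a, b kernelVersion) int {
	for _, d := range [...]int{a.Kernel - b.Kernel, a.Major - b.Major, a.Minor - b.Minor} {
		if d != 0 {
			return d
		}
	}
	return 0
}

func main() {
	running := kernelVersion{3, 5, 0} // as if parsed from uname(2)
	if compare(running, kernelVersion{3, 8, 0}) < 0 {
		fmt.Println("WARNING: kernel older than 3.8.0; docker may be unstable")
	}
}
```
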
@@ -8,11 +8,6 @@ RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim

RUN pip install mkdocs

# installing sphinx for the rst->md conversion only - will be removed after May release
# pip installs from docs/requirements.txt, but here to increase cacheability
RUN pip install Sphinx==1.2.1
RUN pip install sphinxcontrib-httpdomain==1.2.0

# add MarkdownTools to get transclusion
# (future development)
#RUN easy_install -U setuptools

@@ -33,11 +28,10 @@ ADD . /docs
ADD MAINTAINERS /docs/sources/humans.txt
WORKDIR /docs

#build the sphinx html
#RUN make -C /docs clean docs

#convert to markdown
#RUN ./convert.sh
RUN VERSION=$(cat /docs/VERSION) &&\
	GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\
	AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\
	echo "{% set docker_version = \"${VERSION}\" %}{% set docker_branch = \"${GIT_BRANCH}\" %}{% set aws_bucket = \"${AWS_S3_BUCKET}\" %}{% include \"beta_warning.html\" %}" > /docs/theme/mkdocs/version.html

# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
EXPOSE 8000

docs/Makefile (185 lines removed)

@@ -1,185 +0,0 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS  =
SPHINXBUILD = sphinx-build
PAPER       =
BUILDDIR    = _build
PYTHON      = python

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) sources
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
#	@echo " html to make standalone HTML files"
#	@echo " dirhtml to make HTML files named index.html in directories"
#	@echo " singlehtml to make a single large HTML file"
#	@echo " pickle to make pickle files"
#	@echo " json to make JSON files"
#	@echo " htmlhelp to make HTML files and a HTML help project"
#	@echo " qthelp to make HTML files and a qthelp project"
#	@echo " devhelp to make HTML files and a Devhelp project"
#	@echo " epub to make an epub"
#	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
#	@echo " latexpdf to make LaTeX files and run them through pdflatex"
#	@echo " text to make text files"
	@echo " man to make a manual page"
#	@echo " texinfo to make Texinfo files"
#	@echo " info to make Texinfo files and run them through makeinfo"
#	@echo " gettext to make PO message catalogs"
#	@echo " changes to make an overview of all changed/added/deprecated items"
#	@echo " linkcheck to check all external links for integrity"
#	@echo " doctest to run all doctests embedded in the documentation (if enabled)"
	@echo " docs to build the docs and copy the static files to the outputdir"
	@echo " server to serve the docs in your browser under \`http://localhost:8000\`"
	@echo " publish to publish the app to dotcloud"

clean:
	-rm -rf $(BUILDDIR)/*

docs:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The documentation pages are now in $(BUILDDIR)/html."

server: docs
	@cd $(BUILDDIR)/html; $(PYTHON) -m SimpleHTTPServer 8000

site:
	cp -r website $(BUILDDIR)/
	cp -r theme/docker/static/ $(BUILDDIR)/website/
	@echo
	@echo "The Website pages are in $(BUILDDIR)/site."

connect:
	@echo connecting dotcloud to www.docker.io website, make sure to use user 1
	@echo or create your own "dockerwebsite" app
	@cd $(BUILDDIR)/website/ ; \
	dotcloud connect dockerwebsite ; \
	dotcloud list

push:
	@cd $(BUILDDIR)/website/ ; \
	dotcloud push

$(VERSIONS):
	@echo "Hello world"

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Docker.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Docker.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Docker"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Docker"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
docs/README.md (149 lines changed)

@@ -1,104 +1,99 @@
Docker Documentation
====================
# Docker Documentation

Overview
--------

The source for Docker documentation is here under ``sources/`` and uses
The source for Docker documentation is here under `sources/` and uses
extended Markdown, as implemented by [mkdocs](http://mkdocs.org).

The HTML files are built and hosted on https://docs.docker.io, and update
automatically after each change to the master or release branch of the
[docker files on GitHub](https://github.com/dotcloud/docker) thanks to
post-commit hooks. The "release" branch maps to the "latest"
documentation and the "master" (unreleased development) branch maps to the "master"
documentation.
The HTML files are built and hosted on `https://docs.docker.io`, and
update automatically after each change to the master or release branch
of [Docker on GitHub](https://github.com/dotcloud/docker)
thanks to post-commit hooks. The "docs" branch maps to the "latest"
documentation and the "master" (unreleased development) branch maps to
the "master" documentation.

## Branches

**There are two branches related to editing docs**: ``master`` and a
``docs`` branch. You should always edit
docs on a local branch of the ``master`` branch, and send a PR against ``master``.
That way your fixes
will automatically get included in later releases, and docs maintainers
can easily cherry-pick your changes into the ``docs`` release branch.
In the rare case where your change is not forward-compatible,
you may need to base your changes on the ``docs`` branch.
**There are two branches related to editing docs**: `master` and a
`docs` branch. You should always edit documentation on a local branch
of the `master` branch, and send a PR against `master`.

Now that we have a ``docs`` branch, we can keep the [http://docs.docker.io](http://docs.docker.io) docs
up to date with any bugs found between ``docker`` code releases.
That way your fixes will automatically get included in later releases,
and docs maintainers can easily cherry-pick your changes into the
`docs` release branch. In the rare case where your change is not
forward-compatible, you may need to base your changes on the `docs`
branch.

**Warning**: When *reading* the docs, the [http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may
include features not yet part of any official docker
release. The ``beta-docs`` site should be used only for understanding
bleeding-edge development and ``docs.docker.io`` (which points to the ``docs``
branch``) should be used for the latest official release.
Also, now that we have a `docs` branch, we can keep the
[http://docs.docker.io](http://docs.docker.io) docs up to date with any
bugs found between `docker` code releases.

Getting Started
---------------
**Warning**: When *reading* the docs, the
[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation
may include features not yet part of any official docker release. The
`beta-docs` site should be used only for understanding bleeding-edge
development and `docs.docker.io` (which points to the `docs`
branch`) should be used for the latest official release.

Docker documentation builds are done in a docker container, which installs all
the required tools, adds the local ``docs/`` directory and builds the HTML
docs. It then starts a HTTP server on port 8000 so that you can connect
and see your changes.
## Contributing

In the ``docker`` source directory, run:
```make docs```
- Follow the contribution guidelines ([see
  `../CONTRIBUTING.md`](../CONTRIBUTING.md)).
- [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work)

If you have any issues you need to debug, you can use ``make docs-shell`` and
then run ``mkdocs serve``
## Getting Started

# Contributing
Docker documentation builds are done in a Docker container, which
installs all the required tools, adds the local `docs/` directory and
builds the HTML docs. It then starts a HTTP server on port 8000 so that
you can connect and see your changes.

## Normal Case:
In the root of the `docker` source directory:

* Follow the contribution guidelines ([see
  ``../CONTRIBUTING.md``](../CONTRIBUTING.md)).
* [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work)
* Work in your own fork of the code, we accept pull requests.
* Change the ``.md`` files with your favorite editor -- try to keep the
  lines short (80 chars) and respect Markdown conventions.
* Run ``make clean docs`` to clean up old files and generate new ones,
  or just ``make docs`` to update after small changes.
* Your static website can now be found in the ``_build`` directory.
* To preview what you have generated run ``make server`` and open
  http://localhost:8000/ in your favorite browser.
    make docs

``make clean docs`` must complete without any warnings or errors.
If you have any issues you need to debug, you can use `make docs-shell` and
then run `mkdocs serve`

Working using GitHub's file editor
----------------------------------
### Examples

When writing examples give the user hints by making them resemble what
they see in their shell:

- Indent shell examples by 4 spaces so they get rendered as code.
- Start typed commands with `$ ` (dollar space), so that they are easily
  differentiated from program output.
- Program output has no prefix.
- Comments begin with `# ` (hash space).
- In-container shell commands begin with `$$ ` (dollar dollar space).

### Images

When you need to add images, try to make them as small as possible
(e.g. as gifs). Usually images should go in the same directory as the
`.md` file which references them, or in a subdirectory if one already
exists.

## Working using GitHub's file editor

Alternatively, for small changes and typos you might want to use
GitHub's built in file editor. It allows you to preview your changes
right online (though there can be some differences between GitHub
Markdown and mkdocs Markdown). Just be careful not to create many commits.
And you must still [sign your work!](../CONTRIBUTING.md#sign-your-work)
right on-line (though there can be some differences between GitHub
Markdown and [MkDocs Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)).
Just be careful not to create many commits. And you must still
[sign your work!](../CONTRIBUTING.md#sign-your-work)

Images
------
## Publishing Documentation

When you need to add images, try to make them as small as possible
(e.g. as gif). Usually images should go in the same directory as the
.md file which references them, or in a subdirectory if one already
exists.

Publishing Documentation
------------------------

To publish a copy of the documentation you need a ``docs/awsconfig``
file containing AWS settings to deploy to. The release script will
To publish a copy of the documentation you need a `docs/awsconfig`
file containing AWS settings to deploy to. The release script will
create an s3 if needed, and will then push the files to it.

```
[profile dowideit-docs]
aws_access_key_id = IHOIUAHSIDH234rwf....
aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
region = ap-southeast-2
```
    [profile dowideit-docs]
    aws_access_key_id = IHOIUAHSIDH234rwf....
    aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
    region = ap-southeast-2

The ``profile`` name must be the same as the name of the bucket you are
deploying to - which you call from the docker directory:
The `profile` name must be the same as the name of the bucket you are
deploying to - which you call from the `docker` directory:

``make AWS_S3_BUCKET=dowideit-docs docs-release``
    make AWS_S3_BUCKET=dowideit-docs docs-release

@@ -1,86 +0,0 @@
diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
index 6e072f6..5a4537d 100644
--- a/docs/sources/examples/hello_world.md
+++ b/docs/sources/examples/hello_world.md
@@ -59,6 +59,9 @@ standard out.

 See the example in action

+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/7658.js"id="asciicast-7658" async></script></body>"></iframe>
+
+
 ## Hello World Daemon

 Note
@@ -142,6 +145,8 @@ Make sure it is really stopped.

 See the example in action

+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/2562.js"id="asciicast-2562" async></script></body>"></iframe>
+
 The next example in the series is a [*Node.js Web
 App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
 any of the other examples:
diff --git a/docs/asciinema.patch b/docs/asciinema.patch
index e240bf3..e69de29 100644
--- a/docs/asciinema.patch
+++ b/docs/asciinema.patch
@@ -1,23 +0,0 @@
-diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
-index 6e072f6..5a4537d 100644
---- a/docs/sources/examples/hello_world.md
-+++ b/docs/sources/examples/hello_world.md
-@@ -59,6 +59,9 @@ standard out.
-
- See the example in action
-
-+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/7658.js"id="asciicast-7658" async></script></body>"></iframe>
-+
-+
- ## Hello World Daemon
-
- Note
-@@ -142,6 +145,8 @@ Make sure it is really stopped.
-
- See the example in action
-
-+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/2562.js"id="asciicast-2562" async></script></body>"></iframe>
-+
- The next example in the series is a [*Node.js Web
- App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
- any of the other examples:
diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
index 6e072f6..c277f38 100644
--- a/docs/sources/examples/hello_world.md
+++ b/docs/sources/examples/hello_world.md
@@ -59,6 +59,8 @@ standard out.

 See the example in action

+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/7658.js"id="asciicast-7658" async></script></body>"></iframe>
+
 ## Hello World Daemon

 Note
@@ -142,6 +144,8 @@ Make sure it is really stopped.

 See the example in action

+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/2562.js"id="asciicast-2562" async></script></body>"></iframe>
+
 The next example in the series is a [*Node.js Web
 App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
 any of the other examples:
diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md
index 2122b8d..49edbc8 100644
--- a/docs/sources/use/workingwithrepository.md
+++ b/docs/sources/use/workingwithrepository.md
@@ -199,6 +199,8 @@ searchable (or indexed at all) in the Central Index, and there will be
 no user name checking performed. Your registry will function completely
 independently from the Central Index.

+<iframe width="640" height="360" src="//www.youtube.com/embed/CAewZCBT4PI?rel=0" frameborder="0" allowfullscreen></iframe>
+
 See also

 [Docker Blog: How to use your own

@@ -1,53 +0,0 @@
#!/bin/sh

cd /

#run the sphinx build first
make -C /docs clean docs

cd /docs

#find sources -name '*.md*' -exec rm '{}' \;

# convert from rst to md for mkdocs.org
# TODO: we're using a sphinx specific rst thing to do between docs links, which we then need to convert to mkdocs specific markup (and pandoc loses it when converting to html / md)
HTML_FILES=$(find _build -name '*.html' | sed 's/_build\/html\/\(.*\)\/index.html/\1/')

for name in ${HTML_FILES}
do
	echo $name
	# lets not use gratuitious unicode quotes that cause terrible copy and paste issues
	sed -i 's/“/"/g' _build/html/${name}/index.html
	sed -i 's/”/"/g' _build/html/${name}/index.html
	pandoc -f html -t markdown --atx-headers -o sources/${name}.md1 _build/html/${name}/index.html

	#add the meta-data from the rst
	egrep ':(title|description|keywords):' sources/${name}.rst | sed 's/^:/page_/' > sources/${name}.md
	echo >> sources/${name}.md
	#cat sources/${name}.md1 >> sources/${name}.md
	# remove the paragraph links from the source
	cat sources/${name}.md1 | sed 's/\[..\](#.*)//' >> sources/${name}.md

	rm sources/${name}.md1

	sed -i 's/{.docutils .literal}//g' sources/${name}.md
	sed -i 's/{.docutils$//g' sources/${name}.md
	sed -i 's/^.literal} //g' sources/${name}.md
	sed -i 's/`{.descname}`//g' sources/${name}.md
	sed -i 's/{.descname}//g' sources/${name}.md
	sed -i 's/{.xref}//g' sources/${name}.md
	sed -i 's/{.xref .doc .docutils .literal}//g' sources/${name}.md
	sed -i 's/{.xref .http .http-post .docutils$//g' sources/${name}.md
	sed -i 's/^ .literal}//g' sources/${name}.md

	sed -i 's/\\\$container\\_id/\$container_id/' sources/examples/hello_world.md
	sed -i 's/\\\$TESTFLAGS/\$TESTFLAGS/' sources/contributing/devenvironment.md
	sed -i 's/\\\$MYVAR1/\$MYVAR1/g' sources/reference/commandline/cli.md

	# git it all so we can test
	# git add ${name}.md
done

#annoyingly, there are lots of failures
patch --fuzz 50 -t -p2 < pr4923.patch || true
patch --fuzz 50 -t -p2 < asciinema.patch || true

@@ -1,197 +0,0 @@
diff --git a/docs/Dockerfile b/docs/Dockerfile
index bc2b73b..b9808b2 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -4,14 +4,24 @@ MAINTAINER SvenDowideit@docker.com
 # docker build -t docker:docs . && docker run -p 8000:8000 docker:docs
 #

-RUN apt-get update && apt-get install -yq make python-pip python-setuptools
-
+RUN apt-get update && apt-get install -yq make python-pip python-setuptools
 RUN pip install mkdocs

+RUN apt-get install -yq vim-tiny git pandoc
+
+# pip installs from docs/requirements.txt, but here to increase cacheability
+RUN pip install Sphinx==1.2.1
+RUN pip install sphinxcontrib-httpdomain==1.2.0
+
 ADD . /docs
+
+#build the sphinx html
+RUN make -C /docs clean docs
+
 WORKDIR /docs

-CMD ["mkdocs", "serve"]
+#CMD ["mkdocs", "serve"]
+CMD bash

 # note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
 EXPOSE 8000
diff --git a/docs/theme/docker/layout.html b/docs/theme/docker/layout.html
index 7d78fb9..0dac9e0 100755
--- a/docs/theme/docker/layout.html
+++ b/docs/theme/docker/layout.html
@@ -63,48 +63,6 @@

 <body>

-<div id="wrap">
-<div class="navbar navbar-static-top navbar-inner navbar-fixed-top ">
- <div class="navbar-dotcloud">
- <div class="container">
-
- <div style="float: right" class="pull-right">
- <ul class="nav">
- <li id="nav-introduction"><a href="http://www.docker.io/" title="Docker Homepage">Home</a></li>
- <li id="nav-about"><a href="http://www.docker.io/about/" title="About">About</a></li>
- <li id="nav-gettingstarted"><a href="http://www.docker.io/gettingstarted/">Getting started</a></li>
- <li id="nav-community"><a href="http://www.docker.io/community/" title="Community">Community</a></li>
- <li id="nav-documentation" class="active"><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
- <li id="nav-blog"><a href="http://blog.docker.io/" title="Docker Blog">Blog</a></li>
- <li id="nav-index"><a href="http://index.docker.io/" title="Docker Image Index, find images here">INDEX <img class="inline-icon" alt="link to external site" src="{{ pathto('_static/img/external-link-icon.png', 1) }}" title="external link"> </a></li>
- </ul>
- </div>
-
- <div class="brand-logo">
- <a href="http://www.docker.io" title="Docker Homepage"><img src="{{ pathto('_static/img/docker-top-logo.png', 1) }}" alt="Docker logo"></a>
- </div>
- </div>
- </div>
-</div>
-
-<div class="container-fluid">
-
- <!-- Docs nav
- ================================================== -->
- <div class="row-fluid main-row">
-
- <div class="sidebar bs-docs-sidebar">
- <div class="page-title" >
- <h4>DOCUMENTATION</h4>
- </div>
-
- {{ toctree(collapse=False, maxdepth=3) }}
- <form>
- <input type="text" id="st-search-input" class="st-search-input span3" placeholder="search in documentation" style="width:210px;" />
- <div id="st-results-container"></div>
- </form>
- </div>
-
 <!-- body block -->
 <div class="main-content">

@@ -114,111 +72,7 @@
 {% block body %}{% endblock %}
 </section>

- <div class="pull-right"><a href="https://github.com/dotcloud/docker/blob/{{ github_tag }}/docs/sources/{{ pagename }}.rst" title="edit this article">Edit this article on GitHub</a></div>
 </div>
- </div>
-</div>
-
-<div id="push-the-footer"></div>
-</div> <!-- end wrap for pushing footer -->
-
-<div id="footer">
- <div class="footer-landscape">
- <div class="footer-landscape-image">
- <!-- footer -->
- <div class="container">
- <div class="row footer">
- <div class="span12 tbox">
- <div class="tbox">
- <p>Docker is an open source project, sponsored by <a href="https://www.docker.com">Docker Inc.</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
- <p>Documentation proudly hosted by <a href="http://www.readthedocs.org">Read the Docs</a></p>
- </div>
-
- <div class="social links">
- <a title="Docker on Twitter" class="twitter" href="http://twitter.com/docker">Twitter</a>
- <a title="Docker on GitHub" class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
- <a title="Docker on Reddit" class="reddit" href="http://www.reddit.com/r/Docker/">Reddit</a>
- <a title="Docker on Google+" class="googleplus" href="https://plus.google.com/u/0/b/100381662757235514581/communities/108146856671494713993">Google+</a>
- <a title="Docker on Facebook" class="facebook" href="https://www.facebook.com/docker.run">Facebook</a>
- <a title="Docker on SlideShare" class="slideshare" href="http://www.slideshare.net/dotCloud">Slideshare</a>
- <a title="Docker on Youtube" class="youtube" href="http://www.youtube.com/user/dockerrun/">Youtube</a>
- <a title="Docker on Flickr" class="flickr" href="http://www.flickr.com/photos/99741659@N08/">Flickr</a>
- <a title="Docker on LinkedIn" class="linkedin" href="http://www.linkedin.com/company/dotcloud">LinkedIn</a>
- </div>
-
- <div class="tbox version-flyer ">
- <div class="content">
- <p class="version-note">Note: You are currently browsing the development documentation. The current release may work differently.</p>
-
- <small>Available versions:</small>
- <ul class="inline">
- {% for slug, url in versions %}
- <li class="alternative"><a href="{{ url }}{%- for word in pagename.split('/') -%}
- {%- if word != 'index' -%}
- {%- if word != '' -%}
- {{ word }}/
- {%- endif -%}
- {%- endif -%}
- {%- endfor -%}"
- title="Switch to {{ slug }}">{{ slug }}</a></li>
- {% endfor %}
- </ul>
- </div>
- </div>
-
-
- </div>
- </div>
- </div>
- </div>
- <!-- end of footer -->
- </div>
-
-</div>
-
-
-<script type="text/javascript" src="{{ pathto('_static/js/docs.js', 1) }}"></script>
-
-<!-- Swiftype search -->
-
-<script type="text/javascript">
- var Swiftype = window.Swiftype || {};
- (function() {
- Swiftype.key = 'pWPnnyvwcfpcrw1o51Sz';
- Swiftype.inputElement = '#st-search-input';
- Swiftype.resultContainingElement = '#st-results-container';
- Swiftype.attachElement = '#st-search-input';
- Swiftype.renderStyle = "overlay";
- // from https://swiftype.com/questions/how-can-i-make-more-popular-content-rank-higher
- // Use "page" for now -- they don't subgroup by document type yet.
- Swiftype.searchFunctionalBoosts = {"page": {"popularity": "linear"}};
-
- var script = document.createElement('script');
- script.type = 'text/javascript';
- script.async = true;
- script.src = "//swiftype.com/embed.js";
- var entry = document.getElementsByTagName('script')[0];
- entry.parentNode.insertBefore(script, entry);
- }());
-</script>
-
-
-<!-- Google analytics -->
-<script type="text/javascript">
-
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', 'UA-6096819-11']);
- _gaq.push(['_setDomainName', 'docker.io']);
- _gaq.push(['_setAllowLinker', true]);
- _gaq.push(['_trackPageview']);
-
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
-
-</script>

 </body>
 </html>

@@ -82,15 +82,16 @@ pages:
# - ['user-guide/configuration.md', 'User Guide', 'Configuration']
# ./faq.md

# Docker Index docs:
- ['index/index.md', '**HIDDEN**']
# Docker.io docs:
- ['docker-io/index.md', '**HIDDEN**']
# - ['index/home.md', 'Docker Index', 'Help']
- ['index/accounts.md', 'Docker Index', 'Accounts']
- ['index/repos.md', 'Docker Index', 'Repositories']
- ['index/builds.md', 'Docker Index', 'Trusted Builds']
- ['docker-io/accounts.md', 'Docker.io', 'Accounts']
- ['docker-io/repos.md', 'Docker.io', 'Repositories']
- ['docker-io/builds.md', 'Docker.io', 'Trusted Builds']

# Reference
- ['reference/index.md', '**HIDDEN**']
- ['reference/commandline/index.md', '**HIDDEN**']
- ['reference/commandline/cli.md', 'Reference', 'Command line']
- ['reference/builder.md', 'Reference', 'Dockerfile']
- ['reference/run.md', 'Reference', 'Run Reference']

@@ -99,28 +100,43 @@ pages:
- ['articles/security.md', 'Reference', 'Security']
- ['articles/baseimages.md', 'Reference', 'Creating a Base Image']
- ['use/networking.md', 'Reference', 'Advanced networking']
- ['reference/api/index_api.md', 'Reference', 'Docker Index API']
- ['reference/api/index.md', '**HIDDEN**']
- ['reference/api/docker-io_api.md', 'Reference', 'Docker.io API']
- ['reference/api/registry_api.md', 'Reference', 'Docker Registry API']
- ['reference/api/registry_index_spec.md', 'Reference', 'Registry & Index Spec']
- ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11']
- ['reference/api/docker_remote_api_v1.10.md', 'Reference', 'Docker Remote API v1.10']
- ['reference/api/docker_remote_api_v1.9.md', 'Reference', 'Docker Remote API v1.9']
- ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.6.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.5.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.4.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.3.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker IO OAuth API']
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker IO Accounts API']

- ['jsearch.md', '**HIDDEN**']

# - ['static_files/README.md', 'static_files', 'README']
- ['terms/index.md', '**HIDDEN**']
- ['terms/layer.md', '**HIDDEN**']
- ['terms/index.md', '**HIDDEN**']
- ['terms/registry.md', '**HIDDEN**']
- ['terms/container.md', '**HIDDEN**']
- ['terms/repository.md', '**HIDDEN**']
- ['terms/filesystem.md', '**HIDDEN**']
- ['terms/image.md', '**HIDDEN**']

# TODO: our theme adds a dropdown even for sections that have no subsections.
#- ['faq.md', 'FAQ']

# Contribute:
- ['contributing/index.md', '**HIDDEN**']
- ['contributing/contributing.md', 'Contribute', 'Contributing']
- ['contributing/devenvironment.md', 'Contribute', 'Development environment']
# - ['about/license.md', 'About', 'License']

- ['jsearch.md', '**HIDDEN**']

# - ['static_files/README.md', 'static_files', 'README']
#- ['terms/index.md', '**HIDDEN**']
# - ['terms/layer.md', 'terms', 'layer']
# - ['terms/index.md', 'terms', 'Home']
# - ['terms/registry.md', 'terms', 'registry']
# - ['terms/container.md', 'terms', 'container']
# - ['terms/repository.md', 'terms', 'repository']
# - ['terms/filesystem.md', 'terms', 'filesystem']
# - ['terms/image.md', 'terms', 'image']

12836
docs/pr4923.patch
File diff suppressed because it is too large
@@ -1,2 +0,0 @@
Sphinx==1.2.1
sphinxcontrib-httpdomain==1.2.0
@@ -9,7 +9,9 @@
    { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } },
    { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } },
    { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } },
    { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } }
    { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } },
    { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "ReplaceKeyPrefixWith": "docker-io/" } },
    { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }
  ]
}
@@ -2,7 +2,7 @@

## Contents:

 - [Docker Security](security/)
 - [Create a Base Image](baseimages/)
 - [Runtime Metrics](runmetrics/)
- [Docker Security](security/)
- [Create a Base Image](baseimages/)
- [Runtime Metrics](runmetrics/)

@@ -4,8 +4,8 @@ page_keywords: Examples, Usage, base image, docker, documentation, examples

# Create a Base Image

So you want to create your own [*Base
Image*](../../terms/image/#base-image-def)? Great!
So you want to create your own [*Base Image*](
/terms/image/#base-image-def)? Great!

The specific process will depend heavily on the Linux distribution you
want to package. We have some examples below, and you are encouraged to

@@ -13,9 +13,9 @@ submit pull requests to contribute new ones.

## Create a full image using tar

In general, you’ll want to start with a working machine that is running
the distribution you’d like to package as a base image, though that is
not required for some tools like Debian’s
In general, you'll want to start with a working machine that is running
the distribution you'd like to package as a base image, though that is
not required for some tools like Debian's
[Debootstrap](https://wiki.debian.org/Debootstrap), which you can also
use to build Ubuntu images.

@@ -33,19 +33,18 @@ It can be as simple as this to create an Ubuntu base image:
There are more example scripts for creating base images in the Docker
GitHub Repo:

- [BusyBox](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh)
- CentOS / Scientific Linux CERN (SLC) [on
  Debian/Ubuntu](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh)
  or [on
  CentOS/RHEL/SLC/etc.](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh)
- [Debian /
  Ubuntu](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh)
- [BusyBox](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh)
- CentOS / Scientific Linux CERN (SLC) [on Debian/Ubuntu](
  https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh) or
  [on CentOS/RHEL/SLC/etc.](
  https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh)
- [Debian / Ubuntu](
  https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh)

## Creating a simple base image using `scratch`

There is a special repository in the Docker registry called
`scratch`, which was created using an empty tar
file:
There is a special repository in the Docker registry called `scratch`, which
was created using an empty tar file:

    $ tar cv --files-from /dev/null | docker import - scratch

@@ -56,5 +55,5 @@ image to base your new minimal containers `FROM`:
    ADD true-asm /true
    CMD ["/true"]

The Dockerfile above is from an extremely minimal image -
[tianon/true](https://github.com/tianon/dockerfiles/tree/master/true).
The Dockerfile above is from an extremely minimal image - [tianon/true](
https://github.com/tianon/dockerfiles/tree/master/true).
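As a quick sanity check, a hypothetical session (it assumes you have cloned the
tianon/true repository and are in its directory, and that the image builds
cleanly) would look like:

    $ docker build -t tianon/true .
    $ docker run tianon/true
    $ echo $?
    0

The container runs `/true`, produces no output, and exits 0, which is exactly
what a minimal base-image smoke test should show.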
@@ -1,65 +0,0 @@
:title: Create a Base Image
:description: How to create base images
:keywords: Examples, Usage, base image, docker, documentation, examples

.. _base_image_creation:

Create a Base Image
===================

So you want to create your own :ref:`base_image_def`? Great!

The specific process will depend heavily on the Linux distribution you
want to package. We have some examples below, and you are encouraged
to submit pull requests to contribute new ones.

Create a full image using tar
.............................

In general, you'll want to start with a working machine that is
running the distribution you'd like to package as a base image, though
that is not required for some tools like Debian's `Debootstrap
<https://wiki.debian.org/Debootstrap>`_, which you can also use to
build Ubuntu images.

It can be as simple as this to create an Ubuntu base image::

    $ sudo debootstrap raring raring > /dev/null
    $ sudo tar -C raring -c . | sudo docker import - raring
    a29c15f1bf7a
    $ sudo docker run raring cat /etc/lsb-release
    DISTRIB_ID=Ubuntu
    DISTRIB_RELEASE=13.04
    DISTRIB_CODENAME=raring
    DISTRIB_DESCRIPTION="Ubuntu 13.04"

There are more example scripts for creating base images in the
Docker GitHub Repo:

* `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
* CentOS / Scientific Linux CERN (SLC) `on Debian/Ubuntu
  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh>`_
  or
  `on CentOS/RHEL/SLC/etc.
  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh>`_
* `Debian / Ubuntu
  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_


Creating a simple base image using ``scratch``
..............................................

There is a special repository in the Docker registry called ``scratch``, which
was created using an empty tar file::

    $ tar cv --files-from /dev/null | docker import - scratch

which you can ``docker pull``. You can then use that image to base your new
minimal containers ``FROM``::

    FROM scratch
    ADD true-asm /true
    CMD ["/true"]

The Dockerfile above is from an extremely minimal image -
`tianon/true <https://github.com/tianon/dockerfiles/tree/master/true>`_.
@@ -1,15 +0,0 @@
:title: Docker articles
:description: various articles related to Docker
:keywords: docker, articles

.. _articles_list:

Articles
========

.. toctree::
   :maxdepth: 1

   security
   baseimages
   runmetrics
@@ -4,8 +4,8 @@ page_keywords: docker, metrics, CPU, memory, disk, IO, run, runtime

# Runtime Metrics

Linux Containers rely on [control
groups](https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt)
Linux Containers rely on [control groups](
https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt)
which not only track groups of processes, but also expose metrics about
CPU, memory, and block I/O usage. You can access those metrics and
obtain network usage metrics as well. This is relevant for "pure" LXC

@@ -14,34 +14,30 @@ containers, as well as for Docker containers.
## Control Groups

Control groups are exposed through a pseudo-filesystem. In recent
distros, you should find this filesystem under
`/sys/fs/cgroup`. Under that directory, you will see
multiple sub-directories, called devices, freezer, blkio, etc.; each
sub-directory actually corresponds to a different cgroup hierarchy.
distros, you should find this filesystem under `/sys/fs/cgroup`. Under
that directory, you will see multiple sub-directories, called devices,
freezer, blkio, etc.; each sub-directory actually corresponds to a different
cgroup hierarchy.

On older systems, the control groups might be mounted on
`/cgroup`, without distinct hierarchies. In that
case, instead of seeing the sub-directories, you will see a bunch of
files in that directory, and possibly some directories corresponding to
existing containers.
On older systems, the control groups might be mounted on `/cgroup`, without
distinct hierarchies. In that case, instead of seeing the sub-directories,
you will see a bunch of files in that directory, and possibly some directories
corresponding to existing containers.

To figure out where your control groups are mounted, you can run:

    grep cgroup /proc/mounts
    $ grep cgroup /proc/mounts
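On a recent distro with the hierarchies mounted under `/sys/fs/cgroup` as
described above, the output looks roughly like this (one line per hierarchy;
truncated here, and it will vary from distro to distro):

    cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0
    cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0
    cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0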
## Enumerating Cgroups

You can look into `/proc/cgroups` to see the
different control group subsystems known to the system, the hierarchy
they belong to, and how many groups they contain.
You can look into `/proc/cgroups` to see the different control group subsystems
known to the system, the hierarchy they belong to, and how many groups they contain.

You can also look at `/proc/<pid>/cgroup` to see
which control groups a process belongs to. The control group will be
shown as a path relative to the root of the hierarchy mountpoint; e.g.
`/` means “this process has not been assigned into a
particular group”, while `/lxc/pumpkin` means that
the process is likely to be a member of a container named
`pumpkin`.
You can also look at `/proc/<pid>/cgroup` to see which control groups a process
belongs to. The control group will be shown as a path relative to the root of
the hierarchy mountpoint; e.g. `/` means “this process has not been assigned into
a particular group”, while `/lxc/pumpkin` means that the process is likely to be
a member of a container named `pumpkin`.
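For instance, to check the cgroups of your current shell (output illustrative;
each line is `hierarchy-id:subsystem:path`, and the `/lxc/...` path would show
up for a process running inside a container):

    $ grep -E 'memory|devices' /proc/self/cgroup
    11:memory:/
    6:devices:/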

## Finding the Cgroup for a Given Container

@@ -53,12 +49,11 @@ of the LXC tools, the cgroup will be `lxc/<container_name>.`
For Docker containers using cgroups, the container name will be the full
ID or long ID of the container. If a container shows up as ae836c95b4c3
in `docker ps`, its long ID might be something like
`ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can look it up with `docker inspect`
or `docker ps -notrunc`.
`ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can
look it up with `docker inspect` or `docker ps -notrunc`.

Putting everything together to look at the memory metrics for a Docker
container, take a look at
`/sys/fs/cgroup/memory/lxc/<longid>/`.
container, take a look at `/sys/fs/cgroup/memory/lxc/<longid>/`.
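For example, assuming the long ID is stored in `$LONGID` and the hierarchies
are mounted as shown earlier, the raw memory counters can be read directly
(the values echo the sample shown below):

    $ LONGID=ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79
    $ head -3 /sys/fs/cgroup/memory/lxc/$LONGID/memory.stat
    cache 11492564992
    rss 1930993664
    mapped_file 306728960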

## Metrics from Cgroups: Memory, CPU, Block IO

@@ -106,10 +101,9 @@ Here is what it will look like:
    total_active_file 4489052160
    total_unevictable 32768

The first half (without the `total_` prefix)
contains statistics relevant to the processes within the cgroup,
excluding sub-cgroups. The second half (with the `total_`
prefix) includes sub-cgroups as well.
The first half (without the `total_` prefix) contains statistics relevant
to the processes within the cgroup, excluding sub-cgroups. The second half
(with the `total_` prefix) includes sub-cgroups as well.

Some metrics are "gauges", i.e. values that can increase or decrease
(e.g. swap, the amount of swap space used by the members of the cgroup).

@@ -118,95 +112,104 @@ they represent occurrences of a specific event (e.g. pgfault, which
indicates the number of page faults which happened since the creation of
the cgroup; this number can never decrease).

cache
: the amount of memory used by the processes of this control group
  that can be associated precisely with a block on a block device.
  When you read from and write to files on disk, this amount will
  increase. This will be the case if you use "conventional" I/O
  (`open`, `read`,
  `write` syscalls) as well as mapped files (with
  `mmap`). It also accounts for the memory used by
  `tmpfs` mounts, though the reasons are unclear.
rss
: the amount of memory that *doesn’t* correspond to anything on disk:
  stacks, heaps, and anonymous memory maps.
mapped\_file
: indicates the amount of memory mapped by the processes in the
  control group. It doesn’t give you information about *how much*
  memory is used; it rather tells you *how* it is used.
pgfault and pgmajfault
: indicate the number of times that a process of the cgroup triggered
  a "page fault" and a "major fault", respectively. A page fault
  happens when a process accesses a part of its virtual memory space
  which is nonexistent or protected. The former can happen if the
  process is buggy and tries to access an invalid address (it will
  then be sent a `SIGSEGV` signal, typically
  killing it with the famous `Segmentation fault`
  message). The latter can happen when the process reads from a memory
  zone which has been swapped out, or which corresponds to a mapped
  file: in that case, the kernel will load the page from disk, and let
  the CPU complete the memory access. It can also happen when the
  process writes to a copy-on-write memory zone: likewise, the kernel
  will preempt the process, duplicate the memory page, and resume the
  write operation on the process’ own copy of the page. "Major" faults
  happen when the kernel actually has to read the data from disk. When
  it just has to duplicate an existing page, or allocate an empty
  page, it’s a regular (or "minor") fault.
swap
: the amount of swap currently used by the processes in this cgroup.
active\_anon and inactive\_anon
: the amount of *anonymous* memory that has been identified as
  respectively *active* and *inactive* by the kernel. "Anonymous"
  memory is the memory that is *not* linked to disk pages. In other
  words, that’s the equivalent of the rss counter described above. In
  fact, the very definition of the rss counter is **active\_anon** +
  **inactive\_anon** - **tmpfs** (where tmpfs is the amount of memory
  used up by `tmpfs` filesystems mounted by this
  control group). Now, what’s the difference between "active" and
  "inactive"? Pages are initially "active"; and at regular intervals,
  the kernel sweeps over the memory, and tags some pages as
  "inactive". Whenever they are accessed again, they are immediately
  retagged "active". When the kernel is almost out of memory, and time
  comes to swap out to disk, the kernel will swap "inactive" pages.
active\_file and inactive\_file
: cache memory, with *active* and *inactive* similar to the *anon*
  memory above. The exact formula is cache = **active\_file** +
  **inactive\_file** + **tmpfs**. The exact rules used by the kernel
  to move memory pages between active and inactive sets are different
  from the ones used for anonymous memory, but the general principle
  is the same. Note that when the kernel needs to reclaim memory, it
  is cheaper to reclaim a clean (=non modified) page from this pool,
  since it can be reclaimed immediately (while anonymous pages and
  dirty/modified pages have to be written to disk first).
unevictable
: the amount of memory that cannot be reclaimed; generally, it will
  account for memory that has been "locked" with `mlock`.
  It is often used by crypto frameworks to make sure that
  secret keys and other sensitive material never gets swapped out to
  disk.
memory and memsw limits
: These are not really metrics, but a reminder of the limits applied
  to this cgroup. The first one indicates the maximum amount of
  physical memory that can be used by the processes of this control
  group; the second one indicates the maximum amount of RAM+swap.

- **cache:**
  the amount of memory used by the processes of this control group
  that can be associated precisely with a block on a block device.
  When you read from and write to files on disk, this amount will
  increase. This will be the case if you use "conventional" I/O
  (`open`, `read`,
  `write` syscalls) as well as mapped files (with
  `mmap`). It also accounts for the memory used by
  `tmpfs` mounts, though the reasons are unclear.

- **rss:**
  the amount of memory that *doesn't* correspond to anything on disk:
  stacks, heaps, and anonymous memory maps.

- **mapped_file:**
  indicates the amount of memory mapped by the processes in the
  control group. It doesn't give you information about *how much*
  memory is used; it rather tells you *how* it is used.

- **pgfault and pgmajfault:**
  indicate the number of times that a process of the cgroup triggered
  a "page fault" and a "major fault", respectively. A page fault
  happens when a process accesses a part of its virtual memory space
  which is nonexistent or protected. The former can happen if the
  process is buggy and tries to access an invalid address (it will
  then be sent a `SIGSEGV` signal, typically
  killing it with the famous `Segmentation fault`
  message). The latter can happen when the process reads from a memory
  zone which has been swapped out, or which corresponds to a mapped
  file: in that case, the kernel will load the page from disk, and let
  the CPU complete the memory access. It can also happen when the
  process writes to a copy-on-write memory zone: likewise, the kernel
  will preempt the process, duplicate the memory page, and resume the
  write operation on the process' own copy of the page. "Major" faults
  happen when the kernel actually has to read the data from disk. When
  it just has to duplicate an existing page, or allocate an empty
  page, it's a regular (or "minor") fault.

- **swap:**
  the amount of swap currently used by the processes in this cgroup.

- **active_anon and inactive_anon:**
  the amount of *anonymous* memory that has been identified as
  respectively *active* and *inactive* by the kernel. "Anonymous"
  memory is the memory that is *not* linked to disk pages. In other
  words, that's the equivalent of the rss counter described above. In
  fact, the very definition of the rss counter is **active_anon** +
  **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory
  used up by `tmpfs` filesystems mounted by this
  control group). Now, what's the difference between "active" and
  "inactive"? Pages are initially "active"; and at regular intervals,
  the kernel sweeps over the memory, and tags some pages as
  "inactive". Whenever they are accessed again, they are immediately
  retagged "active". When the kernel is almost out of memory, and time
  comes to swap out to disk, the kernel will swap "inactive" pages.

- **active_file and inactive_file:**
  cache memory, with *active* and *inactive* similar to the *anon*
  memory above. The exact formula is cache = **active_file** +
  **inactive_file** + **tmpfs**. The exact rules used by the kernel
  to move memory pages between active and inactive sets are different
  from the ones used for anonymous memory, but the general principle
  is the same. Note that when the kernel needs to reclaim memory, it
  is cheaper to reclaim a clean (=non modified) page from this pool,
  since it can be reclaimed immediately (while anonymous pages and
  dirty/modified pages have to be written to disk first).

- **unevictable:**
  the amount of memory that cannot be reclaimed; generally, it will
  account for memory that has been "locked" with `mlock`.
  It is often used by crypto frameworks to make sure that
  secret keys and other sensitive material never gets swapped out to
  disk.

- **memory and memsw limits:**
  These are not really metrics, but a reminder of the limits applied
  to this cgroup. The first one indicates the maximum amount of
  physical memory that can be used by the processes of this control
  group; the second one indicates the maximum amount of RAM+swap.

Accounting for memory in the page cache is very complex. If two
processes in different control groups both read the same file
(ultimately relying on the same blocks on disk), the corresponding
memory charge will be split between the control groups. It’s nice, but
memory charge will be split between the control groups. It's nice, but
it also means that when a cgroup is terminated, it could increase the
memory usage of another cgroup, because they are not splitting the cost
anymore for those memory pages.

### CPU metrics: `cpuacct.stat`

Now that we’ve covered memory metrics, everything else will look very
Now that we've covered memory metrics, everything else will look very
simple in comparison. CPU metrics will be found in the
`cpuacct` controller.

For each container, you will find a pseudo-file `cpuacct.stat`,
containing the CPU usage accumulated by the processes of the container,
broken down between `user` and `system` time. If you’re not familiar
broken down between `user` and `system` time. If you're not familiar
with the distinction, `user` is the time during which the processes were
in direct control of the CPU (i.e. executing process code), and `system`
is the time during which the CPU was executing system calls on behalf of

@@ -217,43 +220,47 @@ they are expressed in "user jiffies". There are `USER_HZ`
*"jiffies"* per second, and on x86 systems,
`USER_HZ` is 100. This used to map exactly to the
number of scheduler "ticks" per second; but with the advent of higher
frequency scheduling, as well as [tickless
kernels](http://lwn.net/Articles/549580/), the number of kernel ticks
wasn’t relevant anymore. It stuck around anyway, mainly for legacy and
frequency scheduling, as well as [tickless kernels](
http://lwn.net/Articles/549580/), the number of kernel ticks
wasn't relevant anymore. It stuck around anyway, mainly for legacy and
compatibility reasons.
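As a sketch (same `$LONGID` convention as above; the counter values are
invented for illustration), reading and converting the counters looks like:

    $ cat /sys/fs/cgroup/cpuacct/lxc/$LONGID/cpuacct.stat
    user 11345
    system 887

With `USER_HZ` at 100, that is 113.45 seconds of user time and 8.87 seconds
of system time.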

### Block I/O metrics

Block I/O is accounted in the `blkio` controller.
Different metrics are scattered across different files. While you can
find in-depth details in the
[blkio-controller](https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt)
find in-depth details in the [blkio-controller](
https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt)
file in the kernel documentation, here is a short list of the most
relevant ones:

blkio.sectors
: contain the number of 512-bytes sectors read and written by the
  processes member of the cgroup, device by device. Reads and writes
  are merged in a single counter.
blkio.io\_service\_bytes
: indicates the number of bytes read and written by the cgroup. It has
  4 counters per device, because for each device, it differentiates
  between synchronous vs. asynchronous I/O, and reads vs. writes.
blkio.io\_serviced
: the number of I/O operations performed, regardless of their size. It
  also has 4 counters per device.
blkio.io\_queued
: indicates the number of I/O operations currently queued for this
  cgroup. In other words, if the cgroup isn’t doing any I/O, this will
  be zero. Note that the opposite is not true. In other words, if
  there is no I/O queued, it does not mean that the cgroup is idle
  (I/O-wise). It could be doing purely synchronous reads on an
  otherwise quiescent device, which is therefore able to handle them
  immediately, without queuing. Also, while it is helpful to figure
  out which cgroup is putting stress on the I/O subsystem, keep in
  mind that it is a relative quantity. Even if a process group does
  not perform more I/O, its queue size can increase just because the
  device load increases because of other devices.

- **blkio.sectors:**
  contain the number of 512-bytes sectors read and written by the
  processes member of the cgroup, device by device. Reads and writes
  are merged in a single counter.

- **blkio.io_service_bytes:**
  indicates the number of bytes read and written by the cgroup. It has
  4 counters per device, because for each device, it differentiates
  between synchronous vs. asynchronous I/O, and reads vs. writes.

- **blkio.io_serviced:**
  the number of I/O operations performed, regardless of their size. It
  also has 4 counters per device.

- **blkio.io_queued:**
  indicates the number of I/O operations currently queued for this
  cgroup. In other words, if the cgroup isn't doing any I/O, this will
  be zero. Note that the opposite is not true. In other words, if
  there is no I/O queued, it does not mean that the cgroup is idle
  (I/O-wise). It could be doing purely synchronous reads on an
  otherwise quiescent device, which is therefore able to handle them
  immediately, without queuing. Also, while it is helpful to figure
  out which cgroup is putting stress on the I/O subsystem, keep in
  mind that it is a relative quantity. Even if a process group does
  not perform more I/O, its queue size can increase just because the
  device load increases because of other devices.
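For example, the per-device byte counters can be read like this (values are
illustrative; `8:0` is the major:minor number of the block device):

    $ cat /sys/fs/cgroup/blkio/lxc/$LONGID/blkio.io_service_bytes
    8:0 Read 31485952
    8:0 Write 74158080
    8:0 Sync 74158080
    8:0 Async 31485952
    8:0 Total 105644032
    Total 105644032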

## Network Metrics

@@ -261,9 +268,9 @@ Network metrics are not exposed directly by control groups. There is a
good explanation for that: network interfaces exist within the context
of *network namespaces*. The kernel could probably accumulate metrics
about packets and bytes sent and received by a group of processes, but
those metrics wouldn’t be very useful. You want per-interface metrics
those metrics wouldn't be very useful. You want per-interface metrics
(because traffic happening on the local `lo`
interface doesn’t really count). But since processes in a single cgroup
interface doesn't really count). But since processes in a single cgroup
can belong to multiple network namespaces, those metrics would be harder
to interpret: multiple network namespaces means multiple `lo`
interfaces, potentially multiple `eth0`

@@ -280,7 +287,7 @@ an interface) can do some serious accounting.
For instance, you can setup a rule to account for the outbound HTTP
traffic on a web server:

    iptables -I OUTPUT -p tcp --sport 80
    $ iptables -I OUTPUT -p tcp --sport 80

There is no `-j` or `-g` flag,
so the rule will just count matched packets and go to the following

@@ -288,7 +295,7 @@ rule.

Later, you can check the values of the counters, with:

    iptables -nxvL OUTPUT
    $ iptables -nxvL OUTPUT

Technically, `-n` is not required, but it will
prevent iptables from doing DNS reverse lookups, which are probably

@@ -324,17 +331,17 @@ The `ip-netns exec` command will let you execute any
program (present in the host system) within any network namespace
visible to the current process. This means that your host will be able
to enter the network namespace of your containers, but your containers
won’t be able to access the host, nor their sibling containers.
won't be able to access the host, nor their sibling containers.
Containers will be able to “see” and affect their sub-containers,
though.

The exact format of the command is:

    ip netns exec <nsname> <command...>
    $ ip netns exec <nsname> <command...>

For example:

    ip netns exec mycontainer netstat -i
    $ ip netns exec mycontainer netstat -i

`ip netns` finds the "mycontainer" container by
using namespaces pseudo-files. Each process belongs to one network

@@ -351,11 +358,9 @@ those pseudo-files. (Symlinks are accepted.)
In other words, to execute a command within the network namespace of a
container, we need to:

- Find out the PID of any process within the container that we want to
  investigate;
- Create a symlink from `/var/run/netns/<somename>`
  to `/proc/<thepid>/ns/net`
- Execute `ip netns exec <somename> ....`
- Find out the PID of any process within the container that we want to investigate;
- Create a symlink from `/var/run/netns/<somename>` to `/proc/<thepid>/ns/net`
- Execute `ip netns exec <somename> ....`

Please review [*Enumerating Cgroups*](#enumerating-cgroups) to learn how to find
the cgroup of a process running in the container of which you want to

@@ -364,14 +369,13 @@ measure network usage. From there, you can examine the pseudo-file named
control group (i.e. in the container). Pick any one of them.

Putting everything together, if the "short ID" of a container is held in
the environment variable `$CID`, then you can do
this:
the environment variable `$CID`, then you can do this:

    TASKS=/sys/fs/cgroup/devices/$CID*/tasks
    PID=$(head -n 1 $TASKS)
    mkdir -p /var/run/netns
    ln -sf /proc/$PID/ns/net /var/run/netns/$CID
    ip netns exec $CID netstat -i
    $ TASKS=/sys/fs/cgroup/devices/$CID*/tasks
    $ PID=$(head -n 1 $TASKS)
    $ mkdir -p /var/run/netns
    $ ln -sf /proc/$PID/ns/net /var/run/netns/$CID
    $ ip netns exec $CID netstat -i

## Tips for high-performance metric collection

@@ -386,7 +390,7 @@ write your metric collector in C (or any language that lets you do
low-level system calls). You need to use a special system call,
`setns()`, which lets the current process enter any
arbitrary namespace. It requires, however, an open file descriptor to
the namespace pseudo-file (remember: that’s the pseudo-file in
the namespace pseudo-file (remember: that's the pseudo-file in
`/proc/<pid>/ns/net`).

However, there is a catch: you must not keep this file descriptor open.
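For one-off experiments (as opposed to the long-lived collector process
described here), the same `setns()` mechanism is reachable from the shell
through util-linux's `nsenter`; a sketch, assuming PID 4242 belongs to the
container you want to inspect:

    $ nsenter --net=/proc/4242/ns/net netstat -i

Because `nsenter` exits when the command does, the file descriptor it opens on
the namespace pseudo-file is closed again right away, avoiding the leak
described above.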

@@ -409,26 +413,26 @@ carefully cleans up after itself, but it is still possible. It is
usually easier to collect metrics at regular intervals (e.g. every
minute, with the collectd LXC plugin) and rely on that instead.

But, if you’d still like to gather the stats when a container stops,
But, if you'd still like to gather the stats when a container stops,
here is how:

For each container, start a collection process, and move it to the
control groups that you want to monitor by writing its PID to the tasks
file of the cgroup. The collection process should periodically re-read
the tasks file to check if it’s the last process of the control group.
the tasks file to check if it's the last process of the control group.
(If you also want to collect network statistics as explained in the
previous section, you should also move the process to the appropriate
network namespace.)

When the container exits, `lxc-start` will try to
delete the control groups. It will fail, since the control group is
still in use; but that’s fine. Your process should now detect that it is
still in use; but that's fine. Your process should now detect that it is
the only one remaining in the group. Now is the right time to collect
all the metrics you need!

Finally, your process should move itself back to the root control group,
and remove the container control group. To remove a control group, just
`rmdir` its directory. It’s counter-intuitive to
`rmdir` its directory. It's counter-intuitive to
`rmdir` a directory as it still contains files; but
remember that this is a pseudo-filesystem, so usual rules don’t apply.
remember that this is a pseudo-filesystem, so usual rules don't apply.
After the cleanup is done, the collection process can exit safely.
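A minimal shell sketch of such a watcher, assuming the hierarchy layout and
`$LONGID` convention used earlier, with error handling omitted:

    # join the container's cgroup, then wait until we are its last member
    echo $$ > /sys/fs/cgroup/cpuacct/lxc/$LONGID/tasks
    while [ $(wc -l < /sys/fs/cgroup/cpuacct/lxc/$LONGID/tasks) -gt 1 ]; do sleep 1; done
    cat /sys/fs/cgroup/cpuacct/lxc/$LONGID/cpuacct.stat  # collect the final metrics
    echo $$ > /sys/fs/cgroup/cpuacct/tasks               # move back to the root group
    rmdir /sys/fs/cgroup/cpuacct/lxc/$LONGID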

@@ -1,463 +0,0 @@
:title: Runtime Metrics
:description: Measure the behavior of running containers
:keywords: docker, metrics, CPU, memory, disk, IO, run, runtime

.. _run_metrics:


Runtime Metrics
===============

Linux Containers rely on `control groups
<https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt>`_ which
not only track groups of processes, but also expose metrics about CPU,
memory, and block I/O usage. You can access those metrics and obtain
network usage metrics as well. This is relevant for "pure" LXC
containers, as well as for Docker containers.

Control Groups
--------------

Control groups are exposed through a pseudo-filesystem. In recent
distros, you should find this filesystem under
``/sys/fs/cgroup``. Under that directory, you will see multiple
sub-directories, called devices, freezer, blkio, etc.; each
sub-directory actually corresponds to a different cgroup hierarchy.

On older systems, the control groups might be mounted on ``/cgroup``,
without distinct hierarchies. In that case, instead of seeing the
sub-directories, you will see a bunch of files in that directory, and
possibly some directories corresponding to existing containers.

To figure out where your control groups are mounted, you can run:

::

    grep cgroup /proc/mounts

.. _run_findpid:

Enumerating Cgroups
-------------------

You can look into ``/proc/cgroups`` to see the different control group
subsystems known to the system, the hierarchy they belong to, and how
many groups they contain.

You can also look at ``/proc/<pid>/cgroup`` to see which control
groups a process belongs to. The control group will be shown as a path
relative to the root of the hierarchy mountpoint; e.g. ``/`` means
“this process has not been assigned into a particular group”, while
``/lxc/pumpkin`` means that the process is likely to be a member of a
container named ``pumpkin``.

Finding the Cgroup for a Given Container
----------------------------------------

For each container, one cgroup will be created in each hierarchy. On
older systems with older versions of the LXC userland tools, the name
of the cgroup will be the name of the container. With more recent
versions of the LXC tools, the cgroup will be ``lxc/<container_name>.``

For Docker containers using cgroups, the container name will be the
full ID or long ID of the container. If a container shows up as
ae836c95b4c3 in ``docker ps``, its long ID might be something like
``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You
can look it up with ``docker inspect`` or ``docker ps --no-trunc``.

Putting everything together to look at the memory metrics for a Docker
container, take a look at ``/sys/fs/cgroup/memory/lxc/<longid>/``.

Metrics from Cgroups: Memory, CPU, Block IO
-------------------------------------------

For each subsystem (memory, CPU, and block I/O), you will find one or
more pseudo-files containing statistics.

Memory Metrics: ``memory.stat``
...............................

Memory metrics are found in the "memory" cgroup. Note that the memory
control group adds a little overhead, because it does very
fine-grained accounting of the memory usage on your host. Therefore,
many distros chose to not enable it by default. Generally, to enable
it, all you have to do is to add some kernel command-line parameters:
``cgroup_enable=memory swapaccount=1``.

The metrics are in the pseudo-file ``memory.stat``. Here is what it
will look like:

::

    cache 11492564992
    rss 1930993664
    mapped_file 306728960
    pgpgin 406632648
    pgpgout 403355412
    swap 0
    pgfault 728281223
    pgmajfault 1724
    inactive_anon 46608384
    active_anon 1884520448
    inactive_file 7003344896
    active_file 4489052160
    unevictable 32768
    hierarchical_memory_limit 9223372036854775807
    hierarchical_memsw_limit 9223372036854775807
    total_cache 11492564992
    total_rss 1930993664
    total_mapped_file 306728960
    total_pgpgin 406632648
    total_pgpgout 403355412
    total_swap 0
    total_pgfault 728281223
    total_pgmajfault 1724
    total_inactive_anon 46608384
    total_active_anon 1884520448
    total_inactive_file 7003344896
    total_active_file 4489052160
    total_unevictable 32768

The first half (without the ``total_`` prefix) contains statistics
relevant to the processes within the cgroup, excluding
sub-cgroups. The second half (with the ``total_`` prefix) includes
sub-cgroups as well.

Some metrics are "gauges", i.e. values that can increase or decrease
(e.g. swap, the amount of swap space used by the members of the
cgroup). Some others are "counters", i.e. values that can only go up,
because they represent occurrences of a specific event (e.g. pgfault,
which indicates the number of page faults which happened since the
creation of the cgroup; this number can never decrease).

cache
  the amount of memory used by the processes of this control group
  that can be associated precisely with a block on a block
  device. When you read from and write to files on disk, this amount
  will increase. This will be the case if you use "conventional" I/O
  (``open``, ``read``, ``write`` syscalls) as well as mapped files
  (with ``mmap``). It also accounts for the memory used by ``tmpfs``
  mounts, though the reasons are unclear.

rss
  the amount of memory that *doesn't* correspond to anything on
  disk: stacks, heaps, and anonymous memory maps.

mapped_file
  indicates the amount of memory mapped by the processes in the
  control group. It doesn't give you information about *how much*
  memory is used; it rather tells you *how* it is used.

pgfault and pgmajfault
  indicate the number of times that a process of the cgroup triggered
  a "page fault" and a "major fault", respectively. A page fault
  happens when a process accesses a part of its virtual memory space
  which is nonexistent or protected. The former can happen if the
  process is buggy and tries to access an invalid address (it will
  then be sent a ``SIGSEGV`` signal, typically killing it with the
  famous ``Segmentation fault`` message). The latter can happen when
  the process reads from a memory zone which has been swapped out, or
  which corresponds to a mapped file: in that case, the kernel will
  load the page from disk, and let the CPU complete the memory
  access. It can also happen when the process writes to a
  copy-on-write memory zone: likewise, the kernel will preempt the
  process, duplicate the memory page, and resume the write operation
  on the process' own copy of the page. "Major" faults happen when the
  kernel actually has to read the data from disk. When it just has to
  duplicate an existing page, or allocate an empty page, it's a
  regular (or "minor") fault.

swap
  the amount of swap currently used by the processes in this cgroup.

active_anon and inactive_anon
  the amount of *anonymous* memory that has been identified as
  respectively *active* and *inactive* by the kernel. "Anonymous"
  memory is the memory that is *not* linked to disk pages. In other
  words, that's the equivalent of the rss counter described above. In
  fact, the very definition of the rss counter is **active_anon** +
  **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory
  used up by ``tmpfs`` filesystems mounted by this control
  group). Now, what's the difference between "active" and "inactive"?
  Pages are initially "active"; and at regular intervals, the kernel
  sweeps over the memory, and tags some pages as "inactive". Whenever
  they are accessed again, they are immediately retagged
  "active". When the kernel is almost out of memory, and time comes to
  swap out to disk, the kernel will swap "inactive" pages.

active_file and inactive_file
  cache memory, with *active* and *inactive* similar to the *anon*
  memory above. The exact formula is cache = **active_file** +
  **inactive_file** + **tmpfs**. The exact rules used by the kernel to
  move memory pages between active and inactive sets are different
  from the ones used for anonymous memory, but the general principle
  is the same. Note that when the kernel needs to reclaim memory, it
  is cheaper to reclaim a clean (=non modified) page from this pool,
  since it can be reclaimed immediately (while anonymous pages and
  dirty/modified pages have to be written to disk first).

unevictable
  the amount of memory that cannot be reclaimed; generally, it will
  account for memory that has been "locked" with ``mlock``. It is
  often used by crypto frameworks to make sure that secret keys and
  other sensitive material never gets swapped out to disk.

memory and memsw limits
  These are not really metrics, but a reminder of the limits applied
  to this cgroup. The first one indicates the maximum amount of
  physical memory that can be used by the processes of this control
  group; the second one indicates the maximum amount of RAM+swap.

Accounting for memory in the page cache is very complex. If two
processes in different control groups both read the same file
(ultimately relying on the same blocks on disk), the corresponding
memory charge will be split between the control groups. It's nice, but
it also means that when a cgroup is terminated, it could increase the
memory usage of another cgroup, because they are not splitting the
cost anymore for those memory pages.

CPU metrics: ``cpuacct.stat``
.............................

Now that we've covered memory metrics, everything else will look very
simple in comparison. CPU metrics will be found in the ``cpuacct``
controller.

For each container, you will find a pseudo-file ``cpuacct.stat``,
containing the CPU usage accumulated by the processes of the
container, broken down between ``user`` and ``system`` time. If you're
not familiar with the distinction, ``user`` is the time during which
the processes were in direct control of the CPU (i.e. executing
process code), and ``system`` is the time during which the CPU was
executing system calls on behalf of those processes.

Those times are expressed in ticks of 1/100th of a second. Actually,
they are expressed in "user jiffies". There are ``USER_HZ``
*"jiffies"* per second, and on x86 systems, ``USER_HZ`` is 100. This
used to map exactly to the number of scheduler "ticks" per second; but
with the advent of higher frequency scheduling, as well as `tickless
kernels <http://lwn.net/Articles/549580/>`_, the number of kernel
ticks wasn't relevant anymore. It stuck around anyway, mainly for
legacy and compatibility reasons.

Block I/O metrics
.................

Block I/O is accounted in the ``blkio`` controller. Different metrics
are scattered across different files. While you can find in-depth
details in the `blkio-controller
<https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt>`_
file in the kernel documentation, here is a short list of the most
relevant ones:

blkio.sectors
  contain the number of 512-bytes sectors read and written by the
  processes member of the cgroup, device by device. Reads and writes
  are merged in a single counter.

blkio.io_service_bytes
  indicates the number of bytes read and written by the cgroup. It has
  4 counters per device, because for each device, it differentiates
  between synchronous vs. asynchronous I/O, and reads vs. writes.

blkio.io_serviced
  the number of I/O operations performed, regardless of their size. It
  also has 4 counters per device.

blkio.io_queued
  indicates the number of I/O operations currently queued for this
  cgroup. In other words, if the cgroup isn't doing any I/O, this will
  be zero. Note that the opposite is not true. In other words, if
  there is no I/O queued, it does not mean that the cgroup is idle
  (I/O-wise). It could be doing purely synchronous reads on an
  otherwise quiescent device, which is therefore able to handle them
  immediately, without queuing. Also, while it is helpful to figure
  out which cgroup is putting stress on the I/O subsystem, keep in
  mind that it is a relative quantity. Even if a process group does
  not perform more I/O, its queue size can increase just because the
  device load increases because of other devices.

Network Metrics
---------------

Network metrics are not exposed directly by control groups. There is a
good explanation for that: network interfaces exist within the context
of *network namespaces*. The kernel could probably accumulate metrics
about packets and bytes sent and received by a group of processes, but
those metrics wouldn't be very useful. You want per-interface metrics
(because traffic happening on the local ``lo`` interface doesn't
really count). But since processes in a single cgroup can belong to
multiple network namespaces, those metrics would be harder to
interpret: multiple network namespaces means multiple ``lo``
interfaces, potentially multiple ``eth0`` interfaces, etc.; so this is
why there is no easy way to gather network metrics with control
groups.

Instead we can gather network metrics from other sources:

IPtables
........

IPtables (or rather, the netfilter framework for which iptables is
just an interface) can do some serious accounting.

For instance, you can setup a rule to account for the outbound HTTP
traffic on a web server:

::

    iptables -I OUTPUT -p tcp --sport 80


There is no ``-j`` or ``-g`` flag, so the rule will just count matched
packets and go to the following rule.

Later, you can check the values of the counters, with:

::

    iptables -nxvL OUTPUT

Technically, ``-n`` is not required, but it will prevent iptables from
doing DNS reverse lookups, which are probably useless in this
scenario.

Counters include packets and bytes. If you want to setup metrics for
container traffic like this, you could execute a ``for`` loop to add
two ``iptables`` rules per container IP address (one in each
direction), in the ``FORWARD`` chain. This will only meter traffic
going through the NAT layer; you will also have to add traffic going
through the userland proxy.

Then, you will need to check those counters on a regular basis. If you
happen to use ``collectd``, there is a nice plugin to automate
iptables counters collection.
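A sketch of that loop (the container IP addresses are illustrative; as
explained above, rules without a ``-j`` target only count traffic)::

    for IP in 172.17.0.2 172.17.0.3; do
        iptables -I FORWARD -s $IP
        iptables -I FORWARD -d $IP
    done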
|
||||
Interface-level counters
|
||||
........................
|
||||
|
||||
Since each container has a virtual Ethernet interface, you might want
|
||||
to check directly the TX and RX counters of this interface. You will
|
||||
notice that each container is associated to a virtual Ethernet
|
||||
interface in your host, with a name like ``vethKk8Zqi``. Figuring out
|
||||
which interface corresponds to which container is, unfortunately,
|
||||
difficult.
|
||||
|
||||
But for now, the best way is to check the metrics *from within the
|
||||
containers*. To accomplish this, you can run an executable from the
|
||||
host environment within the network namespace of a container using
|
||||
**ip-netns magic**.
|
||||
|
||||
The ``ip-netns exec`` command will let you execute any program
|
||||
(present in the host system) within any network namespace visible to
|
||||
the current process. This means that your host will be able to enter
|
||||
the network namespace of your containers, but your containers won't be
|
||||
able to access the host, nor their sibling containers. Containers will
|
||||
be able to “see” and affect their sub-containers, though.
|
||||
|
||||
The exact format of the command is::
|
||||
|
||||
ip netns exec <nsname> <command...>
|
||||
|
||||
For example::
|
||||
|
||||
ip netns exec mycontainer netstat -i
|
||||
|
||||
``ip netns`` finds the "mycontainer" container by using namespaces
|
||||
pseudo-files. Each process belongs to one network namespace, one PID
|
||||
namespace, one ``mnt`` namespace, etc., and those namespaces are
|
||||
materialized under ``/proc/<pid>/ns/``. For example, the network
|
||||
namespace of PID 42 is materialized by the pseudo-file
|
||||
``/proc/42/ns/net``.
|
||||
|
||||
When you run ``ip netns exec mycontainer ...``, it expects
|
||||
``/var/run/netns/mycontainer`` to be one of those
|
||||
pseudo-files. (Symlinks are accepted.)
|
||||
|
||||
In other words, to execute a command within the network namespace of a
|
||||
container, we need to:
|
||||
|
||||
* Find out the PID of any process within the container that we want to
|
||||
investigate;
|
||||
* Create a symlink from ``/var/run/netns/<somename>`` to
|
||||
``/proc/<thepid>/ns/net``
|
||||
* Execute ``ip netns exec <somename> ....``
|
||||
|
||||
Please review :ref:`run_findpid` to learn how to find the cgroup of a
|
||||
pprocess running in the container of which you want to measure network
|
||||
usage. From there, you can examine the pseudo-file named ``tasks``,
|
||||
which containes the PIDs that are in the control group (i.e. in the
|
||||
container). Pick any one of them.
|
||||
|
||||
Putting everything together, if the "short ID" of a container is held
|
||||
in the environment variable ``$CID``, then you can do this::
|
||||
|
||||
TASKS=/sys/fs/cgroup/devices/$CID*/tasks
|
||||
PID=$(head -n 1 $TASKS)
|
||||
mkdir -p /var/run/netns
|
||||
ln -sf /proc/$PID/ns/net /var/run/netns/$CID
|
||||
ip netns exec $CID netstat -i
|
||||
|
||||
|
||||
Tips for high-performance metric collection
-------------------------------------------

Note that running a new process each time you want to update metrics
is (relatively) expensive. If you want to collect metrics at high
resolutions, and/or over a large number of containers (think 1000
containers on a single host), you do not want to fork a new process
each time.

Here is how to collect metrics from a single process. You will have to
write your metric collector in C (or any language that lets you do
low-level system calls). You need to use a special system call,
``setns()``, which lets the current process enter any arbitrary
namespace. It requires, however, an open file descriptor to the
namespace pseudo-file (remember: that's the pseudo-file in
``/proc/<pid>/ns/net``).

However, there is a catch: you must not keep this file descriptor
open. If you do, when the last process of the control group exits, the
namespace will not be destroyed, and its network resources (like the
virtual interface of the container) will stay around forever (or
until you close that file descriptor).

The right approach would be to keep track of the first PID of each
container, and re-open the namespace pseudo-file each time.
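
As an illustration, here is a minimal sketch of such a collector,
written in C. It is not part of Docker itself; it assumes you pass it
the PID of a process running in the target container (found through
the ``tasks`` pseudo-file as described above), and it simply enters
the container's network namespace and dumps the interface counters
from ``/proc/net/dev``::

    /* Minimal sketch: enter a container's network namespace with
     * setns() and print its interface counters. Must run as root. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        char path[64], buf[4096];
        ssize_t n;
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <pid>\n", argv[0]);
            return 1;
        }
        snprintf(path, sizeof(path), "/proc/%s/ns/net", argv[1]);

        /* Open the namespace pseudo-file and enter the namespace. */
        fd = open(path, O_RDONLY);
        if (fd < 0 || setns(fd, 0) < 0) {
            perror("setns");
            return 1;
        }
        /* Close it right away: keeping it open would pin the
         * namespace alive after the container exits (see above). */
        close(fd);

        /* /proc/net/dev now shows the container's interfaces. */
        fd = open("/proc/net/dev", O_RDONLY);
        if (fd < 0) {
            perror("open /proc/net/dev");
            return 1;
        }
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            write(STDOUT_FILENO, buf, n);
        close(fd);
        return 0;
    }

A long-running collector would do the same thing in a loop,
re-opening the namespace pseudo-file on every iteration instead of
caching the file descriptor.
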
Collecting metrics when a container exits
-----------------------------------------

Sometimes, you do not care about real time metric collection, but when
a container exits, you want to know how much CPU, memory, etc. it has
used.

Docker makes this difficult because it relies on ``lxc-start``, which
carefully cleans up after itself, but it is still possible. It is
usually easier to collect metrics at regular intervals (e.g. every
minute, with the collectd LXC plugin) and rely on that instead.

But, if you'd still like to gather the stats when a container stops,
here is how:

For each container, start a collection process, and move it to the
control groups that you want to monitor by writing its PID to the
tasks file of the cgroup. The collection process should periodically
re-read the tasks file to check if it's the last process of the
control group. (If you also want to collect network statistics as
explained in the previous section, you should also move the process to
the appropriate network namespace.)

When the container exits, ``lxc-start`` will try to delete the control
groups. It will fail, since the control group is still in use; but
that's fine. Your process should now detect that it is the only one
remaining in the group. Now is the right time to collect all the
metrics you need!

Finally, your process should move itself back to the root control
group, and remove the container control group. To remove a control
group, just ``rmdir`` its directory. It's counter-intuitive to
``rmdir`` a directory that still contains files; but remember that
this is a pseudo-filesystem, so usual rules don't apply. After the
cleanup is done, the collection process can exit safely.
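
To make this dance concrete, here is a hypothetical sketch in C. The
paths assume the cgroup v1 hierarchy that Docker used at the time, and
``memory.max_usage_in_bytes`` is just one example of a metric you
could read at this point; it is not Docker's own tooling::

    /* Hypothetical "collect on exit" helper: join the container's
     * cgroup, wait to be the last task in it, read a final metric,
     * then move back to the root cgroup and remove the empty one. */
    #include <stdio.h>
    #include <unistd.h>

    static void write_pid(const char *path)
    {
        FILE *f = fopen(path, "w");
        if (f) {
            fprintf(f, "%ld\n", (long)getpid());
            fclose(f);
        }
    }

    int main(int argc, char **argv)
    {
        /* argv[1]: container cgroup directory, e.g.
         * /sys/fs/cgroup/memory/lxc/<container-id> */
        char path[512], line[256];
        FILE *f;
        int alone = 0;

        if (argc != 2)
            return 1;

        /* Join the control group we want to watch. */
        snprintf(path, sizeof(path), "%s/tasks", argv[1]);
        write_pid(path);

        /* Poll the tasks file until we are the only PID left. */
        while (!alone) {
            int count = 0;
            f = fopen(path, "r");
            while (f && fgets(line, sizeof(line), f))
                count++;
            if (f)
                fclose(f);
            alone = (count == 1);   /* only us: container exited */
            if (!alone)
                sleep(1);
        }

        /* The container has exited: read the final metrics. */
        snprintf(path, sizeof(path), "%s/memory.max_usage_in_bytes",
                 argv[1]);
        f = fopen(path, "r");
        if (f && fgets(line, sizeof(line), f))
            printf("peak memory usage: %s", line);
        if (f)
            fclose(f);

        /* Move back to the root cgroup and remove the empty one. */
        write_pid("/sys/fs/cgroup/memory/tasks");
        rmdir(argv[1]);
        return 0;
    }
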
@ -9,11 +9,11 @@ page_keywords: Docker, Docker documentation, security

There are three major areas to consider when reviewing Docker security:

- the intrinsic security of containers, as implemented by kernel
  namespaces and cgroups;
- the attack surface of the Docker daemon itself;
- the "hardening" security features of the kernel and how they
  interact with containers.

## Kernel Namespaces

@ -33,12 +33,12 @@ less affect, processes running in another container, or in the host
system.

**Each container also gets its own network stack**, meaning that a
container doesn't get privileged access to the sockets or interfaces
of another container. Of course, if the host system is set up
accordingly, containers can interact with each other through their
respective network interfaces — just like they can interact with
external hosts. When you specify public ports for your containers or use
[*links*](/use/working_with_links_names/#working-with-links-names)
then IP traffic is allowed between containers. They can ping each other,
send/receive UDP packets, and establish TCP connections, but that can be
restricted if necessary. From a network architecture point of view, all
@ -54,8 +54,8 @@ This means that since July 2008 (date of the 2.6.26 release, now 5 years
ago), namespace code has been exercised and scrutinized on a large
number of production systems. And there is more: the design and
inspiration for the namespaces code are even older. Namespaces are
actually an effort to reimplement the features of [OpenVZ](
http://en.wikipedia.org/wiki/OpenVZ) in such a way that they
could be merged within the mainstream kernel. And OpenVZ was initially
released in 2005, so both the design and the implementation are pretty
mature.
@ -90,11 +90,10 @@ Docker daemon**. This is a direct consequence of some powerful Docker
features. Specifically, Docker allows you to share a directory between
the Docker host and a guest container; and it allows you to do so
without limiting the access rights of the container. This means that you
can start a container where the `/host` directory will be the `/` directory
on your host; and the container will be able to alter your host filesystem
without any restriction. This sounds crazy? Well, you have to know that
**all virtualization systems allowing filesystem resource sharing behave the
same way**. Nothing prevents you from sharing your root filesystem (or
even your root block device) with a virtual machine.

@ -120,8 +119,8 @@ and client SSL certificates.

Recent improvements in Linux namespaces will soon allow running
full-featured containers without root privileges, thanks to the new user
namespace. This is covered in detail [here](
http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/).
Moreover, this will solve the problem caused by sharing filesystems
between host and guest, since the user namespace allows users within
containers (including the root user) to be mapped to other users in the
@ -130,13 +129,13 @@ host system.
The end goal for Docker is therefore to implement two additional
security improvements:

- map the root user of a container to a non-root user of the Docker
  host, to mitigate the effects of a container-to-host privilege
  escalation;
- allow the Docker daemon to run without root privileges, and delegate
  operations requiring those privileges to well-audited sub-processes,
  each with its own (very limited) scope: virtual network setup,
  filesystem management, etc.

Finally, if you run Docker on a server, it is recommended to run
exclusively Docker on the server, and move all other services within
@ -152,11 +151,11 @@ capabilities. What does that mean?
Capabilities turn the binary "root/non-root" dichotomy into a
fine-grained access control system. Processes (like web servers) that
just need to bind on a port below 1024 do not have to run as root: they
can just be granted the `net_bind_service` capability instead. And there
are many other capabilities, for almost all the specific areas where root
privileges are usually needed.

This means a lot for container security; let's see why!

Your average server (bare metal or virtual machine) needs to run a bunch
of processes as root. Those typically include SSH, cron, syslogd;
@ -165,41 +164,41 @@ tools (to handle e.g. DHCP, WPA, or VPNs), and much more. A container is
very different, because almost all of those tasks are handled by the
infrastructure around the container:

- SSH access will typically be managed by a single server running in
  the Docker host;
- `cron`, when necessary, should run as a user
  process, dedicated and tailored for the app that needs its
  scheduling service, rather than as a platform-wide facility;
- log management will also typically be handed to Docker, or by
  third-party services like Loggly or Splunk;
- hardware management is irrelevant, meaning that you never need to
  run `udevd` or equivalent daemons within
  containers;
- network management happens outside of the containers, enforcing
  separation of concerns as much as possible, meaning that a container
  should never need to perform `ifconfig`,
  `route`, or ip commands (except when a container
  is specifically engineered to behave like a router or firewall, of
  course).

This means that in most cases, containers will not need "real" root
privileges *at all*. And therefore, containers can run with a reduced
capability set; meaning that "root" within a container has far fewer
privileges than the real "root". For instance, it is possible to (see
the sketch after this list):

- deny all "mount" operations;
- deny access to raw sockets (to prevent packet spoofing);
- deny access to some filesystem operations, like creating new device
  nodes, changing the owner of files, or altering attributes (including
  the immutable flag);
- deny module loading;
- and many others.
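
For illustration, here is roughly what such capability dropping looks
like at the kernel API level. This is a hedged sketch, not Docker's
actual code (at the time Docker delegated this to LXC, via the dropped
capabilities listed in `lxc_template.go`), and the mapping of each
denial above to a `CAP_*` constant is our own annotation:

    /* Sketch: drop capabilities from the bounding set before
     * exec'ing a container's init process. */
    #include <sys/prctl.h>
    #include <linux/capability.h>

    static void drop_caps(void)
    {
        prctl(PR_CAPBSET_DROP, CAP_SYS_ADMIN, 0, 0, 0);  /* mounts, etc. */
        prctl(PR_CAPBSET_DROP, CAP_NET_RAW, 0, 0, 0);    /* raw sockets */
        prctl(PR_CAPBSET_DROP, CAP_MKNOD, 0, 0, 0);      /* device nodes */
        prctl(PR_CAPBSET_DROP, CAP_CHOWN, 0, 0, 0);      /* file ownership */
        prctl(PR_CAPBSET_DROP, CAP_LINUX_IMMUTABLE, 0, 0, 0); /* +i flag */
        prctl(PR_CAPBSET_DROP, CAP_SYS_MODULE, 0, 0, 0); /* module loading */
        /* ...and many others, as in the list above. */
    }
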
This means that even if an intruder manages to escalate to root within a
container, it will be much harder to do serious damage, or to escalate
to the host.

This won't affect regular web apps; but malicious users will find that
the arsenal at their disposal has shrunk considerably! You can see [the
list of dropped capabilities in the Docker
code](https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97),
@ -217,28 +216,28 @@ modern Linux kernels. It is also possible to leverage existing,
well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with
Docker.

While Docker currently only enables capabilities, it doesn't interfere
with the other systems. This means that there are many different ways to
harden a Docker host. Here are a few examples.

- You can run a kernel with GRSEC and PAX. This will add many safety
  checks, both at compile-time and run-time; it will also defeat many
  exploits, thanks to techniques like address randomization. It
  doesn't require Docker-specific configuration, since those security
  features apply system-wide, independently of containers.
- If your distribution comes with security model templates for LXC
  containers, you can use them out of the box. For instance, Ubuntu
  comes with AppArmor templates for LXC, and those templates provide
  an extra safety net (even though it overlaps greatly with
  capabilities).
- You can define your own policies using your favorite access control
  mechanism. Since Docker containers are standard LXC containers,
  there is nothing "magic" or specific to Docker.

Just like there are many third-party tools to augment Docker containers
with e.g. special network topologies or shared filesystems, you can
expect to see tools to harden existing Docker containers without
affecting Docker's core.

## Conclusions

@ -254,5 +253,5 @@ containerization systems, you will be able to implement them as well
with Docker, since everything is provided by the kernel anyway.

For more context and especially for comparisons with VMs and other
container systems, please also see the [original blog post](
http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/).

@ -1,269 +0,0 @@
:title: Docker Security
:description: Review of the Docker Daemon attack surface
:keywords: Docker, Docker documentation, security

.. _dockersecurity:

Docker Security
===============

*Adapted from* `Containers & Docker: How Secure are They? <blogsecurity_>`_

There are three major areas to consider when reviewing Docker security:

* the intrinsic security of containers, as implemented by kernel
  namespaces and cgroups;
* the attack surface of the Docker daemon itself;
* the "hardening" security features of the kernel and how they
  interact with containers.

Kernel Namespaces
-----------------

Docker containers are essentially LXC containers, and they come with
the same security features. When you start a container with ``docker
run``, behind the scenes Docker uses ``lxc-start`` to execute the
Docker container. This creates a set of namespaces and control groups
for the container. Those namespaces and control groups are not created
by Docker itself, but by ``lxc-start``. This means that as the LXC
userland tools evolve (and provide additional namespaces and isolation
features), Docker will automatically make use of them.

**Namespaces provide the first and most straightforward form of
isolation**: processes running within a container cannot see, and even
less affect, processes running in another container, or in the host
system.

**Each container also gets its own network stack**, meaning that a
container doesn't get privileged access to the sockets or interfaces
of another container. Of course, if the host system is set up
accordingly, containers can interact with each other through their
respective network interfaces — just like they can interact with
external hosts. When you specify public ports for your containers or
use :ref:`links <working_with_links_names>` then IP traffic is allowed
between containers. They can ping each other, send/receive UDP
packets, and establish TCP connections, but that can be restricted if
necessary. From a network architecture point of view, all containers
on a given Docker host are sitting on bridge interfaces. This means
that they are just like physical machines connected through a common
Ethernet switch; no more, no less.

How mature is the code providing kernel namespaces and private
networking? Kernel namespaces were introduced `between kernel version
2.6.15 and 2.6.26
<http://lxc.sourceforge.net/index.php/about/kernel-namespaces/>`_. This
means that since July 2008 (date of the 2.6.26 release, now 5 years
ago), namespace code has been exercised and scrutinized on a large
number of production systems. And there is more: the design and
inspiration for the namespaces code are even older. Namespaces are
actually an effort to reimplement the features of `OpenVZ
<http://en.wikipedia.org/wiki/OpenVZ>`_ in such a way that they could
be merged within the mainstream kernel. And OpenVZ was initially
released in 2005, so both the design and the implementation are
pretty mature.

Control Groups
--------------

Control Groups are the other key component of Linux Containers. They
implement resource accounting and limiting. They provide a lot of very
useful metrics, but they also help to ensure that each container gets
its fair share of memory, CPU, disk I/O; and, more importantly, that a
single container cannot bring the system down by exhausting one of
those resources.

So while they do not play a role in preventing one container from
accessing or affecting the data and processes of another container,
they are essential to fend off some denial-of-service attacks. They
are particularly important on multi-tenant platforms, like public and
private PaaS, to guarantee a consistent uptime (and performance) even
when some applications start to misbehave.

Control Groups have been around for a while as well: the code was
started in 2006, and initially merged in kernel 2.6.24.

.. _dockersecurity_daemon:

Docker Daemon Attack Surface
----------------------------

Running containers (and applications) with Docker implies running the
Docker daemon. This daemon currently requires root privileges, and you
should therefore be aware of some important details.

First of all, **only trusted users should be allowed to control your
Docker daemon**. This is a direct consequence of some powerful Docker
features. Specifically, Docker allows you to share a directory between
the Docker host and a guest container; and it allows you to do so
without limiting the access rights of the container. This means that
you can start a container where the ``/host`` directory will be the
``/`` directory on your host; and the container will be able to alter
your host filesystem without any restriction. This sounds crazy? Well,
you have to know that **all virtualization systems allowing filesystem
resource sharing behave the same way**. Nothing prevents you from
sharing your root filesystem (or even your root block device) with a
virtual machine.

This has a strong security implication: if you instrument Docker from
e.g. a web server to provision containers through an API, you should
be even more careful than usual with parameter checking, to make sure
that a malicious user cannot pass crafted parameters causing Docker to
create arbitrary containers.

For this reason, the REST API endpoint (used by the Docker CLI to
communicate with the Docker daemon) changed in Docker 0.5.2, and now
uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the
latter being prone to cross-site-scripting attacks if you happen to
run Docker directly on your local machine, outside of a VM). You can
then use traditional UNIX permission checks to limit access to the
control socket.

You can also expose the REST API over HTTP if you explicitly decide
so. However, if you do that, being aware of the abovementioned
security implication, you should ensure that it will be reachable
only from a trusted network or VPN; or protected with e.g. ``stunnel``
and client SSL certificates.

Recent improvements in Linux namespaces will soon allow running
full-featured containers without root privileges, thanks to the new
user namespace. This is covered in detail `here
<http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/>`_. Moreover,
this will solve the problem caused by sharing filesystems between host
and guest, since the user namespace allows users within containers
(including the root user) to be mapped to other users in the host
system.

The end goal for Docker is therefore to implement two additional
security improvements:

* map the root user of a container to a non-root user of the Docker
  host, to mitigate the effects of a container-to-host privilege
  escalation;
* allow the Docker daemon to run without root privileges, and delegate
  operations requiring those privileges to well-audited sub-processes,
  each with its own (very limited) scope: virtual network setup,
  filesystem management, etc.

Finally, if you run Docker on a server, it is recommended to run
exclusively Docker on the server, and move all other services within
containers controlled by Docker. Of course, it is fine to keep your
favorite admin tools (probably at least an SSH server), as well as
existing monitoring/supervision processes (e.g. NRPE, collectd, etc).

Linux Kernel Capabilities
-------------------------

By default, Docker starts containers with a very restricted set of
capabilities. What does that mean?

Capabilities turn the binary "root/non-root" dichotomy into a
fine-grained access control system. Processes (like web servers) that
just need to bind on a port below 1024 do not have to run as root:
they can just be granted the ``net_bind_service`` capability
instead. And there are many other capabilities, for almost all the
specific areas where root privileges are usually needed.

This means a lot for container security; let's see why!

Your average server (bare metal or virtual machine) needs to run a
bunch of processes as root. Those typically include SSH, cron,
syslogd; hardware management tools (to e.g. load modules), network
configuration tools (to handle e.g. DHCP, WPA, or VPNs), and much
more. A container is very different, because almost all of those tasks
are handled by the infrastructure around the container:

* SSH access will typically be managed by a single server running in
  the Docker host;
* ``cron``, when necessary, should run as a user process, dedicated
  and tailored for the app that needs its scheduling service, rather
  than as a platform-wide facility;
* log management will also typically be handed to Docker, or by
  third-party services like Loggly or Splunk;
* hardware management is irrelevant, meaning that you never need to
  run ``udevd`` or equivalent daemons within containers;
* network management happens outside of the containers, enforcing
  separation of concerns as much as possible, meaning that a container
  should never need to perform ``ifconfig``, ``route``, or ip commands
  (except when a container is specifically engineered to behave like a
  router or firewall, of course).

This means that in most cases, containers will not need "real" root
privileges *at all*. And therefore, containers can run with a reduced
capability set; meaning that "root" within a container has far fewer
privileges than the real "root". For instance, it is possible to:

* deny all "mount" operations;
* deny access to raw sockets (to prevent packet spoofing);
* deny access to some filesystem operations, like creating new device
  nodes, changing the owner of files, or altering attributes
  (including the immutable flag);
* deny module loading;
* and many others.

This means that even if an intruder manages to escalate to root within
a container, it will be much harder to do serious damage, or to
escalate to the host.

This won't affect regular web apps; but malicious users will find that
the arsenal at their disposal has shrunk considerably! You can see
`the list of dropped capabilities in the Docker code
<https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97>`_,
and a full list of available capabilities in `Linux manpages
<http://man7.org/linux/man-pages/man7/capabilities.7.html>`_.

Of course, you can always enable extra capabilities if you really need
them (for instance, if you want to use a FUSE-based filesystem), but
by default, Docker containers will be locked down to ensure maximum
safety.

Other Kernel Security Features
------------------------------

Capabilities are just one of the many security features provided by
modern Linux kernels. It is also possible to leverage existing,
well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with
Docker.

While Docker currently only enables capabilities, it doesn't interfere
with the other systems. This means that there are many different ways
to harden a Docker host. Here are a few examples.

* You can run a kernel with GRSEC and PAX. This will add many safety
  checks, both at compile-time and run-time; it will also defeat many
  exploits, thanks to techniques like address randomization. It
  doesn't require Docker-specific configuration, since those security
  features apply system-wide, independently of containers.
* If your distribution comes with security model templates for LXC
  containers, you can use them out of the box. For instance, Ubuntu
  comes with AppArmor templates for LXC, and those templates provide
  an extra safety net (even though it overlaps greatly with
  capabilities).
* You can define your own policies using your favorite access control
  mechanism. Since Docker containers are standard LXC containers,
  there is nothing "magic" or specific to Docker.

Just like there are many third-party tools to augment Docker
containers with e.g. special network topologies or shared filesystems,
you can expect to see tools to harden existing Docker containers
without affecting Docker's core.

Conclusions
-----------

Docker containers are, by default, quite secure; especially if you
take care of running your processes inside the containers as
non-privileged users (i.e. non root).

You can add an extra layer of safety by enabling AppArmor, SELinux,
GRSEC, or your favorite hardening solution.

Last but not least, if you see interesting security features in other
containerization systems, you will be able to implement them as well
with Docker, since everything is provided by the kernel anyway.

For more context and especially for comparisons with VMs and other
container systems, please also see the `original blog post
<blogsecurity_>`_.

.. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/

@ -1,266 +0,0 @@
# -*- coding: utf-8 -*-
#
# Docker documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 19 12:34:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------


# Additional templates that should be rendered to pages, maps page names to
# template names.
# the 'redirect_home.html' page redirects using a http meta refresh which, according
# to official sources is more or less equivalent of a 301.

html_additional_pages = {
    'concepts/containers': 'redirect_home.html',
    'concepts/introduction': 'redirect_home.html',
    'builder/basics': 'redirect_build.html',
}


# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks']

# Configure extlinks
extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s',
                       'Issue ') }

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

html_add_permalinks = u'¶'

# The master toctree document.
master_doc = 'toctree'

# General information about the project.
project = u'Docker'
copyright = u'2014 Docker, Inc.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'docker'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = ['../theme']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.

# We use a png favicon. This is not compatible with internet explorer, but looks
# much better on all other browsers. However, sphynx doesn't like it (it likes
# .ico better) so we have just put it in the template rather than used this setting
# html_favicon = 'favicon.png'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static_files']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Dockerdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('toctree', 'Docker.tex', u'Docker Documentation',
     u'Team Docker', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('reference/commandline/cli', 'docker', u'Docker CLI Documentation',
     [u'Team Docker'], 1),
    ('reference/builder', 'Dockerfile', u'Dockerfile Documentation',
     [u'Team Docker'], 5),
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('toctree', 'Docker', u'Docker Documentation',
     u'Team Docker', 'Docker', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
@ -2,6 +2,6 @@

## Contents:

- [Contributing to Docker](contributing/)
- [Setting Up a Dev Environment](devenvironment/)

@ -6,19 +6,19 @@ page_keywords: contributing, docker, documentation, help, guideline

Want to hack on Docker? Awesome!

The repository includes [all the instructions you need to get started](
https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).

The [developer environment Dockerfile](
https://github.com/dotcloud/docker/blob/master/Dockerfile)
specifies the tools and versions used to test and build Docker.

If you're making changes to the documentation, see the [README.md](
https://github.com/dotcloud/docker/blob/master/docs/README.md).

The [documentation environment Dockerfile](
https://github.com/dotcloud/docker/blob/master/docs/Dockerfile)
specifies the tools and versions used to build the Documentation.

Further interesting details can be found in the [Packaging hints](
https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md).

@ -1,25 +0,0 @@
:title: Contribution Guidelines
:description: Contribution guidelines: create issues, conventions, pull requests
:keywords: contributing, docker, documentation, help, guideline

Contributing to Docker
======================

Want to hack on Docker? Awesome!

The repository includes `all the instructions you need to get
started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.

The `developer environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
specifies the tools and versions used to test and build Docker.

If you're making changes to the documentation, see the
`README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_.

The `documentation environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
specifies the tools and versions used to build the Documentation.

Further interesting details can be found in the `Packaging hints
<https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.

|
|||
|
||||
## Install Docker
|
||||
|
||||
Docker’s build environment itself is a Docker container, so the first
|
||||
Docker's build environment itself is a Docker container, so the first
|
||||
step is to install Docker on your system.
|
||||
|
||||
You can follow the [install instructions most relevant to your
|
||||
system](https://docs.docker.io/en/latest/installation/). Make sure you
|
||||
system](https://docs.docker.io/installation/). Make sure you
|
||||
have a working, up-to-date docker installation, then continue to the
|
||||
next step.
|
||||
|
||||
## Install tools used for this tutorial
|
||||
|
||||
Install `git`; honest, it’s very good. You can use
|
||||
other ways to get the Docker source, but they’re not anywhere near as
|
||||
Install `git`; honest, it's very good. You can use
|
||||
other ways to get the Docker source, but they're not anywhere near as
|
||||
easy.
|
||||
|
||||
Install `make`. This tutorial uses our base Makefile
|
||||
|
@ -32,8 +32,8 @@ Again, you can do it in other ways but you need to do more work.

## Check out the Source

    $ git clone https://git@github.com/dotcloud/docker
    $ cd docker

To check out a different revision, just use `git checkout`
with the name of the branch or revision number.
@ -45,7 +45,7 @@ Dockerfile in the current directory. Essentially, it will install all
the build and runtime dependencies necessary to build and test Docker.
This command will take some time to complete when you first execute it.

    $ sudo make build

If the build is successful, congratulations! You have produced a clean
build of docker, neatly encapsulated in a standard build environment.
@ -54,10 +54,9 @@ build of docker, neatly encapsulated in a standard build environment.

To create the Docker binary, run this command:

    $ sudo make binary

This will create the Docker binary in `./bundles/<version>-dev/binary/`.

### Using your built Docker binary

@ -66,7 +65,7 @@ The binary is available outside the container in the directory
host docker executable with this binary for live testing - for example,
on ubuntu:

    $ sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker) ; sudo service docker start

> **Note**:
> It's safer to run the tests below before swapping your host's docker binary.
@ -75,7 +74,7 @@ on ubuntu:

To execute the test cases, run this command:

    $ sudo make test

If the tests are successful then the tail of the output should look
something like this:
@ -106,11 +105,10 @@ something like this
    PASS
    ok      github.com/dotcloud/docker/utils        0.017s

If $TESTFLAGS is set in the environment, it is passed as extra arguments
to `go test`. You can use this to select certain tests to run, e.g.

    $ TESTFLAGS='-run ^TestBuild$' make test

If the output indicates "FAIL" and you see errors like this:

@ -118,14 +116,14 @@ If the output indicates "FAIL" and you see errors like this:

    utils_test.go:179: Error copy: exit status 1 (cp: writing '/tmp/docker-testd5c9-[...]': No space left on device

Then you likely don't have enough memory available to the test suite. 2GB
is recommended.

## Use Docker

You can run an interactive session in the newly built container:

    $ sudo make shell

    # type 'exit' or Ctrl-D to exit

@ -135,13 +133,14 @@ If you want to read the documentation from a local website, or are
making changes to it, you can build the documentation and then serve it
by:

    $ sudo make docs

    # when it's done, you can point your browser to http://yourdockerhost:8000
    # type Ctrl-C to exit

**Need More Help?**

If you need more help then hop on to the [#docker-dev IRC
channel](irc://chat.freenode.net#docker-dev) or post a message on the
[Docker developer mailing
list](https://groups.google.com/d/forum/docker-dev).

@ -1,167 +0,0 @@
:title: Setting Up a Dev Environment
:description: Guides on how to contribute to docker
:keywords: Docker, documentation, developers, contributing, dev environment

Setting Up a Dev Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To make it easier to contribute to Docker, we provide a standard
development environment. It is important that the same environment be
used for all tests, builds and releases. The standard development
environment defines all build dependencies: system libraries and
binaries, go environment, go dependencies, etc.


Step 1: Install Docker
----------------------

Docker's build environment itself is a Docker container, so the first
step is to install Docker on your system.

You can follow the `install instructions most relevant to your system
<https://docs.docker.io/en/latest/installation/>`_. Make sure you have
a working, up-to-date docker installation, then continue to the next
step.


Step 2: Install tools used for this tutorial
--------------------------------------------

Install ``git``; honest, it's very good. You can use other ways to get the Docker
source, but they're not anywhere near as easy.

Install ``make``. This tutorial uses our base Makefile to kick off the docker
containers in a repeatable and consistent way. Again, you can do it in other ways
but you need to do more work.

Step 3: Check out the Source
----------------------------

.. code-block:: bash

    git clone http://git@github.com/dotcloud/docker
    cd docker

To check out a different revision, just use ``git checkout`` with the name of the branch or revision number.


Step 4: Build the Environment
-----------------------------

The following command will build a development environment using the Dockerfile in the current directory. Essentially, it will install all the build and runtime dependencies necessary to build and test Docker. This command will take some time to complete when you first execute it.

.. code-block:: bash

    sudo make build

If the build is successful, congratulations! You have produced a clean build of
docker, neatly encapsulated in a standard build environment.


Step 5: Build the Docker Binary
-------------------------------

To create the Docker binary, run this command:

.. code-block:: bash

    sudo make binary

This will create the Docker binary in ``./bundles/<version>-dev/binary/``.

Using your built Docker binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The binary is available outside the container in the directory
``./bundles/<version>-dev/binary/``. You can swap your host docker executable
with this binary for live testing - for example, on ubuntu:

.. code-block:: bash

    sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker) ; sudo service docker start

.. note:: It's safer to run the tests below before swapping your host's docker binary.


Step 6: Run the Tests
---------------------

To execute the test cases, run this command:

.. code-block:: bash

    sudo make test

If the tests are successful then the tail of the output should look something like this:

.. code-block:: bash

    --- PASS: TestWriteBroadcaster (0.00 seconds)
    === RUN TestRaceWriteBroadcaster
    --- PASS: TestRaceWriteBroadcaster (0.00 seconds)
    === RUN TestTruncIndex
    --- PASS: TestTruncIndex (0.00 seconds)
    === RUN TestCompareKernelVersion
    --- PASS: TestCompareKernelVersion (0.00 seconds)
    === RUN TestHumanSize
    --- PASS: TestHumanSize (0.00 seconds)
    === RUN TestParseHost
    --- PASS: TestParseHost (0.00 seconds)
    === RUN TestParseRepositoryTag
    --- PASS: TestParseRepositoryTag (0.00 seconds)
    === RUN TestGetResolvConf
    --- PASS: TestGetResolvConf (0.00 seconds)
    === RUN TestCheckLocalDns
    --- PASS: TestCheckLocalDns (0.00 seconds)
    === RUN TestParseRelease
    --- PASS: TestParseRelease (0.00 seconds)
    === RUN TestDependencyGraphCircular
    --- PASS: TestDependencyGraphCircular (0.00 seconds)
    === RUN TestDependencyGraph
    --- PASS: TestDependencyGraph (0.00 seconds)
    PASS
    ok      github.com/dotcloud/docker/utils        0.017s

If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
You can use this to select certain tests to run, e.g.

    TESTFLAGS='-run ^TestBuild$' make test

If the output indicates "FAIL" and you see errors like this:

.. code-block:: text

    server.go:1302 Error: Insertion failed because database is full: database or disk is full

    utils_test.go:179: Error copy: exit status 1 (cp: writing '/tmp/docker-testd5c9-[...]': No space left on device

Then you likely don't have enough memory available to the test suite. 2GB is recommended.

Step 7: Use Docker
------------------

You can run an interactive session in the newly built container:

.. code-block:: bash

    sudo make shell

    # type 'exit' or Ctrl-D to exit


Extra Step: Build and view the Documentation
--------------------------------------------

If you want to read the documentation from a local website, or are making changes
to it, you can build the documentation and then serve it by:

.. code-block:: bash

    sudo make docs
    # when it's done, you can point your browser to http://yourdockerhost:8000
    # type Ctrl-C to exit


**Need More Help?**

If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailing list <https://groups.google.com/d/forum/docker-dev>`_.
@ -1,14 +0,0 @@
:title: Contributing to Docker
:description: Guides on how to contribute to docker
:keywords: Docker, documentation, developers, contributing, dev environment


Contributing
============

.. toctree::
   :maxdepth: 1

   contributing
   devenvironment
32
docs/sources/docker-io/accounts.md
Normal file
@ -0,0 +1,32 @@
page_title: Accounts on Docker.io
page_description: Docker.io accounts
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation

# Accounts on Docker.io

## Docker.io Accounts

You can `search` for Docker images and `pull` them from [Docker.io](https://index.docker.io)
without signing in or even having an account. However, in order to `push` images,
leave comments, or *star* a repository, you are going to need a [Docker.io](
https://www.docker.io) account.

### Registration for a Docker.io Account

You can get a [Docker.io](https://index.docker.io) account by
[signing up for one here](https://www.docker.io/account/signup/). A valid
email address is required to register, which you will need to verify for
account activation.

### Email activation process

You need to have at least one verified email address to be able to use your
[Docker.io](https://index.docker.io) account. If you can't find the validation email,
you can request another by visiting the [Resend Email Confirmation](
https://www.docker.io/account/resend-email-confirmation/) page.

### Password reset process

If you can't access your account for some reason, you can reset your password
from the [*Password Reset*](https://www.docker.io/account/forgot-password/)
page.
@@ -1,15 +1,15 @@
page_title: Trusted Builds in the Docker Index
page_description: Docker Index Trusted Builds
page_keywords: Docker, docker, index, accounts, plans, Dockerfile, Docker.io, docs, documentation, trusted, builds, trusted builds
page_title: Trusted Builds on Docker.io
page_description: Docker.io Trusted Builds
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation, trusted, builds, trusted builds

# Trusted Builds in the Docker Index
# Trusted Builds on Docker.io

## Trusted Builds

*Trusted Builds* is a special feature allowing you to specify a source
repository with a *Dockerfile* to be built by the Docker build clusters. The
system will clone your repository and build the Dockerfile using the repository
as the context. The resulting image will then be uploaded to the index and
as the context. The resulting image will then be uploaded to the registry and
marked as a `Trusted Build`.

Trusted Builds have a number of advantages. For example, users of *your* Trusted

@@ -17,18 +17,18 @@ Build can be certain that the resulting image was built exactly how it claims
to be.

Furthermore, the Dockerfile will be available to anyone browsing your repository
on the Index. Another advantage of the Trusted Builds feature is the automated
on the registry. Another advantage of the Trusted Builds feature is the automated
builds. This makes sure that your repository is always up to date.

### Linking with a GitHub account

In order to set up a Trusted Build, you need to first link your Docker Index
account with a GitHub one. This will allow the Docker Index to see your
repositories.
In order to set up a Trusted Build, you need to first link your [Docker.io](
https://index.docker.io) account with a GitHub one. This will allow the registry
to see your repositories.

> *Note:* We currently request access for *read* and *write* since the Index
> needs to set up a GitHub service hook. Although nothing else is done with
> your account, this is how GitHub manages permissions, sorry!
> *Note:* We currently request access for *read* and *write* since [Docker.io](
> https://index.docker.io) needs to set up a GitHub service hook. Although nothing
> else is done with your account, this is how GitHub manages permissions, sorry!

### Creating a Trusted Build


@@ -77,8 +77,8 @@ Trusted Build:
### The Dockerfile and Trusted Builds

During the build process, we copy the contents of your Dockerfile. We also
add it to the Docker Index for the Docker community to see on the repository
page.
add it to the [Docker.io](https://index.docker.io) for the Docker community
to see on the repository page.

### README.md
13  docs/sources/docker-io/home.md  Normal file
@@ -0,0 +1,13 @@
page_title: The Docker.io Registry Help
page_description: The Docker Registry help documentation home
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation

# The Docker.io Registry Help

## Introduction

For your questions about the [Docker.io](https://index.docker.io) registry you
can use [this documentation](docs.md).

If you cannot find something you are looking for, please feel free to
[contact us](https://index.docker.io/help/support/).
@@ -1,15 +1,16 @@
page_title: Repositories and Images in the Docker Index
page_description: Docker Index repositories
page_keywords: Docker, docker, index, accounts, plans, Dockerfile, Docker.io, docs, documentation
page_title: Repositories and Images on Docker.io
page_description: Repositories and Images on Docker.io
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation

# Repositories and Images in the Docker Index
# Repositories and Images on Docker.io

## Searching for repositories and images

You can `search` for all the publicly available repositories and images using
Docker. If a repository is not public (i.e., private), it won't be listed on
the Index search results. To see repository statuses, you can look at your
[profile page](https://index.docker.io/account/).
the repository search results. To see repository statuses, you can look at your
[profile page](https://index.docker.io/account/) on [Docker.io](
https://index.docker.io).

## Repositories


@@ -22,20 +23,20 @@ of bookmark your favorites.

You can interact with other members of the Docker community and maintainers by
leaving comments on repositories. If you find any comments that are not
appropriate, you can flag them for the Index admins' review.
appropriate, you can flag them for the admins' review.

### Private Docker Repositories

To work with a private repository on the Docker Index, you will need to add one
via the [Add Repository](https://index.docker.io/account/repositories/add) link.
Once the private repository is created, you can `push` and `pull` images to and
from it using Docker.
To work with a private repository on [Docker.io](https://index.docker.io), you
will need to add one via the [Add Repository](https://index.docker.io/account/repositories/add)
link. Once the private repository is created, you can `push` and `pull` images
to and from it using Docker.

> *Note:* You need to be signed in and have access to work with a private
> repository.

Private repositories are just like public ones. However, it isn't possible to
browse them or search their content on the public index. They do not get cached
browse them or search their content on the public registry. They do not get cached
the same way as a public repository either.

It is possible to give access to a private repository to those whom you

@@ -44,7 +45,7 @@ designate (i.e., collaborators) from its settings page.
From there, you can also switch repository status (*public* to *private*, or
vice versa). You will need to have an available private repository slot open
before you can do such a switch. If you don't have any, you can always upgrade
your [Docker Index plan](https://index.docker.io/plans/).
your [Docker.io](https://index.docker.io/plans/) plan.

### Collaborators and their role
@@ -9,17 +9,17 @@ substantial services like those which you might find in production.

## Contents:

- [Check your Docker install](hello_world/)
- [Hello World](hello_world/#hello-world)
- [Hello World Daemon](hello_world/#hello-world-daemon)
- [Node.js Web App](nodejs_web_app/)
- [Redis Service](running_redis_service/)
- [SSH Daemon Service](running_ssh_service/)
- [CouchDB Service](couchdb_data_volumes/)
- [PostgreSQL Service](postgresql_service/)
- [Building an Image with MongoDB](mongodb/)
- [Riak Service](running_riak_service/)
- [Using Supervisor with Docker](using_supervisord/)
- [Process Management with CFEngine](cfengine_process_management/)
- [Python Web App](python_web_app/)
@@ -9,13 +9,13 @@ page_keywords: docker, example, package installation, networking, debian, ubuntu
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup).
> - **If you’re using OS X or docker via TCP** then you shouldn’t use
> - **If you don't like sudo** then see [*Giving non-root
>   access*](/installation/binaries/#dockergroup).
> - **If you're using OS X or docker via TCP** then you shouldn't use
>   sudo.

When you have multiple Docker servers, or build unrelated Docker
containers which can’t make use of the Docker build cache, it can be
containers which can't make use of the Docker build cache, it can be
useful to have a caching proxy for your packages. This container makes
the second download of any package almost instant.


@@ -45,7 +45,7 @@ Then run it, mapping the exposed port to one on the host

    $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng

To see the logfiles that are ‘tailed’ in the default command, you can
To see the logfiles that are `tailed` in the default command, you can
use:

    $ sudo docker logs -f test_apt_cacher_ng

@@ -53,13 +53,12 @@ use:
To get your Debian-based containers to use the proxy, you can do one of
three things

1. Add an apt Proxy setting
   `echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy`
2. Set an environment variable:
   `http_proxy=http://dockerhost:3142/`
3. Change your `sources.list` entries to start with
   `http://dockerhost:3142/`

**Option 1** injects the settings safely into your apt configuration in
a local version of a common base:
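
A minimal sketch of such a base image (it mirrors the example kept in the RST
version of this page further down; `dockerhost` is assumed to resolve to the
machine running the proxy container):

    FROM ubuntu
    RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
    RUN apt-get update ; apt-get install vim git

    # docker build -t my_ubuntu .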
@@ -1,102 +0,0 @@
:title: Running an apt-cacher-ng service
:description: Installing and running an apt-cacher-ng service
:keywords: docker, example, package installation, networking, debian, ubuntu

.. _running_apt-cacher-ng_service:

Apt-Cacher-ng Service
=====================

.. include:: example_header.inc


When you have multiple Docker servers, or build unrelated Docker containers
which can't make use of the Docker build cache, it can be useful to have a
caching proxy for your packages. This container makes the second download of
any package almost instant.

Use the following Dockerfile:

.. literalinclude:: apt-cacher-ng.Dockerfile

To build the image:

.. code-block:: bash

    $ sudo docker build -t eg_apt_cacher_ng .

Then run it, mapping the exposed port to one on the host:

.. code-block:: bash

    $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng

To see the logfiles that are 'tailed' in the default command, you can use:

.. code-block:: bash

    $ sudo docker logs -f test_apt_cacher_ng

To get your Debian-based containers to use the proxy, you can do one of three things

1. Add an apt Proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy``
2. Set an environment variable: ``http_proxy=http://dockerhost:3142/``
3. Change your ``sources.list`` entries to start with ``http://dockerhost:3142/``

**Option 1** injects the settings safely into your apt configuration in a local
version of a common base:

.. code-block:: bash

    FROM ubuntu
    RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
    RUN apt-get update ; apt-get install vim git

    # docker build -t my_ubuntu .

**Option 2** is good for testing, but will
break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` and others:

.. code-block:: bash

    $ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash

**Option 3** is the least portable, but there will be times when you might need to
do it and you can do it from your ``Dockerfile`` too.

Apt-cacher-ng has some tools that allow you to manage the repository, and they
can be used by leveraging the ``VOLUME`` instruction, and the image we built to run the
service:

.. code-block:: bash

    $ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash

    $$ /usr/lib/apt-cacher-ng/distkill.pl
    Scanning /var/cache/apt-cacher-ng, please wait...
    Found distributions:
    bla, taggedcount: 0
         1. precise-security (36 index files)
         2. wheezy (25 index files)
         3. precise-updates (36 index files)
         4. precise (36 index files)
         5. wheezy-updates (18 index files)

    Found architectures:
         6. amd64 (36 index files)
         7. i386 (24 index files)

    WARNING: The removal action may wipe out whole directories containing
             index files. Select d to see detailed list.

    (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q


Finally, clean up after your test by stopping and removing the container, and
then removing the image.

.. code-block:: bash

    $ sudo docker stop test_apt_cacher_ng
    $ sudo docker rm test_apt_cacher_ng
    $ sudo docker rmi eg_apt_cacher_ng
@@ -10,14 +10,14 @@ Docker monitors one process in each running container and the container
lives or dies with that process. By introducing CFEngine inside Docker
containers, we can alleviate a few of the issues that may arise:

- It is possible to easily start multiple processes within a
  container, all of which will be managed automatically, with the
  normal `docker run` command.
- If a managed process dies or crashes, CFEngine will start it again
  within 1 minute.
- The container itself will live as long as the CFEngine scheduling
  daemon (cf-execd) lives. With CFEngine, we are able to decouple the
  life of the container from the uptime of the service it provides.

## How it works


@@ -25,23 +25,20 @@ CFEngine, together with the cfe-docker integration policies, are
installed as part of the Dockerfile. This builds CFEngine into our
Docker image.

The Dockerfile’s `ENTRYPOINT` takes an arbitrary
The Dockerfile's `ENTRYPOINT` takes an arbitrary
amount of commands (with any desired arguments) as parameters. When we
run the Docker container these parameters get written to CFEngine
policies and CFEngine takes over to ensure that the desired processes
are running in the container.

CFEngine scans the process table for the `basename` of the commands given
to the `ENTRYPOINT` and runs the command to start the process if the `basename`
is not found. For example, if we start the container with
`docker run "/path/to/my/application parameters"`, CFEngine will look for a
process named `application` and run the command. If an entry for `application`
is not found in the process table at any point in time, CFEngine will execute
`/path/to/my/application parameters` to start the application once again. The
check on the process table happens every minute.

Note that it is therefore important that the command to start your
application leaves a process with the basename of the command. This can

@@ -56,11 +53,10 @@ in a single container.

There are three steps:

1. Install CFEngine into the container.
2. Copy the CFEngine Docker process management policy into the
   containerized CFEngine installation.
3. Start your application processes as part of the `docker run` command.

### Building the container image


@@ -90,25 +86,22 @@ The first two steps can be done as part of a Dockerfile, as follows.

    ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]

By saving this file as `Dockerfile` to a working
directory, you can then build your container with the docker build
command, e.g. `docker build -t managed_image`.
By saving this file as Dockerfile to a working directory, you can then build
your container with the docker build command, e.g.
`docker build -t managed_image`.

### Testing the container

Start the container with `apache2` and `sshd` running and managed, forwarding
a port to our SSH instance:

    docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"
    $ docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"

We now clearly see one of the benefits of the cfe-docker integration: it
allows us to start several processes as part of a normal `docker run` command.

We can now log in to our new container and see that both `apache2` and `sshd`
are running. We have set the root password to "password" in the Dockerfile
above and can use that to log in with ssh:

    ssh -p222 root@127.0.0.1


@@ -144,9 +137,8 @@ CFEngine.
To make sure your applications get managed in the same manner, there are
just two things you need to adjust from the above example:

- In the Dockerfile used above, install your applications instead of
  `apache2` and `sshd`.
- When you start the container with `docker run`,
  specify the command line arguments to your applications rather than
  `apache2` and `sshd`.
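
For instance, a hypothetical adaptation (the image name and service paths
below are placeholders, not part of the example above):

    $ docker run -d my_managed_image "/usr/sbin/nginx" "/usr/sbin/cron"

CFEngine would then watch for the basenames `nginx` and `cron` and restart
either command if its process disappears.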
@@ -1,137 +0,0 @@
:title: Process Management with CFEngine
:description: Managing containerized processes with CFEngine
:keywords: cfengine, process, management, usage, docker, documentation

Process Management with CFEngine
================================

Create Docker containers with managed processes.

Docker monitors one process in each running container and the container lives or dies with that process.
By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise:

* It is possible to easily start multiple processes within a container, all of which will be managed automatically, with the normal ``docker run`` command.
* If a managed process dies or crashes, CFEngine will start it again within 1 minute.
* The container itself will live as long as the CFEngine scheduling daemon (cf-execd) lives. With CFEngine, we are able to decouple the life of the container from the uptime of the service it provides.


How it works
------------

CFEngine, together with the cfe-docker integration policies, are installed as part of the Dockerfile. This builds CFEngine into our Docker image.

The Dockerfile's ``ENTRYPOINT`` takes an arbitrary amount of commands (with any desired arguments) as parameters.
When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container.

CFEngine scans the process table for the ``basename`` of the commands given to the ``ENTRYPOINT`` and runs the command to start the process if the ``basename`` is not found.
For example, if we start the container with ``docker run "/path/to/my/application parameters"``, CFEngine will look for a process named ``application`` and run the command.
If an entry for ``application`` is not found in the process table at any point in time, CFEngine will execute ``/path/to/my/application parameters`` to start the application once again.
The check on the process table happens every minute.

Note that it is therefore important that the command to start your application leaves a process with the basename of the command.
This can be made more flexible by making some minor adjustments to the CFEngine policies, if desired.


Usage
-----

This example assumes you have Docker installed and working.
We will install and manage ``apache2`` and ``sshd`` in a single container.

There are three steps:

1. Install CFEngine into the container.
2. Copy the CFEngine Docker process management policy into the containerized CFEngine installation.
3. Start your application processes as part of the ``docker run`` command.


Building the container image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The first two steps can be done as part of a Dockerfile, as follows.

.. code-block:: bash

    FROM ubuntu
    MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>

    RUN apt-get -y install wget lsb-release unzip ca-certificates

    # install latest CFEngine
    RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
    RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
    RUN apt-get update
    RUN apt-get install cfengine-community

    # install cfe-docker process management policy
    RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
    RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/
    RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/
    RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip

    # apache2 and openssh are just for testing purposes, install your own apps here
    RUN apt-get -y install openssh-server apache2
    RUN mkdir -p /var/run/sshd
    RUN echo "root:password" | chpasswd  # need a password for ssh

    ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]


By saving this file as ``Dockerfile`` to a working directory, you can then build your container with the docker build command,
e.g. ``docker build -t managed_image``.

Testing the container
~~~~~~~~~~~~~~~~~~~~~

Start the container with ``apache2`` and ``sshd`` running and managed, forwarding a port to our SSH instance:

.. code-block:: bash

    docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"

We now clearly see one of the benefits of the cfe-docker integration: it allows us to start several processes
as part of a normal ``docker run`` command.

We can now log in to our new container and see that both ``apache2`` and ``sshd`` are running. We have set the root password to
"password" in the Dockerfile above and can use that to log in with ssh:

.. code-block:: bash

    ssh -p222 root@127.0.0.1

    ps -ef
    UID        PID  PPID  C STIME TTY          TIME CMD
    root         1     0  0 07:48 ?        00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start
    root        18     1  0 07:48 ?        00:00:00 /var/cfengine/bin/cf-execd -F
    root        20     1  0 07:48 ?        00:00:00 /usr/sbin/sshd
    root        32     1  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    www-data    34    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    www-data    35    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    www-data    36    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    root        93    20  0 07:48 ?        00:00:00 sshd: root@pts/0
    root       105    93  0 07:48 pts/0    00:00:00 -bash
    root       112   105  0 07:49 pts/0    00:00:00 ps -ef


If we stop apache2, it will be started again within a minute by CFEngine.

.. code-block:: bash

    service apache2 status
     Apache2 is running (pid 32).
    service apache2 stop
     * Stopping web server apache2 ... waiting    [ OK ]
    service apache2 status
     Apache2 is NOT running.
    # ... wait up to 1 minute...
    service apache2 status
     Apache2 is running (pid 173).


Adapting to your applications
-----------------------------

To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example:

* In the Dockerfile used above, install your applications instead of ``apache2`` and ``sshd``.
* When you start the container with ``docker run``, specify the command line arguments to your applications rather than ``apache2`` and ``sshd``.
@@ -9,40 +9,39 @@ page_keywords: docker, example, package installation, networking, couchdb, data
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
>   access*](/installation/binaries/#dockergroup)

Here’s an example of using data volumes to share the same data between
Here's an example of using data volumes to share the same data between
two CouchDB containers. This could be used for hot upgrades, testing
different versions of CouchDB on the same data, etc.

## Create first database

Note that we’re marking `/var/lib/couchdb` as a data
volume.
Note that we're marking `/var/lib/couchdb` as a data volume.

    COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)
    $ COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)

## Add data to the first database

We’re assuming your Docker host is reachable at `localhost`. If not,
We're assuming your Docker host is reachable at `localhost`. If not,
replace `localhost` with the public IP of your Docker host.

    HOST=localhost
    URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/"
    echo "Navigate to $URL in your browser, and use the couch interface to add data"
    $ HOST=localhost
    $ URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/"
    $ echo "Navigate to $URL in your browser, and use the couch interface to add data"

## Create second database

This time, we’re requesting shared access to `$COUCH1`'s volumes.
This time, we're requesting shared access to `$COUCH1`'s volumes.

    COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)
    $ COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)

## Browse data on the second database

    HOST=localhost
    URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/"
    echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
    $ HOST=localhost
    $ URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/"
    $ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'

Congratulations, you are now running two CouchDB containers, completely
isolated from each other *except* for their data.
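
If you want to tidy up afterwards, a cleanup sketch (reusing the `$COUCH1` and
`$COUCH2` variables set above):

    $ sudo docker stop $COUCH1 $COUCH2
    $ sudo docker rm $COUCH1 $COUCH2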
@@ -1,56 +0,0 @@
:title: Sharing data between 2 couchdb databases
:description: Sharing data between 2 couchdb databases
:keywords: docker, example, package installation, networking, couchdb, data volumes

.. _running_couchdb_service:

CouchDB Service
===============

.. include:: example_header.inc

Here's an example of using data volumes to share the same data between
two CouchDB containers. This could be used for hot upgrades, testing
different versions of CouchDB on the same data, etc.

Create first database
---------------------

Note that we're marking ``/var/lib/couchdb`` as a data volume.

.. code-block:: bash

    COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)

Add data to the first database
------------------------------

We're assuming your Docker host is reachable at ``localhost``. If not,
replace ``localhost`` with the public IP of your Docker host.

.. code-block:: bash

    HOST=localhost
    URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/"
    echo "Navigate to $URL in your browser, and use the couch interface to add data"

Create second database
----------------------

This time, we're requesting shared access to ``$COUCH1``'s volumes.

.. code-block:: bash

    COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)

Browse data on the second database
----------------------------------

.. code-block:: bash

    HOST=localhost
    URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/"
    echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'

Congratulations, you are now running two CouchDB containers, completely
isolated from each other *except* for their data.
@@ -15,7 +15,7 @@ like `/var/lib/docker/repositories: permission denied`
you may have an incomplete Docker installation or insufficient
privileges to access docker on your machine.

Please refer to [*Installation*](../../installation/)
Please refer to [*Installation*](/installation/)
for installation instructions.

## Hello World

@@ -25,8 +25,8 @@ for installation instructions.
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](#check-your-docker-installation).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
>   access*](/installation/binaries/#dockergroup)

This is the most basic example available for using Docker.


@@ -35,11 +35,10 @@ Download the small base image named `busybox`:
    # Download a busybox image
    $ sudo docker pull busybox

The `busybox` image is a minimal Linux system. You
can do the same with any number of other images, such as
`debian`, `ubuntu` or
`centos`. The images can be found and retrieved
using the [Docker index](http://index.docker.io).
The `busybox` image is a minimal Linux system. You can do the same with
any number of other images, such as `debian`, `ubuntu` or `centos`. The
images can be found and retrieved using the
[Docker.io](http://index.docker.io) registry.

    $ sudo docker run busybox /bin/echo hello world


@@ -61,7 +60,6 @@ See the example in action

<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/7658.js"id="asciicast-7658" async></script></body>"></iframe>

## Hello World Daemon

@@ -71,8 +69,8 @@ See the example in action
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](#check-your-docker-installation).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
>   access*](/installation/binaries/#dockergroup)

And now for the most boring daemon ever written!


@@ -82,64 +80,64 @@ continue to do this until we stop it.

**Steps:**

    CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
    $ container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")

We are going to run a simple hello world daemon in a new container made
from the `ubuntu` image.

- **"sudo docker run -d "** run a command in a new container. We pass
  "-d" so it runs as a daemon.
- **"ubuntu"** is the image we want to run the command inside of.
- **"/bin/sh -c"** is the command we want to run in the container
- **"while true; do echo hello world; sleep 1; done"** is the mini
  script we want to run, that will just print hello world once a
  second until we stop it.
- **$container_id** the output of the run command will return a
  container id, we can use in future commands to see what is going on
  with this process.

<!-- -->

    sudo docker logs $container_id
    $ sudo docker logs $container_id

Check the logs to make sure it is working correctly.

- **"docker logs"** This will return the logs for a container
- **$container_id** The Id of the container we want the logs for.

<!-- -->

    sudo docker attach --sig-proxy=false $container_id
    $ sudo docker attach --sig-proxy=false $container_id

Attach to the container to see the results in real-time.

- **"docker attach"** This will allow us to attach to a background
  process to see what is going on.
- **"--sig-proxy=false"** Do not forward signals to the container;
  allows us to exit the attachment using Control-C without stopping
  the container.
- **$container_id** The Id of the container we want to attach to.

Exit from the container attachment by pressing Control-C.

    sudo docker ps
    $ sudo docker ps

Check the process list to make sure it is running.

- **"docker ps"** this shows all running processes managed by docker

<!-- -->

    sudo docker stop $container_id
    $ sudo docker stop $container_id

Stop the container, since we don’t need it anymore.
Stop the container, since we don't need it anymore.

- **"docker stop"** This stops a container
- **$container_id** The Id of the container we want to stop.

<!-- -->

    sudo docker ps
    $ sudo docker ps

Make sure it is really stopped.


@@ -151,16 +149,14 @@ See the example in action

<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/2562.js"id="asciicast-2562" async></script></body>"></iframe>

The next example in the series is a [*Node.js Web App*](
../nodejs_web_app/#nodejs-web-app) example, or you could skip to any of the
other examples:

- [*Node.js Web App*](../nodejs_web_app/#nodejs-web-app)
- [*Redis Service*](../running_redis_service/#running-redis-service)
- [*SSH Daemon Service*](../running_ssh_service/#running-ssh-service)
- [*CouchDB Service*](../couchdb_data_volumes/#running-couchdb-service)
- [*PostgreSQL Service*](../postgresql_service/#postgresql-service)
- [*Building an Image with MongoDB*](../mongodb/#mongodb-image)
- [*Python Web App*](../python_web_app/#python-web-app)
@@ -1,181 +0,0 @@
:title: Hello world example
:description: A simple hello world example with Docker
:keywords: docker, example, hello world

.. _running_examples:

Check your Docker install
-------------------------

This guide assumes you have a working installation of Docker. To check
your Docker install, run the following command:

.. code-block:: bash

    # Check that you have a working install
    $ sudo docker info

If you get ``docker: command not found`` or something like
``/var/lib/docker/repositories: permission denied`` you may have an incomplete
Docker installation or insufficient privileges to access docker on your machine.

Please refer to :ref:`installation_list` for installation instructions.


.. _hello_world:

Hello World
-----------

.. include:: example_header.inc

This is the most basic example available for using Docker.

Download the small base image named ``busybox``:

.. code-block:: bash

    # Download a busybox image
    $ sudo docker pull busybox

The ``busybox`` image is a minimal Linux system. You can do the same
with any number of other images, such as ``debian``, ``ubuntu`` or ``centos``.
The images can be found and retrieved using the `Docker index`_.

.. _Docker index: http://index.docker.io

.. code-block:: bash

    $ sudo docker run busybox /bin/echo hello world

This command will run a simple ``echo`` command, that will echo ``hello world`` back to the console over standard out.

**Explanation:**

- **"sudo"** execute the following commands as user *root*
- **"docker run"** run a command in a new container
- **"busybox"** is the image we are running the command in.
- **"/bin/echo"** is the command we want to run in the container
- **"hello world"** is the input for the echo command


**Video:**

See the example in action

.. raw:: html

    <iframe width="560" height="400" frameborder="0"
            sandbox="allow-same-origin allow-scripts"
            srcdoc="<body><script type="text/javascript"
            src="https://asciinema.org/a/7658.js"
            id="asciicast-7658" async></script></body>">
    </iframe>

----

.. _hello_world_daemon:

Hello World Daemon
------------------

.. include:: example_header.inc

And now for the most boring daemon ever written!

We will use the Ubuntu image to run a simple hello world daemon that will just print hello
world to standard out every second. It will continue to do this until
we stop it.

**Steps:**

.. code-block:: bash

    container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")

We are going to run a simple hello world daemon in a new container
made from the ``ubuntu`` image.

- **"sudo docker run -d "** run a command in a new container. We pass "-d"
  so it runs as a daemon.
- **"ubuntu"** is the image we want to run the command inside of.
- **"/bin/sh -c"** is the command we want to run in the container
- **"while true; do echo hello world; sleep 1; done"** is the mini
  script we want to run, that will just print hello world once a
  second until we stop it.
- **$container_id** the output of the run command will return a
  container id, we can use in future commands to see what is going on
  with this process.

.. code-block:: bash

    sudo docker logs $container_id

Check the logs to make sure it is working correctly.

- **"docker logs"** This will return the logs for a container
- **$container_id** The Id of the container we want the logs for.

.. code-block:: bash

    sudo docker attach --sig-proxy=false $container_id

Attach to the container to see the results in real-time.

- **"docker attach"** This will allow us to attach to a background
  process to see what is going on.
- **"--sig-proxy=false"** Do not forward signals to the container; allows
  us to exit the attachment using Control-C without stopping the container.
- **$container_id** The Id of the container we want to attach to.

Exit from the container attachment by pressing Control-C.

.. code-block:: bash

    sudo docker ps

Check the process list to make sure it is running.

- **"docker ps"** this shows all running processes managed by docker

.. code-block:: bash

    sudo docker stop $container_id

Stop the container, since we don't need it anymore.

- **"docker stop"** This stops a container
- **$container_id** The Id of the container we want to stop.

.. code-block:: bash

    sudo docker ps

Make sure it is really stopped.


**Video:**

See the example in action

.. raw:: html

    <iframe width="560" height="400" frameborder="0"
            sandbox="allow-same-origin allow-scripts"
            srcdoc="<body><script type="text/javascript"
            src="https://asciinema.org/a/2562.js"
            id="asciicast-2562" async></script></body>">
    </iframe>

The next example in the series is a :ref:`nodejs_web_app` example, or
you could skip to any of the other examples:


* :ref:`nodejs_web_app`
* :ref:`running_redis_service`
* :ref:`running_ssh_service`
* :ref:`running_couchdb_service`
* :ref:`postgresql_service`
* :ref:`mongodb_image`
* :ref:`python_web_app`
@@ -8,7 +8,7 @@ By default, Docker runs via a non-networked Unix socket. It can also
optionally communicate using an HTTP socket.

If you need Docker reachable via the network in a safe manner, you can
enable TLS by specifying the tlsverify flag and pointing Docker’s
enable TLS by specifying the tlsverify flag and pointing Docker's
tlscacert flag to a trusted CA certificate.

In daemon mode, it will only allow connections from clients

@@ -31,12 +31,12 @@ keys:
Now that we have a CA, you can create a server key and certificate
signing request. Make sure that "Common Name (e.g. server FQDN or YOUR
name)" matches the hostname you will use to connect to Docker or just
use ‘\*’ for a certificate valid for any hostname:
use `\*` for a certificate valid for any hostname:

    $ openssl genrsa -des3 -out server-key.pem
    $ openssl req -new -key server-key.pem -out server.csr

Next we’re going to sign the key with our CA:
Next we're going to sign the key with our CA:

    $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
      -out server-cert.pem


@@ -76,7 +76,7 @@ need to provide your client keys, certificates and trusted CA:
      -H=dns-name-of-docker-host:4243

> **Warning**:
> As shown in the example above, you don’t have to run the
> As shown in the example above, you don't have to run the
> `docker` client with `sudo` or
> the `docker` group when you use certificate
> authentication. That means anyone with the keys can give any


@@ -86,22 +86,22 @@ need to provide your client keys, certificates and trusted CA:

## Other modes

If you don’t want to have complete two-way authentication, you can run
If you don't want to have complete two-way authentication, you can run
Docker in various other modes by mixing the flags.

### Daemon modes

- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients
- tls, tlscert, tlskey: Do not authenticate clients

### Client modes

- tls: Authenticate server based on public/default CA pool
- tlsverify, tlscacert: Authenticate server based on given CA
- tls, tlscert, tlskey: Authenticate with client certificate, do not
  authenticate server based on given CA
- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client
  certificate, authenticate server based on given CA

The client will send its client certificate if found, so you just need
to drop your keys into \~/.docker/\<ca, cert or key\>.pem
to drop your keys into ~/.docker/<ca, cert or key>.pem
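
A sketch of dropping the client credentials into place, using the filenames
generated in the examples above:

    $ mkdir -p ~/.docker
    $ cp ca.pem ~/.docker/ca.pem
    $ cp client-cert.pem ~/.docker/cert.pem
    $ cp client-key.pem ~/.docker/key.pem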
@@ -1,126 +0,0 @@
:title: Docker HTTPS Setup
:description: How to setup docker with https
:keywords: docker, example, https, daemon

.. _running_docker_https:

Running Docker with https
=========================

By default, Docker runs via a non-networked Unix socket. It can also optionally
communicate using an HTTP socket.

If you need Docker reachable via the network in a safe manner, you can enable
TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a
trusted CA certificate.

In daemon mode, it will only allow connections from clients authenticated by a
certificate signed by that CA. In client mode, it will only connect to servers
with a certificate signed by that CA.

.. warning::

   Using TLS and managing a CA is an advanced topic. Please make yourself familiar
   with openssl, x509 and tls before using it in production.

Create a CA, server and client keys with OpenSSL
------------------------------------------------

First, initialize the CA serial file and generate CA private and public keys:

.. code-block:: bash

    $ echo 01 > ca.srl
    $ openssl genrsa -des3 -out ca-key.pem
    $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem

Now that we have a CA, you can create a server key and certificate signing request.
Make sure that `"Common Name (e.g. server FQDN or YOUR name)"` matches the hostname you will use
to connect to Docker or just use '*' for a certificate valid for any hostname:

.. code-block:: bash

    $ openssl genrsa -des3 -out server-key.pem
    $ openssl req -new -key server-key.pem -out server.csr

Next we're going to sign the key with our CA:

.. code-block:: bash

    $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
      -out server-cert.pem

For client authentication, create a client key and certificate signing request:

.. code-block:: bash

    $ openssl genrsa -des3 -out client-key.pem
    $ openssl req -new -key client-key.pem -out client.csr


To make the key suitable for client authentication, create an extensions config file:

.. code-block:: bash

    $ echo extendedKeyUsage = clientAuth > extfile.cnf

Now sign the key:

.. code-block:: bash

    $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
      -out client-cert.pem -extfile extfile.cnf

Finally you need to remove the passphrase from the client and server key:

.. code-block:: bash

    $ openssl rsa -in server-key.pem -out server-key.pem
    $ openssl rsa -in client-key.pem -out client-key.pem

Now you can make the Docker daemon only accept connections from clients providing
a certificate trusted by our CA:

.. code-block:: bash

    $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
      -H=0.0.0.0:4243

To be able to connect to Docker and validate its certificate, you now need to provide your client keys,
certificates and trusted CA:

.. code-block:: bash

    $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
      -H=dns-name-of-docker-host:4243

.. warning::

   As shown in the example above, you don't have to run the ``docker``
   client with ``sudo`` or the ``docker`` group when you use
   certificate authentication. That means anyone with the keys can
   give any instructions to your Docker daemon, giving them root
   access to the machine hosting the daemon. Guard these keys as you
   would a root password!

Other modes
-----------
If you don't want to have complete two-way authentication, you can run Docker in
various other modes by mixing the flags.

Daemon modes
~~~~~~~~~~~~
- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients
- tls, tlscert, tlskey: Do not authenticate clients

Client modes
~~~~~~~~~~~~
- tls: Authenticate server based on public/default CA pool
- tlsverify, tlscacert: Authenticate server based on given CA
- tls, tlscert, tlskey: Authenticate with client certificate, do not authenticate
  server based on given CA
- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client certificate,
  authenticate server based on given CA

The client will send its client certificate if found, so you just need to drop
your keys into `~/.docker/<ca, cert or key>.pem`
@@ -1,30 +0,0 @@
:title: Docker Examples
:description: Examples on how to use Docker
:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples, postgresql, link


.. _example_list:

Examples
========

Here are some examples of how to use Docker to create running
processes, starting from a very simple *Hello World* and progressing
to more substantial services like those which you might find in production.

.. toctree::
    :maxdepth: 1

    hello_world
    nodejs_web_app
    running_redis_service
    running_ssh_service
    couchdb_data_volumes
    postgresql_service
    mongodb
    running_riak_service
    using_supervisord
    cfengine_process_management
    python_web_app
    apt-cacher-ng
    https
@@ -9,57 +9,57 @@ page_keywords: docker, example, package installation, networking, mongodb

> - This example assumes you have Docker running in daemon mode. For
> more information please see [*Check your Docker
> install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
> access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
> access*](/installation/binaries/#dockergroup)

The goal of this example is to show how you can build your own Docker
images with MongoDB pre-installed. We will do that by constructing a
`Dockerfile` that downloads a base image, adds an
Dockerfile that downloads a base image, adds an
apt source and installs the database software on Ubuntu.

## Creating a `Dockerfile`
## Creating a Dockerfile

Create an empty file called `Dockerfile`:
Create an empty file called Dockerfile:

    touch Dockerfile
    $ touch Dockerfile

Next, define the parent image you want to use to build your own image on
top of. Here, we’ll use [Ubuntu](https://index.docker.io/_/ubuntu/)
top of. Here, we'll use [Ubuntu](https://index.docker.io/_/ubuntu/)
(tag: `latest`) available on the [docker
index](http://index.docker.io):

    FROM ubuntu:latest

Since we want to be running the latest version of MongoDB we’ll need to
Since we want to be running the latest version of MongoDB we'll need to
add the 10gen repo to our apt sources list.

    # Add 10gen official apt source to the sources list
    RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
    RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list

Then, we don’t want Ubuntu to complain about init not being available so
we’ll divert `/sbin/initctl` to
Then, we don't want Ubuntu to complain about init not being available so
we'll divert `/sbin/initctl` to
`/bin/true` so it thinks everything is working.

    # Hack for initctl not being available in Ubuntu
    RUN dpkg-divert --local --rename --add /sbin/initctl
    RUN ln -s /bin/true /sbin/initctl

Afterwards we’ll be able to update our apt repositories and install
Afterwards we'll be able to update our apt repositories and install
MongoDB

    # Install MongoDB
    RUN apt-get update
    RUN apt-get install mongodb-10gen

To run MongoDB we’ll have to create the default data directory (because
To run MongoDB we'll have to create the default data directory (because
we want it to run without needing to provide a special configuration
file)

    # Create the MongoDB data directory
    RUN mkdir -p /data/db

Finally, we’ll expose the standard port that MongoDB runs on, 27017, as
Finally, we'll expose the standard port that MongoDB runs on, 27017, as
well as define an `ENTRYPOINT` instruction for the
container.

@@ -67,23 +67,23 @@ container.

    ENTRYPOINT ["/usr/bin/mongod"]

Now, let's build the image which will go through the
`Dockerfile` we made and run all of the commands.
Dockerfile we made and run all of the commands.

    sudo docker build -t <yourname>/mongodb .
    $ sudo docker build -t <yourname>/mongodb .

Now you should be able to run `mongod` as a daemon
and be able to connect on the local port!

    # Regular style
    MONGO_ID=$(sudo docker run -d <yourname>/mongodb)
    $ MONGO_ID=$(sudo docker run -d <yourname>/mongodb)

    # Lean and mean
    MONGO_ID=$(sudo docker run -d <yourname>/mongodb --noprealloc --smallfiles)
    $ MONGO_ID=$(sudo docker run -d <yourname>/mongodb --noprealloc --smallfiles)

    # Check the logs out
    sudo docker logs $MONGO_ID
    $ sudo docker logs $MONGO_ID

    # Connect and play around
    mongo --port <port you get from `docker ps`>
    $ mongo --port <port you get from `docker ps`>

Sweet!
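(A hypothetical follow-up, not part of the original page: if you publish the exposed port by running with `-P`, as the RST version of this page below does, you can recover the host port with `docker port` instead of scanning `docker ps`:)

    # Run with ports published, then ask Docker for the host port mapped to 27017
    $ MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb)
    $ sudo docker port $MONGO_ID 27017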

@@ -1,100 +0,0 @@
:title: Building a Docker Image with MongoDB
:description: How to build a Docker image with MongoDB pre-installed
:keywords: docker, example, package installation, networking, mongodb

.. _mongodb_image:

Building an Image with MongoDB
==============================

.. include:: example_header.inc

The goal of this example is to show how you can build your own
Docker images with MongoDB pre-installed. We will do that by
constructing a ``Dockerfile`` that downloads a base image, adds an
apt source and installs the database software on Ubuntu.

Creating a ``Dockerfile``
+++++++++++++++++++++++++

Create an empty file called ``Dockerfile``:

.. code-block:: bash

    touch Dockerfile

Next, define the parent image you want to use to build your own image on top of.
Here, we’ll use `Ubuntu <https://index.docker.io/_/ubuntu/>`_ (tag: ``latest``)
available on the `docker index <http://index.docker.io>`_:

.. code-block:: bash

    FROM ubuntu:latest

Since we want to be running the latest version of MongoDB we'll need to add the
10gen repo to our apt sources list.

.. code-block:: bash

    # Add 10gen official apt source to the sources list
    RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
    RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list

Then, we don't want Ubuntu to complain about init not being available so we'll
divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working.

.. code-block:: bash

    # Hack for initctl not being available in Ubuntu
    RUN dpkg-divert --local --rename --add /sbin/initctl
    RUN ln -sf /bin/true /sbin/initctl

Afterwards we'll be able to update our apt repositories and install MongoDB

.. code-block:: bash

    # Install MongoDB
    RUN apt-get update
    RUN apt-get install mongodb-10gen

To run MongoDB we'll have to create the default data directory (because we want it to
run without needing to provide a special configuration file)

.. code-block:: bash

    # Create the MongoDB data directory
    RUN mkdir -p /data/db

Finally, we'll expose the standard port that MongoDB runs on, 27017, as well as
define an ``ENTRYPOINT`` instruction for the container.

.. code-block:: bash

    EXPOSE 27017
    ENTRYPOINT ["/usr/bin/mongod"]

Now, let's build the image which will go through the ``Dockerfile`` we made and
run all of the commands.

.. code-block:: bash

    sudo docker build -t <yourname>/mongodb .

Now you should be able to run ``mongod`` as a daemon and be able to connect on
the local port!

.. code-block:: bash

    # Regular style
    MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb)

    # Lean and mean
    MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb --noprealloc --smallfiles)

    # Check the logs out
    sudo docker logs $MONGO_ID

    # Connect and play around
    mongo --port <port you get from `docker ps`>

Sweet!
@@ -9,8 +9,8 @@ page_keywords: docker, example, package installation, node, centos

> - This example assumes you have Docker running in daemon mode. For
> more information please see [*Check your Docker
> install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
> access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
> access*](/installation/binaries/#dockergroup)

The goal of this example is to show you how you can build your own
Docker images from a parent image using a `Dockerfile`

@@ -52,11 +52,11 @@ app using the [Express.js](http://expressjs.com/) framework:

    app.listen(PORT);
    console.log('Running on http://localhost:' + PORT);

In the next steps, we’ll look at how you can run this app inside a
CentOS container using Docker. First, you’ll need to build a Docker
In the next steps, we'll look at how you can run this app inside a
CentOS container using Docker. First, you'll need to build a Docker
image of your app.

## Creating a `Dockerfile`
## Creating a Dockerfile

Create an empty file called `Dockerfile`:

@@ -69,47 +69,44 @@ requires to build (this example uses Docker 0.3.4):

    # DOCKER-VERSION 0.3.4

Next, define the parent image you want to use to build your own image on
top of. Here, we’ll use [CentOS](https://index.docker.io/_/centos/)
top of. Here, we'll use [CentOS](https://index.docker.io/_/centos/)
(tag: `6.4`) available on the [Docker
index](https://index.docker.io/):

    FROM centos:6.4

Since we’re building a Node.js app, you’ll have to install Node.js as
Since we're building a Node.js app, you'll have to install Node.js as
well as npm on your CentOS image. Node.js is required to run your app
and npm to install your app’s dependencies defined in
and npm to install your app's dependencies defined in
`package.json`. To install the right package for
CentOS, we’ll use the instructions from the [Node.js
wiki](https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6):
CentOS, we'll use the instructions from the [Node.js
wiki](https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6):

    # Enable EPEL for Node.js
    RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
    # Install Node.js and npm
    RUN yum install -y npm

To bundle your app’s source code inside the Docker image, use the
`ADD` instruction:
To bundle your app's source code inside the Docker image, use the `ADD`
instruction:

    # Bundle app source
    ADD . /src

Install your app dependencies using the `npm`
binary:
Install your app dependencies using the `npm` binary:

    # Install app dependencies
    RUN cd /src; npm install

Your app binds to port `8080` so you’ll use the
`EXPOSE` instruction to have it mapped by the
`docker` daemon:
Your app binds to port `8080` so you'll use the `EXPOSE` instruction to have
it mapped by the `docker` daemon:

    EXPOSE 8080

Last but not least, define the command to run your app using
`CMD` which defines your runtime, i.e.
`node`, and the path to our app, i.e.
`src/index.js` (see the step where we added the
source to the container):
Last but not least, define the command to run your app using `CMD` which
defines your runtime, i.e. `node`, and the path to our app, i.e. `src/index.js`
(see the step where we added the source to the container):

    CMD ["node", "/src/index.js"]
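(For reference, the fully assembled `Dockerfile` is elided from this hunk; the copy below is reproduced from the RST version of this page, further down in this diff:)

    # DOCKER-VERSION 0.3.4
    FROM centos:6.4

    # Enable EPEL for Node.js
    RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
    # Install Node.js and npm
    RUN yum install -y npm

    # Bundle app source
    ADD . /src
    # Install app dependencies
    RUN cd /src; npm install

    EXPOSE 8080
    CMD ["node", "/src/index.js"]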

@@ -133,72 +130,68 @@ Your `Dockerfile` should now look like this:

## Building your image

Go to the directory that has your `Dockerfile` and
run the following command to build a Docker image. The `-t`
flag lets you tag your image so it’s easier to find later
using the `docker images` command:
Go to the directory that has your `Dockerfile` and run the following command
to build a Docker image. The `-t` flag lets you tag your image so it's easier
to find later using the `docker images` command:

    sudo docker build -t <your username>/centos-node-hello .
    $ sudo docker build -t <your username>/centos-node-hello .

Your image will now be listed by Docker:

    sudo docker images
    $ sudo docker images

> # Example
> REPOSITORY                TAG       ID              CREATED
> centos                    6.4       539c0211cd76    8 weeks ago
> gasi/centos-node-hello    latest    d64d3505b0d2    2 hours ago
# Example
REPOSITORY                TAG       ID              CREATED
centos                    6.4       539c0211cd76    8 weeks ago
gasi/centos-node-hello    latest    d64d3505b0d2    2 hours ago

## Run the image

Running your image with `-d` runs the container in
detached mode, leaving the container running in the background. The
`-p` flag redirects a public port to a private port
in the container. Run the image you previously built:
Running your image with `-d` runs the container in detached mode, leaving the
container running in the background. The `-p` flag redirects a public port to
a private port in the container. Run the image you previously built:

    sudo docker run -p 49160:8080 -d <your username>/centos-node-hello
    $ sudo docker run -p 49160:8080 -d <your username>/centos-node-hello

Print the output of your app:

    # Get container ID
    sudo docker ps
    $ sudo docker ps

    # Print app output
    sudo docker logs <container id>
    $ sudo docker logs <container id>

> # Example
> Running on http://localhost:8080
# Example
Running on http://localhost:8080

## Test

To test your app, get the port of your app that Docker mapped:

    sudo docker ps
    $ sudo docker ps

> # Example
> ID              IMAGE                            COMMAND               ...   PORTS
> ecce33b30ebf    gasi/centos-node-hello:latest    node /src/index.js    ...   49160->8080
# Example
ID              IMAGE                            COMMAND               ...   PORTS
ecce33b30ebf    gasi/centos-node-hello:latest    node /src/index.js    ...   49160->8080

In the example above, Docker mapped the `8080` port
of the container to `49160`.
In the example above, Docker mapped the `8080` port of the container to `49160`.
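(A hypothetical shortcut, borrowed from the Python web app example later in this changeset: capture the mapped host port into a variable with `docker port` instead of reading it off `docker ps`:)

    # Store the host port that Docker mapped to the container's port 8080
    $ WEB_PORT=$(sudo docker port <container id> 8080 | awk -F: '{ print $2 }')
    $ echo $WEB_PORT
    49160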

Now you can call your app using `curl` (install if
needed via: `sudo apt-get install curl`):
Now you can call your app using `curl` (install if needed via:
`sudo apt-get install curl`):

    curl -i localhost:49160
    $ curl -i localhost:49160

> HTTP/1.1 200 OK
> X-Powered-By: Express
> Content-Type: text/html; charset=utf-8
> Content-Length: 12
> Date: Sun, 02 Jun 2013 03:53:22 GMT
> Connection: keep-alive
>
> Hello World
HTTP/1.1 200 OK
X-Powered-By: Express
Content-Type: text/html; charset=utf-8
Content-Length: 12
Date: Sun, 02 Jun 2013 03:53:22 GMT
Connection: keep-alive

Hello World

We hope this tutorial helped you get up and running with Node.js and
CentOS on Docker. You can get the full source code at
[https://github.com/gasi/docker-node-hello](https://github.com/gasi/docker-node-hello).

Continue to [*Redis
Service*](../running_redis_service/#running-redis-service).
Continue to [*Redis Service*](../running_redis_service/#running-redis-service).

@@ -1,239 +0,0 @@
:title: Running a Node.js app on CentOS
:description: Installing and running a Node.js app on CentOS
:keywords: docker, example, package installation, node, centos

.. _nodejs_web_app:

Node.js Web App
===============

.. include:: example_header.inc

The goal of this example is to show you how you can build your own
Docker images from a parent image using a ``Dockerfile`` . We will do
that by making a simple Node.js hello world web application running on
CentOS. You can get the full source code at
https://github.com/gasi/docker-node-hello.

Create Node.js app
++++++++++++++++++

First, create a directory ``src`` where all the files would live. Then create a ``package.json`` file that describes your app and its
dependencies:

.. code-block:: json

    {
      "name": "docker-centos-hello",
      "private": true,
      "version": "0.0.1",
      "description": "Node.js Hello World app on CentOS using docker",
      "author": "Daniel Gasienica <daniel@gasienica.ch>",
      "dependencies": {
        "express": "3.2.4"
      }
    }

Then, create an ``index.js`` file that defines a web app using the
`Express.js <http://expressjs.com/>`_ framework:

.. code-block:: javascript

    var express = require('express');

    // Constants
    var PORT = 8080;

    // App
    var app = express();
    app.get('/', function (req, res) {
      res.send('Hello World\n');
    });

    app.listen(PORT);
    console.log('Running on http://localhost:' + PORT);

In the next steps, we’ll look at how you can run this app inside a CentOS
container using Docker. First, you’ll need to build a Docker image of your app.

Creating a ``Dockerfile``
+++++++++++++++++++++++++

Create an empty file called ``Dockerfile``:

.. code-block:: bash

    touch Dockerfile

Open the ``Dockerfile`` in your favorite text editor and add the following line
that defines the version of Docker the image requires to build
(this example uses Docker 0.3.4):

.. code-block:: bash

    # DOCKER-VERSION 0.3.4

Next, define the parent image you want to use to build your own image on top of.
Here, we’ll use `CentOS <https://index.docker.io/_/centos/>`_ (tag: ``6.4``)
available on the `Docker index`_:

.. code-block:: bash

    FROM centos:6.4

Since we’re building a Node.js app, you’ll have to install Node.js as well as
npm on your CentOS image. Node.js is required to run your app and npm to install
your app’s dependencies defined in ``package.json``.
To install the right package for CentOS, we’ll use the instructions from the
`Node.js wiki`_:

.. code-block:: bash

    # Enable EPEL for Node.js
    RUN rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
    # Install Node.js and npm
    RUN yum install -y npm

To bundle your app’s source code inside the Docker image, use the ``ADD``
instruction:

.. code-block:: bash

    # Bundle app source
    ADD . /src

Install your app dependencies using the ``npm`` binary:

.. code-block:: bash

    # Install app dependencies
    RUN cd /src; npm install

Your app binds to port ``8080`` so you’ll use the ``EXPOSE`` instruction
to have it mapped by the ``docker`` daemon:

.. code-block:: bash

    EXPOSE 8080

Last but not least, define the command to run your app using ``CMD``
which defines your runtime, i.e. ``node``, and the path to our app,
i.e. ``src/index.js`` (see the step where we added the source to the
container):

.. code-block:: bash

    CMD ["node", "/src/index.js"]

Your ``Dockerfile`` should now look like this:

.. code-block:: bash

    # DOCKER-VERSION 0.3.4
    FROM centos:6.4

    # Enable EPEL for Node.js
    RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
    # Install Node.js and npm
    RUN yum install -y npm

    # Bundle app source
    ADD . /src
    # Install app dependencies
    RUN cd /src; npm install

    EXPOSE 8080
    CMD ["node", "/src/index.js"]

Building your image
+++++++++++++++++++

Go to the directory that has your ``Dockerfile`` and run the following
command to build a Docker image. The ``-t`` flag lets you tag your
image so it’s easier to find later using the ``docker images``
command:

.. code-block:: bash

    sudo docker build -t <your username>/centos-node-hello .

Your image will now be listed by Docker:

.. code-block:: bash

    sudo docker images

    > # Example
    > REPOSITORY                TAG       ID              CREATED
    > centos                    6.4       539c0211cd76    8 weeks ago
    > gasi/centos-node-hello    latest    d64d3505b0d2    2 hours ago

Run the image
+++++++++++++

Running your image with ``-d`` runs the container in detached mode, leaving the
container running in the background. The ``-p`` flag redirects a public port to a private port in the container. Run the image you previously built:

.. code-block:: bash

    sudo docker run -p 49160:8080 -d <your username>/centos-node-hello

Print the output of your app:

.. code-block:: bash

    # Get container ID
    sudo docker ps

    # Print app output
    sudo docker logs <container id>

    > # Example
    > Running on http://localhost:8080

Test
++++

To test your app, get the port of your app that Docker mapped:

.. code-block:: bash

    sudo docker ps

    > # Example
    > ID              IMAGE                            COMMAND               ...   PORTS
    > ecce33b30ebf    gasi/centos-node-hello:latest    node /src/index.js    ...   49160->8080

In the example above, Docker mapped the ``8080`` port of the container to
``49160``.

Now you can call your app using ``curl`` (install if needed via:
``sudo apt-get install curl``):

.. code-block:: bash

    curl -i localhost:49160

    > HTTP/1.1 200 OK
    > X-Powered-By: Express
    > Content-Type: text/html; charset=utf-8
    > Content-Length: 12
    > Date: Sun, 02 Jun 2013 03:53:22 GMT
    > Connection: keep-alive
    >
    > Hello World

We hope this tutorial helped you get up and running with Node.js and
CentOS on Docker. You can get the full source code at
https://github.com/gasi/docker-node-hello.

Continue to :ref:`running_redis_service`.

.. _Node.js wiki: https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6
.. _docker index: https://index.docker.io/
@@ -9,13 +9,13 @@ page_keywords: docker, example, package installation, postgresql

> - This example assumes you have Docker running in daemon mode. For
> more information please see [*Check your Docker
> install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
> access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
> access*](/installation/binaries/#dockergroup)

## Installing PostgreSQL on Docker

Assuming there is no Docker image that suits your needs in [the
index](http://index.docker.io), you can create one yourself.
Assuming there is no Docker image that suits your needs in
[the index](http://index.docker.io), you can create one yourself.

Start by creating a new Dockerfile:

@@ -25,7 +25,7 @@ Start by creating a new Dockerfile:

> suitably secure.

    #
    # example Dockerfile for http://docs.docker.io/en/latest/examples/postgresql_service/
    # example Dockerfile for http://docs.docker.io/examples/postgresql_service/
    #

    FROM ubuntu

@@ -87,7 +87,7 @@ And run the PostgreSQL server container (in the foreground):

    $ sudo docker run -rm -P -name pg_test eg_postgresql

There are 2 ways to connect to the PostgreSQL server. We can use [*Link
Containers*](../../use/working_with_links_names/#working-with-links-names),
Containers*](/use/working_with_links_names/#working-with-links-names),
or we can access it from our host (or the network).

> **Note**:

@@ -96,8 +96,8 @@ or we can access it from our host (or the network).

### Using container linking

Containers can be linked to another container’s ports directly using
`-link remote_name:local_alias` in the client’s
Containers can be linked to another container's ports directly using
`-link remote_name:local_alias` in the client's
`docker run`. This will set a number of environment
variables that can then be used to connect:
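(The connect command itself falls outside this hunk; a sketch of it, taken from the RST version of this page further down in the diff:)

    $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash

    postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password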

@@ -125,14 +125,14 @@ prompt, you can create a table and populate it.

    psql (9.3.1)
    Type "help" for help.

    docker=# CREATE TABLE cities (
    $ docker=# CREATE TABLE cities (
    docker(# name varchar(80),
    docker(# location point
    docker(# );
    CREATE TABLE
    docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)');
    $ docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)');
    INSERT 0 1
    docker=# select * from cities;
    $ docker=# select * from cities;
         name      | location
    ---------------+-----------
     San Francisco | (-194,53)

@@ -143,7 +143,7 @@ prompt, you can create a table and populate it.

You can use the defined volumes to inspect the PostgreSQL log files and
to backup your configuration and data:

    docker run -rm --volumes-from pg_test -t -i busybox sh
    $ docker run -rm --volumes-from pg_test -t -i busybox sh

    / # ls
    bin  etc  lib  linuxrc  mnt  proc  run  sys  usr
@ -1,117 +0,0 @@
|
|||
:title: PostgreSQL service How-To
|
||||
:description: Running and installing a PostgreSQL service
|
||||
:keywords: docker, example, package installation, postgresql
|
||||
|
||||
.. _postgresql_service:
|
||||
|
||||
PostgreSQL Service
|
||||
==================
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
Installing PostgreSQL on Docker
|
||||
-------------------------------
|
||||
|
||||
Assuming there is no Docker image that suits your needs in `the index`_, you
|
||||
can create one yourself.
|
||||
|
||||
.. _the index: http://index.docker.io
|
||||
|
||||
Start by creating a new Dockerfile:
|
||||
|
||||
.. note::
|
||||
|
||||
This PostgreSQL setup is for development only purposes. Refer
|
||||
to the PostgreSQL documentation to fine-tune these settings so that it
|
||||
is suitably secure.
|
||||
|
||||
.. literalinclude:: postgresql_service.Dockerfile
|
||||
|
||||
Build an image from the Dockerfile assign it a name.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker build -t eg_postgresql .
|
||||
|
||||
And run the PostgreSQL server container (in the foreground):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run --rm -P --name pg_test eg_postgresql
|
||||
|
||||
There are 2 ways to connect to the PostgreSQL server. We can use
|
||||
:ref:`working_with_links_names`, or we can access it from our host (or the network).
|
||||
|
||||
.. note:: The ``--rm`` removes the container and its image when the container
|
||||
exists successfully.
|
||||
|
||||
Using container linking
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Containers can be linked to another container's ports directly using
|
||||
``--link remote_name:local_alias`` in the client's ``docker run``. This will
|
||||
set a number of environment variables that can then be used to connect:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash
|
||||
|
||||
postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
|
||||
|
||||
Connecting from your host system
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Assuming you have the postgresql-client installed, you can use the host-mapped port
|
||||
to test as well. You need to use ``docker ps`` to find out what local host port the
|
||||
container is mapped to first:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test
|
||||
$ psql -h localhost -p 49153 -d docker -U docker --password
|
||||
|
||||
Testing the database
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Once you have authenticated and have a ``docker =#`` prompt, you can
|
||||
create a table and populate it.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
psql (9.3.1)
|
||||
Type "help" for help.
|
||||
|
||||
docker=# CREATE TABLE cities (
|
||||
docker(# name varchar(80),
|
||||
docker(# location point
|
||||
docker(# );
|
||||
CREATE TABLE
|
||||
docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)');
|
||||
INSERT 0 1
|
||||
docker=# select * from cities;
|
||||
name | location
|
||||
---------------+-----------
|
||||
San Francisco | (-194,53)
|
||||
(1 row)
|
||||
|
||||
Using the container volumes
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can use the defined volumes to inspect the PostgreSQL log files and to backup your
|
||||
configuration and data:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run --rm --volumes-from pg_test -t -i busybox sh
|
||||
|
||||
/ # ls
|
||||
bin etc lib linuxrc mnt proc run sys usr
|
||||
dev home lib64 media opt root sbin tmp var
|
||||
/ # ls /etc/postgresql/9.3/main/
|
||||
environment pg_hba.conf postgresql.conf
|
||||
pg_ctl.conf pg_ident.conf start.conf
|
||||
/tmp # ls /var/log
|
||||
ldconfig postgresql
|
||||
|
|
@@ -9,8 +9,8 @@ page_keywords: docker, example, python, web app

> - This example assumes you have Docker running in daemon mode. For
> more information please see [*Check your Docker
> install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
> access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
> access*](/installation/binaries/#dockergroup)

While using Dockerfiles is the preferred way to create maintainable and
repeatable images, it's useful to know how you can try things out and

@@ -18,13 +18,13 @@ then commit your live changes to an image.

The goal of this example is to show you how you can modify your own
Docker images by making changes to a running container, and then saving
the results as a new image. We will do that by making a simple ‘hello
world’ Flask web application image.
the results as a new image. We will do that by making a simple `hello
world` Flask web application image.

## Download the initial image

Download the `shykes/pybuilder` Docker image from
the `http://index.docker.io` registry.
Download the `shykes/pybuilder` Docker image from the `http://index.docker.io`
registry.

This image contains a `buildapp` script to download
the web app and then `pip install` any required

@@ -36,7 +36,7 @@ modules, and a `runapp` script that finds the

> **Note**:
> This container was built with a very old version of docker (May 2013 -
> see [shykes/pybuilder](https://github.com/shykes/pybuilder) ), when the
> `Dockerfile` format was different, but the image can
> Dockerfile format was different, but the image can
> still be used now.

## Interactively make some modifications

@@ -49,7 +49,7 @@ the `$URL` variable. The container is given a name

`pybuilder_run` which we will use in the next steps.

While this example is simple, you could run any number of interactive
commands, try things out, and then exit when you’re done.
commands, try things out, and then exit when you're done.

    $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash

@@ -76,11 +76,11 @@ mapped to a local port

    $ sudo docker run -d -p 5000 --name web_worker /builds/github.com/shykes/helloflask/master /usr/local/bin/runapp

- **"docker run -d "** run a command in a new container. We pass "-d"
  so it runs as a daemon.
- **"-p 5000"** the web app is going to listen on this port, so it
  must be mapped from the container to the host system.
- **/usr/local/bin/runapp** is the command which starts the web app.

## View the container logs

@@ -93,7 +93,7 @@ another terminal and continue with the example while watching the result

in the logs.

    $ sudo docker logs -f web_worker
     * Running on http://0.0.0.0:5000/

## See the webapp output

@@ -117,7 +117,7 @@ everything worked as planned you should see the line

List `--all` the Docker containers. If this
container had already finished running, it will still be listed here
with a status of ‘Exit 0’.
with a status of `Exit 0`.

    $ sudo docker stop web_worker
    $ sudo docker rm web_worker pybuilder_run
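(The matching image cleanup, which this hunk stops short of, appears in the RST version of this page below:)

    $ sudo docker rmi /builds/github.com/shykes/helloflask/master shykes/pybuilder:latest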
@@ -1,145 +0,0 @@
:title: Python Web app example
:description: Building your own python web app using docker
:keywords: docker, example, python, web app

.. _python_web_app:

Python Web App
==============

.. include:: example_header.inc

While using Dockerfiles is the preferred way to create maintainable
and repeatable images, it's useful to know how you can try things out
and then commit your live changes to an image.

The goal of this example is to show you how you can modify your own
Docker images by making changes to a running
container, and then saving the results as a new image. We will do
that by making a simple 'hello world' Flask web application image.

Download the initial image
--------------------------

Download the ``shykes/pybuilder`` Docker image from the ``http://index.docker.io``
registry.

This image contains a ``buildapp`` script to download the web app and then ``pip install``
any required modules, and a ``runapp`` script that finds the ``app.py`` and runs it.

.. _`shykes/pybuilder`: https://github.com/shykes/pybuilder

.. code-block:: bash

    $ sudo docker pull shykes/pybuilder

.. note:: This container was built with a very old version of docker
    (May 2013 - see `shykes/pybuilder`_ ), when the ``Dockerfile`` format was different,
    but the image can still be used now.

Interactively make some modifications
-------------------------------------

We then start a new container running interactively using the image.
First, we set a ``URL`` variable that points to a tarball of a simple
helloflask web app, and then we run a command contained in the image called
``buildapp``, passing it the ``$URL`` variable. The container is
given a name ``pybuilder_run`` which we will use in the next steps.

While this example is simple, you could run any number of interactive commands,
try things out, and then exit when you're done.

.. code-block:: bash

    $ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash

    $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz
    $$ /usr/local/bin/buildapp $URL
    [...]
    $$ exit

Commit the container to create a new image
------------------------------------------

Save the changes we just made in the container to a new image called
``/builds/github.com/shykes/helloflask/master``. You now have 3 different
ways to refer to the container: name ``pybuilder_run``, short-id ``c8b2e8228f11``, or
long-id ``c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9``.

.. code-block:: bash

    $ sudo docker commit pybuilder_run /builds/github.com/shykes/helloflask/master
    c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9

Run the new image to start the web worker
-----------------------------------------

Use the new image to create a new container with
network port 5000 mapped to a local port

.. code-block:: bash

    $ sudo docker run -d -p 5000 --name web_worker /builds/github.com/shykes/helloflask/master /usr/local/bin/runapp

- **"docker run -d "** run a command in a new container. We pass "-d"
  so it runs as a daemon.
- **"-p 5000"** the web app is going to listen on this port, so it
  must be mapped from the container to the host system.
- **/usr/local/bin/runapp** is the command which starts the web app.

View the container logs
-----------------------

View the logs for the new ``web_worker`` container and
if everything worked as planned you should see the line ``Running on
http://0.0.0.0:5000/`` in the log output.

To exit the view without stopping the container, hit Ctrl-C, or open another
terminal and continue with the example while watching the result in the logs.

.. code-block:: bash

    $ sudo docker logs -f web_worker
     * Running on http://0.0.0.0:5000/

See the webapp output
---------------------

Look up the public-facing port which is NAT-ed. Find the private port
used by the container and store it inside of the ``WEB_PORT`` variable.

Access the web app using the ``curl`` binary. If everything worked as planned you
should see the line ``Hello world!`` inside of your console.

.. code-block:: bash

    $ WEB_PORT=$(sudo docker port web_worker 5000 | awk -F: '{ print $2 }')

    # install curl if necessary, then ...
    $ curl http://127.0.0.1:$WEB_PORT
    Hello world!

Clean up example containers and images
--------------------------------------

.. code-block:: bash

    $ sudo docker ps --all

List ``--all`` the Docker containers. If this container had already finished
running, it will still be listed here with a status of 'Exit 0'.

.. code-block:: bash

    $ sudo docker stop web_worker
    $ sudo docker rm web_worker pybuilder_run
    $ sudo docker rmi /builds/github.com/shykes/helloflask/master shykes/pybuilder:latest

And now stop the running web worker, and delete the containers, so that we can
then delete the images that we used.
@@ -9,8 +9,8 @@ page_keywords: docker, example, package installation, networking, redis

> - This example assumes you have Docker running in daemon mode. For
> more information please see [*Check your Docker
> install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
> access*](../../installation/binaries/#dockergroup)
> - **If you don't like sudo** then see [*Giving non-root
> access*](/installation/binaries/#dockergroup)

Very simple, no frills, Redis service attached to a web application
using a link.

@@ -29,44 +29,42 @@ image.

Next we build an image from our `Dockerfile`.
Replace `<your username>` with your own user name.

    sudo docker build -t <your username>/redis .
    $ sudo docker build -t <your username>/redis .

## Run the service

Use the image we’ve just created and name your container
`redis`.
Use the image we've just created and name your container `redis`.

Running the service with `-d` runs the container in
detached mode, leaving the container running in the background.
Running the service with `-d` runs the container in detached mode, leaving
the container running in the background.

Importantly, we’re not exposing any ports on our container. Instead
we’re going to use a container link to provide access to our Redis
Importantly, we're not exposing any ports on our container. Instead
we're going to use a container link to provide access to our Redis
database.

    sudo docker run --name redis -d <your username>/redis
    $ sudo docker run --name redis -d <your username>/redis

## Create your web application container

Next we can create a container for our application. We’re going to use
the `-link` flag to create a link to the
`redis` container we’ve just created with an alias
of `db`. This will create a secure tunnel to the
`redis` container and expose the Redis instance
running inside that container to only this container.
Next we can create a container for our application. We're going to use
the `-link` flag to create a link to the `redis` container we've just
created with an alias of `db`. This will create a secure tunnel to the
`redis` container and expose the Redis instance running inside that
container to only this container.

    sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash
    $ sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash

Once inside our freshly created container we need to install Redis to
get the `redis-cli` binary to test our connection.

    apt-get update
    apt-get -y install redis-server
    service redis-server stop
    $ apt-get update
    $ apt-get -y install redis-server
    $ service redis-server stop

As we’ve used the `--link redis:db` option, Docker
As we've used the `--link redis:db` option, Docker
has created some environment variables in our web application container.

    env | grep DB_
    $ env | grep DB_

    # Should return something similar to this with your values
    DB_NAME=/violet_wolf/db

@@ -76,19 +74,18 @@ has created some environment variables in our web application container.

    DB_PORT_6379_TCP_ADDR=172.17.0.33
    DB_PORT_6379_TCP_PROTO=tcp

We can see that we’ve got a small list of environment variables prefixed
with `DB`. The `DB` comes from
the link alias specified when we launched the container. Let’s use the
`DB_PORT_6379_TCP_ADDR` variable to connect to our
Redis container.
We can see that we've got a small list of environment variables prefixed
with `DB`. The `DB` comes from the link alias specified when we launched
the container. Let's use the `DB_PORT_6379_TCP_ADDR` variable to connect to
our Redis container.

    redis-cli -h $DB_PORT_6379_TCP_ADDR
    redis 172.17.0.33:6379>
    redis 172.17.0.33:6379> set docker awesome
    $ redis-cli -h $DB_PORT_6379_TCP_ADDR
    $ redis 172.17.0.33:6379>
    $ redis 172.17.0.33:6379> set docker awesome
    OK
    redis 172.17.0.33:6379> get docker
    $ redis 172.17.0.33:6379> get docker
    "awesome"
    redis 172.17.0.33:6379> exit
    $ redis 172.17.0.33:6379> exit

We could easily use this or other environment variables in our web
application to make a connection to our `redis`
Some files were not shown because too many files have changed in this diff