Merge branch 'master' into load-profile

Conflicts:
	daemon/execdriver/native/create.go
	daemon/execdriver/native/driver.go

Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes <guillaume@charmes.net> (github: creack)

commit 813cebc64f
277 changed files with 50785 additions and 2480 deletions
@@ -82,6 +82,9 @@ RUN go get code.google.com/p/go.tools/cmd/cover
 # TODO replace FPM with some very minimal debhelper stuff
 RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
 
+# Get the "busybox" image source so we can build locally instead of pulling
+RUN git clone https://github.com/jpetazzo/docker-busybox.git /docker-busybox
+
 # Setup s3cmd config
 RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
 
15 Makefile

@@ -1,4 +1,4 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli validate
 
 # to allow `make BINDDIR=. shell` or `make BINDDIR= test`
 BINDDIR := bundles
@@ -11,7 +11,8 @@ DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
 
 DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
-DOCKER_RUN_DOCS := docker run --rm -it -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)"
+# to allow `make DOCSDIR=docs docs-shell`
+DOCKER_RUN_DOCS := docker run --rm -it -p $(if $(DOCSPORT),$(DOCSPORT):)8000 $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET
 
 default: binary
 
@@ -25,10 +26,13 @@ cross: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
 
 docs: docs-build
-	$(DOCKER_RUN_DOCS)
+	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" mkdocs serve
 
 docs-shell: docs-build
-	$(DOCKER_RUN_DOCS) bash
+	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" bash
+
+docs-release: docs-build
+	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh
 
 test: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli
@@ -39,6 +43,9 @@ test-integration: build
 test-integration-cli: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
 
+validate: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco
+
 shell: build
 	$(DOCKER_RUN_DOCKER) bash
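The new validate target plugs the gofmt and DCO checks into the same containerized flow as the other targets. Assuming the build image exists, a typical run would be:

	make build     # build the dev image once
	make validate  # runs hack/make.sh validate-gofmt validate-dco inside the container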
@@ -2,8 +2,8 @@ package builtins
 
 import (
 	api "github.com/dotcloud/docker/api/server"
+	"github.com/dotcloud/docker/daemon/networkdriver/bridge"
 	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/runtime/networkdriver/bridge"
 	"github.com/dotcloud/docker/server"
 )
 
@@ -3,7 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker/runtime/graphdriver/devmapper"
+	"github.com/dotcloud/docker/daemon/graphdriver/devmapper"
 	"os"
 	"path"
 	"sort"
82 contrib/mkimage-alpine.sh (executable file)
@@ -0,0 +1,82 @@
+#!/bin/sh
+
+set -e
+
+[ $(id -u) -eq 0 ] || {
+	printf >&2 '%s requires root\n' "$0"
+	exit 1
+}
+
+usage() {
+	printf >&2 '%s: [-r release] [-m mirror] [-s]\n' "$0"
+	exit 1
+}
+
+tmp() {
+	TMP=$(mktemp -d /tmp/alpine-docker-XXXXXXXXXX)
+	ROOTFS=$(mktemp -d /tmp/alpine-docker-rootfs-XXXXXXXXXX)
+	trap "rm -rf $TMP $ROOTFS" EXIT TERM INT
+}
+
+apkv() {
+	curl -s $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
+		grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
+}
+
+getapk() {
+	curl -s $REPO/$ARCH/apk-tools-static-$(apkv).apk |
+		tar -xz -C $TMP sbin/apk.static
+}
+
+mkbase() {
+	$TMP/sbin/apk.static --repository $REPO --update-cache --allow-untrusted \
+		--root $ROOTFS --initdb add alpine-base
+}
+
+conf() {
+	printf '%s\n' $REPO > $ROOTFS/etc/apk/repositories
+}
+
+pack() {
+	local id
+	id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL)
+
+	docker tag $id alpine:latest
+	docker run -i -t alpine printf 'alpine:%s with id=%s created!\n' $REL $id
+}
+
+save() {
+	[ $SAVE -eq 1 ] || return
+
+	tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz
+}
+
+while getopts "hr:m:s" opt; do
+	case $opt in
+		r)
+			REL=$OPTARG
+			;;
+		m)
+			MIRROR=$OPTARG
+			;;
+		s)
+			SAVE=1
+			;;
+		*)
+			usage
+			;;
+	esac
+done
+
+REL=${REL:-edge}
+MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine}
+SAVE=${SAVE:-0}
+REPO=$MIRROR/$REL/main
+ARCH=$(uname -m)
+
+tmp
+getapk
+mkbase
+conf
+pack
+save
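Per the getopts loop above, all flags are optional and default to the values set near the end of the script. A plausible invocation (as root, which the script enforces) that pins the release and mirror and also keeps a rootfs tarball:

	# produces the alpine:edge image plus rootfs.tar.xz in the current directory
	./contrib/mkimage-alpine.sh -r edge -m http://nl.alpinelinux.org/alpine -s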
@@ -57,6 +57,7 @@ mknod -m 666 $DEV/tty0 c 4 0
 mknod -m 666 $DEV/full c 1 7
 mknod -m 600 $DEV/initctl p
 mknod -m 666 $DEV/ptmx c 5 2
+ln -sf /proc/self/fd $DEV/fd
 
 tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux
 docker run -i -t archlinux echo Success.
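The added symlink follows the usual Unix convention that /dev/fd points at the calling process's own descriptor table; inside a container built from this image one would expect (illustrative check, not part of the diff):

	ls -l /dev/fd    # /dev/fd -> /proc/self/fd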
@@ -1,17 +1,17 @@
-package runtime
+package daemon
 
 import (
 	"encoding/json"
 	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/daemon/graphdriver"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/image"
 	"github.com/dotcloud/docker/links"
 	"github.com/dotcloud/docker/nat"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime/execdriver"
-	"github.com/dotcloud/docker/runtime/graphdriver"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -64,7 +64,7 @@ type Container struct {
 	stdin     io.ReadCloser
 	stdinPipe io.WriteCloser
 
-	runtime *Runtime
+	daemon *Daemon
 
 	waitLock chan struct{}
 	Volumes  map[string]string
@@ -76,42 +76,6 @@ type Container struct {
 	activeLinks map[string]*links.Link
 }
 
-// FIXME: move deprecated port stuff to nat to clean up the core.
-type PortMapping map[string]string // Deprecated
-
-type NetworkSettings struct {
-	IPAddress   string
-	IPPrefixLen int
-	Gateway     string
-	Bridge      string
-	PortMapping map[string]PortMapping // Deprecated
-	Ports       nat.PortMap
-}
-
-func (settings *NetworkSettings) PortMappingAPI() *engine.Table {
-	var outs = engine.NewTable("", 0)
-	for port, bindings := range settings.Ports {
-		p, _ := nat.ParsePort(port.Port())
-		if len(bindings) == 0 {
-			out := &engine.Env{}
-			out.SetInt("PublicPort", p)
-			out.Set("Type", port.Proto())
-			outs.Add(out)
-			continue
-		}
-		for _, binding := range bindings {
-			out := &engine.Env{}
-			h, _ := nat.ParsePort(binding.HostPort)
-			out.SetInt("PrivatePort", p)
-			out.SetInt("PublicPort", h)
-			out.Set("Type", port.Proto())
-			out.Set("IP", binding.HostIp)
-			outs.Add(out)
-		}
-	}
-	return outs
-}
-
 // Inject the io.Reader at the given path. Note: do not close the reader
 func (container *Container) Inject(file io.Reader, pth string) error {
 	if err := container.Mount(); err != nil {
@@ -148,10 +112,6 @@ func (container *Container) Inject(file io.Reader, pth string) error {
 	return nil
 }
 
-func (container *Container) When() time.Time {
-	return container.Created
-}
-
 func (container *Container) FromDisk() error {
 	data, err := ioutil.ReadFile(container.jsonPath())
 	if err != nil {
@@ -358,14 +318,14 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 	})
 }
 
-func populateCommand(c *Container) {
+func populateCommand(c *Container, env []string) {
 	var (
 		en           *execdriver.Network
 		driverConfig = make(map[string][]string)
 	)
 
 	en = &execdriver.Network{
-		Mtu:       c.runtime.config.Mtu,
+		Mtu:       c.daemon.config.Mtu,
 		Interface: nil,
 	}
 
@@ -402,18 +362,7 @@ func populateCommand(c *Container) {
 		Resources: resources,
 	}
 	c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
-}
-
-func (container *Container) ArgsAsString() string {
-	var args []string
-	for _, arg := range container.Args {
-		if strings.Contains(arg, " ") {
-			args = append(args, fmt.Sprintf("'%s'", arg))
-		} else {
-			args = append(args, arg)
-		}
-	}
-	return strings.Join(args, " ")
+	c.command.Env = env
 }
 
 func (container *Container) Start() (err error) {
@@ -423,186 +372,50 @@ func (container *Container) Start() (err error) {
 	if container.State.IsRunning() {
 		return nil
 	}
 
 	// if we encounter and error during start we need to ensure that any other
 	// setup has been cleaned up properly
 	defer func() {
 		if err != nil {
 			container.cleanup()
 		}
 	}()
 
-	if container.ResolvConfPath == "" {
-		if err := container.setupContainerDns(); err != nil {
-			return err
-		}
+	if err := container.setupContainerDns(); err != nil {
+		return err
 	}
 
 	if err := container.Mount(); err != nil {
 		return err
 	}
 
-	if container.runtime.config.DisableNetwork {
-		container.Config.NetworkDisabled = true
-		container.buildHostnameAndHostsFiles("127.0.1.1")
-	} else {
-		if err := container.allocateNetwork(); err != nil {
-			return err
-		}
-		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
+	if err := container.initializeNetworking(); err != nil {
+		return err
 	}
 
-	// Make sure the config is compatible with the current kernel
-	if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit {
-		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
-		container.Config.Memory = 0
-	}
-	if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit {
-		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
-		container.Config.MemorySwap = -1
-	}
-
-	if container.runtime.sysInfo.IPv4ForwardingDisabled {
-		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
-	}
-
+	container.verifyDaemonSettings()
 	if err := prepareVolumesForContainer(container); err != nil {
 		return err
 	}
 
-	// Setup environment
-	env := []string{
-		"HOME=/",
-		"PATH=" + DefaultPathEnv,
-		"HOSTNAME=" + container.Config.Hostname,
-	}
-
-	if container.Config.Tty {
-		env = append(env, "TERM=xterm")
-	}
-
-	// Init any links between the parent and children
-	runtime := container.runtime
-
-	children, err := runtime.Children(container.Name)
+	linkedEnv, err := container.setupLinkedContainers()
 	if err != nil {
 		return err
 	}
-
-	if len(children) > 0 {
-		container.activeLinks = make(map[string]*links.Link, len(children))
-
-		// If we encounter an error make sure that we rollback any network
-		// config and ip table changes
-		rollback := func() {
-			for _, link := range container.activeLinks {
-				link.Disable()
-			}
-			container.activeLinks = nil
-		}
-
-		for linkAlias, child := range children {
-			if !child.State.IsRunning() {
-				return fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
-			}
-
-			link, err := links.NewLink(
-				container.NetworkSettings.IPAddress,
-				child.NetworkSettings.IPAddress,
-				linkAlias,
-				child.Config.Env,
-				child.Config.ExposedPorts,
-				runtime.eng)
-
-			if err != nil {
-				rollback()
-				return err
-			}
-
-			container.activeLinks[link.Alias()] = link
-			if err := link.Enable(); err != nil {
-				rollback()
-				return err
-			}
-
-			for _, envVar := range link.ToEnv() {
-				env = append(env, envVar)
-			}
-		}
-	}
-
-	// because the env on the container can override certain default values
-	// we need to replace the 'env' keys where they match and append anything
-	// else.
-	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
+	env := container.createDaemonEnvironment(linkedEnv)
 	// TODO: This is only needed for lxc so we should look for a way to
 	// remove this dep
 	if err := container.generateEnvConfig(env); err != nil {
 		return err
 	}
 
-	if container.Config.WorkingDir != "" {
-		container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
-
-		pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir))
-		if err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
-			if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
-				return err
-			}
-		}
-		if pthInfo != nil && !pthInfo.IsDir() {
-			return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
-		}
-	}
-
-	envPath, err := container.EnvConfigPath()
-	if err != nil {
+	if err := container.setupWorkingDirectory(); err != nil {
 		return err
 	}
 
-	populateCommand(container)
-	container.command.Env = env
-
-	if err := setupMountsForContainer(container, envPath); err != nil {
+	populateCommand(container, env)
+	if err := setupMountsForContainer(container); err != nil {
 		return err
 	}
 
-	// Setup logging of stdout and stderr to disk
-	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
-		return err
-	}
-	if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
+	if err := container.startLoggingToDisk(); err != nil {
 		return err
 	}
 	container.waitLock = make(chan struct{})
 
-	callbackLock := make(chan struct{})
-	callback := func(command *execdriver.Command) {
-		container.State.SetRunning(command.Pid())
-		if command.Tty {
-			// The callback is called after the process Start()
-			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace
-			// which we close here.
-			if c, ok := command.Stdout.(io.Closer); ok {
-				c.Close()
-			}
-		}
-		if err := container.ToDisk(); err != nil {
-			utils.Debugf("%s", err)
-		}
-		close(callbackLock)
-	}
-
-	// We use a callback here instead of a goroutine and an chan for
-	// syncronization purposes
-	cErr := utils.Go(func() error { return container.monitor(callback) })
-
-	// Start should not return until the process is actually running
-	select {
-	case <-callbackLock:
-	case err := <-cErr:
-		return err
-	}
-	return nil
+	return container.waitForStart()
 }
 
 func (container *Container) Run() error {
@@ -683,42 +496,18 @@ func (container *Container) allocateNetwork() error {
 	var (
 		env *engine.Env
 		err error
-		eng = container.runtime.eng
+		eng = container.daemon.eng
 	)
 
-	if container.State.IsGhost() {
-		if container.runtime.config.DisableNetwork {
-			env = &engine.Env{}
-		} else {
-			currentIP := container.NetworkSettings.IPAddress
-
-			job := eng.Job("allocate_interface", container.ID)
-			if currentIP != "" {
-				job.Setenv("RequestIP", currentIP)
-			}
-
-			env, err = job.Stdout.AddEnv()
-			if err != nil {
-				return err
-			}
-
-			if err := job.Run(); err != nil {
-				return err
-			}
-		}
-	} else {
-		job := eng.Job("allocate_interface", container.ID)
-		env, err = job.Stdout.AddEnv()
-		if err != nil {
-			return err
-		}
-		if err := job.Run(); err != nil {
-			return err
-		}
+	job := eng.Job("allocate_interface", container.ID)
+	if env, err = job.Stdout.AddEnv(); err != nil {
+		return err
+	}
+	if err := job.Run(); err != nil {
+		return err
 	}
 
 	if container.Config.PortSpecs != nil {
 		utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", "))
 		if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
 			return err
 		}
@@ -733,58 +522,23 @@ func (container *Container) allocateNetwork() error {
 		bindings = make(nat.PortMap)
 	)
 
-	if !container.State.IsGhost() {
-		if container.Config.ExposedPorts != nil {
-			portSpecs = container.Config.ExposedPorts
-		}
-		if container.hostConfig.PortBindings != nil {
-			bindings = container.hostConfig.PortBindings
-		}
-	} else {
-		if container.NetworkSettings.Ports != nil {
-			for port, binding := range container.NetworkSettings.Ports {
-				portSpecs[port] = struct{}{}
-				bindings[port] = binding
-			}
-		}
+	if container.Config.ExposedPorts != nil {
+		portSpecs = container.Config.ExposedPorts
+	}
+	if container.hostConfig.PortBindings != nil {
+		bindings = container.hostConfig.PortBindings
 	}
 
 	container.NetworkSettings.PortMapping = nil
 
 	for port := range portSpecs {
-		binding := bindings[port]
-		if container.hostConfig.PublishAllPorts && len(binding) == 0 {
-			binding = append(binding, nat.PortBinding{})
+		if err := container.allocatePort(eng, port, bindings); err != nil {
+			return err
 		}
-
-		for i := 0; i < len(binding); i++ {
-			b := binding[i]
-
-			portJob := eng.Job("allocate_port", container.ID)
-			portJob.Setenv("HostIP", b.HostIp)
-			portJob.Setenv("HostPort", b.HostPort)
-			portJob.Setenv("Proto", port.Proto())
-			portJob.Setenv("ContainerPort", port.Port())
-
-			portEnv, err := portJob.Stdout.AddEnv()
-			if err != nil {
-				return err
-			}
-			if err := portJob.Run(); err != nil {
-				eng.Job("release_interface", container.ID).Run()
-				return err
-			}
-			b.HostIp = portEnv.Get("HostIP")
-			b.HostPort = portEnv.Get("HostPort")
-
-			binding[i] = b
-		}
-		bindings[port] = binding
 	}
 	container.WriteHostConfig()
 
 	container.NetworkSettings.Ports = bindings
 
 	container.NetworkSettings.Bridge = env.Get("Bridge")
 	container.NetworkSettings.IPAddress = env.Get("IP")
 	container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen")
@@ -797,7 +551,7 @@ func (container *Container) releaseNetwork() {
 	if container.Config.NetworkDisabled {
 		return
 	}
-	eng := container.runtime.eng
+	eng := container.daemon.eng
 
 	eng.Job("release_interface", container.ID).Run()
 	container.NetworkSettings = &NetworkSettings{}
@@ -810,12 +564,12 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
 	)
 
 	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
-	exitCode, err = container.runtime.Run(container, pipes, callback)
+	exitCode, err = container.daemon.Run(container, pipes, callback)
 	if err != nil {
 		utils.Errorf("Error running container: %s", err)
 	}
 
-	if container.runtime != nil && container.runtime.srv != nil && container.runtime.srv.IsRunning() {
+	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
 		container.State.SetStopped(exitCode)
 
 		// FIXME: there is a race condition here which causes this to fail during the unit tests.
@@ -838,8 +592,8 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
 		container.stdin, container.stdinPipe = io.Pipe()
 	}
 
-	if container.runtime != nil && container.runtime.srv != nil {
-		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
+	if container.daemon != nil && container.daemon.srv != nil {
+		container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
 	}
 
 	close(container.waitLock)
@@ -885,7 +639,7 @@ func (container *Container) KillSig(sig int) error {
 	if !container.State.IsRunning() {
 		return nil
 	}
-	return container.runtime.Kill(container, sig)
+	return container.daemon.Kill(container, sig)
 }
 
 func (container *Container) Kill() error {
@@ -962,10 +716,10 @@ func (container *Container) ExportRw() (archive.Archive, error) {
 	if err := container.Mount(); err != nil {
 		return nil, err
 	}
-	if container.runtime == nil {
+	if container.daemon == nil {
 		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
 	}
-	archive, err := container.runtime.Diff(container)
+	archive, err := container.daemon.Diff(container)
 	if err != nil {
 		container.Unmount()
 		return nil, err
@@ -1012,22 +766,22 @@ func (container *Container) WaitTimeout(timeout time.Duration) error {
 }
 
 func (container *Container) Mount() error {
-	return container.runtime.Mount(container)
+	return container.daemon.Mount(container)
 }
 
 func (container *Container) Changes() ([]archive.Change, error) {
-	return container.runtime.Changes(container)
+	return container.daemon.Changes(container)
 }
 
 func (container *Container) GetImage() (*image.Image, error) {
-	if container.runtime == nil {
+	if container.daemon == nil {
 		return nil, fmt.Errorf("Can't get image of unregistered container")
 	}
-	return container.runtime.graph.Get(container.Image)
+	return container.daemon.graph.Get(container.Image)
 }
 
 func (container *Container) Unmount() error {
-	return container.runtime.Unmount(container)
+	return container.daemon.Unmount(container)
 }
 
 func (container *Container) logPath(name string) string {
@@ -1080,7 +834,7 @@ func (container *Container) GetSize() (int64, int64) {
 	var (
 		sizeRw, sizeRootfs int64
 		err                error
-		driver             = container.runtime.driver
+		driver             = container.daemon.driver
 	)
 
 	if err := container.Mount(); err != nil {
@@ -1089,7 +843,7 @@ func (container *Container) GetSize() (int64, int64) {
 	}
 	defer container.Unmount()
 
-	if differ, ok := container.runtime.driver.(graphdriver.Differ); ok {
+	if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
 		sizeRw, err = differ.DiffSize(container.ID)
 		if err != nil {
 			utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
@@ -1182,29 +936,32 @@ func (container *Container) DisableLink(name string) {
 }
 
 func (container *Container) setupContainerDns() error {
+	if container.ResolvConfPath != "" {
+		return nil
+	}
 	var (
-		config  = container.hostConfig
-		runtime = container.runtime
+		config = container.hostConfig
+		daemon = container.daemon
 	)
 	resolvConf, err := utils.GetResolvConf()
 	if err != nil {
 		return err
 	}
 	// If custom dns exists, then create a resolv.conf for the container
-	if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 {
+	if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
 		var (
 			dns       = utils.GetNameservers(resolvConf)
 			dnsSearch = utils.GetSearchDomains(resolvConf)
 		)
 		if len(config.Dns) > 0 {
 			dns = config.Dns
-		} else if len(runtime.config.Dns) > 0 {
-			dns = runtime.config.Dns
+		} else if len(daemon.config.Dns) > 0 {
+			dns = daemon.config.Dns
 		}
 		if len(config.DnsSearch) > 0 {
 			dnsSearch = config.DnsSearch
-		} else if len(runtime.config.DnsSearch) > 0 {
-			dnsSearch = runtime.config.DnsSearch
+		} else if len(daemon.config.DnsSearch) > 0 {
+			dnsSearch = daemon.config.DnsSearch
 		}
 		container.ResolvConfPath = path.Join(container.root, "resolv.conf")
 		f, err := os.Create(container.ResolvConfPath)
@@ -1227,3 +984,198 @@ func (container *Container) setupContainerDns() error {
 	}
 	return nil
 }
+
+func (container *Container) initializeNetworking() error {
+	if container.daemon.config.DisableNetwork {
+		container.Config.NetworkDisabled = true
+		container.buildHostnameAndHostsFiles("127.0.1.1")
+	} else {
+		if err := container.allocateNetwork(); err != nil {
+			return err
+		}
+		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
+	}
+	return nil
+}
+
+// Make sure the config is compatible with the current kernel
+func (container *Container) verifyDaemonSettings() {
+	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
+		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		container.Config.Memory = 0
+	}
+	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
+		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		container.Config.MemorySwap = -1
+	}
+	if container.daemon.sysInfo.IPv4ForwardingDisabled {
+		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
+	}
+}
+
+func (container *Container) setupLinkedContainers() ([]string, error) {
+	var (
+		env    []string
+		daemon = container.daemon
+	)
+	children, err := daemon.Children(container.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(children) > 0 {
+		container.activeLinks = make(map[string]*links.Link, len(children))
+
+		// If we encounter an error make sure that we rollback any network
+		// config and ip table changes
+		rollback := func() {
+			for _, link := range container.activeLinks {
+				link.Disable()
+			}
+			container.activeLinks = nil
+		}
+
+		for linkAlias, child := range children {
+			if !child.State.IsRunning() {
+				return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
+			}
+
+			link, err := links.NewLink(
+				container.NetworkSettings.IPAddress,
+				child.NetworkSettings.IPAddress,
+				linkAlias,
+				child.Config.Env,
+				child.Config.ExposedPorts,
+				daemon.eng)
+
+			if err != nil {
+				rollback()
+				return nil, err
+			}
+
+			container.activeLinks[link.Alias()] = link
+			if err := link.Enable(); err != nil {
+				rollback()
+				return nil, err
+			}
+
+			for _, envVar := range link.ToEnv() {
+				env = append(env, envVar)
+			}
+		}
+	}
+	return env, nil
+}
+
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
+	// Setup environment
+	env := []string{
+		"HOME=/",
+		"PATH=" + DefaultPathEnv,
+		"HOSTNAME=" + container.Config.Hostname,
+	}
+	if container.Config.Tty {
+		env = append(env, "TERM=xterm")
+	}
+	env = append(env, linkedEnv...)
+	// because the env on the container can override certain default values
+	// we need to replace the 'env' keys where they match and append anything
+	// else.
+	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
+
+	return env
+}
+
+func (container *Container) setupWorkingDirectory() error {
+	if container.Config.WorkingDir != "" {
+		container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
+
+		pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir))
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return err
+			}
+			if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
+				return err
+			}
+		}
+		if pthInfo != nil && !pthInfo.IsDir() {
+			return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
+		}
+	}
+	return nil
+}
+
+func (container *Container) startLoggingToDisk() error {
+	// Setup logging of stdout and stderr to disk
+	if err := container.daemon.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
+		return err
+	}
+	if err := container.daemon.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (container *Container) waitForStart() error {
+	callbackLock := make(chan struct{})
+	callback := func(command *execdriver.Command) {
+		container.State.SetRunning(command.Pid())
+		if command.Tty {
+			// The callback is called after the process Start()
+			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace
+			// which we close here.
+			if c, ok := command.Stdout.(io.Closer); ok {
+				c.Close()
+			}
+		}
+		if err := container.ToDisk(); err != nil {
+			utils.Debugf("%s", err)
+		}
+		close(callbackLock)
+	}
+
+	// We use a callback here instead of a goroutine and an chan for
+	// syncronization purposes
+	cErr := utils.Go(func() error { return container.monitor(callback) })
+
+	// Start should not return until the process is actually running
+	select {
+	case <-callbackLock:
+	case err := <-cErr:
+		return err
+	}
+	return nil
+}
+
+func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bindings nat.PortMap) error {
+	binding := bindings[port]
+	if container.hostConfig.PublishAllPorts && len(binding) == 0 {
+		binding = append(binding, nat.PortBinding{})
+	}
+
+	for i := 0; i < len(binding); i++ {
+		b := binding[i]
+
+		job := eng.Job("allocate_port", container.ID)
+		job.Setenv("HostIP", b.HostIp)
+		job.Setenv("HostPort", b.HostPort)
+		job.Setenv("Proto", port.Proto())
+		job.Setenv("ContainerPort", port.Port())
+
+		portEnv, err := job.Stdout.AddEnv()
+		if err != nil {
+			return err
+		}
+		if err := job.Run(); err != nil {
+			eng.Job("release_interface", container.ID).Run()
+			return err
+		}
+		b.HostIp = portEnv.Get("HostIP")
+		b.HostPort = portEnv.Get("HostPort")
+
+		binding[i] = b
+	}
+	bindings[port] = binding
+	return nil
+}
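Read as a whole, this file's diff is a mechanical runtime-to-daemon rename plus one structural change: the long Start() body is split into the single-purpose helpers added above. A condensed sketch of the resulting control flow, assembled from the added lines in this file (error handling abbreviated; a reading aid, not verbatim new source):

	// Sketch: the refactored Start() is a straight-line pipeline over the helpers.
	func (container *Container) Start() (err error) {
		if container.State.IsRunning() {
			return nil
		}
		defer func() {
			if err != nil {
				container.cleanup() // undo partial setup on any failure
			}
		}()
		if err := container.setupContainerDns(); err != nil { // no-op if resolv.conf already set
			return err
		}
		if err := container.Mount(); err != nil {
			return err
		}
		if err := container.initializeNetworking(); err != nil {
			return err
		}
		container.verifyDaemonSettings() // kernel-capability warnings only
		if err := prepareVolumesForContainer(container); err != nil {
			return err
		}
		linkedEnv, err := container.setupLinkedContainers()
		if err != nil {
			return err
		}
		env := container.createDaemonEnvironment(linkedEnv)
		if err := container.generateEnvConfig(env); err != nil { // still needed for lxc
			return err
		}
		if err := container.setupWorkingDirectory(); err != nil {
			return err
		}
		populateCommand(container, env) // env now passed in instead of set afterwards
		if err := setupMountsForContainer(container); err != nil {
			return err
		}
		if err := container.startLoggingToDisk(); err != nil {
			return err
		}
		container.waitLock = make(chan struct{})
		return container.waitForStart()
	}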
@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 	"github.com/dotcloud/docker/nat"
@@ -1,9 +1,16 @@
-package runtime
+package daemon
 
 import (
 	"container/list"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon/execdriver"
+	"github.com/dotcloud/docker/daemon/execdriver/execdrivers"
+	"github.com/dotcloud/docker/daemon/execdriver/lxc"
+	"github.com/dotcloud/docker/daemon/graphdriver"
+	_ "github.com/dotcloud/docker/daemon/graphdriver/vfs"
+	_ "github.com/dotcloud/docker/daemon/networkdriver/bridge"
+	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
 	"github.com/dotcloud/docker/daemonconfig"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/engine"
@@ -14,13 +21,6 @@ import (
 	"github.com/dotcloud/docker/pkg/selinux"
 	"github.com/dotcloud/docker/pkg/sysinfo"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime/execdriver"
-	"github.com/dotcloud/docker/runtime/execdriver/execdrivers"
-	"github.com/dotcloud/docker/runtime/execdriver/lxc"
-	"github.com/dotcloud/docker/runtime/graphdriver"
-	_ "github.com/dotcloud/docker/runtime/graphdriver/vfs"
-	_ "github.com/dotcloud/docker/runtime/networkdriver/bridge"
-	"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -44,7 +44,7 @@ var (
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
 )
 
-type Runtime struct {
+type Daemon struct {
 	repository  string
 	sysInitPath string
 	containers  *list.List
@@ -76,17 +76,17 @@ func remountPrivate(mountPoint string) error {
 	return mount.ForceMount("", mountPoint, "none", "private")
 }
 
-// List returns an array of all containers registered in the runtime.
-func (runtime *Runtime) List() []*Container {
+// List returns an array of all containers registered in the daemon.
+func (daemon *Daemon) List() []*Container {
 	containers := new(History)
-	for e := runtime.containers.Front(); e != nil; e = e.Next() {
+	for e := daemon.containers.Front(); e != nil; e = e.Next() {
 		containers.Add(e.Value.(*Container))
 	}
 	return *containers
 }
 
-func (runtime *Runtime) getContainerElement(id string) *list.Element {
-	for e := runtime.containers.Front(); e != nil; e = e.Next() {
+func (daemon *Daemon) getContainerElement(id string) *list.Element {
+	for e := daemon.containers.Front(); e != nil; e = e.Next() {
 		container := e.Value.(*Container)
 		if container.ID == id {
 			return e
@@ -97,17 +97,17 @@ func (runtime *Runtime) getContainerElement(id string) *list.Element {
 
 // Get looks for a container by the specified ID or name, and returns it.
 // If the container is not found, or if an error occurs, nil is returned.
-func (runtime *Runtime) Get(name string) *Container {
-	if c, _ := runtime.GetByName(name); c != nil {
+func (daemon *Daemon) Get(name string) *Container {
+	if c, _ := daemon.GetByName(name); c != nil {
 		return c
 	}
 
-	id, err := runtime.idIndex.Get(name)
+	id, err := daemon.idIndex.Get(name)
 	if err != nil {
 		return nil
 	}
 
-	e := runtime.getContainerElement(id)
+	e := daemon.getContainerElement(id)
 	if e == nil {
 		return nil
 	}
@@ -116,18 +116,18 @@ func (runtime *Runtime) Get(name string) *Container {
 
 // Exists returns a true if a container of the specified ID or name exists,
 // false otherwise.
-func (runtime *Runtime) Exists(id string) bool {
-	return runtime.Get(id) != nil
+func (daemon *Daemon) Exists(id string) bool {
+	return daemon.Get(id) != nil
 }
 
-func (runtime *Runtime) containerRoot(id string) string {
-	return path.Join(runtime.repository, id)
+func (daemon *Daemon) containerRoot(id string) string {
+	return path.Join(daemon.repository, id)
 }
 
 // Load reads the contents of a container from disk
 // This is typically done at startup.
-func (runtime *Runtime) load(id string) (*Container, error) {
-	container := &Container{root: runtime.containerRoot(id)}
+func (daemon *Daemon) load(id string) (*Container, error) {
+	container := &Container{root: daemon.containerRoot(id)}
 	if err := container.FromDisk(); err != nil {
 		return nil, err
 	}
@@ -140,19 +140,19 @@ func (runtime *Runtime) load(id string) (*Container, error) {
 	return container, nil
 }
 
-// Register makes a container object usable by the runtime as <container.ID>
-func (runtime *Runtime) Register(container *Container) error {
-	if container.runtime != nil || runtime.Exists(container.ID) {
+// Register makes a container object usable by the daemon as <container.ID>
+func (daemon *Daemon) Register(container *Container) error {
+	if container.daemon != nil || daemon.Exists(container.ID) {
 		return fmt.Errorf("Container is already loaded")
 	}
 	if err := validateID(container.ID); err != nil {
 		return err
 	}
-	if err := runtime.ensureName(container); err != nil {
+	if err := daemon.ensureName(container); err != nil {
 		return err
 	}
 
-	container.runtime = runtime
+	container.daemon = daemon
 
 	// Attach to stdout and stderr
 	container.stderr = utils.NewWriteBroadcaster()
@@ -164,8 +164,8 @@ func (runtime *Runtime) Register(container *Container) error {
 		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 	// done
-	runtime.containers.PushBack(container)
-	runtime.idIndex.Add(container.ID)
+	daemon.containers.PushBack(container)
+	daemon.idIndex.Add(container.ID)
 
 	// FIXME: if the container is supposed to be running but is not, auto restart it?
 	// if so, then we need to restart monitor and init a new lock
@@ -192,7 +192,7 @@ func (runtime *Runtime) Register(container *Container) error {
 			if err != nil {
 				utils.Debugf("cannot find existing process for %d", existingPid)
 			}
-			runtime.execDriver.Terminate(cmd)
+			daemon.execDriver.Terminate(cmd)
 		}
 		if err := container.Unmount(); err != nil {
 			utils.Debugf("ghost unmount error %s", err)
@@ -202,10 +202,10 @@ func (runtime *Runtime) Register(container *Container) error {
 			}
 		}
 
-		info := runtime.execDriver.Info(container.ID)
+		info := daemon.execDriver.Info(container.ID)
 		if !info.IsRunning() {
 			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
-			if runtime.config.AutoRestart {
+			if daemon.config.AutoRestart {
 				utils.Debugf("Restarting")
 				if err := container.Unmount(); err != nil {
 					utils.Debugf("restart unmount error %s", err)
@@ -234,9 +234,9 @@ func (runtime *Runtime) Register(container *Container) error {
 	return nil
 }
 
-func (runtime *Runtime) ensureName(container *Container) error {
+func (daemon *Daemon) ensureName(container *Container) error {
 	if container.Name == "" {
-		name, err := generateRandomName(runtime)
+		name, err := generateRandomName(daemon)
 		if err != nil {
 			name = utils.TruncateID(container.ID)
 		}
@@ -245,8 +245,8 @@ func (runtime *Runtime) ensureName(container *Container) error {
 		if err := container.ToDisk(); err != nil {
 			utils.Debugf("Error saving container name %s", err)
 		}
-		if !runtime.containerGraph.Exists(name) {
-			if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
+		if !daemon.containerGraph.Exists(name) {
+			if _, err := daemon.containerGraph.Set(name, container.ID); err != nil {
 				utils.Debugf("Setting default id - %s", err)
 			}
 		}
@@ -254,7 +254,7 @@ func (runtime *Runtime) ensureName(container *Container) error {
 	return nil
 }
 
-func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
+func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
 	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
 	if err != nil {
 		return err
@@ -263,13 +263,13 @@ func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
 	return nil
 }
 
-// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem.
-func (runtime *Runtime) Destroy(container *Container) error {
+// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
+func (daemon *Daemon) Destroy(container *Container) error {
 	if container == nil {
 		return fmt.Errorf("The given container is <nil>")
 	}
 
-	element := runtime.getContainerElement(container.ID)
+	element := daemon.getContainerElement(container.ID)
 	if element == nil {
 		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
 	}
@@ -278,42 +278,42 @@ func (runtime *Runtime) Destroy(container *Container) error {
 		return err
 	}
 
-	if err := runtime.driver.Remove(container.ID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err)
+	if err := daemon.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
 	}
 
 	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := runtime.driver.Remove(initID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err)
+	if err := daemon.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
 	}
 
-	if _, err := runtime.containerGraph.Purge(container.ID); err != nil {
+	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
 		utils.Debugf("Unable to remove container from link graph: %s", err)
 	}
 
 	// Deregister the container before removing its directory, to avoid race conditions
-	runtime.idIndex.Delete(container.ID)
-	runtime.containers.Remove(element)
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Remove(element)
 	if err := os.RemoveAll(container.root); err != nil {
 		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
 	}
 	return nil
 }
 
-func (runtime *Runtime) restore() error {
+func (daemon *Daemon) restore() error {
 	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
 		fmt.Printf("Loading containers: ")
 	}
-	dir, err := ioutil.ReadDir(runtime.repository)
+	dir, err := ioutil.ReadDir(daemon.repository)
 	if err != nil {
 		return err
 	}
 	containers := make(map[string]*Container)
-	currentDriver := runtime.driver.String()
+	currentDriver := daemon.driver.String()
 
 	for _, v := range dir {
 		id := v.Name()
-		container, err := runtime.load(id)
+		container, err := daemon.load(id)
 		if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
 			fmt.Print(".")
 		}
@@ -332,12 +332,12 @@ func (runtime *Runtime) restore() error {
 	}
 
 	register := func(container *Container) {
-		if err := runtime.Register(container); err != nil {
+		if err := daemon.Register(container); err != nil {
 			utils.Debugf("Failed to register container %s: %s", container.ID, err)
 		}
 	}
 
-	if entities := runtime.containerGraph.List("/", -1); entities != nil {
+	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 			if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
 				fmt.Print(".")
@@ -353,12 +353,12 @@ func (runtime *Runtime) restore() error {
 	// Any containers that are left over do not exist in the graph
 	for _, container := range containers {
 		// Try to set the default name for a container if it exists prior to links
-		container.Name, err = generateRandomName(runtime)
+		container.Name, err = generateRandomName(daemon)
 		if err != nil {
 			container.Name = utils.TruncateID(container.ID)
 		}
 
-		if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
+		if _, err := daemon.containerGraph.Set(container.Name, container.ID); err != nil {
 			utils.Debugf("Setting default id - %s", err)
 		}
 		register(container)
@@ -372,38 +372,38 @@ func (runtime *Runtime) restore() error {
 }
 
 // Create creates a new container from the given configuration with a given name.
-func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) {
+func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
 	var (
 		container *Container
 		warnings  []string
 	)
 
-	img, err := runtime.repositories.LookupImage(config.Image)
+	img, err := daemon.repositories.LookupImage(config.Image)
 	if err != nil {
 		return nil, nil, err
 	}
-	if err := runtime.checkImageDepth(img); err != nil {
+	if err := daemon.checkImageDepth(img); err != nil {
 		return nil, nil, err
 	}
-	if warnings, err = runtime.mergeAndVerifyConfig(config, img); err != nil {
+	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
 		return nil, nil, err
 	}
-	if container, err = runtime.newContainer(name, config, img); err != nil {
+	if container, err = daemon.newContainer(name, config, img); err != nil {
 		return nil, nil, err
 	}
-	if err := runtime.createRootfs(container, img); err != nil {
+	if err := daemon.createRootfs(container, img); err != nil {
 		return nil, nil, err
 	}
 	if err := container.ToDisk(); err != nil {
 		return nil, nil, err
 	}
-	if err := runtime.Register(container); err != nil {
+	if err := daemon.Register(container); err != nil {
 		return nil, nil, err
 	}
 	return container, warnings, nil
 }
 
-func (runtime *Runtime) checkImageDepth(img *image.Image) error {
+func (daemon *Daemon) checkImageDepth(img *image.Image) error {
 	// We add 2 layers to the depth because the container's rw and
 	// init layer add to the restriction
 	depth, err := img.Depth()
@@ -416,7 +416,7 @@ func (runtime *Runtime) checkImageDepth(img *image.Image) error {
 	return nil
 }
 
-func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool {
+func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 	if config != nil {
 		if config.PortSpecs != nil {
 			for _, p := range config.PortSpecs {
@@ -429,9 +429,9 @@ func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool {
 	return false
 }
 
-func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
+func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
 	warnings := []string{}
-	if runtime.checkDeprecatedExpose(img.Config) || runtime.checkDeprecatedExpose(config) {
+	if daemon.checkDeprecatedExpose(img.Config) || daemon.checkDeprecatedExpose(config) {
 		warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.")
 	}
 	if img.Config != nil {
@@ -445,14 +445,14 @@ func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
 	return warnings, nil
 }
 
-func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
+func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
 	var (
 		err error
 		id  = utils.GenerateRandomID()
 	)
 
 	if name == "" {
-		name, err = generateRandomName(runtime)
+		name, err = generateRandomName(daemon)
 		if err != nil {
 			name = utils.TruncateID(id)
 		}
@@ -465,19 +465,19 @@ func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
 		name = "/" + name
 	}
 	// Set the enitity in the graph using the default name specified
-	if _, err := runtime.containerGraph.Set(name, id); err != nil {
+	if _, err := daemon.containerGraph.Set(name, id); err != nil {
 		if !graphdb.IsNonUniqueNameError(err) {
 			return "", "", err
 		}
 
-		conflictingContainer, err := runtime.GetByName(name)
+		conflictingContainer, err := daemon.GetByName(name)
 		if err != nil {
 			if strings.Contains(err.Error(), "Could not find entity") {
 				return "", "", err
 			}
 
 			// Remove name and continue starting the container
-			if err := runtime.containerGraph.Delete(name); err != nil {
+			if err := daemon.containerGraph.Delete(name); err != nil {
 				return "", "", err
 			}
 		} else {
@@ -490,7 +490,7 @@ func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
 	return id, name, nil
 }
 
-func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) {
+func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) {
 	// Generate default hostname
 	// FIXME: the lxc template no longer needs to set a default hostname
 	if config.Hostname == "" {
@@ -498,7 +498,7 @@ func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) {
 	}
 }
 
-func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
+func (daemon *Daemon) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
 	var (
 		entrypoint string
 		args       []string
@@ -513,18 +513,18 @@ func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
 	return entrypoint, args
 }
 
-func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
+func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
 	var (
 		id  string
 		err error
 	)
-	id, name, err = runtime.generateIdAndName(name)
+	id, name, err = daemon.generateIdAndName(name)
 	if err != nil {
 		return nil, err
 	}
 
-	runtime.generateHostname(id, config)
-	entrypoint, args := runtime.getEntrypointAndArgs(config)
+	daemon.generateHostname(id, config)
+	entrypoint, args := daemon.getEntrypointAndArgs(config)
 
 	container := &Container{
 		// FIXME: we should generate the ID here instead of receiving it as an argument
@@ -537,34 +537,34 @@ func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
 		Image:           img.ID, // Always use the resolved image id
 		NetworkSettings: &NetworkSettings{},
 		Name:            name,
-		Driver:          runtime.driver.String(),
-		ExecDriver:      runtime.execDriver.Name(),
+		Driver:          daemon.driver.String(),
+		ExecDriver:      daemon.execDriver.Name(),
 	}
-	container.root = runtime.containerRoot(container.ID)
+	container.root = daemon.containerRoot(container.ID)
 	return container, nil
 }
 
-func (runtime *Runtime) createRootfs(container *Container, img *image.Image) error {
+func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error {
 	// Step 1: create the container directory.
 	// This doubles as a barrier to avoid race conditions.
 	if err := os.Mkdir(container.root, 0700); err != nil {
 		return err
 	}
 	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := runtime.driver.Create(initID, img.ID, ""); err != nil {
+	if err := daemon.driver.Create(initID, img.ID, ""); err != nil {
 		return err
 	}
-	initPath, err := runtime.driver.Get(initID)
+	initPath, err := daemon.driver.Get(initID)
 	if err != nil {
 		return err
 	}
-	defer runtime.driver.Put(initID)
+	defer daemon.driver.Put(initID)
 
 	if err := graph.SetupInitLayer(initPath); err != nil {
 		return err
 	}
 
-	if err := runtime.driver.Create(container.ID, initID, ""); err != nil {
+	if err := daemon.driver.Create(container.ID, initID, ""); err != nil {
 		return err
 	}
 	return nil
@@ -572,7 +572,7 @@ func (runtime *Runtime) createRootfs(container *Container, img *image.Image) error {
 
 // Commit creates a new filesystem image from the current state of a container.
 // The image can optionally be tagged into a repository
-func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
+func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
 	// FIXME: freeze the container before copying it to avoid data corruption?
 	if err := container.Mount(); err != nil {
 		return nil, err
@@ -595,13 +595,13 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
 		containerImage = container.Image
 		containerConfig = container.Config
 	}
-	img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
+	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
 	if err != nil {
 		return nil, err
 	}
 	// Register the image if needed
 	if repository != "" {
-		if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil {
+		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
 			return img, err
 		}
 	}
@@ -618,31 +618,31 @@ func GetFullContainerName(name string) (string, error) {
 return name, nil
 }
 
-func (runtime *Runtime) GetByName(name string) (*Container, error) {
+func (daemon *Daemon) GetByName(name string) (*Container, error) {
 fullName, err := GetFullContainerName(name)
 if err != nil {
 return nil, err
 }
-entity := runtime.containerGraph.Get(fullName)
+entity := daemon.containerGraph.Get(fullName)
 if entity == nil {
 return nil, fmt.Errorf("Could not find entity for %s", name)
 }
-e := runtime.getContainerElement(entity.ID())
+e := daemon.getContainerElement(entity.ID())
 if e == nil {
 return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID())
 }
 return e.Value.(*Container), nil
 }
 
-func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
+func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
 name, err := GetFullContainerName(name)
 if err != nil {
 return nil, err
 }
 children := make(map[string]*Container)
 
-err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
-c := runtime.Get(e.ID())
+err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
+c := daemon.Get(e.ID())
 if c == nil {
 return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p)
 }
@@ -656,25 +656,25 @@ func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
 return children, nil
 }
 
-func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error {
+func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error {
 fullName := path.Join(parent.Name, alias)
-if !runtime.containerGraph.Exists(fullName) {
-_, err := runtime.containerGraph.Set(fullName, child.ID)
+if !daemon.containerGraph.Exists(fullName) {
+_, err := daemon.containerGraph.Set(fullName, child.ID)
 return err
 }
 return nil
 }
 
 // FIXME: harmonize with NewGraph()
-func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
-runtime, err := NewRuntimeFromDirectory(config, eng)
+func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
+daemon, err := NewDaemonFromDirectory(config, eng)
 if err != nil {
 return nil, err
 }
-return runtime, nil
+return daemon, nil
 }
 
-func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
+func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
 if !config.EnableSelinuxSupport {
 selinux.SetDisabled()
 }
@@ -693,9 +693,9 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
 return nil, err
 }
 
-runtimeRepo := path.Join(config.Root, "containers")
+daemonRepo := path.Join(config.Root, "containers")
 
-if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
+if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) {
 return nil, err
 }
 
@@ -774,8 +774,8 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
 return nil, err
 }
 
-runtime := &Runtime{
-repository: runtimeRepo,
+daemon := &Daemon{
+repository: daemonRepo,
 containers: list.New(),
 graph: g,
 repositories: repositories,
@@ -790,19 +790,19 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*
 eng: eng,
 }
 
-if err := runtime.checkLocaldns(); err != nil {
+if err := daemon.checkLocaldns(); err != nil {
 return nil, err
 }
-if err := runtime.restore(); err != nil {
+if err := daemon.restore(); err != nil {
 return nil, err
 }
-return runtime, nil
+return daemon, nil
 }
 
-func (runtime *Runtime) shutdown() error {
+func (daemon *Daemon) shutdown() error {
 group := sync.WaitGroup{}
 utils.Debugf("starting clean shutdown of all containers...")
-for _, container := range runtime.List() {
+for _, container := range daemon.List() {
 c := container
 if c.State.IsRunning() {
 utils.Debugf("stopping %s", c.ID)
@@ -823,22 +823,22 @@ func (runtime *Runtime) shutdown() error {
 return nil
 }
 
-func (runtime *Runtime) Close() error {
+func (daemon *Daemon) Close() error {
 errorsStrings := []string{}
-if err := runtime.shutdown(); err != nil {
-utils.Errorf("runtime.shutdown(): %s", err)
+if err := daemon.shutdown(); err != nil {
+utils.Errorf("daemon.shutdown(): %s", err)
 errorsStrings = append(errorsStrings, err.Error())
 }
 if err := portallocator.ReleaseAll(); err != nil {
 utils.Errorf("portallocator.ReleaseAll(): %s", err)
 errorsStrings = append(errorsStrings, err.Error())
 }
-if err := runtime.driver.Cleanup(); err != nil {
-utils.Errorf("runtime.driver.Cleanup(): %s", err.Error())
+if err := daemon.driver.Cleanup(); err != nil {
+utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
 errorsStrings = append(errorsStrings, err.Error())
 }
-if err := runtime.containerGraph.Close(); err != nil {
-utils.Errorf("runtime.containerGraph.Close(): %s", err.Error())
+if err := daemon.containerGraph.Close(); err != nil {
+utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
 errorsStrings = append(errorsStrings, err.Error())
 }
 if len(errorsStrings) > 0 {
@@ -847,55 +847,55 @@ func (runtime *Runtime) Close() error {
 return nil
 }
 
-func (runtime *Runtime) Mount(container *Container) error {
-dir, err := runtime.driver.Get(container.ID)
+func (daemon *Daemon) Mount(container *Container) error {
+dir, err := daemon.driver.Get(container.ID)
 if err != nil {
-return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err)
+return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
 }
 if container.basefs == "" {
 container.basefs = dir
 } else if container.basefs != dir {
 return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
-runtime.driver, container.ID, container.basefs, dir)
+daemon.driver, container.ID, container.basefs, dir)
 }
 return nil
 }
 
-func (runtime *Runtime) Unmount(container *Container) error {
-runtime.driver.Put(container.ID)
+func (daemon *Daemon) Unmount(container *Container) error {
+daemon.driver.Put(container.ID)
 return nil
 }
 
-func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) {
-if differ, ok := runtime.driver.(graphdriver.Differ); ok {
+func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) {
+if differ, ok := daemon.driver.(graphdriver.Differ); ok {
 return differ.Changes(container.ID)
 }
-cDir, err := runtime.driver.Get(container.ID)
+cDir, err := daemon.driver.Get(container.ID)
 if err != nil {
-return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
+return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 }
-defer runtime.driver.Put(container.ID)
-initDir, err := runtime.driver.Get(container.ID + "-init")
+defer daemon.driver.Put(container.ID)
+initDir, err := daemon.driver.Get(container.ID + "-init")
 if err != nil {
-return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
+return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 }
-defer runtime.driver.Put(container.ID + "-init")
+defer daemon.driver.Put(container.ID + "-init")
 return archive.ChangesDirs(cDir, initDir)
 }
 
-func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
-if differ, ok := runtime.driver.(graphdriver.Differ); ok {
+func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) {
+if differ, ok := daemon.driver.(graphdriver.Differ); ok {
 return differ.Diff(container.ID)
 }
 
-changes, err := runtime.Changes(container)
+changes, err := daemon.Changes(container)
 if err != nil {
 return nil, err
 }
 
-cDir, err := runtime.driver.Get(container.ID)
+cDir, err := daemon.driver.Get(container.ID)
 if err != nil {
-return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
+return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err)
 }
 
 archive, err := archive.ExportChanges(cDir, changes)
@@ -904,26 +904,26 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
 }
 return utils.NewReadCloserWrapper(archive, func() error {
 err := archive.Close()
-runtime.driver.Put(container.ID)
+daemon.driver.Put(container.ID)
 return err
 }), nil
 }
 
-func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
-return runtime.execDriver.Run(c.command, pipes, startCallback)
+func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+return daemon.execDriver.Run(c.command, pipes, startCallback)
 }
 
-func (runtime *Runtime) Kill(c *Container, sig int) error {
-return runtime.execDriver.Kill(c.command, sig)
+func (daemon *Daemon) Kill(c *Container, sig int) error {
+return daemon.execDriver.Kill(c.command, sig)
 }
 
 // Nuke kills all containers then removes all content
 // from the content root, including images, volumes and
 // container filesystems.
-// Again: this will remove your entire docker runtime!
-func (runtime *Runtime) Nuke() error {
+// Again: this will remove your entire docker daemon!
+func (daemon *Daemon) Nuke() error {
 var wg sync.WaitGroup
-for _, container := range runtime.List() {
+for _, container := range daemon.List() {
 wg.Add(1)
 go func(c *Container) {
 c.Kill()
@@ -931,63 +931,63 @@ func (runtime *Runtime) Nuke() error {
 }(container)
 }
 wg.Wait()
-runtime.Close()
+daemon.Close()
 
-return os.RemoveAll(runtime.config.Root)
+return os.RemoveAll(daemon.config.Root)
 }
 
 // FIXME: this is a convenience function for integration tests
-// which need direct access to runtime.graph.
+// which need direct access to daemon.graph.
 // Once the tests switch to using engine and jobs, this method
 // can go away.
-func (runtime *Runtime) Graph() *graph.Graph {
-return runtime.graph
+func (daemon *Daemon) Graph() *graph.Graph {
+return daemon.graph
 }
 
-func (runtime *Runtime) Repositories() *graph.TagStore {
-return runtime.repositories
+func (daemon *Daemon) Repositories() *graph.TagStore {
+return daemon.repositories
 }
 
-func (runtime *Runtime) Config() *daemonconfig.Config {
-return runtime.config
+func (daemon *Daemon) Config() *daemonconfig.Config {
+return daemon.config
 }
 
-func (runtime *Runtime) SystemConfig() *sysinfo.SysInfo {
-return runtime.sysInfo
+func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo {
+return daemon.sysInfo
 }
 
-func (runtime *Runtime) SystemInitPath() string {
-return runtime.sysInitPath
+func (daemon *Daemon) SystemInitPath() string {
+return daemon.sysInitPath
 }
 
-func (runtime *Runtime) GraphDriver() graphdriver.Driver {
-return runtime.driver
+func (daemon *Daemon) GraphDriver() graphdriver.Driver {
+return daemon.driver
 }
 
-func (runtime *Runtime) ExecutionDriver() execdriver.Driver {
-return runtime.execDriver
+func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
+return daemon.execDriver
 }
 
-func (runtime *Runtime) Volumes() *graph.Graph {
-return runtime.volumes
+func (daemon *Daemon) Volumes() *graph.Graph {
+return daemon.volumes
 }
 
-func (runtime *Runtime) ContainerGraph() *graphdb.Database {
-return runtime.containerGraph
+func (daemon *Daemon) ContainerGraph() *graphdb.Database {
+return daemon.containerGraph
 }
 
-func (runtime *Runtime) SetServer(server Server) {
-runtime.srv = server
+func (daemon *Daemon) SetServer(server Server) {
+daemon.srv = server
 }
 
-func (runtime *Runtime) checkLocaldns() error {
+func (daemon *Daemon) checkLocaldns() error {
 resolvConf, err := utils.GetResolvConf()
 if err != nil {
 return err
 }
-if len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
+if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
 log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
-runtime.config.Dns = DefaultDns
+daemon.config.Dns = DefaultDns
 }
 return nil
 }
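The `checkLocaldns` change above keeps the existing fallback behavior under the new receiver name: if no DNS servers were configured and `resolv.conf` points at a local resolver that containers cannot reach, the daemon substitutes public defaults. A minimal, standalone sketch of that check, using simplified stand-ins rather than the real `utils.GetResolvConf`/`utils.CheckLocalDns` helpers or Docker's `DefaultDns` values:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// defaultDns is an illustrative stand-in for the daemon's DefaultDns list.
var defaultDns = []string{"8.8.8.8", "8.8.4.4"}

// checkLocalDns reports whether resolv.conf routes DNS through a local
// resolver (127.0.0.1), which is unreachable from inside a container.
func checkLocalDns(resolvConf []byte) bool {
	for _, line := range strings.Split(string(resolvConf), "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "nameserver") && strings.Contains(line, "127.0.0.1") {
			return true
		}
	}
	return false
}

func main() {
	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		panic(err)
	}
	var configuredDns []string // would come from the daemon config (-dns flag)
	if len(configuredDns) == 0 && checkLocalDns(resolvConf) {
		fmt.Printf("local resolver found, falling back to: %v\n", defaultDns)
		configuredDns = defaultDns
	}
	fmt.Println("DNS servers:", configuredDns)
}
```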
@@ -1,11 +1,11 @@
 // +build !exclude_graphdriver_aufs
 
-package runtime
+package daemon
 
 import (
+"github.com/dotcloud/docker/daemon/graphdriver"
+"github.com/dotcloud/docker/daemon/graphdriver/aufs"
 "github.com/dotcloud/docker/graph"
-"github.com/dotcloud/docker/runtime/graphdriver"
-"github.com/dotcloud/docker/runtime/graphdriver/aufs"
 "github.com/dotcloud/docker/utils"
 )
 
daemon/daemon_btrfs.go (new file, 7 lines)
@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_btrfs
+
+package daemon
+
+import (
+_ "github.com/dotcloud/docker/daemon/graphdriver/btrfs"
+)

daemon/daemon_devicemapper.go (new file, 7 lines)
@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_devicemapper
+
+package daemon
+
+import (
+_ "github.com/dotcloud/docker/daemon/graphdriver/devmapper"
+)
@@ -1,9 +1,9 @@
 // +build exclude_graphdriver_aufs
 
-package runtime
+package daemon
 
 import (
-"github.com/dotcloud/docker/runtime/graphdriver"
+"github.com/dotcloud/docker/daemon/graphdriver"
 )
 
 func migrateIfAufs(driver graphdriver.Driver, root string) error {
@@ -2,10 +2,10 @@ package execdrivers
 
 import (
 "fmt"
+"github.com/dotcloud/docker/daemon/execdriver"
+"github.com/dotcloud/docker/daemon/execdriver/lxc"
+"github.com/dotcloud/docker/daemon/execdriver/native"
 "github.com/dotcloud/docker/pkg/sysinfo"
-"github.com/dotcloud/docker/runtime/execdriver"
-"github.com/dotcloud/docker/runtime/execdriver/lxc"
-"github.com/dotcloud/docker/runtime/execdriver/native"
 "path"
 )
 
@@ -2,9 +2,9 @@ package lxc
 
 import (
 "fmt"
+"github.com/dotcloud/docker/daemon/execdriver"
 "github.com/dotcloud/docker/pkg/cgroups"
 "github.com/dotcloud/docker/pkg/label"
-"github.com/dotcloud/docker/runtime/execdriver"
 "github.com/dotcloud/docker/utils"
 "io/ioutil"
 "log"
@@ -3,9 +3,9 @@ package lxc
 import (
 "encoding/json"
 "fmt"
+"github.com/dotcloud/docker/daemon/execdriver"
 "github.com/dotcloud/docker/pkg/netlink"
 "github.com/dotcloud/docker/pkg/user"
-"github.com/dotcloud/docker/runtime/execdriver"
 "github.com/syndtr/gocapability/capability"
 "io/ioutil"
 "net"
@@ -1,8 +1,8 @@
 package lxc
 
 import (
+"github.com/dotcloud/docker/daemon/execdriver"
 "github.com/dotcloud/docker/pkg/label"
-"github.com/dotcloud/docker/runtime/execdriver"
 "strings"
 "text/template"
 )
@@ -3,7 +3,7 @@ package lxc
 import (
 "bufio"
 "fmt"
-"github.com/dotcloud/docker/runtime/execdriver"
+"github.com/dotcloud/docker/daemon/execdriver"
 "io/ioutil"
 "math/rand"
 "os"
@@ -1,7 +1,7 @@
 package configuration
 
 import (
-"github.com/dotcloud/docker/runtime/execdriver/native/template"
+"github.com/dotcloud/docker/daemon/execdriver/native/template"
 "testing"
 )
 
@@ -4,12 +4,12 @@ import (
 "fmt"
 "os"
 
+"github.com/dotcloud/docker/daemon/execdriver"
+"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
+"github.com/dotcloud/docker/daemon/execdriver/native/template"
 "github.com/dotcloud/docker/pkg/apparmor"
 "github.com/dotcloud/docker/pkg/label"
 "github.com/dotcloud/docker/pkg/libcontainer"
-"github.com/dotcloud/docker/runtime/execdriver"
-"github.com/dotcloud/docker/runtime/execdriver/native/configuration"
-"github.com/dotcloud/docker/runtime/execdriver/native/template"
 )
 
 // createContainer populates and configures the container type with the
@@ -3,12 +3,6 @@ package native
 import (
 "encoding/json"
 "fmt"
-"github.com/dotcloud/docker/pkg/apparmor"
-"github.com/dotcloud/docker/pkg/cgroups"
-"github.com/dotcloud/docker/pkg/libcontainer"
-"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
-"github.com/dotcloud/docker/pkg/system"
-"github.com/dotcloud/docker/runtime/execdriver"
 "io"
 "io/ioutil"
 "log"
@@ -18,6 +12,13 @@ import (
 "strconv"
 "strings"
 "syscall"
+
+"github.com/dotcloud/docker/daemon/execdriver"
+"github.com/dotcloud/docker/pkg/apparmor"
+"github.com/dotcloud/docker/pkg/cgroups"
+"github.com/dotcloud/docker/pkg/libcontainer"
+"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
+"github.com/dotcloud/docker/pkg/system"
 )
 
 const (
@@ -5,7 +5,7 @@
 package native
 
 import (
-"github.com/dotcloud/docker/runtime/execdriver"
+"github.com/dotcloud/docker/daemon/execdriver"
 "io"
 "os"
 "os/exec"
@@ -24,8 +24,8 @@ import (
 "bufio"
 "fmt"
 "github.com/dotcloud/docker/archive"
+"github.com/dotcloud/docker/daemon/graphdriver"
 mountpk "github.com/dotcloud/docker/pkg/mount"
-"github.com/dotcloud/docker/runtime/graphdriver"
 "github.com/dotcloud/docker/utils"
 "os"
 "os/exec"
@@ -5,7 +5,7 @@ import (
 "encoding/hex"
 "fmt"
 "github.com/dotcloud/docker/archive"
-"github.com/dotcloud/docker/runtime/graphdriver"
+"github.com/dotcloud/docker/daemon/graphdriver"
 "io/ioutil"
 "os"
 "path"
@@ -11,7 +11,7 @@ import "C"
 
 import (
 "fmt"
-"github.com/dotcloud/docker/runtime/graphdriver"
+"github.com/dotcloud/docker/daemon/graphdriver"
 "os"
 "path"
 "syscall"
@@ -4,7 +4,7 @@ package devmapper
 
 import (
 "fmt"
-"github.com/dotcloud/docker/runtime/graphdriver"
+"github.com/dotcloud/docker/daemon/graphdriver"
 "github.com/dotcloud/docker/utils"
 "io/ioutil"
 "os"
@@ -4,7 +4,7 @@ package devmapper
 
 import (
 "fmt"
-"github.com/dotcloud/docker/runtime/graphdriver"
+"github.com/dotcloud/docker/daemon/graphdriver"
 "io/ioutil"
 "path"
 "runtime"
@@ -2,7 +2,7 @@ package vfs
 
 import (
 "fmt"
-"github.com/dotcloud/docker/runtime/graphdriver"
+"github.com/dotcloud/docker/daemon/graphdriver"
 "os"
 "os/exec"
 "path"
@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 "sort"

@@ -14,7 +14,7 @@ func (history *History) Len() int {
 
 func (history *History) Less(i, j int) bool {
 containers := *history
-return containers[j].When().Before(containers[i].When())
+return containers[j].Created.Before(containers[i].Created)
 }
 
 func (history *History) Swap(i, j int) {

daemon/network_settings.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+package daemon
+
+import (
+"github.com/dotcloud/docker/engine"
+"github.com/dotcloud/docker/nat"
+)
+
+// FIXME: move deprecated port stuff to nat to clean up the core.
+type PortMapping map[string]string // Deprecated
+
+type NetworkSettings struct {
+IPAddress string
+IPPrefixLen int
+Gateway string
+Bridge string
+PortMapping map[string]PortMapping // Deprecated
+Ports nat.PortMap
+}
+
+func (settings *NetworkSettings) PortMappingAPI() *engine.Table {
+var outs = engine.NewTable("", 0)
+for port, bindings := range settings.Ports {
+p, _ := nat.ParsePort(port.Port())
+if len(bindings) == 0 {
+out := &engine.Env{}
+out.SetInt("PublicPort", p)
+out.Set("Type", port.Proto())
+outs.Add(out)
+continue
+}
+for _, binding := range bindings {
+out := &engine.Env{}
+h, _ := nat.ParsePort(binding.HostPort)
+out.SetInt("PrivatePort", p)
+out.SetInt("PublicPort", h)
+out.Set("Type", port.Proto())
+out.Set("IP", binding.HostIp)
+outs.Add(out)
+}
+}
+return outs
+}
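For reference, the rows `PortMappingAPI` above emits can be reproduced with plain maps: one row per host binding of each exposed port, plus a bare `PublicPort` row for exposed-but-unbound ports. A standalone sketch with simplified stand-ins for `nat.PortMap` and `engine.Env` (these types are illustrative, not the real Docker APIs):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// portBinding is an illustrative stand-in for nat.PortBinding.
type portBinding struct{ hostIp, hostPort string }

func main() {
	// Keys follow nat.Port's "port/proto" form.
	ports := map[string][]portBinding{
		"80/tcp": {{hostIp: "0.0.0.0", hostPort: "8080"}},
		"53/udp": {}, // exposed but not bound to the host
	}
	for port, bindings := range ports {
		parts := strings.SplitN(port, "/", 2)
		p, _ := strconv.Atoi(parts[0])
		if len(bindings) == 0 {
			// no host binding: report only the container-side port
			fmt.Printf("{PublicPort: %d, Type: %s}\n", p, parts[1])
			continue
		}
		for _, b := range bindings {
			h, _ := strconv.Atoi(b.hostPort)
			fmt.Printf("{PrivatePort: %d, PublicPort: %d, Type: %s, IP: %s}\n", p, h, parts[1], b.hostIp)
		}
	}
}
```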
@@ -2,13 +2,13 @@ package bridge
 
 import (
 "fmt"
+"github.com/dotcloud/docker/daemon/networkdriver"
+"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
+"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
+"github.com/dotcloud/docker/daemon/networkdriver/portmapper"
 "github.com/dotcloud/docker/engine"
 "github.com/dotcloud/docker/pkg/iptables"
 "github.com/dotcloud/docker/pkg/netlink"
-"github.com/dotcloud/docker/runtime/networkdriver"
-"github.com/dotcloud/docker/runtime/networkdriver/ipallocator"
-"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
-"github.com/dotcloud/docker/runtime/networkdriver/portmapper"
 "github.com/dotcloud/docker/utils"
 "io/ioutil"
 "log"
@@ -32,7 +32,7 @@ var (
 // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges.
 // In theory this shouldn't matter - in practice there's bound to be a few scripts relying
 // on the internal addressing or other stupid things like that.
-// The shouldn't, but hey, let's not break them unless we really have to.
+// They shouldn't, but hey, let's not break them unless we really have to.
 "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23
 "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive
 "10.1.42.1/16",
@@ -3,8 +3,8 @@ package ipallocator
 import (
 "encoding/binary"
 "errors"
+"github.com/dotcloud/docker/daemon/networkdriver"
 "github.com/dotcloud/docker/pkg/collections"
-"github.com/dotcloud/docker/runtime/networkdriver"
 "net"
 "sync"
 )
@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 "github.com/dotcloud/docker/utils"
@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import "sort"
 
@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 "fmt"
@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 "fmt"
@@ -51,14 +51,14 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[
 }
 
 type checker struct {
-runtime *Runtime
+daemon *Daemon
 }
 
 func (c *checker) Exists(name string) bool {
-return c.runtime.containerGraph.Exists("/" + name)
+return c.daemon.containerGraph.Exists("/" + name)
 }
 
 // Generate a random and unique name
-func generateRandomName(runtime *Runtime) (string, error) {
-return namesgenerator.GenerateRandomName(&checker{runtime})
+func generateRandomName(daemon *Daemon) (string, error) {
+return namesgenerator.GenerateRandomName(&checker{daemon})
 }
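The hunk above keeps the same shape it had before the rename: `generateRandomName` only needs an `Exists` predicate, so any store that can answer "is this name taken?" can drive it. A standalone sketch of that pattern; the interface, word lists, and retry count are illustrative stand-ins, not the real `namesgenerator` API:

```go
package main

import (
	"fmt"
	"math/rand"
)

// NameChecker mirrors the role the checker type plays above.
type NameChecker interface {
	Exists(name string) bool
}

// memChecker is a toy in-memory stand-in for the container graph.
type memChecker struct{ taken map[string]bool }

func (c *memChecker) Exists(name string) bool { return c.taken[name] }

func generateRandomName(checker NameChecker) (string, error) {
	adjectives := []string{"boring", "clever", "sleepy"}
	surnames := []string{"turing", "ritchie", "hopper"}
	for i := 0; i < 6; i++ {
		name := fmt.Sprintf("%s_%s", adjectives[rand.Intn(len(adjectives))], surnames[rand.Intn(len(surnames))])
		if !checker.Exists(name) {
			return name, nil
		}
	}
	return "", fmt.Errorf("could not generate a unique name")
}

func main() {
	c := &memChecker{taken: map[string]bool{"boring_turing": true}}
	name, _ := generateRandomName(c)
	fmt.Println(name)
}
```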
@@ -1,4 +1,4 @@
-package runtime
+package daemon
 
 import (
 "testing"
@@ -1,9 +1,9 @@
-package runtime
+package daemon
 
 import (
 "fmt"
 "github.com/dotcloud/docker/archive"
-"github.com/dotcloud/docker/runtime/execdriver"
+"github.com/dotcloud/docker/daemon/execdriver"
 "github.com/dotcloud/docker/utils"
 "io/ioutil"
 "os"
@@ -33,9 +33,14 @@ func prepareVolumesForContainer(container *Container) error {
 return nil
 }
 
-func setupMountsForContainer(container *Container, envPath string) error {
+func setupMountsForContainer(container *Container) error {
+envPath, err := container.EnvConfigPath()
+if err != nil {
+return err
+}
+
 mounts := []execdriver.Mount{
-{container.runtime.sysInitPath, "/.dockerinit", false, true},
+{container.daemon.sysInitPath, "/.dockerinit", false, true},
 {envPath, "/.dockerenv", false, true},
 {container.ResolvConfPath, "/etc/resolv.conf", false, true},
 }
@@ -80,7 +85,7 @@ func applyVolumesFrom(container *Container) error {
 }
 }
 
-c := container.runtime.Get(specParts[0])
+c := container.daemon.Get(specParts[0])
 if c == nil {
 return fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
 }
@@ -162,7 +167,7 @@ func createVolumes(container *Container) error {
 return err
 }
 
-volumesDriver := container.runtime.volumes.Driver()
+volumesDriver := container.daemon.volumes.Driver()
 // Create the requested volumes if they don't exist
 for volPath := range container.Config.Volumes {
 volPath = filepath.Clean(volPath)
@@ -195,7 +200,7 @@ func createVolumes(container *Container) error {
 // Do not pass a container as the parameter for the volume creation.
 // The graph driver using the container's information ( Image ) to
 // create the parent.
-c, err := container.runtime.volumes.Create(nil, "", "", "", "", nil, nil)
+c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
 if err != nil {
 return err
 }
@@ -1,8 +1,8 @@
 package daemonconfig
 
 import (
+"github.com/dotcloud/docker/daemon/networkdriver"
 "github.com/dotcloud/docker/engine"
-"github.com/dotcloud/docker/runtime/networkdriver"
 "net"
 )
 
@@ -1,19 +1,45 @@
-FROM ubuntu:12.04
-MAINTAINER Nick Stinemates
+#
+# docker build -t docker:docs . && docker run -p 8000:8000 docker:docs
+# See the top level Makefile in https://github.com/dotcloud/docker for usage.
+#
+FROM debian:jessie
+MAINTAINER Sven Dowideit <SvenDowideit@docker.com> (@SvenDowideit)
 
-# TODO switch to http://packages.ubuntu.com/trusty/python-sphinxcontrib-httpdomain once trusty is released
+RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git pandoc
 
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq make python-pip python-setuptools
+RUN pip install mkdocs
 
+# installing sphinx for the rst->md conversion only - will be removed after May release
 # pip installs from docs/requirements.txt, but here to increase cacheability
-RUN pip install Sphinx==1.2.1
-RUN pip install sphinxcontrib-httpdomain==1.2.0
-ADD . /docs
-RUN make -C /docs clean docs
+RUN pip install Sphinx==1.2.1
+RUN pip install sphinxcontrib-httpdomain==1.2.0
+
+# add MarkdownTools to get transclusion
+# (future development)
+#RUN easy_install -U setuptools
+#RUN pip install MarkdownTools2
+
+# this week I seem to need the latest dev release of awscli too
+# awscli 1.3.6 does --error-document correctly
+# https://github.com/aws/aws-cli/commit/edc2290e173dfaedc70b48cfa3624d58c533c6c3
+RUN pip install awscli
+
+# get my sitemap.xml branch of mkdocs and use that for now
+RUN git clone https://github.com/SvenDowideit/mkdocs &&\
+cd mkdocs/ &&\
+git checkout docker-markdown-merge &&\
+./setup.py install
+
+ADD . /docs
+ADD MAINTAINERS /docs/sources/humans.txt
+WORKDIR /docs
+
+#build the sphinx html
+#RUN make -C /docs clean docs
+
+#convert to markdown
+#RUN ./convert.sh
 
-WORKDIR /docs/_build/html
-CMD ["python", "-m", "SimpleHTTPServer"]
 # note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
-EXPOSE 8000
+EXPOSE 8000
+
+CMD ["mkdocs", "serve"]
@@ -1,2 +1,3 @@
 James Turnbull <james@lovedthanlost.net> (@jamtur01)
 Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)
+O.S. Tezer <ostezer@gmail.com> (@OSTezer)
docs/README.md (157 changes; mode changed from normal to executable)
@@ -4,77 +4,49 @@ Docker Documentation
 Overview
 --------
 
-The source for Docker documentation is here under ``sources/`` in the
-form of .rst files. These files use
-[reStructuredText](http://docutils.sourceforge.net/rst.html)
-formatting with [Sphinx](http://sphinx-doc.org/) extensions for
-structure, cross-linking and indexing.
+The source for Docker documentation is here under ``sources/`` and uses
+extended Markdown, as implemented by [mkdocs](http://mkdocs.org).
 
-The HTML files are built and hosted on
-[readthedocs.org](https://readthedocs.org/projects/docker/), appearing
-via proxy on https://docs.docker.io. The HTML files update
+The HTML files are built and hosted on https://docs.docker.io, and update
 automatically after each change to the master or release branch of the
 [docker files on GitHub](https://github.com/dotcloud/docker) thanks to
 post-commit hooks. The "release" branch maps to the "latest"
-documentation and the "master" branch maps to the "master"
+documentation and the "master" (unreleased development) branch maps to the "master"
 documentation.
 
 ## Branches
 
 **There are two branches related to editing docs**: ``master`` and a
-``doc*`` branch (currently ``doc0.8.1``). You should normally edit
-docs on the ``master`` branch. That way your fixes will automatically
-get included in later releases, and docs maintainers can easily
-cherry-pick your changes to bring over to the current docs branch. In
-the rare case where your change is not forward-compatible, then you
-could base your change on the appropriate ``doc*`` branch.
+``docs`` branch. You should always edit
+docs on a local branch of the ``master`` branch, and send a PR against ``master``.
+That way your fixes
+will automatically get included in later releases, and docs maintainers
+can easily cherry-pick your changes into the ``docs`` release branch.
+In the rare case where your change is not forward-compatible,
+you may need to base your changes on the ``docs`` branch.
 
-Now that we have a ``doc*`` branch, we can keep the ``latest`` docs
+Now that we have a ``docs`` branch, we can keep the [http://docs.docker.io](http://docs.docker.io) docs
 up to date with any bugs found between ``docker`` code releases.
 
-**Warning**: When *reading* the docs, the ``master`` documentation may
+**Warning**: When *reading* the docs, the [http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may
 include features not yet part of any official docker
-release. ``Master`` docs should be used only for understanding
-bleeding-edge development and ``latest`` (which points to the ``doc*``
+release. The ``beta-docs`` site should be used only for understanding
+bleeding-edge development and ``docs.docker.io`` (which points to the ``docs``
 branch``) should be used for the latest official release.
 
-If you need to manually trigger a build of an existing branch, then
-you can do that through the [readthedocs
-interface](https://readthedocs.org/builds/docker/). If you would like
-to add new build targets, including new branches or tags, then you
-must contact one of the existing maintainers and get your
-readthedocs.org account added to the maintainers list, or just file an
-issue on GitHub describing the branch/tag and why it needs to be added
-to the docs, and one of the maintainers will add it for you.
 
 Getting Started
 ---------------
 
-To edit and test the docs, you'll need to install the Sphinx tool and
-its dependencies. There are two main ways to install this tool:
-
-### Native Installation
-
-Install dependencies from `requirements.txt` file in your `docker/docs`
-directory:
-
-* Linux: `pip install -r docs/requirements.txt`
-
-* Mac OS X: `[sudo] pip-2.7 install -r docs/requirements.txt`
-
-### Alternative Installation: Docker Container
-
-If you're running ``docker`` on your development machine then you may
-find it easier and cleaner to use the docs Dockerfile. This installs Sphinx
-in a container, adds the local ``docs/`` directory and builds the HTML
-docs inside the container, even starting a simple HTTP server on port
-8000 so that you can connect and see your changes.
+Docker documentation builds are done in a docker container, which installs all
+the required tools, adds the local ``docs/`` directory and builds the HTML
+docs. It then starts a HTTP server on port 8000 so that you can connect
+and see your changes.
 
 In the ``docker`` source directory, run:
 ```make docs```
 
-This is the equivalent to ``make clean server`` since each container
-starts clean.
+If you have any issues you need to debug, you can use ``make docs-shell`` and
+then run ``mkdocs serve``
 
 # Contributing
 
@@ -84,8 +56,8 @@ starts clean.
 ``../CONTRIBUTING.md``](../CONTRIBUTING.md)).
 * [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work)
 * Work in your own fork of the code, we accept pull requests.
-* Change the ``.rst`` files with your favorite editor -- try to keep the
-lines short and respect RST and Sphinx conventions.
+* Change the ``.md`` files with your favorite editor -- try to keep the
+lines short (80 chars) and respect Markdown conventions.
 * Run ``make clean docs`` to clean up old files and generate new ones,
 or just ``make docs`` to update after small changes.
 * Your static website can now be found in the ``_build`` directory.
@@ -94,27 +66,13 @@ starts clean.
 
 ``make clean docs`` must complete without any warnings or errors.
 
-## Special Case for RST Newbies:
-
-If you want to write a new doc or make substantial changes to an
-existing doc, but **you don't know RST syntax**, we will accept pull
-requests in Markdown and plain text formats. We really want to
-encourage people to share their knowledge and don't want the markup
-syntax to be the obstacle. So when you make the Pull Request, please
-note in your comment that you need RST markup assistance, and we'll
-make the changes for you, and then we will make a pull request to your
-pull request so that you can get all the changes and learn about the
-markup. You still need to follow the
-[``CONTRIBUTING``](../CONTRIBUTING) guidelines, so please sign your
-commits.
-
 Working using GitHub's file editor
 ----------------------------------
 
 Alternatively, for small changes and typos you might want to use
 GitHub's built in file editor. It allows you to preview your changes
 right online (though there can be some differences between GitHub
-markdown and Sphinx RST). Just be careful not to create many commits.
+Markdown and mkdocs Markdown). Just be careful not to create many commits.
 And you must still [sign your work!](../CONTRIBUTING.md#sign-your-work)
 
 Images
@@ -122,62 +80,25 @@ Images
 
 When you need to add images, try to make them as small as possible
 (e.g. as gif). Usually images should go in the same directory as the
-.rst file which references them, or in a subdirectory if one already
+.md file which references them, or in a subdirectory if one already
 exists.
 
-Notes
------
+Publishing Documentation
+------------------------
 
-* For the template the css is compiled from less. When changes are
-  needed they can be compiled using
+To publish a copy of the documentation you need a ``docs/awsconfig``
+file containing AWS settings to deploy to. The release script will
+create an s3 if needed, and will then push the files to it.
 
-lessc ``lessc main.less`` or watched using watch-lessc ``watch-lessc -i main.less -o main.css``
+```
+[profile dowideit-docs]
+aws_access_key_id = IHOIUAHSIDH234rwf....
+aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
+region = ap-southeast-2
+```
 
-Guides on using sphinx
-----------------------
-* To make links to certain sections create a link target like so:
+The ``profile`` name must be the same as the name of the bucket you are
+deploying to - which you call from the docker directory:
 
-```
-.. _hello_world:
-
-Hello world
-===========
-
-This is a reference to :ref:`hello_world` and will work even if we
-move the target to another file or change the title of the section.
-```
-
-The ``_hello_world:`` will make it possible to link to this position
-(page and section heading) from all other pages. See the [Sphinx
-docs](http://sphinx-doc.org/markup/inline.html#role-ref) for more
-information and examples.
-
-* Notes, warnings and alarms
-
-```
-# a note (use when something is important)
-.. note::
-
-# a warning (orange)
-.. warning::
-
-# danger (red, use sparsely)
-.. danger::
-
-* Code examples
-
-* Start typed commands with ``$ `` (dollar space) so that they
-are easily differentiated from program output.
-* Use "sudo" with docker to ensure that your command is runnable
-even if they haven't [used the *docker*
-group](http://docs.docker.io/en/latest/use/basics/#why-sudo).
-
-Manpages
---------
-
-* To make the manpages, run ``make man``. Please note there is a bug
-in Sphinx 1.1.3 which makes this fail. Upgrade to the latest version
-of Sphinx.
-* Then preview the manpage by running ``man _build/man/docker.1``,
-where ``_build/man/docker.1`` is the path to the generated manfile
+``make AWS_S3_BUCKET=dowideit-docs docs-release``
 
docs/asciinema.patch (new file, 86 lines)
@@ -0,0 +1,86 @@
+diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
+index 6e072f6..5a4537d 100644
+--- a/docs/sources/examples/hello_world.md
++++ b/docs/sources/examples/hello_world.md
+@@ -59,6 +59,9 @@ standard out.
+
+ See the example in action
+
++<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/7658.js"id="asciicast-7658" async></script></body>"></iframe>
++
++
+ ## Hello World Daemon
+
+ Note
+@@ -142,6 +145,8 @@ Make sure it is really stopped.
+
+ See the example in action
+
++<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/2562.js"id="asciicast-2562" async></script></body>"></iframe>
++
+ The next example in the series is a [*Node.js Web
+ App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
+ any of the other examples:
+diff --git a/docs/asciinema.patch b/docs/asciinema.patch
+index e240bf3..e69de29 100644
+--- a/docs/asciinema.patch
++++ b/docs/asciinema.patch
+@@ -1,23 +0,0 @@
+-diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
+-index 6e072f6..5a4537d 100644
+---- a/docs/sources/examples/hello_world.md
+-+++ b/docs/sources/examples/hello_world.md
+-@@ -59,6 +59,9 @@ standard out.
+-
+- See the example in action
+-
+-+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/7658.js"id="asciicast-7658" async></script></body>"></iframe>
+-+
+-+
+- ## Hello World Daemon
+-
+- Note
+-@@ -142,6 +145,8 @@ Make sure it is really stopped.
+-
+- See the example in action
+-
+-+<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/2562.js"id="asciicast-2562" async></script></body>"></iframe>
+-+
+- The next example in the series is a [*Node.js Web
+- App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
+- any of the other examples:
+diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md
+index 6e072f6..c277f38 100644
+--- a/docs/sources/examples/hello_world.md
++++ b/docs/sources/examples/hello_world.md
+@@ -59,6 +59,8 @@ standard out.
+
+ See the example in action
+
++<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/7658.js"id="asciicast-7658" async></script></body>"></iframe>
++
+ ## Hello World Daemon
+
+ Note
+@@ -142,6 +144,8 @@ Make sure it is really stopped.
+
+ See the example in action
+
++<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type="text/javascript"src="https://asciinema.org/a/2562.js"id="asciicast-2562" async></script></body>"></iframe>
++
+ The next example in the series is a [*Node.js Web
+ App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
+ any of the other examples:
+diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md
+index 2122b8d..49edbc8 100644
+--- a/docs/sources/use/workingwithrepository.md
++++ b/docs/sources/use/workingwithrepository.md
+@@ -199,6 +199,8 @@ searchable (or indexed at all) in the Central Index, and there will be
+ no user name checking performed. Your registry will function completely
+ independently from the Central Index.
+
++<iframe width="640" height="360" src="//www.youtube.com/embed/CAewZCBT4PI?rel=0" frameborder="0" allowfullscreen></iframe>
++
+ See also
+
+ [Docker Blog: How to use your own
docs/convert.sh (new executable file, 53 lines)
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+cd /
+
+#run the sphinx build first
+make -C /docs clean docs
+
+cd /docs
+
+#find sources -name '*.md*' -exec rm '{}' \;
+
+# convert from rst to md for mkdocs.org
+# TODO: we're using a sphinx specific rst thing to do between docs links, which we then need to convert to mkdocs specific markup (and pandoc loses it when converting to html / md)
+HTML_FILES=$(find _build -name '*.html' | sed 's/_build\/html\/\(.*\)\/index.html/\1/')
+
+for name in ${HTML_FILES}
+do
+echo $name
+# lets not use gratuitious unicode quotes that cause terrible copy and paste issues
+sed -i 's/“/"/g' _build/html/${name}/index.html
+sed -i 's/”/"/g' _build/html/${name}/index.html
+pandoc -f html -t markdown --atx-headers -o sources/${name}.md1 _build/html/${name}/index.html
+
+#add the meta-data from the rst
+egrep ':(title|description|keywords):' sources/${name}.rst | sed 's/^:/page_/' > sources/${name}.md
+echo >> sources/${name}.md
+#cat sources/${name}.md1 >> sources/${name}.md
+# remove the paragraph links from the source
+cat sources/${name}.md1 | sed 's/\[..\](#.*)//' >> sources/${name}.md
+
+rm sources/${name}.md1
+
+sed -i 's/{.docutils .literal}//g' sources/${name}.md
+sed -i 's/{.docutils$//g' sources/${name}.md
+sed -i 's/^.literal} //g' sources/${name}.md
+sed -i 's/`{.descname}`//g' sources/${name}.md
+sed -i 's/{.descname}//g' sources/${name}.md
+sed -i 's/{.xref}//g' sources/${name}.md
+sed -i 's/{.xref .doc .docutils .literal}//g' sources/${name}.md
+sed -i 's/{.xref .http .http-post .docutils$//g' sources/${name}.md
+sed -i 's/^ .literal}//g' sources/${name}.md
+
+sed -i 's/\\\$container\\_id/\$container_id/' sources/examples/hello_world.md
+sed -i 's/\\\$TESTFLAGS/\$TESTFLAGS/' sources/contributing/devenvironment.md
+sed -i 's/\\\$MYVAR1/\$MYVAR1/g' sources/reference/commandline/cli.md
+
+# git it all so we can test
+# git add ${name}.md
+done
+
+#annoyingly, there are lots of failures
+patch --fuzz 50 -t -p2 < pr4923.patch || true
+patch --fuzz 50 -t -p2 < asciinema.patch || true
docs/convert_with_sphinx.patch (new file, 197 lines)
@@ -0,0 +1,197 @@
+diff --git a/docs/Dockerfile b/docs/Dockerfile
+index bc2b73b..b9808b2 100644
+--- a/docs/Dockerfile
++++ b/docs/Dockerfile
+@@ -4,14 +4,24 @@ MAINTAINER SvenDowideit@docker.com
+ # docker build -t docker:docs . && docker run -p 8000:8000 docker:docs
+ #
+
+-RUN apt-get update && apt-get install -yq make python-pip python-setuptools
+-
++RUN apt-get update && apt-get install -yq make python-pip python-setuptools
+ RUN pip install mkdocs
+
++RUN apt-get install -yq vim-tiny git pandoc
++
++# pip installs from docs/requirements.txt, but here to increase cacheability
++RUN pip install Sphinx==1.2.1
++RUN pip install sphinxcontrib-httpdomain==1.2.0
++
+ ADD . /docs
++
++#build the sphinx html
++RUN make -C /docs clean docs
++
+ WORKDIR /docs
+
+-CMD ["mkdocs", "serve"]
++#CMD ["mkdocs", "serve"]
++CMD bash
+
+ # note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
+ EXPOSE 8000
+diff --git a/docs/theme/docker/layout.html b/docs/theme/docker/layout.html
+index 7d78fb9..0dac9e0 100755
+--- a/docs/theme/docker/layout.html
++++ b/docs/theme/docker/layout.html
+@@ -63,48 +63,6 @@
+
+ <body>
+
+-<div id="wrap">
+-<div class="navbar navbar-static-top navbar-inner navbar-fixed-top ">
+- <div class="navbar-dotcloud">
+- <div class="container">
+-
+- <div style="float: right" class="pull-right">
+- <ul class="nav">
+- <li id="nav-introduction"><a href="http://www.docker.io/" title="Docker Homepage">Home</a></li>
+- <li id="nav-about"><a href="http://www.docker.io/about/" title="About">About</a></li>
+- <li id="nav-gettingstarted"><a href="http://www.docker.io/gettingstarted/">Getting started</a></li>
+- <li id="nav-community"><a href="http://www.docker.io/community/" title="Community">Community</a></li>
+- <li id="nav-documentation" class="active"><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
+- <li id="nav-blog"><a href="http://blog.docker.io/" title="Docker Blog">Blog</a></li>
+- <li id="nav-index"><a href="http://index.docker.io/" title="Docker Image Index, find images here">INDEX <img class="inline-icon" alt="link to external site" src="{{ pathto('_static/img/external-link-icon.png', 1) }}" title="external link"> </a></li>
+- </ul>
+- </div>
+-
+- <div class="brand-logo">
+- <a href="http://www.docker.io" title="Docker Homepage"><img src="{{ pathto('_static/img/docker-top-logo.png', 1) }}" alt="Docker logo"></a>
+- </div>
+- </div>
+- </div>
+-</div>
+-
+-<div class="container-fluid">
+-
+- <!-- Docs nav
+- ================================================== -->
+- <div class="row-fluid main-row">
+-
+- <div class="sidebar bs-docs-sidebar">
+- <div class="page-title" >
+- <h4>DOCUMENTATION</h4>
+- </div>
+-
+- {{ toctree(collapse=False, maxdepth=3) }}
+- <form>
+- <input type="text" id="st-search-input" class="st-search-input span3" placeholder="search in documentation" style="width:210px;" />
+- <div id="st-results-container"></div>
+- </form>
+- </div>
+-
+ <!-- body block -->
+ <div class="main-content">
+
+@@ -114,111 +72,7 @@
+ {% block body %}{% endblock %}
+ </section>
+
+- <div class="pull-right"><a href="https://github.com/dotcloud/docker/blob/{{ github_tag }}/docs/sources/{{ pagename }}.rst" title="edit this article">Edit this article on GitHub</a></div>
+ </div>
+- </div>
+-</div>
+-
+-<div id="push-the-footer"></div>
+-</div> <!-- end wrap for pushing footer -->
+-
+-<div id="footer">
+- <div class="footer-landscape">
+- <div class="footer-landscape-image">
+- <!-- footer -->
+- <div class="container">
+- <div class="row footer">
+- <div class="span12 tbox">
+- <div class="tbox">
+- <p>Docker is an open source project, sponsored by <a href="https://www.docker.com">Docker Inc.</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
+- <p>Documentation proudly hosted by <a href="http://www.readthedocs.org">Read the Docs</a></p>
+- </div>
+-
+- <div class="social links">
+- <a title="Docker on Twitter" class="twitter" href="http://twitter.com/docker">Twitter</a>
+- <a title="Docker on GitHub" class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
+- <a title="Docker on Reddit" class="reddit" href="http://www.reddit.com/r/Docker/">Reddit</a>
+- <a title="Docker on Google+" class="googleplus" href="https://plus.google.com/u/0/b/100381662757235514581/communities/108146856671494713993">Google+</a>
+- <a title="Docker on Facebook" class="facebook" href="https://www.facebook.com/docker.run">Facebook</a>
+- <a title="Docker on SlideShare" class="slideshare" href="http://www.slideshare.net/dotCloud">Slideshare</a>
+- <a title="Docker on Youtube" class="youtube" href="http://www.youtube.com/user/dockerrun/">Youtube</a>
+- <a title="Docker on Flickr" class="flickr" href="http://www.flickr.com/photos/99741659@N08/">Flickr</a>
+- <a title="Docker on LinkedIn" class="linkedin" href="http://www.linkedin.com/company/dotcloud">LinkedIn</a>
+- </div>
+-
+- <div class="tbox version-flyer ">
+- <div class="content">
+- <p class="version-note">Note: You are currently browsing the development documentation. The current release may work differently.</p>
+-
+- <small>Available versions:</small>
+- <ul class="inline">
+- {% for slug, url in versions %}
+- <li class="alternative"><a href="{{ url }}{%- for word in pagename.split('/') -%}
+- {%- if word != 'index' -%}
+- {%- if word != '' -%}
+- {{ word }}/
+- {%- endif -%}
+- {%- endif -%}
+- {%- endfor -%}"
+- title="Switch to {{ slug }}">{{ slug }}</a></li>
+- {% endfor %}
+- </ul>
+- </div>
+- </div>
+-
+-
+- </div>
+- </div>
+- </div>
+- </div>
+- <!-- end of footer -->
+- </div>
+-
+-</div>
+-
+-
+-<script type="text/javascript" src="{{ pathto('_static/js/docs.js', 1) }}"></script>
+-
+-<!-- Swiftype search -->
+-
+-<script type="text/javascript">
+- var Swiftype = window.Swiftype || {};
+- (function() {
+- Swiftype.key = 'pWPnnyvwcfpcrw1o51Sz';
+- Swiftype.inputElement = '#st-search-input';
+- Swiftype.resultContainingElement = '#st-results-container';
+- Swiftype.attachElement = '#st-search-input';
+- Swiftype.renderStyle = "overlay";
+- // from https://swiftype.com/questions/how-can-i-make-more-popular-content-rank-higher
+- // Use "page" for now -- they don't subgroup by document type yet.
+- Swiftype.searchFunctionalBoosts = {"page": {"popularity": "linear"}};
+-
+- var script = document.createElement('script');
+- script.type = 'text/javascript';
+- script.async = true;
+- script.src = "//swiftype.com/embed.js";
+- var entry = document.getElementsByTagName('script')[0];
+- entry.parentNode.insertBefore(script, entry);
+- }());
+-</script>
+-
+-
+-<!-- Google analytics -->
+-<script type="text/javascript">
+-
+- var _gaq = _gaq || [];
+- _gaq.push(['_setAccount', 'UA-6096819-11']);
+- _gaq.push(['_setDomainName', 'docker.io']);
+- _gaq.push(['_setAllowLinker', true]);
+- _gaq.push(['_trackPageview']);
+-
+- (function() {
+- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+- })();
+-
+-</script>
+
+ </body>
+ </html>
docs/mkdocs.yml (new executable file, 126 lines; truncated below)
@@ -0,0 +1,126 @@
+site_name: Docker Documentation
+#site_url: http://docs.docker.io/
+site_url: /
+site_description: Documentation for fast and lightweight Docker container based virtualization framework.
+site_favicon: img/favicon.png
+
+dev_addr: '0.0.0.0:8000'
+
+repo_url: https://github.com/dotcloud/docker/
+
+docs_dir: sources
+
+include_search: true
+
+use_absolute_urls: true
+
+# theme: docker
+theme_dir: ./theme/mkdocs/
+theme_center_lead: false
+include_search: true
+
+copyright: Copyright © 2014, Docker, Inc.
+google_analytics: ['UA-6096819-11', 'docker.io']
+
+pages:
+
+# Introduction:
+- ['index.md', 'About', 'Docker']
+- ['introduction/index.md', '**HIDDEN**']
+- ['introduction/understanding-docker.md', 'About', 'Understanding Docker']
+- ['introduction/technology.md', 'About', 'The Technology']
+- ['introduction/working-with-docker.md', 'About', 'Working with Docker']
+- ['introduction/get-docker.md', 'About', 'Get Docker']
+
+# Installation:
+- ['installation/index.md', '**HIDDEN**']
+- ['installation/mac.md', 'Installation', 'Mac OS X']
+- ['installation/ubuntulinux.md', 'Installation', 'Ubuntu']
+- ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux']
+- ['installation/gentoolinux.md', 'Installation', 'Gentoo']
+- ['installation/google.md', 'Installation', 'Google Cloud Platform']
+- ['installation/rackspace.md', 'Installation', 'Rackspace Cloud']
+- ['installation/amazon.md', 'Installation', 'Amazon EC2']
+- ['installation/softlayer.md', 'Installation', 'IBM Softlayer']
+- ['installation/archlinux.md', 'Installation', 'Arch Linux']
+- ['installation/frugalware.md', 'Installation', 'FrugalWare']
+- ['installation/fedora.md', 'Installation', 'Fedora']
+- ['installation/openSUSE.md', 'Installation', 'openSUSE']
+- ['installation/cruxlinux.md', 'Installation', 'CRUX Linux']
+- ['installation/windows.md', 'Installation', 'Microsoft Windows']
+- ['installation/binaries.md', 'Installation', 'Binaries']
+
+# Examples:
+- ['use/index.md', '**HIDDEN**']
+- ['use/basics.md', 'Examples', 'First steps with Docker']
+- ['examples/index.md', '**HIDDEN**']
+- ['examples/hello_world.md', 'Examples', 'Hello World']
+- ['examples/nodejs_web_app.md', 'Examples', 'Node.js web application']
+- ['examples/python_web_app.md', 'Examples', 'Python web application']
+- ['examples/mongodb.md', 'Examples', 'MongoDB service']
+- ['examples/running_redis_service.md', 'Examples', 'Redis service']
+- ['examples/postgresql_service.md', 'Examples', 'PostgreSQL service']
+- ['examples/running_riak_service.md', 'Examples', 'Running a Riak service']
+- ['examples/running_ssh_service.md', 'Examples', 'Running an SSH service']
+- ['examples/couchdb_data_volumes.md', 'Examples', 'CouchDB service']
+- ['examples/apt-cacher-ng.md', 'Examples', 'Apt-Cacher-ng service']
+- ['examples/https.md', 'Examples', 'Running Docker with HTTPS']
+- ['examples/using_supervisord.md', 'Examples', 'Using Supervisor']
+- ['examples/cfengine_process_management.md', 'Examples', 'Process management with CFEngine']
+- ['use/working_with_links_names.md', 'Examples', 'Linking containers together']
+- ['use/working_with_volumes.md', 'Examples', 'Sharing Directories using volumes']
+- ['use/puppet.md', 'Examples', 'Using Puppet']
+- ['use/chef.md', 'Examples', 'Using Chef']
+- ['use/workingwithrepository.md', 'Examples', 'Working with a Docker Repository']
+- ['use/port_redirection.md', 'Examples', 'Redirect ports']
+- ['use/ambassador_pattern_linking.md', 'Examples', 'Cross-Host linking using Ambassador Containers']
+- ['use/host_integration.md', 'Examples', 'Automatically starting Containers']
+
+#- ['user-guide/index.md', '**HIDDEN**']
+# - ['user-guide/writing-your-docs.md', 'User Guide', 'Writing your docs']
+# - ['user-guide/styling-your-docs.md', 'User Guide', 'Styling your docs']
+# - ['user-guide/configuration.md', 'User Guide', 'Configuration']
+# ./faq.md
+
+# Docker Index docs:
+- ['index/index.md', '**HIDDEN**']
+# - ['index/home.md', 'Docker Index', 'Help']
+- ['index/accounts.md', 'Docker Index', 'Accounts']
+- ['index/repos.md', 'Docker Index', 'Repositories']
+- ['index/builds.md', 'Docker Index', 'Trusted Builds']
+
+# Reference
+- ['reference/index.md', '**HIDDEN**']
+- ['reference/commandline/cli.md', 'Reference', 'Command line']
+- ['reference/builder.md', 'Reference', 'Dockerfile']
+- ['reference/run.md', 'Reference', 'Run Reference']
+- ['articles/index.md', '**HIDDEN**']
+- ['articles/runmetrics.md', 'Reference', 'Runtime metrics']
|
||||
- ['articles/security.md', 'Reference', 'Security']
|
||||
- ['articles/baseimages.md', 'Reference', 'Creating a Base Image']
|
||||
- ['use/networking.md', 'Reference', 'Advanced networking']
|
||||
- ['reference/api/index_api.md', 'Reference', 'Docker Index API']
|
||||
- ['reference/api/registry_api.md', 'Reference', 'Docker Registry API']
|
||||
- ['reference/api/registry_index_spec.md', 'Reference', 'Registry & Index Spec']
|
||||
- ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
|
||||
- ['reference/api/docker_remote_api_v1.10.md', 'Reference', 'Docker Remote API v1.10']
|
||||
- ['reference/api/docker_remote_api_v1.9.md', 'Reference', 'Docker Remote API v1.9']
|
||||
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
|
||||
|
||||
# Contribute:
|
||||
- ['contributing/index.md', '**HIDDEN**']
|
||||
- ['contributing/contributing.md', 'Contribute', 'Contributing']
|
||||
- ['contributing/devenvironment.md', 'Contribute', 'Development environment']
|
||||
# - ['about/license.md', 'About', 'License']
|
||||
|
||||
- ['jsearch.md', '**HIDDEN**']
|
||||
|
||||
# - ['static_files/README.md', 'static_files', 'README']
|
||||
#- ['terms/index.md', '**HIDDEN**']
|
||||
# - ['terms/layer.md', 'terms', 'layer']
|
||||
# - ['terms/index.md', 'terms', 'Home']
|
||||
# - ['terms/registry.md', 'terms', 'registry']
|
||||
# - ['terms/container.md', 'terms', 'container']
|
||||
# - ['terms/repository.md', 'terms', 'repository']
|
||||
# - ['terms/filesystem.md', 'terms', 'filesystem']
|
||||
# - ['terms/image.md', 'terms', 'image']
|
12836
docs/pr4923.patch
Normal file
File diff suppressed because it is too large
63
docs/release.sh
Executable file
|
@@ -0,0 +1,63 @@
|
|||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
set -o pipefail
|
||||
|
||||
usage() {
|
||||
cat >&2 <<'EOF'
|
||||
To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
|
||||
(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
|
||||
and set the AWS_S3_BUCKET env var to the name of your bucket.
|
||||
|
||||
make AWS_S3_BUCKET=beta-docs.docker.io docs-release
|
||||
|
||||
will then push the documentation site to your s3 bucket.
|
||||
EOF
|
||||
exit 1
|
||||
}
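
# For reference, a minimal docs/awsconfig might look like the sketch below.
# This is an illustration, not a real config: the profile name must match
# your $AWS_S3_BUCKET, and the key values are placeholders.
#
#   [profile beta-docs.docker.io]
#   aws_access_key_id = <your-access-key>
#   aws_secret_access_key = <your-secret-key>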
|
||||
|
||||
[ "$AWS_S3_BUCKET" ] || usage
|
||||
|
||||
#VERSION=$(cat VERSION)
|
||||
BUCKET=$AWS_S3_BUCKET
|
||||
|
||||
export AWS_CONFIG_FILE=$(pwd)/awsconfig
|
||||
[ -e "$AWS_CONFIG_FILE" ] || usage
|
||||
export AWS_DEFAULT_PROFILE=$BUCKET
|
||||
|
||||
echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"
|
||||
|
||||
setup_s3() {
|
||||
echo "Create $BUCKET"
|
||||
# Try creating the bucket. Ignore errors (it might already exist).
|
||||
aws s3 mb s3://$BUCKET 2>/dev/null || true
|
||||
# Check access to the bucket.
|
||||
echo "test $BUCKET exists"
|
||||
aws s3 ls s3://$BUCKET
|
||||
# Make the bucket accessible through website endpoints.
|
||||
echo "make $BUCKET accessible as a website"
|
||||
#aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
|
||||
s3conf=$(cat s3_website.json)
|
||||
aws s3api put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
|
||||
}
|
||||
|
||||
build_current_documentation() {
|
||||
mkdocs build
|
||||
}
|
||||
|
||||
upload_current_documentation() {
|
||||
src=site/
|
||||
dst=s3://$BUCKET
|
||||
|
||||
echo
|
||||
echo "Uploading $src"
|
||||
echo " to $dst"
|
||||
echo
|
||||
#s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst"
|
||||
aws s3 sync --acl public-read --exclude "*.rej" --exclude "*.rst" --exclude "*.orig" --exclude "*.py" "$src" "$dst"
|
||||
}
|
||||
|
||||
setup_s3
|
||||
build_current_documentation
|
||||
upload_current_documentation
|
||||
|
15
docs/s3_website.json
Normal file
|
@@ -0,0 +1,15 @@
|
|||
{
|
||||
"ErrorDocument": {
|
||||
"Key": "jsearch/index.html"
|
||||
},
|
||||
"IndexDocument": {
|
||||
"Suffix": "index.html"
|
||||
},
|
||||
"RoutingRules": [
|
||||
{ "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } },
|
||||
{ "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } },
|
||||
{ "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } },
|
||||
{ "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } }
|
||||
]
|
||||
}
|
||||
|
3
docs/sources/article-img/architecture.svg
Normal file
File diff suppressed because one or more lines are too long
After: Size 8.2 KiB
8
docs/sources/articles.md
Normal file
|
@@ -0,0 +1,8 @@
|
|||
# Articles
|
||||
|
||||
## Contents:
|
||||
|
||||
- [Docker Security](security/)
|
||||
- [Create a Base Image](baseimages/)
|
||||
- [Runtime Metrics](runmetrics/)
|
||||
|
60
docs/sources/articles/baseimages.md
Normal file
|
@@ -0,0 +1,60 @@
|
|||
page_title: Create a Base Image
|
||||
page_description: How to create base images
|
||||
page_keywords: Examples, Usage, base image, docker, documentation, examples
|
||||
|
||||
# Create a Base Image
|
||||
|
||||
So you want to create your own [*Base
|
||||
Image*](../../terms/image/#base-image-def)? Great!
|
||||
|
||||
The specific process will depend heavily on the Linux distribution you
|
||||
want to package. We have some examples below, and you are encouraged to
|
||||
submit pull requests to contribute new ones.
|
||||
|
||||
## Create a full image using tar
|
||||
|
||||
In general, you’ll want to start with a working machine that is running
|
||||
the distribution you’d like to package as a base image, though that is
|
||||
not required for some tools like Debian’s
|
||||
[Debootstrap](https://wiki.debian.org/Debootstrap), which you can also
|
||||
use to build Ubuntu images.
|
||||
|
||||
It can be as simple as this to create an Ubuntu base image:
|
||||
|
||||
$ sudo debootstrap raring raring > /dev/null
|
||||
$ sudo tar -C raring -c . | sudo docker import - raring
|
||||
a29c15f1bf7a
|
||||
$ sudo docker run raring cat /etc/lsb-release
|
||||
DISTRIB_ID=Ubuntu
|
||||
DISTRIB_RELEASE=13.04
|
||||
DISTRIB_CODENAME=raring
|
||||
DISTRIB_DESCRIPTION="Ubuntu 13.04"
|
||||
|
||||
There are more example scripts for creating base images in the Docker
|
||||
GitHub Repo:
|
||||
|
||||
- [BusyBox](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh)
|
||||
- CentOS / Scientific Linux CERN (SLC) [on
|
||||
Debian/Ubuntu](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh)
|
||||
or [on
|
||||
CentOS/RHEL/SLC/etc.](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh)
|
||||
- [Debian /
|
||||
Ubuntu](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh)
|
||||
|
||||
## Creating a simple base image using `scratch`
|
||||
|
||||
There is a special repository in the Docker registry called
|
||||
`scratch`, which was created using an empty tar
|
||||
file:
|
||||
|
||||
$ tar cv --files-from /dev/null | docker import - scratch
|
||||
|
||||
which you can `docker pull`. You can then use that
|
||||
image as the base (`FROM`) for your new minimal containers:
|
||||
|
||||
FROM scratch
|
||||
ADD true-asm /true
|
||||
CMD ["/true"]
|
||||
|
||||
The Dockerfile above is from the extremely minimal image -
|
||||
[tianon/true](https://github.com/tianon/dockerfiles/tree/master/true).
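
Assuming that `Dockerfile` and the `true-asm` binary are in your current
directory, building and running it might look like this (the image name
`eg_minimal` is only an illustration):

    $ sudo docker build -t eg_minimal .
    $ sudo docker run eg_minimal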
|
434
docs/sources/articles/runmetrics.md
Normal file
|
@@ -0,0 +1,434 @@
|
|||
page_title: Runtime Metrics
|
||||
page_description: Measure the behavior of running containers
|
||||
page_keywords: docker, metrics, CPU, memory, disk, IO, run, runtime
|
||||
|
||||
# Runtime Metrics
|
||||
|
||||
Linux Containers rely on [control
|
||||
groups](https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt)
|
||||
which not only track groups of processes, but also expose metrics about
|
||||
CPU, memory, and block I/O usage. You can access those metrics and
|
||||
obtain network usage metrics as well. This is relevant for "pure" LXC
|
||||
containers, as well as for Docker containers.
|
||||
|
||||
## Control Groups
|
||||
|
||||
Control groups are exposed through a pseudo-filesystem. In recent
|
||||
distros, you should find this filesystem under
|
||||
`/sys/fs/cgroup`. Under that directory, you will see
|
||||
multiple sub-directories, called devices, freezer, blkio, etc.; each
|
||||
sub-directory actually corresponds to a different cgroup hierarchy.
|
||||
|
||||
On older systems, the control groups might be mounted on
|
||||
`/cgroup`, without distinct hierarchies. In that
|
||||
case, instead of seeing the sub-directories, you will see a bunch of
|
||||
files in that directory, and possibly some directories corresponding to
|
||||
existing containers.
|
||||
|
||||
To figure out where your control groups are mounted, you can run:
|
||||
|
||||
grep cgroup /proc/mounts
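
On a recent system with distinct hierarchies, the output might look
something like this (an illustration; the exact mount options vary
between distros):

    cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0
    cgroup /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0
    cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0
    cgroup /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0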
|
||||
|
||||
## Enumerating Cgroups
|
||||
|
||||
You can look into `/proc/cgroups` to see the
|
||||
different control group subsystems known to the system, the hierarchy
|
||||
they belong to, and how many groups they contain.
|
||||
|
||||
You can also look at `/proc/<pid>/cgroup` to see
|
||||
which control groups a process belongs to. The control group will be
|
||||
shown as a path relative to the root of the hierarchy mountpoint; e.g.
|
||||
`/` means “this process has not been assigned into a
|
||||
particular group”, while `/lxc/pumpkin` means that
|
||||
the process is likely to be a member of a container named
|
||||
`pumpkin`.
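
Each line of `/proc/<pid>/cgroup` has the form
`<hierarchy-id>:<subsystem>:<path>`, so for a process inside that
hypothetical `pumpkin` container you might see something like this
(illustrative output; `$PID` is any process in the container):

    $ cat /proc/$PID/cgroup
    4:blkio:/lxc/pumpkin
    3:cpuacct:/lxc/pumpkin
    2:memory:/lxc/pumpkin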
|
||||
|
||||
## Finding the Cgroup for a Given Container
|
||||
|
||||
For each container, one cgroup will be created in each hierarchy. On
|
||||
older systems with older versions of the LXC userland tools, the name of
|
||||
the cgroup will be the name of the container. With more recent versions
|
||||
of the LXC tools, the cgroup will be `lxc/<container_name>.`
|
||||
|
||||
For Docker containers using cgroups, the container name will be the full
|
||||
ID or long ID of the container. If a container shows up as ae836c95b4c3
|
||||
in `docker ps`, its long ID might be something like
|
||||
`ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can look it up with `docker inspect`
|
||||
or `docker ps -notrunc`.
|
||||
|
||||
Putting everything together to look at the memory metrics for a Docker
|
||||
container, take a look at
|
||||
`/sys/fs/cgroup/memory/lxc/<longid>/`.
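
For instance, assuming the long ID is stored in the shell variable `$CID`
(an assumption made for this sketch), the memory statistics described in
the next section can be read with:

    $ cat /sys/fs/cgroup/memory/lxc/$CID/memory.stat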
|
||||
|
||||
## Metrics from Cgroups: Memory, CPU, Block IO
|
||||
|
||||
For each subsystem (memory, CPU, and block I/O), you will find one or
|
||||
more pseudo-files containing statistics.
|
||||
|
||||
### Memory Metrics: `memory.stat`
|
||||
|
||||
Memory metrics are found in the "memory" cgroup. Note that the memory
|
||||
control group adds a little overhead, because it does very fine-grained
|
||||
accounting of the memory usage on your host. Therefore, many distros
|
||||
choose not to enable it by default. Generally, to enable it, all you have
|
||||
to do is to add some kernel command-line parameters:
|
||||
`cgroup_enable=memory swapaccount=1`.
|
||||
|
||||
The metrics are in the pseudo-file `memory.stat`.
|
||||
Here is what it will look like:
|
||||
|
||||
cache 11492564992
|
||||
rss 1930993664
|
||||
mapped_file 306728960
|
||||
pgpgin 406632648
|
||||
pgpgout 403355412
|
||||
swap 0
|
||||
pgfault 728281223
|
||||
pgmajfault 1724
|
||||
inactive_anon 46608384
|
||||
active_anon 1884520448
|
||||
inactive_file 7003344896
|
||||
active_file 4489052160
|
||||
unevictable 32768
|
||||
hierarchical_memory_limit 9223372036854775807
|
||||
hierarchical_memsw_limit 9223372036854775807
|
||||
total_cache 11492564992
|
||||
total_rss 1930993664
|
||||
total_mapped_file 306728960
|
||||
total_pgpgin 406632648
|
||||
total_pgpgout 403355412
|
||||
total_swap 0
|
||||
total_pgfault 728281223
|
||||
total_pgmajfault 1724
|
||||
total_inactive_anon 46608384
|
||||
total_active_anon 1884520448
|
||||
total_inactive_file 7003344896
|
||||
total_active_file 4489052160
|
||||
total_unevictable 32768
|
||||
|
||||
The first half (without the `total_` prefix)
|
||||
contains statistics relevant to the processes within the cgroup,
|
||||
excluding sub-cgroups. The second half (with the `total_`
|
||||
prefix) includes sub-cgroups as well.
|
||||
|
||||
Some metrics are "gauges", i.e. values that can increase or decrease
|
||||
(e.g. swap, the amount of swap space used by the members of the cgroup).
|
||||
Some others are "counters", i.e. values that can only go up, because
|
||||
they represent occurrences of a specific event (e.g. pgfault, which
|
||||
indicates the number of page faults which happened since the creation of
|
||||
the cgroup; this number can never decrease).
|
||||
|
||||
cache
|
||||
: the amount of memory used by the processes of this control group
|
||||
that can be associated precisely with a block on a block device.
|
||||
When you read from and write to files on disk, this amount will
|
||||
increase. This will be the case if you use "conventional" I/O
|
||||
(`open`, `read`,
|
||||
`write` syscalls) as well as mapped files (with
|
||||
`mmap`). It also accounts for the memory used by
|
||||
`tmpfs` mounts, though the reasons are unclear.
|
||||
rss
|
||||
: the amount of memory that *doesn’t* correspond to anything on disk:
|
||||
stacks, heaps, and anonymous memory maps.
|
||||
mapped\_file
|
||||
: indicates the amount of memory mapped by the processes in the
|
||||
control group. It doesn’t give you information about *how much*
|
||||
memory is used; it rather tells you *how* it is used.
|
||||
pgfault and pgmajfault
|
||||
: indicate the number of times that a process of the cgroup triggered
|
||||
a "page fault" and a "major fault", respectively. A page fault
|
||||
happens when a process accesses a part of its virtual memory space
|
||||
which is nonexistent or protected. The former can happen if the
|
||||
process is buggy and tries to access an invalid address (it will
|
||||
then be sent a `SIGSEGV` signal, typically
|
||||
killing it with the famous `Segmentation fault`
|
||||
message). The latter can happen when the process reads from a memory
|
||||
zone which has been swapped out, or which corresponds to a mapped
|
||||
file: in that case, the kernel will load the page from disk, and let
|
||||
the CPU complete the memory access. It can also happen when the
|
||||
process writes to a copy-on-write memory zone: likewise, the kernel
|
||||
will preempt the process, duplicate the memory page, and resume the
|
||||
write operation on the process’ own copy of the page. "Major" faults
|
||||
happen when the kernel actually has to read the data from disk. When
|
||||
it just has to duplicate an existing page, or allocate an empty
|
||||
page, it’s a regular (or "minor") fault.
|
||||
swap
|
||||
: the amount of swap currently used by the processes in this cgroup.
|
||||
active\_anon and inactive\_anon
|
||||
: the amount of *anonymous* memory that has been identified as
|
||||
respectively *active* and *inactive* by the kernel. "Anonymous"
|
||||
memory is the memory that is *not* linked to disk pages. In other
|
||||
words, that’s the equivalent of the rss counter described above. In
|
||||
fact, the very definition of the rss counter is **active\_anon** +
|
||||
**inactive\_anon** - **tmpfs** (where tmpfs is the amount of memory
|
||||
used up by `tmpfs` filesystems mounted by this
|
||||
control group). Now, what’s the difference between "active" and
|
||||
"inactive"? Pages are initially "active"; and at regular intervals,
|
||||
the kernel sweeps over the memory, and tags some pages as
|
||||
"inactive". Whenever they are accessed again, they are immediately
|
||||
retagged "active". When the kernel is almost out of memory, and time
|
||||
comes to swap out to disk, the kernel will swap "inactive" pages.
|
||||
active\_file and inactive\_file
|
||||
: cache memory, with *active* and *inactive* similar to the *anon*
|
||||
memory above. The exact formula is cache = **active\_file** +
|
||||
**inactive\_file** + **tmpfs**. The exact rules used by the kernel
|
||||
to move memory pages between active and inactive sets are different
|
||||
from the ones used for anonymous memory, but the general principle
|
||||
is the same. Note that when the kernel needs to reclaim memory, it
|
||||
is cheaper to reclaim a clean (=non modified) page from this pool,
|
||||
since it can be reclaimed immediately (while anonymous pages and
|
||||
dirty/modified pages have to be written to disk first).
|
||||
unevictable
|
||||
: the amount of memory that cannot be reclaimed; generally, it will
|
||||
account for memory that has been "locked" with `mlock`.
|
||||
It is often used by crypto frameworks to make sure that
|
||||
secret keys and other sensitive material never gets swapped out to
|
||||
disk.
|
||||
memory and memsw limits
|
||||
: These are not really metrics, but a reminder of the limits applied
|
||||
to this cgroup. The first one indicates the maximum amount of
|
||||
physical memory that can be used by the processes of this control
|
||||
group; the second one indicates the maximum amount of RAM+swap.
|
||||
|
||||
Accounting for memory in the page cache is very complex. If two
|
||||
processes in different control groups both read the same file
|
||||
(ultimately relying on the same blocks on disk), the corresponding
|
||||
memory charge will be split between the control groups. It’s nice, but
|
||||
it also means that when a cgroup is terminated, it could increase the
|
||||
memory usage of another cgroup, because they are not splitting the cost
|
||||
anymore for those memory pages.
|
||||
|
||||
### CPU metrics: `cpuacct.stat`
|
||||
|
||||
Now that we’ve covered memory metrics, everything else will look very
|
||||
simple in comparison. CPU metrics will be found in the
|
||||
`cpuacct` controller.
|
||||
|
||||
For each container, you will find a pseudo-file `cpuacct.stat`,
|
||||
containing the CPU usage accumulated by the processes of the container,
|
||||
broken down between `user` and `system` time. If you’re not familiar
|
||||
with the distinction, `user` is the time during which the processes were
|
||||
in direct control of the CPU (i.e. executing process code), and `system`
|
||||
is the time during which the CPU was executing system calls on behalf of
|
||||
those processes.
|
||||
|
||||
Those times are expressed in ticks of 1/100th of a second. Actually,
|
||||
they are expressed in "user jiffies". There are `USER_HZ`
|
||||
*"jiffies"* per second, and on x86 systems,
|
||||
`USER_HZ` is 100. This used to map exactly to the
|
||||
number of scheduler "ticks" per second; but with the advent of higher
|
||||
frequency scheduling, as well as [tickless
|
||||
kernels](http://lwn.net/Articles/549580/), the number of kernel ticks
|
||||
wasn’t relevant anymore. It stuck around anyway, mainly for legacy and
|
||||
compatibility reasons.
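
Again assuming the container's long ID is in `$CID`, reading the counters
looks like this (the numbers are illustrative):

    $ cat /sys/fs/cgroup/cpuacct/lxc/$CID/cpuacct.stat
    user 46011
    system 22496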
|
||||
|
||||
### Block I/O metrics
|
||||
|
||||
Block I/O is accounted in the `blkio` controller.
|
||||
Different metrics are scattered across different files. While you can
|
||||
find in-depth details in the
|
||||
[blkio-controller](https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt)
|
||||
file in the kernel documentation, here is a short list of the most
|
||||
relevant ones:
|
||||
|
||||
blkio.sectors
|
||||
: contains the number of 512-byte sectors read and written by the
|
||||
processes that are members of the cgroup, device by device. Reads and writes
|
||||
are merged in a single counter.
|
||||
blkio.io\_service\_bytes
|
||||
: indicates the number of bytes read and written by the cgroup. It has
|
||||
4 counters per device, because for each device, it differentiates
|
||||
between synchronous vs. asynchronous I/O, and reads vs. writes.
|
||||
blkio.io\_serviced
|
||||
: the number of I/O operations performed, regardless of their size. It
|
||||
also has 4 counters per device.
|
||||
blkio.io\_queued
|
||||
: indicates the number of I/O operations currently queued for this
|
||||
cgroup. In other words, if the cgroup isn’t doing any I/O, this will
|
||||
be zero. Note that the opposite is not true. In other words, if
|
||||
there is no I/O queued, it does not mean that the cgroup is idle
|
||||
(I/O-wise). It could be doing purely synchronous reads on an
|
||||
otherwise quiescent device, which is therefore able to handle them
|
||||
immediately, without queuing. Also, while it is helpful to figure
|
||||
out which cgroup is putting stress on the I/O subsystem, keep in
|
||||
mind that it is a relative quantity. Even if a process group does
|
||||
not perform more I/O, its queue size can increase just because the
|
||||
device load increases because of other devices.
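
As a hedged illustration (again with `$CID` holding the container's long
ID), `blkio.io_service_bytes` lists each device in `major:minor` form,
followed by the four counters described above:

    $ cat /sys/fs/cgroup/blkio/lxc/$CID/blkio.io_service_bytes
    8:0 Read 1044480
    8:0 Write 4096
    8:0 Sync 4096
    8:0 Async 1044480
    8:0 Total 1048576
    Total 1048576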
|
||||
|
||||
## Network Metrics
|
||||
|
||||
Network metrics are not exposed directly by control groups. There is a
|
||||
good explanation for that: network interfaces exist within the context
|
||||
of *network namespaces*. The kernel could probably accumulate metrics
|
||||
about packets and bytes sent and received by a group of processes, but
|
||||
those metrics wouldn’t be very useful. You want per-interface metrics
|
||||
(because traffic happening on the local `lo`
|
||||
interface doesn’t really count). But since processes in a single cgroup
|
||||
can belong to multiple network namespaces, those metrics would be harder
|
||||
to interpret: multiple network namespaces means multiple `lo`
|
||||
interfaces, potentially multiple `eth0`
|
||||
interfaces, etc.; so this is why there is no easy way to gather network
|
||||
metrics with control groups.
|
||||
|
||||
Instead we can gather network metrics from other sources:
|
||||
|
||||
### IPtables
|
||||
|
||||
IPtables (or rather, the netfilter framework for which iptables is just
|
||||
an interface) can do some serious accounting.
|
||||
|
||||
For instance, you can set up a rule to account for the outbound HTTP
|
||||
traffic on a web server:
|
||||
|
||||
iptables -I OUTPUT -p tcp --sport 80
|
||||
|
||||
There is no `-j` or `-g` flag,
|
||||
so the rule will just count matched packets and go to the following
|
||||
rule.
|
||||
|
||||
Later, you can check the values of the counters, with:
|
||||
|
||||
iptables -nxvL OUTPUT
|
||||
|
||||
Technically, `-n` is not required, but it will
|
||||
prevent iptables from doing DNS reverse lookups, which are probably
|
||||
useless in this scenario.
|
||||
|
||||
Counters include packets and bytes. If you want to set up metrics for
|
||||
container traffic like this, you could execute a `for`
|
||||
loop to add two `iptables` rules per
|
||||
container IP address (one in each direction), in the `FORWARD`
|
||||
chain. This will only meter traffic going through the NAT
|
||||
layer; you will also have to add traffic going through the userland
|
||||
proxy.
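
A minimal sketch of those per-container rules, assuming the container's IP
address is 172.17.0.2 (an illustrative value):

    # count traffic in both directions for this container
    iptables -I FORWARD -s 172.17.0.2
    iptables -I FORWARD -d 172.17.0.2

As with the HTTP rule above, there is no `-j` target, so these rules only
count matched packets before falling through to the next rule.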
|
||||
|
||||
Then, you will need to check those counters on a regular basis. If you
|
||||
happen to use `collectd`, there is a nice plugin to
|
||||
automate iptables counters collection.
|
||||
|
||||
### Interface-level counters
|
||||
|
||||
Since each container has a virtual Ethernet interface, you might want to
|
||||
check directly the TX and RX counters of this interface. You will notice
|
||||
that each container is associated to a virtual Ethernet interface in
|
||||
your host, with a name like `vethKk8Zqi`. Figuring
|
||||
out which interface corresponds to which container is, unfortunately,
|
||||
difficult.
|
||||
|
||||
But for now, the best way is to check the metrics *from within the
|
||||
containers*. To accomplish this, you can run an executable from the host
|
||||
environment within the network namespace of a container using **ip-netns
|
||||
magic**.
|
||||
|
||||
The `ip-netns exec` command will let you execute any
|
||||
program (present in the host system) within any network namespace
|
||||
visible to the current process. This means that your host will be able
|
||||
to enter the network namespace of your containers, but your containers
|
||||
won’t be able to access the host, nor their sibling containers.
|
||||
Containers will be able to “see” and affect their sub-containers,
|
||||
though.
|
||||
|
||||
The exact format of the command is:
|
||||
|
||||
ip netns exec <nsname> <command...>
|
||||
|
||||
For example:
|
||||
|
||||
ip netns exec mycontainer netstat -i
|
||||
|
||||
`ip netns` finds the "mycontainer" container by
|
||||
using namespaces pseudo-files. Each process belongs to one network
|
||||
namespace, one PID namespace, one `mnt` namespace,
|
||||
etc., and those namespaces are materialized under
|
||||
`/proc/<pid>/ns/`. For example, the network
|
||||
namespace of PID 42 is materialized by the pseudo-file
|
||||
`/proc/42/ns/net`.
|
||||
|
||||
When you run `ip netns exec mycontainer ...`, it
|
||||
expects `/var/run/netns/mycontainer` to be one of
|
||||
those pseudo-files. (Symlinks are accepted.)
|
||||
|
||||
In other words, to execute a command within the network namespace of a
|
||||
container, we need to:
|
||||
|
||||
- Find out the PID of any process within the container that we want to
|
||||
investigate;
|
||||
- Create a symlink from `/var/run/netns/<somename>`
|
||||
to `/proc/<thepid>/ns/net`
|
||||
- Execute `ip netns exec <somename> ....`
|
||||
|
||||
Please review [*Enumerating Cgroups*](#enumerating-cgroups) to learn how to find
|
||||
the cgroup of a process running in the container whose network usage you
|
||||
want to measure. From there, you can examine the pseudo-file named
|
||||
`tasks`, which contains the PIDs that are in the
|
||||
control group (i.e. in the container). Pick any one of them.
|
||||
|
||||
Putting everything together, if the "short ID" of a container is held in
|
||||
the environment variable `$CID`, then you can do
|
||||
this:
|
||||
|
||||
TASKS=/sys/fs/cgroup/devices/$CID*/tasks
|
||||
PID=$(head -n 1 $TASKS)
|
||||
mkdir -p /var/run/netns
|
||||
ln -sf /proc/$PID/ns/net /var/run/netns/$CID
|
||||
ip netns exec $CID netstat -i
|
||||
|
||||
## Tips for high-performance metric collection
|
||||
|
||||
Note that running a new process each time you want to update metrics is
|
||||
(relatively) expensive. If you want to collect metrics at high
|
||||
resolutions, and/or over a large number of containers (think 1000
|
||||
containers on a single host), you do not want to fork a new process each
|
||||
time.
|
||||
|
||||
Here is how to collect metrics from a single process. You will have to
|
||||
write your metric collector in C (or any language that lets you do
|
||||
low-level system calls). You need to use a special system call,
|
||||
`setns()`, which lets the current process enter any
|
||||
arbitrary namespace. It requires, however, an open file descriptor to
|
||||
the namespace pseudo-file (remember: that’s the pseudo-file in
|
||||
`/proc/<pid>/ns/net`).
|
||||
|
||||
However, there is a catch: you must not keep this file descriptor open.
|
||||
If you do, when the last process of the control group exits, the
|
||||
namespace will not be destroyed, and its network resources (like the
|
||||
virtual interface of the container) will stay around forever (or until
|
||||
you close that file descriptor).
|
||||
|
||||
The right approach would be to keep track of the first PID of each
|
||||
container, and re-open the namespace pseudo-file each time.
|
||||
|
||||
## Collecting metrics when a container exits
|
||||
|
||||
Sometimes, you do not care about real time metric collection, but when a
|
||||
container exits, you want to know how much CPU, memory, etc. it has
|
||||
used.
|
||||
|
||||
Docker makes this difficult because it relies on `lxc-start`, which
|
||||
carefully cleans up after itself, but it is still possible. It is
|
||||
usually easier to collect metrics at regular intervals (e.g. every
|
||||
minute, with the collectd LXC plugin) and rely on that instead.
|
||||
|
||||
But, if you’d still like to gather the stats when a container stops,
|
||||
here is how:
|
||||
|
||||
For each container, start a collection process, and move it to the
|
||||
control groups that you want to monitor by writing its PID to the tasks
|
||||
file of the cgroup. The collection process should periodically re-read
|
||||
the tasks file to check if it’s the last process of the control group.
|
||||
(If you also want to collect network statistics as explained in the
|
||||
previous section, you should also move the process to the appropriate
|
||||
network namespace.)
|
||||
|
||||
When the container exits, `lxc-start` will try to
|
||||
delete the control groups. It will fail, since the control group is
|
||||
still in use; but that’s fine. Your process should now detect that it is
|
||||
the only one remaining in the group. Now is the right time to collect
|
||||
all the metrics you need!
|
||||
|
||||
Finally, your process should move itself back to the root control group,
|
||||
and remove the container control group. To remove a control group, just
|
||||
`rmdir` its directory. It’s counter-intuitive to
|
||||
`rmdir` a directory as it still contains files; but
|
||||
remember that this is a pseudo-filesystem, so usual rules don’t apply.
|
||||
After the cleanup is done, the collection process can exit safely.
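
Here is a minimal shell sketch of that life cycle, assuming `$CID` holds
the container's long ID and that only the memory cgroup is of interest:

    CGROUP=/sys/fs/cgroup/memory/lxc/$CID
    echo $$ > $CGROUP/tasks                 # join the container's cgroup
    while [ $(wc -l < $CGROUP/tasks) -gt 1 ]; do
        sleep 1                             # wait until we are the last one
    done
    cat $CGROUP/memory.stat                 # collect the final metrics
    echo $$ > /sys/fs/cgroup/memory/tasks   # move back to the root cgroup
    rmdir $CGROUP                           # remove the empty control group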
|
258
docs/sources/articles/security.md
Normal file
|
@@ -0,0 +1,258 @@
|
|||
page_title: Docker Security
|
||||
page_description: Review of the Docker Daemon attack surface
|
||||
page_keywords: Docker, Docker documentation, security
|
||||
|
||||
# Docker Security
|
||||
|
||||
> *Adapted from* [Containers & Docker: How Secure are
|
||||
> They?](http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/)
|
||||
|
||||
There are three major areas to consider when reviewing Docker security:
|
||||
|
||||
- the intrinsic security of containers, as implemented by kernel
|
||||
namespaces and cgroups;
|
||||
- the attack surface of the Docker daemon itself;
|
||||
- the "hardening" security features of the kernel and how they
|
||||
interact with containers.
|
||||
|
||||
## Kernel Namespaces
|
||||
|
||||
Docker containers are essentially LXC containers, and they come with the
|
||||
same security features. When you start a container with
|
||||
`docker run`, behind the scenes Docker uses
|
||||
`lxc-start` to execute the Docker container. This
|
||||
creates a set of namespaces and control groups for the container. Those
|
||||
namespaces and control groups are not created by Docker itself, but by
|
||||
`lxc-start`. This means that as the LXC userland
|
||||
tools evolve (and provide additional namespaces and isolation features),
|
||||
Docker will automatically make use of them.
|
||||
|
||||
**Namespaces provide the first and most straightforward form of
|
||||
isolation**: processes running within a container cannot see, and even
|
||||
less affect, processes running in another container, or in the host
|
||||
system.
|
||||
|
||||
**Each container also gets its own network stack**, meaning that a
|
||||
container doesn’t get a privileged access to the sockets or interfaces
|
||||
of another container. Of course, if the host system is set up
|
||||
accordingly, containers can interact with each other through their
|
||||
respective network interfaces — just like they can interact with
|
||||
external hosts. When you specify public ports for your containers or use
|
||||
[*links*](../../use/working_with_links_names/#working-with-links-names)
|
||||
then IP traffic is allowed between containers. They can ping each other,
|
||||
send/receive UDP packets, and establish TCP connections, but that can be
|
||||
restricted if necessary. From a network architecture point of view, all
|
||||
containers on a given Docker host are sitting on bridge interfaces. This
|
||||
means that they are just like physical machines connected through a
|
||||
common Ethernet switch; no more, no less.
|
||||
|
||||
How mature is the code providing kernel namespaces and private
|
||||
networking? Kernel namespaces were introduced [between kernel version
|
||||
2.6.15 and
|
||||
2.6.26](http://lxc.sourceforge.net/index.php/about/kernel-namespaces/).
|
||||
This means that since July 2008 (date of the 2.6.26 release, now 5 years
|
||||
ago), namespace code has been exercised and scrutinized on a large
|
||||
number of production systems. And there is more: the design and
|
||||
inspiration for the namespaces code are even older. Namespaces are
|
||||
actually an effort to reimplement the features of
|
||||
[OpenVZ](http://en.wikipedia.org/wiki/OpenVZ) in such a way that they
|
||||
could be merged within the mainstream kernel. And OpenVZ was initially
|
||||
released in 2005, so both the design and the implementation are pretty
|
||||
mature.
|
||||
|
||||
## Control Groups
|
||||
|
||||
Control Groups are the other key component of Linux Containers. They
|
||||
implement resource accounting and limiting. They provide a lot of very
|
||||
useful metrics, but they also help to ensure that each container gets
|
||||
its fair share of memory, CPU, disk I/O; and, more importantly, that a
|
||||
single container cannot bring the system down by exhausting one of those
|
||||
resources.
|
||||
|
||||
So while they do not play a role in preventing one container from
|
||||
accessing or affecting the data and processes of another container, they
|
||||
are essential to fend off some denial-of-service attacks. They are
|
||||
particularly important on multi-tenant platforms, like public and
|
||||
private PaaS, to guarantee a consistent uptime (and performance) even
|
||||
when some applications start to misbehave.
|
||||
|
||||
Control Groups have been around for a while as well: the code was
|
||||
started in 2006, and initially merged in kernel 2.6.24.
|
||||
|
||||
## Docker Daemon Attack Surface
|
||||
|
||||
Running containers (and applications) with Docker implies running the
|
||||
Docker daemon. This daemon currently requires root privileges, and you
|
||||
should therefore be aware of some important details.
|
||||
|
||||
First of all, **only trusted users should be allowed to control your
|
||||
Docker daemon**. This is a direct consequence of some powerful Docker
|
||||
features. Specifically, Docker allows you to share a directory between
|
||||
the Docker host and a guest container; and it allows you to do so
|
||||
without limiting the access rights of the container. This means that you
|
||||
can start a container where the `/host` directory
|
||||
will be the `/` directory on your host; and the
|
||||
container will be able to alter your host filesystem without any
|
||||
restriction. This sounds crazy? Well, you have to know that **all
|
||||
virtualization systems allowing filesystem resource sharing behave the
|
||||
same way**. Nothing prevents you from sharing your root filesystem (or
|
||||
even your root block device) with a virtual machine.
|
||||
|
||||
This has a strong security implication: if you instrument Docker from
|
||||
e.g. a web server to provision containers through an API, you should be
|
||||
even more careful than usual with parameter checking, to make sure that
|
||||
a malicious user cannot pass crafted parameters causing Docker to create
|
||||
arbitrary containers.
|
||||
|
||||
For this reason, the REST API endpoint (used by the Docker CLI to
|
||||
communicate with the Docker daemon) changed in Docker 0.5.2, and now
|
||||
uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the
|
||||
latter being prone to cross-site-scripting attacks if you happen to run
|
||||
Docker directly on your local machine, outside of a VM). You can then
|
||||
use traditional UNIX permission checks to limit access to the control
|
||||
socket.
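
For example (a sketch only; the group name is whatever group you choose to
trust), the socket can be restricted to root plus a dedicated group:

    $ sudo chgrp docker /var/run/docker.sock
    $ sudo chmod 660 /var/run/docker.sock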
|
||||
|
||||
You can also expose the REST API over HTTP if you explicitly decide so.
|
||||
However, if you do that, keeping in mind the above-mentioned security
|
||||
implication, you should ensure that it will be reachable only from a
|
||||
trusted network or VPN; or protected with e.g. `stunnel`
|
||||
and client SSL certificates.
|
||||
|
||||
Recent improvements in Linux namespaces will soon make it possible to run
|
||||
full-featured containers without root privileges, thanks to the new user
|
||||
namespace. This is covered in detail
|
||||
[here](http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/).
|
||||
Moreover, this will solve the problem caused by sharing filesystems
|
||||
between host and guest, since the user namespace allows users within
|
||||
containers (including the root user) to be mapped to other users in the
|
||||
host system.
|
||||
|
||||
The end goal for Docker is therefore to implement two additional
|
||||
security improvements:
|
||||
|
||||
- map the root user of a container to a non-root user of the Docker
|
||||
host, to mitigate the effects of a container-to-host privilege
|
||||
escalation;
|
||||
- allow the Docker daemon to run without root privileges, and delegate
|
||||
operations requiring those privileges to well-audited sub-processes,
|
||||
each with its own (very limited) scope: virtual network setup,
|
||||
filesystem management, etc.
|
||||
|
||||
Finally, if you run Docker on a server, it is recommended to run
|
||||
exclusively Docker in the server, and move all other services within
|
||||
containers controlled by Docker. Of course, it is fine to keep your
|
||||
favorite admin tools (probably at least an SSH server), as well as
|
||||
existing monitoring/supervision processes (e.g. NRPE, collectd, etc).
|
||||
|
||||
## Linux Kernel Capabilities
|
||||
|
||||
By default, Docker starts containers with a very restricted set of
|
||||
capabilities. What does that mean?
|
||||
|
||||
Capabilities turn the binary "root/non-root" dichotomy into a
|
||||
fine-grained access control system. Processes (like web servers) that
|
||||
just need to bind on a port below 1024 do not have to run as root: they
|
||||
can just be granted the `net_bind_service`
|
||||
capability instead. And there are many other capabilities, for almost
|
||||
all the specific areas where root privileges are usually needed.
|
||||
|
||||
This means a lot for container security; let’s see why!
|
||||
|
||||
Your average server (bare metal or virtual machine) needs to run a bunch
|
||||
of processes as root. Those typically include SSH, cron, syslogd;
|
||||
hardware management tools (to e.g. load modules), network configuration
|
||||
tools (to handle e.g. DHCP, WPA, or VPNs), and much more. A container is
|
||||
very different, because almost all of those tasks are handled by the
|
||||
infrastructure around the container:
|
||||
|
||||
- SSH access will typically be managed by a single server running in
|
||||
the Docker host;
|
||||
- `cron`, when necessary, should run as a user
|
||||
process, dedicated and tailored for the app that needs its
|
||||
scheduling service, rather than as a platform-wide facility;
|
||||
log management will also typically be handled by Docker, or by
|
||||
third-party services like Loggly or Splunk;
|
||||
- hardware management is irrelevant, meaning that you never need to
|
||||
run `udevd` or equivalent daemons within
|
||||
containers;
|
||||
- network management happens outside of the containers, enforcing
|
||||
separation of concerns as much as possible, meaning that a container
|
||||
should never need to perform `ifconfig`,
|
||||
`route`, or `ip` commands (except when a container
|
||||
is specifically engineered to behave like a router or firewall, of
|
||||
course).
|
||||
|
||||
This means that in most cases, containers will not need "real" root
|
||||
privileges *at all*. And therefore, containers can run with a reduced
|
||||
capability set; meaning that "root" within a container has much less
|
||||
privileges than the real "root". For instance, it is possible to:
|
||||
|
||||
- deny all "mount" operations;
|
||||
- deny access to raw sockets (to prevent packet spoofing);
|
||||
- deny access to some filesystem operations, like creating new device
|
||||
nodes, changing the owner of files, or altering attributes
|
||||
(including the immutable flag);
|
||||
- deny module loading;
|
||||
- and many others.
|
||||
|
||||
This means that even if an intruder manages to escalate to root within a
|
||||
container, it will be much harder to do serious damage, or to escalate
|
||||
to the host.
|
||||
|
||||
This won’t affect regular web apps; but malicious users will find that
|
||||
the arsenal at their disposal has shrunk considerably! You can see [the
|
||||
list of dropped capabilities in the Docker
|
||||
code](https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97),
|
||||
and a full list of available capabilities in [Linux
|
||||
manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
|
||||
|
||||
Of course, you can always enable extra capabilities if you really need
|
||||
them (for instance, if you want to use a FUSE-based filesystem), but by
|
||||
default, Docker containers will be locked down to ensure maximum safety.
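
At the time of writing, the coarse-grained way to grant those extra
capabilities is to start the container in privileged mode, which disables
most of the confinement described above, so use it sparingly:

    $ sudo docker run --privileged -t -i ubuntu bash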
|
||||
|
||||
## Other Kernel Security Features
|
||||
|
||||
Capabilities are just one of the many security features provided by
|
||||
modern Linux kernels. It is also possible to leverage existing,
|
||||
well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with
|
||||
Docker.
|
||||
|
||||
While Docker currently only enables capabilities, it doesn’t interfere
|
||||
with the other systems. This means that there are many different ways to
|
||||
harden a Docker host. Here are a few examples.
|
||||
|
||||
- You can run a kernel with GRSEC and PAX. This will add many safety
|
||||
checks, both at compile-time and run-time; it will also defeat many
|
||||
exploits, thanks to techniques like address randomization. It
|
||||
doesn’t require Docker-specific configuration, since those security
|
||||
features apply system-wide, independently of containers.
|
||||
- If your distribution comes with security model templates for LXC
|
||||
containers, you can use them out of the box. For instance, Ubuntu
|
||||
comes with AppArmor templates for LXC, and those templates provide
|
||||
an extra safety net (even though it overlaps greatly with
|
||||
capabilities).
|
||||
- You can define your own policies using your favorite access control
|
||||
mechanism. Since Docker containers are standard LXC containers,
|
||||
there is nothing “magic” or specific to Docker.
|
||||
|
||||
Just like there are many third-party tools to augment Docker containers
|
||||
with e.g. special network topologies or shared filesystems, you can
|
||||
expect to see tools to harden existing Docker containers without
|
||||
affecting Docker’s core.
|
||||
|
||||
## Conclusions
|
||||
|
||||
Docker containers are, by default, quite secure; especially if you take
|
||||
care of running your processes inside the containers as non-privileged
|
||||
users (i.e. non root).
|
||||
|
||||
You can add an extra layer of safety by enabling Apparmor, SELinux,
|
||||
GRSEC, or your favorite hardening solution.
|
||||
|
||||
Last but not least, if you see interesting security features in other
|
||||
containerization systems, you will be able to implement them as well
|
||||
with Docker, since everything is provided by the kernel anyway.
|
||||
|
||||
For more context and especially for comparisons with VMs and other
|
||||
container systems, please also see the [original blog
|
||||
post](http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/).
|
7
docs/sources/contributing.md
Normal file
|
@@ -0,0 +1,7 @@
|
|||
# Contributing
|
||||
|
||||
## Contents:
|
||||
|
||||
- [Contributing to Docker](contributing/)
|
||||
- [Setting Up a Dev Environment](devenvironment/)
|
||||
|
24
docs/sources/contributing/contributing.md
Normal file
|
@@ -0,0 +1,24 @@
|
|||
page_title: Contribution Guidelines
|
||||
page_description: Contribution guidelines: create issues, conventions, pull requests
|
||||
page_keywords: contributing, docker, documentation, help, guideline
|
||||
|
||||
# Contributing to Docker
|
||||
|
||||
Want to hack on Docker? Awesome!
|
||||
|
||||
The repository includes [all the instructions you need to get
|
||||
started](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
|
||||
|
||||
The [developer environment
|
||||
Dockerfile](https://github.com/dotcloud/docker/blob/master/Dockerfile)
|
||||
specifies the tools and versions used to test and build Docker.
|
||||
|
||||
If you’re making changes to the documentation, see the
|
||||
[README.md](https://github.com/dotcloud/docker/blob/master/docs/README.md).
|
||||
|
||||
The [documentation environment
|
||||
Dockerfile](https://github.com/dotcloud/docker/blob/master/docs/Dockerfile)
|
||||
specifies the tools and versions used to build the Documentation.
|
||||
|
||||
Further interesting details can be found in the [Packaging
|
||||
hints](https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md).
|
147
docs/sources/contributing/devenvironment.md
Normal file
|
@@ -0,0 +1,147 @@
|
|||
page_title: Setting Up a Dev Environment
|
||||
page_description: Guides on how to contribute to docker
|
||||
page_keywords: Docker, documentation, developers, contributing, dev environment
|
||||
|
||||
# Setting Up a Dev Environment
|
||||
|
||||
To make it easier to contribute to Docker, we provide a standard
|
||||
development environment. It is important that the same environment be
|
||||
used for all tests, builds and releases. The standard development
|
||||
environment defines all build dependencies: system libraries and
|
||||
binaries, go environment, go dependencies, etc.
|
||||
|
||||
## Install Docker
|
||||
|
||||
Docker’s build environment itself is a Docker container, so the first
|
||||
step is to install Docker on your system.
|
||||
|
||||
You can follow the [install instructions most relevant to your
|
||||
system](https://docs.docker.io/en/latest/installation/). Make sure you
|
||||
have a working, up-to-date docker installation, then continue to the
|
||||
next step.
|
||||
|
||||
## Install tools used for this tutorial
|
||||
|
||||
Install `git`; honest, it’s very good. You can use
|
||||
other ways to get the Docker source, but they’re not anywhere near as
|
||||
easy.
|
||||
|
||||
Install `make`. This tutorial uses our base Makefile
|
||||
to kick off the docker containers in a repeatable and consistent way.
|
||||
Again, you can do it in other ways but you need to do more work.
|
||||
|
||||
## Check out the Source
|
||||
|
||||
git clone http://git@github.com/dotcloud/docker
|
||||
cd docker
|
||||
|
||||
To check out a different revision, just use `git checkout`
|
||||
with the name of the branch or revision number.
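
For example (the tag name is only an illustration):

    git checkout v0.9.0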
|
||||
|
||||
## Build the Environment
|
||||
|
||||
The following command will build a development environment using the
|
||||
Dockerfile in the current directory. Essentially, it will install all
|
||||
the build and runtime dependencies necessary to build and test Docker.
|
||||
This command will take some time to complete when you first execute it.
|
||||
|
||||
sudo make build
|
||||
|
||||
If the build is successful, congratulations! You have produced a clean
|
||||
build of docker, neatly encapsulated in a standard build environment.
|
||||
|
||||
## Build the Docker Binary
|
||||
|
||||
To create the Docker binary, run this command:
|
||||
|
||||
sudo make binary
|
||||
|
||||
This will create the Docker binary in
|
||||
`./bundles/<version>-dev/binary/`
|
||||
|
||||
### Using your built Docker binary
|
||||
|
||||
The binary is available outside the container in the directory
|
||||
`./bundles/<version>-dev/binary/`. You can swap your
|
||||
host docker executable with this binary for live testing - for example,
|
||||
on ubuntu:
|
||||
|
||||
sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start
|
||||
|
||||
> **Note**:
|
||||
> It’s safer to run the tests below before swapping your host’s docker binary.
|
||||
|
||||
## Run the Tests
|
||||
|
||||
To execute the test cases, run this command:
|
||||
|
||||
sudo make test
|
||||
|
||||
If the tests are successful, then the tail of the output should look
|
||||
something like this:
|
||||
|
||||
--- PASS: TestWriteBroadcaster (0.00 seconds)
|
||||
=== RUN TestRaceWriteBroadcaster
|
||||
--- PASS: TestRaceWriteBroadcaster (0.00 seconds)
|
||||
=== RUN TestTruncIndex
|
||||
--- PASS: TestTruncIndex (0.00 seconds)
|
||||
=== RUN TestCompareKernelVersion
|
||||
--- PASS: TestCompareKernelVersion (0.00 seconds)
|
||||
=== RUN TestHumanSize
|
||||
--- PASS: TestHumanSize (0.00 seconds)
|
||||
=== RUN TestParseHost
|
||||
--- PASS: TestParseHost (0.00 seconds)
|
||||
=== RUN TestParseRepositoryTag
|
||||
--- PASS: TestParseRepositoryTag (0.00 seconds)
|
||||
=== RUN TestGetResolvConf
|
||||
--- PASS: TestGetResolvConf (0.00 seconds)
|
||||
=== RUN TestCheckLocalDns
|
||||
--- PASS: TestCheckLocalDns (0.00 seconds)
|
||||
=== RUN TestParseRelease
|
||||
--- PASS: TestParseRelease (0.00 seconds)
|
||||
=== RUN TestDependencyGraphCircular
|
||||
--- PASS: TestDependencyGraphCircular (0.00 seconds)
|
||||
=== RUN TestDependencyGraph
|
||||
--- PASS: TestDependencyGraph (0.00 seconds)
|
||||
PASS
|
||||
ok github.com/dotcloud/docker/utils 0.017s
|
||||
|
||||
If $TESTFLAGS is set in the environment, it is passed as extra
|
||||
arguments to `go test`. You can use this to select certain tests to run,
|
||||
e.g.:
|
||||
|
||||
> TESTFLAGS='-run ^TestBuild$' make test
|
||||
|
||||
If the output indicates "FAIL" and you see errors like this:
|
||||
|
||||
server.go:1302 Error: Insertion failed because database is full: database or disk is full
|
||||
|
||||
utils_test.go:179: Error copy: exit status 1 (cp: writing '/tmp/docker-testd5c9-[...]': No space left on device
|
||||
|
||||
Then you likely don’t have enough memory available to the test suite. 2GB
|
||||
is recommended.
|
||||
|
||||
## Use Docker
|
||||
|
||||
You can run an interactive session in the newly built container:
|
||||
|
||||
sudo make shell
|
||||
|
||||
# type 'exit' or Ctrl-D to exit
|
||||
|
||||
## Build And View The Documentation
|
||||
|
||||
If you want to read the documentation from a local website, or are
|
||||
making changes to it, you can build the documentation and then serve it
|
||||
by:
|
||||
|
||||
sudo make docs
|
||||
# when it’s done, you can point your browser to http://yourdockerhost:8000
|
||||
# type Ctrl-C to exit
|
||||
|
||||
**Need More Help?**
|
||||
|
||||
If you need more help then hop on to the [\#docker-dev IRC
|
||||
channel](irc://chat.freenode.net#docker-dev) or post a message on the
|
||||
[Docker developer mailing
|
||||
list](https://groups.google.com/d/forum/docker-dev).
|
25
docs/sources/examples.md
Normal file
|
@@ -0,0 +1,25 @@
|
|||
|
||||
# Examples
|
||||
|
||||
## Introduction:
|
||||
|
||||
Here are some examples of how to use Docker to create running processes,
|
||||
starting from a very simple *Hello World* and progressing to more
|
||||
substantial services like those which you might find in production.
|
||||
|
||||
## Contents:
|
||||
|
||||
- [Check your Docker install](hello_world/)
|
||||
- [Hello World](hello_world/#hello-world)
|
||||
- [Hello World Daemon](hello_world/#hello-world-daemon)
|
||||
- [Node.js Web App](nodejs_web_app/)
|
||||
- [Redis Service](running_redis_service/)
|
||||
- [SSH Daemon Service](running_ssh_service/)
|
||||
- [CouchDB Service](couchdb_data_volumes/)
|
||||
- [PostgreSQL Service](postgresql_service/)
|
||||
- [Building an Image with MongoDB](mongodb/)
|
||||
- [Riak Service](running_riak_service/)
|
||||
- [Using Supervisor with Docker](using_supervisord/)
|
||||
- [Process Management with CFEngine](cfengine_process_management/)
|
||||
- [Python Web App](python_web_app/)
|
||||
|
112
docs/sources/examples/apt-cacher-ng.md
Normal file
|
@@ -0,0 +1,112 @@
|
page_title: Running an apt-cacher-ng service
page_description: Installing and running an apt-cacher-ng service
page_keywords: docker, example, package installation, networking, debian, ubuntu

# Apt-Cacher-ng Service

> **Note**:
>
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup).
> - **If you’re using OS X or docker via TCP** then you shouldn’t use
>   sudo.

When you have multiple Docker servers, or build unrelated Docker
containers which can’t make use of the Docker build cache, it can be
useful to have a caching proxy for your packages. This container makes
the second download of any package almost instant.

Use the following Dockerfile:

    #
    # Build: docker build -t apt-cacher .
    # Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
    #
    # and then you can run containers with:
    #   docker run -t -i --rm -e http_proxy=http://dockerhost:3142/ debian bash
    #
    FROM ubuntu
    MAINTAINER SvenDowideit@docker.com

    VOLUME ["/var/cache/apt-cacher-ng"]
    RUN apt-get update ; apt-get install -yq apt-cacher-ng

    EXPOSE 3142
    CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*

Build the image using:

    $ sudo docker build -t eg_apt_cacher_ng .

Then run it, mapping the exposed port to one on the host:

    $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng

To see the logfiles that are ‘tailed’ in the default command, you can
use:

    $ sudo docker logs -f test_apt_cacher_ng

To get your Debian-based containers to use the proxy, you can do one of
three things:

1. Add an apt proxy setting:
   `echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy`
2. Set an environment variable:
   `http_proxy=http://dockerhost:3142/`
3. Change your `sources.list` entries to start with
   `http://dockerhost:3142/`

**Option 1** injects the settings safely into your apt configuration in
a local version of a common base:

    FROM ubuntu
    RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
    RUN apt-get update ; apt-get install -y vim git

    # docker build -t my_ubuntu .

**Option 2** is good for testing, but will break other HTTP clients
which obey `http_proxy`, such as `curl`, `wget` and others:

    $ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash

**Option 3** is the least portable, but there will be times when you
might need to do it, and you can do it from your `Dockerfile`
too; see the sketch below.
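A minimal sketch of **Option 3**, assuming the container’s `sources.list` points at the default Ubuntu archive (`dockerhost` and the mirror hostname are assumptions; substitute your own):

    # rewrite the archive entries so all package fetches go through the cache
    sed -i 's|http://archive.ubuntu.com|http://dockerhost:3142/archive.ubuntu.com|g' /etc/apt/sources.list
    apt-get update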
Apt-cacher-ng has some tools that allow you to manage the repository,
and they can be used by leveraging the `VOLUME`
instruction, and the image we built to run the service:

    $ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash

    $$ /usr/lib/apt-cacher-ng/distkill.pl
    Scanning /var/cache/apt-cacher-ng, please wait...
    Found distributions:
    bla, taggedcount: 0
         1. precise-security (36 index files)
         2. wheezy (25 index files)
         3. precise-updates (36 index files)
         4. precise (36 index files)
         5. wheezy-updates (18 index files)

    Found architectures:
         6. amd64 (36 index files)
         7. i386 (24 index files)

    WARNING: The removal action may wipe out whole directories containing
             index files. Select d to see detailed list.

    (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q

Finally, clean up after your test by stopping and removing the
container, and then removing the image.

    $ sudo docker stop test_apt_cacher_ng
    $ sudo docker rm test_apt_cacher_ng
    $ sudo docker rmi eg_apt_cacher_ng
152
docs/sources/examples/cfengine_process_management.md
Normal file

@@ -0,0 +1,152 @@
page_title: Process Management with CFEngine
page_description: Managing containerized processes with CFEngine
page_keywords: cfengine, process, management, usage, docker, documentation

# Process Management with CFEngine

Create Docker containers with managed processes.

Docker monitors one process in each running container and the container
lives or dies with that process. By introducing CFEngine inside Docker
containers, we can alleviate a few of the issues that may arise:

- It is possible to easily start multiple processes within a
  container, all of which will be managed automatically, with the
  normal `docker run` command.
- If a managed process dies or crashes, CFEngine will start it again
  within 1 minute.
- The container itself will live as long as the CFEngine scheduling
  daemon (cf-execd) lives. With CFEngine, we are able to decouple the
  life of the container from the uptime of the service it provides.
## How it works

CFEngine, together with the cfe-docker integration policies, is
installed as part of the Dockerfile. This builds CFEngine into our
Docker image.

The Dockerfile’s `ENTRYPOINT` takes an arbitrary
number of commands (with any desired arguments) as parameters. When we
run the Docker container these parameters get written to CFEngine
policies and CFEngine takes over to ensure that the desired processes
are running in the container.

CFEngine scans the process table for the `basename`
of the commands given to the `ENTRYPOINT` and runs
the command to start the process if the `basename`
is not found. For example, if we start the container with
`docker run "/path/to/my/application parameters"`,
CFEngine will look for a process named `application`
and run the command. If an entry for `application`
is not found in the process table at any point in time, CFEngine will
execute `/path/to/my/application parameters` to
start the application once again. The check on the process table happens
every minute.

Note that it is therefore important that the command to start your
application leaves a process with the basename of the command. This can
be made more flexible by making some minor adjustments to the CFEngine
policies, if desired.
## Usage

This example assumes you have Docker installed and working. We will
install and manage `apache2` and `sshd`
in a single container.

There are three steps:

1. Install CFEngine into the container.
2. Copy the CFEngine Docker process management policy into the
   containerized CFEngine installation.
3. Start your application processes as part of the
   `docker run` command.

### Building the container image

The first two steps can be done as part of a Dockerfile, as follows.
    FROM ubuntu
    MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>

    RUN apt-get -y install wget lsb-release unzip ca-certificates

    # install latest CFEngine
    RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
    RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
    RUN apt-get update
    RUN apt-get -y install cfengine-community

    # install cfe-docker process management policy
    RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
    RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/
    RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/
    RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip

    # apache2 and openssh are just for testing purposes, install your own apps here
    RUN apt-get -y install openssh-server apache2
    RUN mkdir -p /var/run/sshd
    RUN echo "root:password" | chpasswd  # need a password for ssh

    ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]
By saving this file as `Dockerfile` in a working
directory, you can then build your image with the docker build
command, e.g. `docker build -t managed_image .`.
### Testing the container

Start the container with `apache2` and
`sshd` running and managed, forwarding a port to our
SSH instance:

    docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"

We now clearly see one of the benefits of the cfe-docker integration: it
allows us to start several processes as part of a normal
`docker run` command.

We can now log in to our new container and see that both
`apache2` and `sshd` are
running. We have set the root password to "password" in the Dockerfile
above and can use that to log in with ssh:

    ssh -p222 root@127.0.0.1

    ps -ef
    UID        PID  PPID  C STIME TTY          TIME CMD
    root         1     0  0 07:48 ?        00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start
    root        18     1  0 07:48 ?        00:00:00 /var/cfengine/bin/cf-execd -F
    root        20     1  0 07:48 ?        00:00:00 /usr/sbin/sshd
    root        32     1  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    www-data    34    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    www-data    35    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    www-data    36    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
    root        93    20  0 07:48 ?        00:00:00 sshd: root@pts/0
    root       105    93  0 07:48 pts/0    00:00:00 -bash
    root       112   105  0 07:49 pts/0    00:00:00 ps -ef
If we stop apache2, it will be started again within a minute by
CFEngine.

    service apache2 status
     Apache2 is running (pid 32).
    service apache2 stop
     * Stopping web server apache2 ... waiting    [ OK ]
    service apache2 status
     Apache2 is NOT running.
    # ... wait up to 1 minute...
    service apache2 status
     Apache2 is running (pid 173).
## Adapting to your applications

To make sure your applications get managed in the same manner, there are
just two things you need to adjust from the above example:

- In the Dockerfile used above, install your applications instead of
  `apache2` and `sshd`.
- When you start the container with `docker run`,
  specify the command line arguments to your applications rather than
  `apache2` and `sshd`, as sketched below.
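For instance, a minimal sketch with hypothetical applications (the image name, binaries and config path below are illustrative, not part of the cfe-docker example):

    # nginx and redis-server are stand-ins for your own applications
    docker run -d my_managed_image "/usr/sbin/nginx" "/usr/bin/redis-server /etc/redis/redis.conf"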
48
docs/sources/examples/couchdb_data_volumes.md
Normal file

@@ -0,0 +1,48 @@
page_title: Sharing data between 2 couchdb databases
page_description: Sharing data between 2 couchdb databases
page_keywords: docker, example, package installation, networking, couchdb, data volumes

# CouchDB Service

> **Note**:
>
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup)

Here’s an example of using data volumes to share the same data between
two CouchDB containers. This could be used for hot upgrades, testing
different versions of CouchDB on the same data, etc.

## Create first database

Note that we’re marking `/var/lib/couchdb` as a data
volume.

    COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)

## Add data to the first database

We’re assuming your Docker host is reachable at `localhost`. If not,
replace `localhost` with the public IP of your Docker host.

    HOST=localhost
    URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/"
    echo "Navigate to $URL in your browser, and use the couch interface to add data"

## Create second database

This time, we’re requesting shared access to `$COUCH1`'s volumes.

    COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)

## Browse data on the second database

    HOST=localhost
    URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/"
    echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'

Congratulations, you are now running two CouchDB containers, completely
isolated from each other *except* for their data.
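If you prefer to verify the sharing from the shell rather than the browser, here is a minimal sketch using CouchDB’s plain HTTP API (the `db` database name is just an illustration):

    # Create a database through the first container...
    curl -X PUT "http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/db"
    # ...then it should show up when listed through the second one
    curl "http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_all_dbs"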
166
docs/sources/examples/hello_world.md
Normal file

@@ -0,0 +1,166 @@
page_title: Hello world example
page_description: A simple hello world example with Docker
page_keywords: docker, example, hello world

# Check your Docker installation

This guide assumes you have a working installation of Docker. To check
your Docker install, run the following command:

    # Check that you have a working install
    $ sudo docker info

If you get `docker: command not found` or something
like `/var/lib/docker/repositories: permission denied`
you may have an incomplete Docker installation or insufficient
privileges to access docker on your machine.

Please refer to [*Installation*](../../installation/)
for installation instructions.
## Hello World

> **Note**:
>
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](#check-your-docker-installation).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup)

This is the most basic example available for using Docker.

Download the small base image named `busybox`:

    # Download a busybox image
    $ sudo docker pull busybox

The `busybox` image is a minimal Linux system. You
can do the same with any number of other images, such as
`debian`, `ubuntu` or
`centos`. The images can be found and retrieved
using the [Docker index](http://index.docker.io).

    $ sudo docker run busybox /bin/echo hello world

This command will run a simple `echo` command, that
will echo `hello world` back to the console over
standard out.

**Explanation:**

- **"sudo"** execute the following commands as user *root*
- **"docker run"** run a command in a new container
- **"busybox"** is the image we are running the command in.
- **"/bin/echo"** is the command we want to run in the container
- **"hello world"** is the input for the echo command

**Video:**

See the example in action

<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot; src=&quot;https://asciinema.org/a/7658.js&quot; id=&quot;asciicast-7658&quot; async></script></body>"></iframe>

## Hello World Daemon

> **Note**:
>
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](#check-your-docker-installation).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup)

And now for the most boring daemon ever written!

We will use the Ubuntu image to run a simple hello world daemon that
will just print hello world to standard out every second. It will
continue to do this until we stop it.

**Steps:**

    CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")

We are going to run a simple hello world daemon in a new container made
from the `ubuntu` image.

- **"sudo docker run -d"** run a command in a new container. We pass
  "-d" so it runs as a daemon.
- **"ubuntu"** is the image we want to run the command inside of.
- **"/bin/sh -c"** is the command we want to run in the container
- **"while true; do echo hello world; sleep 1; done"** is the mini
  script we want to run, that will just print hello world once a
  second until we stop it.
- **$CONTAINER_ID** the output of the run command will return a
  container id, we can use in future commands to see what is going on
  with this process.
<!-- -->

    sudo docker logs $CONTAINER_ID

Check the logs to make sure it is working correctly.

- **"docker logs"** This will return the logs for a container
- **$CONTAINER_ID** The Id of the container we want the logs for.

<!-- -->

    sudo docker attach --sig-proxy=false $CONTAINER_ID

Attach to the container to see the results in real-time.

- **"docker attach"** This will allow us to attach to a background
  process to see what is going on.
- **"--sig-proxy=false"** Do not forward signals to the container;
  allows us to exit the attachment using Control-C without stopping
  the container.
- **$CONTAINER_ID** The Id of the container we want to attach to.

Exit from the container attachment by pressing Control-C.

    sudo docker ps

Check the process list to make sure it is running.

- **"docker ps"** this shows all running processes managed by docker

<!-- -->

    sudo docker stop $CONTAINER_ID

Stop the container, since we don’t need it anymore.

- **"docker stop"** This stops a container
- **$CONTAINER_ID** The Id of the container we want to stop.

<!-- -->

    sudo docker ps

Make sure it is really stopped.

**Video:**

See the example in action

<iframe width="640" height="480" frameborder="0" sandbox="allow-same-origin allow-scripts" srcdoc="<body><script type=&quot;text/javascript&quot; src=&quot;https://asciinema.org/a/2562.js&quot; id=&quot;asciicast-2562&quot; async></script></body>"></iframe>

The next example in the series is a [*Node.js Web
App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to
any of the other examples:

- [*Node.js Web App*](../nodejs_web_app/#nodejs-web-app)
- [*Redis Service*](../running_redis_service/#running-redis-service)
- [*SSH Daemon Service*](../running_ssh_service/#running-ssh-service)
- [*CouchDB
  Service*](../couchdb_data_volumes/#running-couchdb-service)
- [*PostgreSQL Service*](../postgresql_service/#postgresql-service)
- [*Building an Image with MongoDB*](../mongodb/#mongodb-image)
- [*Python Web App*](../python_web_app/#python-web-app)
107
docs/sources/examples/https.md
Normal file

@@ -0,0 +1,107 @@
page_title: Docker HTTPS Setup
page_description: How to setup docker with https
page_keywords: docker, example, https, daemon

# Running Docker with https

By default, Docker runs via a non-networked Unix socket. It can also
optionally communicate using an HTTP socket.

If you need Docker reachable via the network in a safe manner, you can
enable TLS by specifying the tlsverify flag and pointing Docker’s
tlscacert flag to a trusted CA certificate.

In daemon mode, it will only allow connections from clients
authenticated by a certificate signed by that CA. In client mode, it
will only connect to servers with a certificate signed by that CA.

> **Warning**:
> Using TLS and managing a CA is an advanced topic. Please make yourself
> familiar with OpenSSL, x509 and TLS before using it in production.
## Create a CA, server and client keys with OpenSSL

First, initialize the CA serial file and generate CA private and public
keys:

    $ echo 01 > ca.srl
    $ openssl genrsa -des3 -out ca-key.pem
    $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem

Now that we have a CA, you can create a server key and certificate
signing request. Make sure that "Common Name (e.g. server FQDN or YOUR
name)" matches the hostname you will use to connect to Docker, or just
use `*` for a certificate valid for any hostname:

    $ openssl genrsa -des3 -out server-key.pem
    $ openssl req -new -key server-key.pem -out server.csr

Next we’re going to sign the key with our CA:

    $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
      -out server-cert.pem

For client authentication, create a client key and certificate signing
request:

    $ openssl genrsa -des3 -out client-key.pem
    $ openssl req -new -key client-key.pem -out client.csr

To make the key suitable for client authentication, create an extensions
config file:

    $ echo extendedKeyUsage = clientAuth > extfile.cnf

Now sign the key:

    $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
      -out client-cert.pem -extfile extfile.cnf

Finally you need to remove the passphrase from the client and server
key:

    $ openssl rsa -in server-key.pem -out server-key.pem
    $ openssl rsa -in client-key.pem -out client-key.pem
Now you can make the Docker daemon only accept connections from clients
providing a certificate trusted by our CA:

    $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
      -H=0.0.0.0:4243

To be able to connect to Docker and validate its certificate, you now
need to provide your client keys, certificates and trusted CA:

    $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
      -H=dns-name-of-docker-host:4243

> **Warning**:
> As shown in the example above, you don’t have to run the
> `docker` client with `sudo` or
> the `docker` group when you use certificate
> authentication. That means anyone with the keys can give any
> instructions to your Docker daemon, giving them root access to the
> machine hosting the daemon. Guard these keys as you would a root
> password!
## Other modes

If you don’t want to have complete two-way authentication, you can run
Docker in various other modes by mixing the flags.

### Daemon modes

- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients
- tls, tlscert, tlskey: Do not authenticate clients (see the sketch
  below)
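A minimal sketch of that encrypt-only daemon mode, reusing the server certificate generated above and the same listen address as the earlier example:

    # encrypt traffic, but accept any client
    $ sudo docker -d --tls --tlscert=server-cert.pem --tlskey=server-key.pem \
      -H=0.0.0.0:4243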

### Client modes

- tls: Authenticate server based on public/default CA pool
- tlsverify, tlscacert: Authenticate server based on given CA
- tls, tlscert, tlskey: Authenticate with client certificate, do not
  authenticate server based on given CA
- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client
  certificate, authenticate server based on given CA

The client will send its client certificate if found, so you just need
to drop your keys into `~/.docker/<ca, cert or key>.pem`.
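A minimal sketch of that layout, assuming the file names generated earlier on this page (after this, the client should no longer need the explicit `--tlscacert`, `--tlscert` and `--tlskey` flags):

    mkdir -p ~/.docker
    cp ca.pem ~/.docker/ca.pem
    cp client-cert.pem ~/.docker/cert.pem
    cp client-key.pem ~/.docker/key.pem
    # the bare flag should now be enough, e.g.:
    docker --tlsverify -H=dns-name-of-docker-host:4243 info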
89
docs/sources/examples/mongodb.md
Normal file

@@ -0,0 +1,89 @@
page_title: Building a Docker Image with MongoDB
page_description: How to build a Docker image with MongoDB pre-installed
page_keywords: docker, example, package installation, networking, mongodb

# Building an Image with MongoDB

> **Note**:
>
> - This example assumes you have Docker running in daemon mode. For
>   more information please see [*Check your Docker
>   install*](../hello_world/#running-examples).
> - **If you don’t like sudo** then see [*Giving non-root
>   access*](../../installation/binaries/#dockergroup)

The goal of this example is to show how you can build your own Docker
images with MongoDB pre-installed. We will do that by constructing a
`Dockerfile` that downloads a base image, adds an
apt source and installs the database software on Ubuntu.

## Creating a `Dockerfile`

Create an empty file called `Dockerfile`:

    touch Dockerfile

Next, define the parent image you want to use to build your own image on
top of. Here, we’ll use [Ubuntu](https://index.docker.io/_/ubuntu/)
(tag: `latest`) available on the [docker
index](http://index.docker.io):
    FROM ubuntu:latest

Since we want to be running the latest version of MongoDB we’ll need to
add the 10gen repo to our apt sources list.

    # Add 10gen official apt source to the sources list
    RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
    RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list

Then, we don’t want Ubuntu to complain about init not being available so
we’ll divert `/sbin/initctl` to
`/bin/true` so it thinks everything is working.

    # Hack for initctl not being available in Ubuntu
    RUN dpkg-divert --local --rename --add /sbin/initctl
    RUN ln -s /bin/true /sbin/initctl

Afterwards we’ll be able to update our apt repositories and install
MongoDB:

    # Install MongoDB
    RUN apt-get update
    RUN apt-get install -y mongodb-10gen

To run MongoDB we’ll have to create the default data directory (because
we want it to run without needing to provide a special configuration
file):

    # Create the MongoDB data directory
    RUN mkdir -p /data/db

Finally, we’ll expose the standard port that MongoDB runs on, 27017, as
well as define an `ENTRYPOINT` instruction for the
container.

    EXPOSE 27017
    ENTRYPOINT ["/usr/bin/mongod"]

Now, let’s build the image which will go through the
`Dockerfile` we made and run all of the commands.

    sudo docker build -t <yourname>/mongodb .

Now you should be able to run `mongod` as a daemon
and be able to connect on the local port!

    # Regular style
    MONGO_ID=$(sudo docker run -d <yourname>/mongodb)

    # Lean and mean
    MONGO_ID=$(sudo docker run -d <yourname>/mongodb --noprealloc --smallfiles)

    # Check the logs out
    sudo docker logs $MONGO_ID

    # Connect and play around
    mongo --port <port you get from `docker ps`>
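To script the port lookup instead of reading it off `docker ps`, a minimal sketch (assuming you also publish the exposed port, e.g. with `-P`, which maps all `EXPOSE`d ports to random host ports):

    MONGO_ID=$(sudo docker run -d -P <yourname>/mongodb)
    PORT=$(sudo docker port $MONGO_ID 27017 | grep -Po '\d+$')
    mongo --port $PORT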

Sweet!

Some files were not shown because too many files have changed in this diff.