rebase master

Victor Vieux 2013-07-24 14:38:40 +00:00
commit 066873ebd2
52 changed files with 1610 additions and 766 deletions


@ -76,6 +76,7 @@ Shawn Siefkas <shawn.siefkas@meredith.com>
Silas Sewell <silas@sewell.org>
Solomon Hykes <solomon@dotcloud.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Stefan Praszalowicz <stefan@greplin.com>
Thatcher Peskens <thatcher@dotcloud.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Hansen <thomas.hansen@gmail.com>


@ -1,5 +1,18 @@
# Changelog
## 0.5.0 (2013-07-17)
+ Runtime: List all processes running inside a container with 'docker top'
+ Runtime: Host directories can be mounted as volumes with 'docker run -v'
+ Runtime: Containers can expose public UDP ports (e.g. '-p 123/udp')
+ Runtime: Optionally specify an exact public port (e.g. '-p 80:4500')
+ Registry: New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+ Builder: ENTRYPOINT instruction sets a default binary entry point to a container
+ Builder: VOLUME instruction marks a part of the container as persistent data
* Builder: 'docker build' displays the full output of a build by default
* Runtime: 'docker login' supports additional options
- Runtime: Don't save a container's hostname when committing an image.
- Registry: Fix issues when uploading images to a private registry
## 0.4.8 (2013-07-01)
+ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.


@ -11,7 +11,7 @@ BUILD_DIR := $(CURDIR)/.gopath
GOPATH ?= $(BUILD_DIR)
export GOPATH
GO_OPTIONS ?=
GO_OPTIONS ?= -a -ldflags='-w -d'
ifeq ($(VERBOSE), 1)
GO_OPTIONS += -v
endif
@ -80,10 +80,10 @@ test:
tar --exclude=${BUILD_SRC} -cz . | tar -xz -C ${BUILD_PATH}
GOPATH=${CURDIR}/${BUILD_SRC} go get -d
# Do the test
sudo -E GOPATH=${CURDIR}/${BUILD_SRC} go test ${GO_OPTIONS}
sudo -E GOPATH=${CURDIR}/${BUILD_SRC} CGO_ENABLED=0 go test ${GO_OPTIONS}
testall: all
@(cd $(DOCKER_DIR); sudo -E go test ./... $(GO_OPTIONS))
@(cd $(DOCKER_DIR); CGO_ENABLED=0 sudo -E go test ./... $(GO_OPTIONS))
fmt:
@gofmt -s -l -w .

api.go

@ -81,54 +81,15 @@ func getBoolParam(value string) (bool, error) {
return ret, nil
}
func getAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if version > 1.1 {
w.WriteHeader(http.StatusNotFound)
return nil
}
authConfig, err := auth.LoadConfig(srv.runtime.root)
if err != nil {
if err != auth.ErrConfigFileMissing {
return err
}
authConfig = &auth.AuthConfig{}
}
b, err := json.Marshal(&auth.AuthConfig{Username: authConfig.Username, Email: authConfig.Email})
if err != nil {
return err
}
writeJSON(w, b)
return nil
}
func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
authConfig := &auth.AuthConfig{}
err := json.NewDecoder(r.Body).Decode(authConfig)
if err != nil {
return err
}
status := ""
if version > 1.1 {
status, err = auth.Login(authConfig, false)
if err != nil {
return err
}
} else {
localAuthConfig, err := auth.LoadConfig(srv.runtime.root)
if err != nil {
if err != auth.ErrConfigFileMissing {
return err
}
}
if authConfig.Username == localAuthConfig.Username {
authConfig.Password = localAuthConfig.Password
}
newAuthConfig := auth.NewAuthConfig(authConfig.Username, authConfig.Password, authConfig.Email, srv.runtime.root)
status, err = auth.Login(newAuthConfig, true)
if err != nil {
return err
}
status, err := auth.Login(authConfig)
if err != nil {
return err
}
if status != "" {
b, err := json.Marshal(&APIAuth{Status: status})
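With the version check gone, authentication is a plain POST of the credentials to /auth, answered by a status string. A minimal client sketch, assuming a daemon on the default port 4243 and placeholder credentials (the JSON field names follow AuthConfig's tags):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Placeholder credentials; field names mirror the auth.AuthConfig JSON tags.
    creds := map[string]string{
        "username": "someuser",
        "password": "secret",
        "email":    "someuser@example.com",
    }
    payload, _ := json.Marshal(creds)
    resp, err := http.Post("http://localhost:4243/auth", "application/json", bytes.NewReader(payload))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    var out struct{ Status string }
    if err := json.NewDecoder(resp.Body).Decode(&out); err == nil && out.Status != "" {
        fmt.Println(out.Status) // e.g. "Login Succeeded"
    }
}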
@ -217,6 +178,64 @@ func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Reques
return nil
}
func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
sendEvent := func(wf *utils.WriteFlusher, event *utils.JSONMessage) error {
b, err := json.Marshal(event)
if err != nil {
return fmt.Errorf("JSON error")
}
_, err = wf.Write(b)
if err != nil {
// On error, evict the listener
utils.Debugf("%s", err)
srv.Lock()
delete(srv.listeners, r.RemoteAddr)
srv.Unlock()
return err
}
return nil
}
if err := parseForm(r); err != nil {
return err
}
listener := make(chan utils.JSONMessage)
srv.Lock()
srv.listeners[r.RemoteAddr] = listener
srv.Unlock()
since, err := strconv.ParseInt(r.Form.Get("since"), 10, 0)
if err != nil {
since = 0
}
w.Header().Set("Content-Type", "application/json")
wf := utils.NewWriteFlusher(w)
if since != 0 {
// If since, send previous events that happened after the timestamp
for _, event := range srv.events {
if event.Time >= since {
err := sendEvent(wf, &event)
if err != nil && err.Error() == "JSON error" {
continue
}
if err != nil {
return err
}
}
}
}
for {
event := <-listener
err := sendEvent(wf, &event)
if err != nil && err.Error() == "JSON error" {
continue
}
if err != nil {
return err
}
}
return nil
}
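A sketch of a client for this new endpoint: GET /events?since=<timestamp> replays past events newer than the timestamp and then blocks, streaming new ones as concatenated JSON objects. The daemon address assumes the default port 4243; the decoded field names are assumptions based on how LogEvent is used.

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "net/http"
)

func main() {
    resp, err := http.Get("http://localhost:4243/events?since=1")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    dec := json.NewDecoder(resp.Body)
    for {
        // Each event is one JSON object; Status/ID/Time mirror utils.JSONMessage.
        var ev struct {
            Status string
            ID     string
            Time   int64
        }
        if err := dec.Decode(&ev); err == io.EOF {
            break
        } else if err != nil {
            panic(err)
        }
        fmt.Printf("%d %s %s\n", ev.Time, ev.Status, ev.ID)
    }
}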
func getImagesHistory(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
@ -429,16 +448,8 @@ func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *ht
func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
authConfig := &auth.AuthConfig{}
if version > 1.1 {
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return err
}
} else {
localAuthConfig, err := auth.LoadConfig(srv.runtime.root)
if err != nil && err != auth.ErrConfigFileMissing {
return err
}
authConfig = localAuthConfig
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return err
}
if err := parseForm(r); err != nil {
return err
@ -774,6 +785,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
}
remoteURL := r.FormValue("remote")
repoName := r.FormValue("t")
rawSuppressOutput := r.FormValue("q")
tag := ""
if strings.Contains(repoName, ":") {
remoteParts := strings.Split(repoName, ":")
@ -820,7 +832,13 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
}
context = c
}
b := NewBuildFile(srv, utils.NewWriteFlusher(w))
suppressOutput, err := getBoolParam(rawSuppressOutput)
if err != nil {
return err
}
b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput)
id, err := b.Build(context)
if err != nil {
fmt.Fprintf(w, "Error build: %s\n", err)
@ -847,9 +865,9 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
m := map[string]map[string]func(*Server, float64, http.ResponseWriter, *http.Request, map[string]string) error{
"GET": {
"/auth": getAuth,
"/version": getVersion,
"/events": getEvents,
"/info": getInfo,
"/version": getVersion,
"/images/json": getImagesJSON,
"/images/viz": getImagesViz,
"/images/search": getImagesSearch,


@ -17,13 +17,14 @@ type APIImages struct {
}
type APIInfo struct {
Debug bool
Containers int
Images int
NFd int `json:",omitempty"`
NGoroutines int `json:",omitempty"`
MemoryLimit bool `json:",omitempty"`
SwapLimit bool `json:",omitempty"`
Debug bool
Containers int
Images int
NFd int `json:",omitempty"`
NGoroutines int `json:",omitempty"`
MemoryLimit bool `json:",omitempty"`
SwapLimit bool `json:",omitempty"`
NEventsListener int `json:",omitempty"`
}
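The new NEventsListener counter gets the same json:",omitempty" treatment as the other debug-only fields, so it only appears in /info responses when it is non-zero. A tiny standalone sketch of that behaviour (struct trimmed to three fields):

package main

import (
    "encoding/json"
    "fmt"
)

type info struct {
    Debug           bool
    Containers      int
    NEventsListener int `json:",omitempty"`
}

func main() {
    a, _ := json.Marshal(info{Containers: 3})                     // listener count omitted
    b, _ := json.Marshal(info{Containers: 3, NEventsListener: 1}) // listener count present
    fmt.Println(string(a))
    fmt.Println(string(b))
}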
type APITop struct {


@ -89,6 +89,44 @@ func TestGetInfo(t *testing.T) {
}
}
func TestGetEvents(t *testing.T) {
runtime := mkRuntime(t)
srv := &Server{
runtime: runtime,
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid")
srv.LogEvent("fakeaction2", "fakeid")
req, err := http.NewRequest("GET", "/events?since=1", nil)
if err != nil {
t.Fatal(err)
}
r := httptest.NewRecorder()
setTimeout(t, "", 500*time.Millisecond, func() {
if err := getEvents(srv, APIVERSION, r, req, nil); err != nil {
t.Fatal(err)
}
})
dec := json.NewDecoder(r.Body)
for i := 0; i < 2; i++ {
var jm utils.JSONMessage
if err := dec.Decode(&jm); err == io.EOF {
break
} else if err != nil {
t.Fatal(err)
}
if jm != srv.events[i] {
t.Fatalf("Event received it different than expected")
}
}
}
func TestGetImagesJSON(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)


@ -25,19 +25,15 @@ var (
)
type AuthConfig struct {
Username string `json:"username"`
Password string `json:"password"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth"`
Email string `json:"email"`
rootPath string
}
func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
return &AuthConfig{
Username: username,
Password: password,
Email: email,
rootPath: rootPath,
}
type ConfigFile struct {
Configs map[string]AuthConfig `json:"configs,omitempty"`
rootPath string
}
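LoadConfig now reads ~/.dockercfg as a JSON map keyed by registry address instead of the old two-line auth/email format. A rough sketch of what the new file looks like, generated with encoding/json; the registry key and credentials are placeholders (the key would be whatever IndexServerAddress() returns):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Mirrors ConfigFile.Configs: registry address -> auth entry.
    configs := map[string]map[string]string{
        "https://index.docker.io/v1/": {
            "auth":  "c29tZXVzZXI6c2VjcmV0", // base64("someuser:secret")
            "email": "someuser@example.com",
        },
    }
    b, err := json.MarshalIndent(configs, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}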
func IndexServerAddress() string {
@ -54,61 +50,84 @@ func encodeAuth(authConfig *AuthConfig) string {
}
// decode the auth string
func decodeAuth(authStr string) (*AuthConfig, error) {
func decodeAuth(authStr string) (string, string, error) {
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return nil, err
return "", "", err
}
if n > decLen {
return nil, fmt.Errorf("Something went wrong decoding auth config")
return "", "", fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.Split(string(decoded), ":")
if len(arr) != 2 {
return nil, fmt.Errorf("Invalid auth configuration file")
return "", "", fmt.Errorf("Invalid auth configuration file")
}
password := strings.Trim(arr[1], "\x00")
return &AuthConfig{Username: arr[0], Password: password}, nil
return arr[0], password, nil
}
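The stored auth value is still just base64 of "username:password"; decodeAuth now hands the two halves back separately instead of wrapping them in an AuthConfig. A standalone sketch of the round trip with placeholder credentials:

package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

func main() {
    // Encode the way encodeAuth does: base64 of "username:password".
    authStr := base64.StdEncoding.EncodeToString([]byte("someuser:secret"))

    // Decode it back into its two parts, matching the new decodeAuth return values.
    decoded, err := base64.StdEncoding.DecodeString(authStr)
    if err != nil {
        panic(err)
    }
    parts := strings.SplitN(string(decoded), ":", 2)
    fmt.Println("username:", parts[0], "password:", parts[1])
}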
// load up the auth config information and return values
// FIXME: use the internal golang config parser
func LoadConfig(rootPath string) (*AuthConfig, error) {
func LoadConfig(rootPath string) (*ConfigFile, error) {
configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath}
confFile := path.Join(rootPath, CONFIGFILE)
if _, err := os.Stat(confFile); err != nil {
return &AuthConfig{rootPath: rootPath}, ErrConfigFileMissing
return &configFile, ErrConfigFileMissing
}
b, err := ioutil.ReadFile(confFile)
if err != nil {
return nil, err
}
arr := strings.Split(string(b), "\n")
if len(arr) < 2 {
return nil, fmt.Errorf("The Auth config file is empty")
if err := json.Unmarshal(b, &configFile.Configs); err != nil {
arr := strings.Split(string(b), "\n")
if len(arr) < 2 {
return nil, fmt.Errorf("The Auth config file is empty")
}
authConfig := AuthConfig{}
origAuth := strings.Split(arr[0], " = ")
authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
if err != nil {
return nil, err
}
origEmail := strings.Split(arr[1], " = ")
authConfig.Email = origEmail[1]
configFile.Configs[IndexServerAddress()] = authConfig
} else {
for k, authConfig := range configFile.Configs {
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
if err != nil {
return nil, err
}
authConfig.Auth = ""
configFile.Configs[k] = authConfig
}
}
origAuth := strings.Split(arr[0], " = ")
origEmail := strings.Split(arr[1], " = ")
authConfig, err := decodeAuth(origAuth[1])
if err != nil {
return nil, err
}
authConfig.Email = origEmail[1]
authConfig.rootPath = rootPath
return authConfig, nil
return &configFile, nil
}
// save the auth config
func SaveConfig(authConfig *AuthConfig) error {
confFile := path.Join(authConfig.rootPath, CONFIGFILE)
if len(authConfig.Email) == 0 {
func SaveConfig(configFile *ConfigFile) error {
confFile := path.Join(configFile.rootPath, CONFIGFILE)
if len(configFile.Configs) == 0 {
os.Remove(confFile)
return nil
}
lines := "auth = " + encodeAuth(authConfig) + "\n" + "email = " + authConfig.Email + "\n"
b := []byte(lines)
err := ioutil.WriteFile(confFile, b, 0600)
for k, authConfig := range configFile.Configs {
authConfig.Auth = encodeAuth(&authConfig)
authConfig.Username = ""
authConfig.Password = ""
configFile.Configs[k] = authConfig
}
b, err := json.Marshal(configFile.Configs)
if err != nil {
return err
}
err = ioutil.WriteFile(confFile, b, 0600)
if err != nil {
return err
}
@ -116,8 +135,7 @@ func SaveConfig(authConfig *AuthConfig) error {
}
// try to register/login to the registry server
func Login(authConfig *AuthConfig, store bool) (string, error) {
storeConfig := false
func Login(authConfig *AuthConfig) (string, error) {
client := &http.Client{}
reqStatusCode := 0
var status string
@ -143,7 +161,6 @@ func Login(authConfig *AuthConfig, store bool) (string, error) {
if reqStatusCode == 201 {
status = "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
storeConfig = true
} else if reqStatusCode == 403 {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please check your e-mail for a confirmation link.")
@ -162,14 +179,7 @@ func Login(authConfig *AuthConfig, store bool) (string, error) {
}
if resp.StatusCode == 200 {
status = "Login Succeeded"
storeConfig = true
} else if resp.StatusCode == 401 {
if store {
authConfig.Email = ""
if err := SaveConfig(authConfig); err != nil {
return "", err
}
}
return "", fmt.Errorf("Wrong login/password, please try again")
} else {
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
@ -181,10 +191,5 @@ func Login(authConfig *AuthConfig, store bool) (string, error) {
} else {
return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
}
if storeConfig && store {
if err := SaveConfig(authConfig); err != nil {
return "", err
}
}
return status, nil
}


@ -11,7 +11,9 @@ import (
func TestEncodeAuth(t *testing.T) {
newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
authStr := encodeAuth(newAuthConfig)
decAuthConfig, err := decodeAuth(authStr)
decAuthConfig := &AuthConfig{}
var err error
decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr)
if err != nil {
t.Fatal(err)
}
@ -29,8 +31,8 @@ func TestEncodeAuth(t *testing.T) {
func TestLogin(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
authConfig := NewAuthConfig("unittester", "surlautrerivejetattendrai", "noise+unittester@dotcloud.com", "/tmp")
status, err := Login(authConfig, false)
authConfig := &AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"}
status, err := Login(authConfig)
if err != nil {
t.Fatal(err)
}
@ -49,8 +51,8 @@ func TestCreateAccount(t *testing.T) {
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
authConfig := NewAuthConfig(username, "test42", "docker-ut+"+token+"@example.com", "/tmp")
status, err := Login(authConfig, false)
authConfig := &AuthConfig{Username: username, Password: "test42", Email: "docker-ut+"+token+"@example.com"}
status, err := Login(authConfig)
if err != nil {
t.Fatal(err)
}
@ -60,7 +62,7 @@ func TestCreateAccount(t *testing.T) {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
status, err = Login(authConfig, false)
status, err = Login(authConfig)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}


@ -7,6 +7,7 @@ import (
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"net/url"
"os"
"path"
"reflect"
@ -28,8 +29,8 @@ type buildFile struct {
maintainer string
config *Config
context string
verbose bool
lastContainer *Container
tmpContainers map[string]struct{}
tmpImages map[string]struct{}
@ -201,6 +202,24 @@ func (b *buildFile) addRemote(container *Container, orig, dest string) error {
}
defer file.Body.Close()
// If the destination is a directory, figure out the filename.
if strings.HasSuffix(dest, "/") {
u, err := url.Parse(orig)
if err != nil {
return err
}
path := u.Path
if strings.HasSuffix(path, "/") {
path = path[:len(path)-1]
}
parts := strings.Split(path, "/")
filename := parts[len(parts)-1]
if filename == "" {
return fmt.Errorf("cannot determine filename from url: %s", u)
}
dest = dest + filename
}
return container.Inject(file.Body, dest)
}
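The new filename logic can be exercised on its own: when the ADD destination ends with "/", the last path segment of the source URL (after stripping a trailing slash) becomes the filename. A minimal sketch with a placeholder URL and destination:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    // Placeholder URL; mirrors the logic used when ADD's destination ends with "/".
    u, err := url.Parse("http://example.com/files/baz/")
    if err != nil {
        panic(err)
    }
    p := strings.TrimSuffix(u.Path, "/")
    parts := strings.Split(p, "/")
    filename := parts[len(parts)-1]
    if filename == "" {
        fmt.Println("cannot determine filename from url:", u)
        return
    }
    dest := "/usr/lib/baz/" // placeholder ADD destination ending in "/"
    fmt.Println(dest + filename) // -> /usr/lib/baz/baz
}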
@ -208,7 +227,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
origPath := path.Join(b.context, orig)
destPath := path.Join(container.RootfsPath(), dest)
// Preserve the trailing '/'
if dest[len(dest)-1] == '/' {
if strings.HasSuffix(dest, "/") {
destPath = destPath + "/"
}
fi, err := os.Stat(origPath)
@ -254,7 +273,6 @@ func (b *buildFile) CmdAdd(args string) error {
return err
}
b.tmpContainers[container.ID] = struct{}{}
b.lastContainer = container
if err := container.EnsureMounted(); err != nil {
return err
@ -290,7 +308,6 @@ func (b *buildFile) run() (string, error) {
return "", err
}
b.tmpContainers[c.ID] = struct{}{}
b.lastContainer = c
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
// override the entry point that may have been picked up from the base image
@ -303,6 +320,13 @@ func (b *buildFile) run() (string, error) {
return "", err
}
if b.verbose {
err = <-c.Attach(nil, nil, b.out, b.out)
if err != nil {
return "", err
}
}
// Wait for it to finish
if ret := c.Wait(); ret != 0 {
return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, ret)
@ -337,7 +361,6 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
return err
}
b.tmpContainers[container.ID] = struct{}{}
b.lastContainer = container
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
id = container.ID
if err := container.EnsureMounted(); err != nil {
@ -365,29 +388,6 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
}
func (b *buildFile) Build(context io.Reader) (string, error) {
defer func() {
// If we have an error and a container, then display the logs
if b.lastContainer != nil {
fmt.Fprintf(b.out, "******** Logs from last container (%s) *******\n", b.lastContainer.ShortID())
cLog, err := b.lastContainer.ReadLog("stdout")
if err != nil {
utils.Debugf("Error reading logs (stdout): %s", err)
}
if _, err := io.Copy(b.out, cLog); err != nil {
utils.Debugf("Error streaming logs (stdout): %s", err)
}
cLog, err = b.lastContainer.ReadLog("stderr")
if err != nil {
utils.Debugf("Error reading logs (stderr): %s", err)
}
if _, err := io.Copy(b.out, cLog); err != nil {
utils.Debugf("Error streaming logs (stderr): %s", err)
}
fmt.Fprintf(b.out, "************* End of logs for %s *************\n", b.lastContainer.ShortID())
}
}()
// FIXME: @creack any reason for using /tmp instead of ""?
// FIXME: @creack "name" is a terrible variable name
name, err := ioutil.TempDir("/tmp", "docker-build")
@ -440,7 +440,6 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
return "", ret.(error)
}
b.lastContainer = nil
fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
}
if b.image != "" {
@ -450,7 +449,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
return "", fmt.Errorf("An error occured during the build\n")
}
func NewBuildFile(srv *Server, out io.Writer) BuildFile {
func NewBuildFile(srv *Server, out io.Writer, verbose bool) BuildFile {
return &buildFile{
builder: NewBuilder(srv.runtime),
runtime: srv.runtime,
@ -459,5 +458,6 @@ func NewBuildFile(srv *Server, out io.Writer) BuildFile {
out: out,
tmpContainers: make(map[string]struct{}),
tmpImages: make(map[string]struct{}),
verbose: verbose,
}
}


@ -3,13 +3,17 @@ package docker
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// mkTestContext generates a build context from the contents of the provided dockerfile.
// This context is suitable for use as an argument to BuildFile.Build()
func mkTestContext(dockerfile string, files [][2]string, t *testing.T) Archive {
context, err := mkBuildContext(fmt.Sprintf(dockerfile, unitTestImageID), files)
context, err := mkBuildContext(dockerfile, files)
if err != nil {
t.Fatal(err)
}
@ -22,6 +26,8 @@ type testContextTemplate struct {
dockerfile string
// Additional files in the context, eg [][2]string{"./passwd", "gordon"}
files [][2]string
// Additional remote files to host on a local HTTP server.
remoteFiles [][2]string
}
// A table of all the contexts to build and test.
@ -29,27 +35,31 @@ type testContextTemplate struct {
var testContexts = []testContextTemplate{
{
`
from %s
from {IMAGE}
run sh -c 'echo root:testpass > /tmp/passwd'
run mkdir -p /var/run/sshd
run [ "$(cat /tmp/passwd)" = "root:testpass" ]
run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
`,
nil,
nil,
},
{
`
from %s
from {IMAGE}
add foo /usr/lib/bla/bar
run [ "$(cat /usr/lib/bla/bar)" = 'hello world!' ]
run [ "$(cat /usr/lib/bla/bar)" = 'hello' ]
add http://{SERVERADDR}/baz /usr/lib/baz/quux
run [ "$(cat /usr/lib/baz/quux)" = 'world!' ]
`,
[][2]string{{"foo", "hello world!"}},
[][2]string{{"foo", "hello"}},
[][2]string{{"/baz", "world!"}},
},
{
`
from %s
from {IMAGE}
add f /
run [ "$(cat /f)" = "hello" ]
add f /abc
@ -71,57 +81,93 @@ run [ "$(cat /somewheeeere/over/the/rainbooow/ga)" = "bu" ]
{"f", "hello"},
{"d/ga", "bu"},
},
nil,
},
{
`
from %s
from {IMAGE}
add http://{SERVERADDR}/x /a/b/c
run [ "$(cat /a/b/c)" = "hello" ]
add http://{SERVERADDR}/x?foo=bar /
run [ "$(cat /x)" = "hello" ]
add http://{SERVERADDR}/x /d/
run [ "$(cat /d/x)" = "hello" ]
add http://{SERVERADDR} /e
run [ "$(cat /e)" = "blah" ]
`,
nil,
[][2]string{{"/x", "hello"}, {"/", "blah"}},
},
{
`
from {IMAGE}
env FOO BAR
run [ "$FOO" = "BAR" ]
`,
nil,
},
{
`
from %s
ENTRYPOINT /bin/echo
CMD Hello world
`,
nil,
},
{
`
from %s
from {IMAGE}
ENTRYPOINT /bin/echo
CMD Hello world
`,
nil,
nil,
},
{
`
from {IMAGE}
VOLUME /test
CMD Hello world
`,
nil,
nil,
},
}
// FIXME: test building with 2 successive overlapping ADD commands
func constructDockerfile(template string, ip net.IP, port string) string {
serverAddr := fmt.Sprintf("%s:%s", ip, port)
replacer := strings.NewReplacer("{IMAGE}", unitTestImageID, "{SERVERADDR}", serverAddr)
return replacer.Replace(template)
}
func mkTestingFileServer(files [][2]string) (*httptest.Server, error) {
mux := http.NewServeMux()
for _, file := range files {
name, contents := file[0], file[1]
mux.HandleFunc(name, func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(contents))
})
}
// This is how httptest.NewServer sets up a net.Listener, except that our listener must accept remote
// connections (from the container).
listener, err := net.Listen("tcp", ":0")
if err != nil {
return nil, err
}
s := httptest.NewUnstartedServer(mux)
s.Listener = listener
s.Start()
return s, nil
}
func TestBuild(t *testing.T) {
for _, ctx := range testContexts {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
buildfile := NewBuildFile(srv, ioutil.Discard)
if _, err := buildfile.Build(mkTestContext(ctx.dockerfile, ctx.files, t)); err != nil {
t.Fatal(err)
}
buildImage(ctx, t)
}
}
func TestVolume(t *testing.T) {
func buildImage(context testContextTemplate, t *testing.T) *Image {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
@ -134,25 +180,110 @@ func TestVolume(t *testing.T) {
pushingPool: make(map[string]struct{}),
}
buildfile := NewBuildFile(srv, ioutil.Discard)
imgId, err := buildfile.Build(mkTestContext(`
from %s
VOLUME /test
CMD Hello world
`, nil, t))
httpServer, err := mkTestingFileServer(context.remoteFiles)
if err != nil {
t.Fatal(err)
}
img, err := srv.ImageInspect(imgId)
defer httpServer.Close()
idx := strings.LastIndex(httpServer.URL, ":")
if idx < 0 {
t.Fatalf("could not get port from test http server address %s", httpServer.URL)
}
port := httpServer.URL[idx+1:]
ip := runtime.networkManager.bridgeNetwork.IP
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false)
id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err != nil {
t.Fatal(err)
}
img, err := srv.ImageInspect(id)
if err != nil {
t.Fatal(err)
}
return img
}
func TestVolume(t *testing.T) {
img := buildImage(testContextTemplate{`
from {IMAGE}
volume /test
cmd Hello world
`, nil, nil}, t)
if len(img.Config.Volumes) == 0 {
t.Fail()
}
for key, _ := range img.Config.Volumes {
for key := range img.Config.Volumes {
if key != "/test" {
t.Fail()
}
}
}
func TestBuildMaintainer(t *testing.T) {
img := buildImage(testContextTemplate{`
from {IMAGE}
maintainer dockerio
`, nil, nil}, t)
if img.Author != "dockerio" {
t.Fail()
}
}
func TestBuildEnv(t *testing.T) {
img := buildImage(testContextTemplate{`
from {IMAGE}
env port 4243
`,
nil, nil}, t)
if img.Config.Env[0] != "port=4243" {
t.Fail()
}
}
func TestBuildCmd(t *testing.T) {
img := buildImage(testContextTemplate{`
from {IMAGE}
cmd ["/bin/echo", "Hello World"]
`,
nil, nil}, t)
if img.Config.Cmd[0] != "/bin/echo" {
t.Log(img.Config.Cmd[0])
t.Fail()
}
if img.Config.Cmd[1] != "Hello World" {
t.Log(img.Config.Cmd[1])
t.Fail()
}
}
func TestBuildExpose(t *testing.T) {
img := buildImage(testContextTemplate{`
from {IMAGE}
expose 4243
`,
nil, nil}, t)
if img.Config.PortSpecs[0] != "4243" {
t.Fail()
}
}
func TestBuildEntrypoint(t *testing.T) {
img := buildImage(testContextTemplate{`
from {IMAGE}
entrypoint ["/bin/echo"]
`,
nil, nil}, t)
if img.Config.Entrypoint[0] != "/bin/echo" {
}
}


@ -27,7 +27,7 @@ import (
"unicode"
)
const VERSION = "0.4.8"
const VERSION = "0.5.0-dev"
var (
GITCOMMIT string
@ -78,6 +78,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"build", "Build a container from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
{"diff", "Inspect changes on a container's filesystem"},
{"events", "Get real time events from the server"},
{"export", "Stream the contents of a container as a tar archive"},
{"history", "Show the history of an image"},
{"images", "List images"},
@ -94,8 +95,8 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
{"restart", "Restart a running container"},
{"rm", "Remove a container"},
{"rmi", "Remove an image"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"search", "Search for an image in the docker index"},
{"start", "Start a stopped container"},
@ -158,6 +159,8 @@ func mkBuildContext(dockerfile string, files [][2]string) (Archive, error) {
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
tag := cmd.String("t", "", "Tag to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool("q", false, "Suppress verbose build output")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -183,6 +186,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
} else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
isRemote = true
} else {
if _, err := os.Stat(cmd.Arg(0)); err != nil {
return err
}
context, err = Tar(cmd.Arg(0), Uncompressed)
}
var body io.Reader
@ -195,6 +201,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
// Upload the build context
v := &url.Values{}
v.Set("t", *tag)
if *suppressOutput {
v.Set("q", "1")
}
if isRemote {
v.Set("remote", cmd.Arg(0))
}
@ -304,16 +314,21 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
email string
)
authconfig, ok := cli.configFile.Configs[auth.IndexServerAddress()]
if !ok {
authconfig = auth.AuthConfig{}
}
if *flUsername == "" {
fmt.Fprintf(cli.out, "Username (%s): ", cli.authConfig.Username)
fmt.Fprintf(cli.out, "Username (%s): ", authconfig.Username)
username = readAndEchoString(cli.in, cli.out)
if username == "" {
username = cli.authConfig.Username
username = authconfig.Username
}
} else {
username = *flUsername
}
if username != cli.authConfig.Username {
if username != authconfig.Username {
if *flPassword == "" {
fmt.Fprintf(cli.out, "Password: ")
password = readString(cli.in, cli.out)
@ -325,31 +340,30 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
}
if *flEmail == "" {
fmt.Fprintf(cli.out, "Email (%s): ", cli.authConfig.Email)
fmt.Fprintf(cli.out, "Email (%s): ", authconfig.Email)
email = readAndEchoString(cli.in, cli.out)
if email == "" {
email = cli.authConfig.Email
email = authconfig.Email
}
} else {
email = *flEmail
}
} else {
password = cli.authConfig.Password
email = cli.authConfig.Email
password = authconfig.Password
email = authconfig.Email
}
if oldState != nil {
term.RestoreTerminal(cli.terminalFd, oldState)
}
cli.authConfig.Username = username
cli.authConfig.Password = password
cli.authConfig.Email = email
authconfig.Username = username
authconfig.Password = password
authconfig.Email = email
cli.configFile.Configs[auth.IndexServerAddress()] = authconfig
body, statusCode, err := cli.call("POST", "/auth", cli.authConfig)
body, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[auth.IndexServerAddress()])
if statusCode == 401 {
cli.authConfig.Username = ""
cli.authConfig.Password = ""
cli.authConfig.Email = ""
auth.SaveConfig(cli.authConfig)
delete(cli.configFile.Configs, auth.IndexServerAddress())
auth.SaveConfig(cli.configFile)
return err
}
if err != nil {
@ -359,10 +373,10 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
var out2 APIAuth
err = json.Unmarshal(body, &out2)
if err != nil {
auth.LoadConfig(os.Getenv("HOME"))
cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME"))
return err
}
auth.SaveConfig(cli.authConfig)
auth.SaveConfig(cli.configFile)
if out2.Status != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Status)
}
@ -457,6 +471,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "Fds: %d\n", out.NFd)
fmt.Fprintf(cli.out, "Goroutines: %d\n", out.NGoroutines)
fmt.Fprintf(cli.out, "EventsListeners: %d\n", out.NEventsListener)
}
if !out.MemoryLimit {
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
@ -469,7 +484,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
func (cli *DockerCli) CmdStop(args ...string) error {
cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
nSeconds := cmd.Int("t", 10, "Number of seconds to wait for the container to stop before killing it.")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -494,7 +509,7 @@ func (cli *DockerCli) CmdStop(args ...string) error {
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
nSeconds := cmd.Int("t", 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -638,7 +653,7 @@ func (cli *DockerCli) CmdPort(args ...string) error {
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
cmd := Subcmd("rmi", "IMAGE [IMAGE...]", "Remove an image")
cmd := Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -703,7 +718,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
}
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove a container")
cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
v := cmd.Bool("v", false, "Remove the volumes associated to the container")
if err := cmd.Parse(args); err != nil {
return nil
@ -773,7 +788,7 @@ func (cli *DockerCli) CmdImport(args ...string) error {
}
func (cli *DockerCli) CmdPush(args ...string) error {
cmd := Subcmd("push", "[OPTION] NAME", "Push an image or a repository to the registry")
cmd := Subcmd("push", "NAME", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -793,10 +808,10 @@ func (cli *DockerCli) CmdPush(args ...string) error {
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", cli.authConfig.Username, name)
return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", cli.configFile.Configs[auth.IndexServerAddress()].Username, name)
}
buf, err := json.Marshal(cli.authConfig)
buf, err := json.Marshal(cli.configFile.Configs[auth.IndexServerAddress()])
if err != nil {
return err
}
@ -1046,6 +1061,29 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
return nil
}
func (cli *DockerCli) CmdEvents(args ...string) error {
cmd := Subcmd("events", "[OPTIONS]", "Get real time events from the server")
since := cmd.String("since", "", "Show events previously created (used for polling).")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
v := url.Values{}
if *since != "" {
v.Set("since", *since)
}
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdExport(args ...string) error {
cmd := Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive")
if err := cmd.Parse(args); err != nil {
@ -1099,10 +1137,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
return nil
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?logs=1&stdout=1", false, nil, cli.out); err != nil {
return err
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?logs=1&stderr=1", false, nil, cli.err); err != nil {
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?logs=1&stdout=1&stderr=1", false, nil, cli.out); err != nil {
return err
}
return nil
@ -1249,10 +1284,22 @@ func (opts PathOpts) String() string {
}
func (opts PathOpts) Set(val string) error {
if !filepath.IsAbs(val) {
return fmt.Errorf("%s is not an absolute path", val)
var containerPath string
splited := strings.SplitN(val, ":", 2)
if len(splited) == 1 {
containerPath = splited[0]
val = filepath.Clean(splited[0])
} else {
containerPath = splited[1]
val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1]))
}
opts[filepath.Clean(val)] = struct{}{}
if !filepath.IsAbs(containerPath) {
utils.Debugf("%s is not an absolute path", containerPath)
return fmt.Errorf("%s is not an absolute path", containerPath)
}
opts[val] = struct{}{}
return nil
}
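With this change -v covers what -b used to do: a bare path creates a volume inside the container, while host:container declares a bind mount, and only the container side has to be absolute. A standalone sketch of that parsing with hypothetical inputs:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func parseVolumeSpec(val string) (string, error) {
    var containerPath string
    parts := strings.SplitN(val, ":", 2)
    if len(parts) == 1 {
        // "-v /container": an anonymous volume inside the container.
        containerPath = parts[0]
        val = filepath.Clean(parts[0])
    } else {
        // "-v /host:/container": a bind mount from the host.
        containerPath = parts[1]
        val = fmt.Sprintf("%s:%s", parts[0], filepath.Clean(parts[1]))
    }
    if !filepath.IsAbs(containerPath) {
        return "", fmt.Errorf("%s is not an absolute path", containerPath)
    }
    return val, nil
}

func main() {
    for _, spec := range []string{"/data", "/tmp/on-host:/data", "relative"} {
        cleaned, err := parseVolumeSpec(spec)
        fmt.Println(spec, "->", cleaned, err)
    }
}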
@ -1293,6 +1340,18 @@ func (cli *DockerCli) CmdRun(args ...string) error {
return nil
}
var containerIDFile *os.File
if len(hostConfig.ContainerIDFile) > 0 {
if _, err := ioutil.ReadFile(hostConfig.ContainerIDFile); err == nil {
return fmt.Errorf("cid file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile)
}
containerIDFile, err = os.Create(hostConfig.ContainerIDFile)
if err != nil {
return fmt.Errorf("failed to create the container ID file: %s", err)
}
defer containerIDFile.Close()
}
//create the container
body, statusCode, err := cli.call("POST", "/containers/create", config)
//if image not found try to pull it
@ -1323,6 +1382,11 @@ func (cli *DockerCli) CmdRun(args ...string) error {
for _, warning := range runResult.Warnings {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
if len(hostConfig.ContainerIDFile) > 0 {
if _, err = containerIDFile.WriteString(runResult.ID); err != nil {
return fmt.Errorf("failed to write the container ID to the file: %s", err)
}
}
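Taken together, the two -cidfile hunks amount to: refuse to start if the file already has readable contents, create it before the container is created, and write the new ID once creation succeeds. A self-contained sketch of that sequence with a placeholder path and ID:

package main

import (
    "fmt"
    "io/ioutil"
    "os"
)

func writeCidFile(path, containerID string) error {
    // Refuse to reuse a cidfile that already exists (mirrors the check in CmdRun).
    if _, err := ioutil.ReadFile(path); err == nil {
        return fmt.Errorf("cid file found, make sure the other container isn't running or delete %s", path)
    }
    f, err := os.Create(path)
    if err != nil {
        return fmt.Errorf("failed to create the container ID file: %s", err)
    }
    defer f.Close()
    if _, err := f.WriteString(containerID); err != nil {
        return fmt.Errorf("failed to write the container ID to the file: %s", err)
    }
    return nil
}

func main() {
    // Placeholder path and ID.
    if err := writeCidFile("/tmp/example.cid", "0123456789ab"); err != nil {
        fmt.Println(err)
    }
}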
//start the container
if _, _, err = cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig); err != nil {
@ -1375,11 +1439,11 @@ func (cli *DockerCli) CmdRun(args ...string) error {
func (cli *DockerCli) checkIfLogged(action string) error {
// If condition AND the login failed
if cli.authConfig.Username == "" {
if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
if err := cli.CmdLogin(""); err != nil {
return err
}
if cli.authConfig.Username == "" {
if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
return fmt.Errorf("Please login prior to %s. ('docker login')", action)
}
}
@ -1474,19 +1538,13 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer) e
if resp.Header.Get("Content-Type") == "application/json" {
dec := json.NewDecoder(resp.Body)
for {
var m utils.JSONMessage
if err := dec.Decode(&m); err == io.EOF {
var jm utils.JSONMessage
if err := dec.Decode(&jm); err == io.EOF {
break
} else if err != nil {
return err
}
if m.Progress != "" {
fmt.Fprintf(out, "%s %s\r", m.Status, m.Progress)
} else if m.Error != "" {
return fmt.Errorf(m.Error)
} else {
fmt.Fprintf(out, "%s\n", m.Status)
}
jm.Display(out)
}
} else {
if _, err := io.Copy(out, resp.Body); err != nil {
@ -1635,11 +1693,11 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *Doc
err = out
}
authConfig, _ := auth.LoadConfig(os.Getenv("HOME"))
configFile, _ := auth.LoadConfig(os.Getenv("HOME"))
return &DockerCli{
proto: proto,
addr: addr,
authConfig: authConfig,
configFile: configFile,
in: in,
out: out,
err: err,
@ -1651,7 +1709,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *Doc
type DockerCli struct {
proto string
addr string
authConfig *auth.AuthConfig
configFile *auth.ConfigFile
in io.ReadCloser
out io.Writer
err io.Writer


@ -38,7 +38,7 @@ func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
f()
c <- false
}()
if <-c {
if <-c && msg != "" {
t.Fatal(msg)
}
}
@ -59,7 +59,6 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
return nil
}
// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
func TestRunHostname(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
@ -91,7 +90,6 @@ func TestRunHostname(t *testing.T) {
}
// TestAttachStdin checks attaching to stdin without stdout and stderr.
// 'docker run -i -a stdin' should send the client's stdin to the command,
// then detach from it and print the container id.
@ -144,15 +142,17 @@ func TestRunAttachStdin(t *testing.T) {
})
// Check logs
if cmdLogs, err := container.ReadLog("stdout"); err != nil {
if cmdLogs, err := container.ReadLog("json"); err != nil {
t.Fatal(err)
} else {
if output, err := ioutil.ReadAll(cmdLogs); err != nil {
t.Fatal(err)
} else {
expectedLog := "hello\nhi there\n"
if string(output) != expectedLog {
t.Fatalf("Unexpected logs: should be '%s', not '%s'\n", expectedLog, output)
expectedLogs := []string{"{\"log\":\"hello\\n\",\"stream\":\"stdout\"", "{\"log\":\"hi there\\n\",\"stream\":\"stdout\""}
for _, expectedLog := range expectedLogs {
if !strings.Contains(string(output), expectedLog) {
t.Fatalf("Unexpected logs: should contains '%s', it is not '%s'\n", expectedLog, output)
}
}
}
}
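Container output is now written to a single per-container "json" log where every line is a JSON object carrying the payload and its stream, which is what the substrings above assert. A tiny sketch producing one such record (field names taken from the expected strings in the test):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    record := map[string]string{
        "log":    "hello\n",
        "stream": "stdout",
    }
    b, _ := json.Marshal(record)
    fmt.Println(string(b)) // {"log":"hello\n","stream":"stdout"}
}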


@ -58,29 +58,31 @@ type Container struct {
}
type Config struct {
Hostname string
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
CpuShares int64 // CPU shares (relative weight vs. other containers)
AttachStdin bool
AttachStdout bool
AttachStderr bool
PortSpecs []string
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
Entrypoint []string
Hostname string
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
CpuShares int64 // CPU shares (relative weight vs. other containers)
AttachStdin bool
AttachStdout bool
AttachStderr bool
PortSpecs []string
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
Entrypoint []string
NetworkDisabled bool
}
type HostConfig struct {
Binds []string
Binds []string
ContainerIDFile string
}
type BindMap struct {
@ -93,6 +95,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
cmd.Usage = nil
}
flHostname := cmd.String("h", "", "Container host name")
@ -103,6 +106,8 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
flNetwork := cmd.Bool("n", true, "Enable networking for this container")
if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
@ -121,14 +126,11 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Attach a data volume")
cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
var flBinds ListOpts
cmd.Var(&flBinds, "b", "Bind mount a volume from the host (e.g. -b /host:/container)")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
@ -146,11 +148,17 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
}
}
var binds []string
// add any bind targets to the list of container volumes
for _, bind := range flBinds {
for bind := range flVolumes {
arr := strings.Split(bind, ":")
dstDir := arr[1]
flVolumes[dstDir] = struct{}{}
if len(arr) > 1 {
dstDir := arr[1]
flVolumes[dstDir] = struct{}{}
binds = append(binds, bind)
delete(flVolumes, bind)
}
}
parsedArgs := cmd.Args()
@ -168,26 +176,28 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
}
config := &Config{
Hostname: *flHostname,
PortSpecs: flPorts,
User: *flUser,
Tty: *flTty,
OpenStdin: *flStdin,
Memory: *flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: *flVolumesFrom,
Entrypoint: entrypoint,
Hostname: *flHostname,
PortSpecs: flPorts,
User: *flUser,
Tty: *flTty,
NetworkDisabled: !*flNetwork,
OpenStdin: *flStdin,
Memory: *flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: *flVolumesFrom,
Entrypoint: entrypoint,
}
hostConfig := &HostConfig{
Binds: flBinds,
Binds: binds,
ContainerIDFile: *flContainerIDFile,
}
if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
@ -493,6 +503,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
func (container *Container) Start(hostConfig *HostConfig) error {
container.State.Lock()
defer container.State.Unlock()
if len(hostConfig.Binds) == 0 {
hostConfig, _ = container.ReadHostConfig()
}
@ -503,8 +514,12 @@ func (container *Container) Start(hostConfig *HostConfig) error {
if err := container.EnsureMounted(); err != nil {
return err
}
if err := container.allocateNetwork(); err != nil {
return err
if container.runtime.networkManager.disabled {
container.Config.NetworkDisabled = true
} else {
if err := container.allocateNetwork(); err != nil {
return err
}
}
// Make sure the config is compatible with the current kernel
@ -516,8 +531,6 @@ func (container *Container) Start(hostConfig *HostConfig) error {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
container.Volumes = make(map[string]string)
container.VolumesRW = make(map[string]bool)
// Create the requested bind mounts
binds := make(map[string]BindMap)
@ -557,30 +570,35 @@ func (container *Container) Start(hostConfig *HostConfig) error {
// FIXME: evaluate volumes-from before individual volumes, so that the latter can override the former.
// Create the requested volumes
for volPath := range container.Config.Volumes {
volPath = path.Clean(volPath)
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
container.Volumes[volPath] = bindMap.SrcPath
if strings.ToLower(bindMap.Mode) == "rw" {
container.VolumesRW[volPath] = true
if container.Volumes == nil || len(container.Volumes) == 0 {
container.Volumes = make(map[string]string)
container.VolumesRW = make(map[string]bool)
for volPath := range container.Config.Volumes {
volPath = path.Clean(volPath)
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
container.Volumes[volPath] = bindMap.SrcPath
if strings.ToLower(bindMap.Mode) == "rw" {
container.VolumesRW[volPath] = true
}
// Otherwise create a directory in $ROOT/volumes/ and use that
} else {
c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
if err != nil {
return err
}
srcPath, err := c.layer()
if err != nil {
return err
}
container.Volumes[volPath] = srcPath
container.VolumesRW[volPath] = true // RW by default
}
// Otherwise create a directory in $ROOT/volumes/ and use that
} else {
c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
if err != nil {
return err
// Create the mountpoint
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return err
}
srcPath, err := c.layer()
if err != nil {
return err
}
container.Volumes[volPath] = srcPath
container.VolumesRW[volPath] = true // RW by default
}
// Create the mountpoint
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
}
@ -615,7 +633,9 @@ func (container *Container) Start(hostConfig *HostConfig) error {
}
// Networking
params = append(params, "-g", container.network.Gateway.String())
if !container.Config.NetworkDisabled {
params = append(params, "-g", container.network.Gateway.String())
}
// User
if container.Config.User != "" {
@ -630,6 +650,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
params = append(params,
"-e", "HOME=/",
"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"-e", "container=lxc",
)
for _, elem := range container.Config.Env {
@ -643,10 +664,10 @@ func (container *Container) Start(hostConfig *HostConfig) error {
container.cmd = exec.Command("lxc-start", params...)
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
return err
}
if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
return err
}
@ -705,17 +726,21 @@ func (container *Container) StdinPipe() (io.WriteCloser, error) {
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stdout.AddWriter(writer)
container.stdout.AddWriter(writer, "")
return utils.NewBufReader(reader), nil
}
func (container *Container) StderrPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stderr.AddWriter(writer)
container.stderr.AddWriter(writer, "")
return utils.NewBufReader(reader), nil
}
func (container *Container) allocateNetwork() error {
if container.Config.NetworkDisabled {
return nil
}
iface, err := container.runtime.networkManager.Allocate()
if err != nil {
return err
@ -742,6 +767,9 @@ func (container *Container) allocateNetwork() error {
}
func (container *Container) releaseNetwork() {
if container.Config.NetworkDisabled {
return
}
container.network.Release()
container.network = nil
container.NetworkSettings = &NetworkSettings{}
@ -777,7 +805,9 @@ func (container *Container) monitor() {
}
}
utils.Debugf("Process finished")
if container.runtime != nil && container.runtime.srv != nil {
container.runtime.srv.LogEvent("die", container.ShortID())
}
exitCode := -1
if container.cmd != nil {
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()


@ -39,16 +39,11 @@ func TestIDFormat(t *testing.T) {
func TestMultipleAttachRestart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c",
"i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
},
container, hostConfig, _ := mkContainer(
runtime,
[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
t,
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
// Simulate 3 client attaching to the container and stop/restart
@ -65,7 +60,6 @@ func TestMultipleAttachRestart(t *testing.T) {
if err != nil {
t.Fatal(err)
}
hostConfig := &HostConfig{}
if err := container.Start(hostConfig); err != nil {
t.Fatal(err)
}
@ -140,19 +134,8 @@ func TestMultipleAttachRestart(t *testing.T) {
func TestDiff(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
builder := NewBuilder(runtime)
// Create a container and remove a file
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/rm", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
defer runtime.Destroy(container1)
if err := container1.Run(); err != nil {
@ -185,15 +168,7 @@ func TestDiff(t *testing.T) {
}
// Create a new container from the commited image
container2, err := builder.Create(
&Config{
Image: img.ID,
Cmd: []string{"cat", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
defer runtime.Destroy(container2)
if err := container2.Run(); err != nil {
@ -212,15 +187,7 @@ func TestDiff(t *testing.T) {
}
// Create a new container
container3, err := builder.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"rm", "/bin/httpd"},
},
)
if err != nil {
t.Fatal(err)
}
container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
defer runtime.Destroy(container3)
if err := container3.Run(); err != nil {
@ -246,17 +213,7 @@ func TestDiff(t *testing.T) {
func TestCommitAutoRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
t.Fatal(err)
}
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1)
if container1.State.Running {
@ -279,14 +236,7 @@ func TestCommitAutoRun(t *testing.T) {
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := builder.Create(
&Config{
Image: img.ID,
},
)
if err != nil {
t.Fatal(err)
}
container2, hostConfig, _ := mkContainer(runtime, []string{img.ID}, t)
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
@ -296,7 +246,6 @@ func TestCommitAutoRun(t *testing.T) {
if err != nil {
t.Fatal(err)
}
hostConfig := &HostConfig{}
if err := container2.Start(hostConfig); err != nil {
t.Fatal(err)
}
@ -324,17 +273,7 @@ func TestCommitRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
t.Fatal(err)
}
container1, hostConfig, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1)
if container1.State.Running {
@ -357,16 +296,7 @@ func TestCommitRun(t *testing.T) {
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := builder.Create(
&Config{
Image: img.ID,
Cmd: []string{"cat", "/world"},
},
)
if err != nil {
t.Fatal(err)
}
container2, hostConfig, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
@ -376,7 +306,6 @@ func TestCommitRun(t *testing.T) {
if err != nil {
t.Fatal(err)
}
hostConfig := &HostConfig{}
if err := container2.Start(hostConfig); err != nil {
t.Fatal(err)
}
@ -403,18 +332,7 @@ func TestCommitRun(t *testing.T) {
func TestStart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).ID,
Memory: 33554432,
CpuShares: 1000,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
container, hostConfig, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
defer runtime.Destroy(container)
cStdin, err := container.StdinPipe()
@ -422,7 +340,6 @@ func TestStart(t *testing.T) {
t.Fatal(err)
}
hostConfig := &HostConfig{}
if err := container.Start(hostConfig); err != nil {
t.Fatal(err)
}
@ -445,15 +362,7 @@ func TestStart(t *testing.T) {
func TestRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container)
if container.State.Running {
@ -1050,6 +959,7 @@ func TestEnv(t *testing.T) {
goodEnv := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOME=/",
"container=lxc",
}
sort.Strings(goodEnv)
if len(goodEnv) != len(actualEnv) {
@ -1231,19 +1141,18 @@ func TestBindMounts(t *testing.T) {
writeFile(path.Join(tmpDir, "touch-me"), "", t)
// Test reading from a read-only bind mount
stdout, _ := runContainer(r, []string{"-b", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t)
stdout, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t)
if !strings.Contains(stdout, "touch-me") {
t.Fatal("Container failed to read from bind mount")
}
// test writing to bind mount
runContainer(r, []string{"-b", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t)
runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t)
readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
// test mounting to an illegal destination directory
if _, err := runContainer(r, []string{"-b", fmt.Sprintf("%s:.", tmpDir), "ls", "."}, nil); err == nil {
if _, err := runContainer(r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "ls", "."}, nil); err == nil {
t.Fatal("Container bind mounted illegal directory")
}
}
@ -1300,3 +1209,84 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
t.Fail()
}
}
// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
func TestRestartWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
for key := range container.Config.Volumes {
if key != "/test" {
t.Fail()
}
}
_, err = container.Output()
if err != nil {
t.Fatal(err)
}
expected := container.Volumes["/test"]
if expected == "" {
t.Fail()
}
// Run the container again to verify the volume path persists
_, err = container.Output()
if err != nil {
t.Fatal(err)
}
actual := container.Volumes["/test"]
if expected != actual {
t.Fatalf("Expected volume path: %s Actual path: %s", expected, actual)
}
}
func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
config, hc, _, err := ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
if err != nil {
t.Fatal(err)
}
c, err := NewBuilder(runtime).Create(config)
if err != nil {
t.Fatal(err)
}
stdout, err := c.StdoutPipe()
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
if err := c.Start(hc); err != nil {
t.Fatal(err)
}
c.WaitTimeout(500 * time.Millisecond)
c.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
interfaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(string(output), -1)
if len(interfaces) != 1 {
t.Fatalf("Wrong interface count in test container: expected [1: lo], got [%s]", interfaces)
}
if interfaces[0] != "1: lo" {
t.Fatalf("Wrong interface in test container: expected [1: lo], got [%s]", interfaces)
}
}
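For reference, here is a tiny standalone sketch of the interface-counting check used in the test above, run against a made-up `ip addr show` transcript (the sample text is an assumption, not output captured from a real container):
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical `ip addr show` output with two interfaces.
	sample := "1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536\n" +
		"    inet 127.0.0.1/8 scope host lo\n" +
		"2: eth0: <BROADCAST,MULTICAST,UP> mtu 1500\n" +
		"    inet 172.16.42.2/24 scope global eth0\n"

	// Same pattern as the test: one match per interface header line.
	ifaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(sample, -1)
	fmt.Println(ifaces) // [1: lo 2: eth0], two interfaces, so this sample would fail the -n=false check
}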

View file

@ -28,7 +28,7 @@ func main() {
flDaemon := flag.Bool("d", false, "Daemon mode")
flDebug := flag.Bool("D", false, "Debug mode")
flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge")
bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge. Use 'none' to disable container networking")
pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
flGraphPath := flag.String("g", "/var/lib/docker", "Path to graph storage base dir.")
flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")

View file

@ -2,6 +2,9 @@
:description: API Documentation for Docker
:keywords: API, Docker, rcli, REST, documentation
.. COMMENT use http://pythonhosted.org/sphinxcontrib-httpdomain/ to
.. document the REST API.
=================
Docker Remote API
=================
@ -13,15 +16,23 @@ Docker Remote API
- The Remote API is replacing rcli
- Default port in the docker daemon is 4243
- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr
- Since API version 1.2, the auth configuration is now handled client side, so the client has to send the authConfig as POST in /images/(name)/push
- The API tends to be REST, but for some complex commands, like attach
or pull, the HTTP connection is hijacked to transport stdout stdin
and stderr
- Since API version 1.2, the auth configuration is now handled client
side, so the client has to send the authConfig as POST in
/images/(name)/push
2. Versions
===========
The current verson of the API is 1.3
Calling /images/<name>/insert is the same as calling /v1.3/images/<name>/insert
You can still call an old version of the api using /v1.0/images/<name>/insert
The current version of the API is 1.3
Calling /images/<name>/insert is the same as calling
/v1.3/images/<name>/insert
You can still call an old version of the api using
/v1.0/images/<name>/insert
:doc:`docker_remote_api_v1.3`
*****************************
@ -29,19 +40,25 @@ You can still call an old version of the api using /v1.0/images/<name>/insert
What's new
----------
Listing processes (/top):
.. http:get:: /containers/(id)/top
- List the processes inside a container
**New!** List the processes running inside a container.
.. http:get:: /events:
**New!** Monitor docker's events via streaming or via polling
Builder (/build):
- Simplify the upload of the build context
- Simply stream a tarball instead of multipart upload with 4 intermediary buffers
- Simply stream a tarball instead of multipart upload with 4
intermediary buffers
- Simpler, less memory usage, less disk usage and faster
.. Note::
The /build improvements are not reverse-compatible. Pre 1.3 clients will break on /build.
.. Warning::
The /build improvements are not reverse-compatible. Pre 1.3 clients
will break on /build.
List containers (/containers/json):
@ -49,7 +66,8 @@ List containers (/containers/json):
Start containers (/containers/<id>/start):
- You can now pass host-specific configuration (e.g. bind mounts) in the POST body for start calls
- You can now pass host-specific configuration (e.g. bind mounts) in
the POST body for start calls
:doc:`docker_remote_api_v1.2`
*****************************
@ -60,14 +78,25 @@ What's new
----------
The auth configuration is now handled by the client.
The client should send it's authConfig as POST on each call of /images/(name)/push
.. http:get:: /auth is now deprecated
.. http:post:: /auth only checks the configuration but doesn't store it on the server
The client should send its authConfig as POST on each call of
/images/(name)/push
Deleting an image is now improved, will only untag the image if it has chidrens and remove all the untagged parents if has any.
.. http:get:: /auth
.. http:post:: /images/<name>/delete now returns a JSON with the list of images deleted/untagged
**Deprecated.**
.. http:post:: /auth
Only checks the configuration but doesn't store it on the server
Deleting an image is now improved: it will only untag the image if it
has children, and remove all the untagged parents if it has any.
.. http:post:: /images/<name>/delete
Now returns a JSON structure with the list of images
deleted/untagged.
:doc:`docker_remote_api_v1.1`
@ -82,7 +111,7 @@ What's new
.. http:post:: /images/(name)/insert
.. http:post:: /images/(name)/push
Uses json stream instead of HTML hijack, it looks like this:
Uses a JSON stream instead of an HTTP hijack; it looks like this:
.. sourcecode:: http

View file

@ -1,3 +1,8 @@
.. use orphan to suppress "WARNING: document isn't included in any toctree"
.. per http://sphinx-doc.org/markup/misc.html#file-wide-metadata
:orphan:
:title: Remote API v1.0
:description: API Documentation for Docker
:keywords: API, Docker, rcli, REST, documentation
@ -300,8 +305,8 @@ Start a container
:statuscode 500: server error
Stop a contaier
***************
Stop a container
****************
.. http:post:: /containers/(id)/stop

View file

@ -1,3 +1,7 @@
.. use orphan to suppress "WARNING: document isn't included in any toctree"
.. per http://sphinx-doc.org/markup/misc.html#file-wide-metadata
:orphan:
:title: Remote API v1.1
:description: API Documentation for Docker

View file

@ -1,3 +1,8 @@
.. use orphan to suppress "WARNING: document isn't included in any toctree"
.. per http://sphinx-doc.org/markup/misc.html#file-wide-metadata
:orphan:
:title: Remote API v1.2
:description: API Documentation for Docker
:keywords: API, Docker, rcli, REST, documentation

View file

@ -1,3 +1,8 @@
.. use orphan to suppress "WARNING: document isn't included in any toctree"
.. per http://sphinx-doc.org/markup/misc.html#file-wide-metadata
:orphan:
:title: Remote API v1.3
:description: API Documentation for Docker
:keywords: API, Docker, rcli, REST, documentation
@ -921,6 +926,7 @@ Build an image from Dockerfile via stdin
The Content-type header should be set to "application/tar".
:query t: tag to be applied to the resulting image in case of success
:query q: suppress verbose build output
:statuscode 200: no error
:statuscode 500: server error
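As a rough sketch of a client for this endpoint in Go: the port is the default 4243 mentioned above, while the versioned path, tag and context file name are assumptions for illustration only.

.. code-block:: go

    package main

    import (
        "fmt"
        "net/http"
        "os"
    )

    func main() {
        // Stream a pre-built tarball of the build context (file name is an assumption).
        ctx, err := os.Open("context.tar")
        if err != nil {
            panic(err)
        }
        defer ctx.Close()

        // POST it to /build, tagging the result via ?t= and setting the tar content type.
        req, err := http.NewRequest("POST", "http://localhost:4243/v1.3/build?t=myrepo/myimage", ctx)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/tar")

        res, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer res.Body.Close()
        fmt.Println("build returned:", res.Status)
    }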
@ -1053,6 +1059,36 @@ Create a new image from a container's changes
:statuscode 500: server error
Monitor Docker's events
***********************
.. http:get:: /events
Get events from docker, either in real time via streaming, or via polling (using `since`)
**Example request**:
.. sourcecode:: http
GET /events?since=1374067924
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{"status":"create","id":"dfdf82bd3881","time":1374067924}
{"status":"start","id":"dfdf82bd3881","time":1374067924}
{"status":"stop","id":"dfdf82bd3881","time":1374067966}
{"status":"destroy","id":"dfdf82bd3881","time":1374067970}
:query since: timestamp used for polling
:statuscode 200: no error
:statuscode 500: server error
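A minimal polling client might look like the sketch below (Go). The address uses the default port 4243, and the one-JSON-object-per-line framing mirrors the example response above; both are assumptions rather than guarantees.

.. code-block:: go

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
    )

    func main() {
        // Ask for all events that happened since the given timestamp.
        res, err := http.Get("http://localhost:4243/events?since=1374067924")
        if err != nil {
            panic(err)
        }
        defer res.Body.Close()

        // Each event arrives as a single JSON object; print them as they come in.
        scanner := bufio.NewScanner(res.Body)
        for scanner.Scan() {
            fmt.Println(scanner.Text()) // e.g. {"status":"start","id":"dfdf82bd3881","time":1374067924}
        }
    }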
3. Going further
================

View file

@ -452,7 +452,7 @@ User Register
"username": "foobar"'}
:jsonparameter email: valid email address, that needs to be confirmed
:jsonparameter username: min 4 character, max 30 characters, must match the regular expression [a-z0-9_].
:jsonparameter username: min 4 characters, max 30 characters, must match the regular expression [a-z0-9\_].
:jsonparameter password: min 5 characters
**Example Response**:

View file

@ -367,7 +367,8 @@ POST /v1/users
{"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"'}
**Validation**:
- **username** : min 4 character, max 30 characters, must match the regular expression [a-z0-9_].
- **username**: min 4 characters, max 30 characters, must match the regular
expression [a-z0-9\_].
- **password**: min 5 characters
**Valid**: return HTTP 200
@ -566,4 +567,4 @@ Next request::
---------------------
- 1.0 : May 6th 2013 : initial release
- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new source namespace.
- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new source namespace.

View file

@ -11,6 +11,7 @@
Usage: docker build [OPTIONS] PATH | URL | -
Build a new container image from the source code at PATH
-t="": Tag to be applied to the resulting image in case of success.
-q=false: Suppress verbose build output.
When a single Dockerfile is given as URL, then no context is set. When a git repository is set as URL, the repository is used as context

View file

@ -10,4 +10,4 @@
Usage: docker rm [OPTIONS] CONTAINER
Remove a container
Remove one or more containers

View file

@ -8,6 +8,6 @@
::
Usage: docker rmimage [OPTIONS] IMAGE
Usage: docker rmi IMAGE [IMAGE...]
Remove an image
Remove one or more images

View file

@ -14,16 +14,27 @@
-a=map[]: Attach to stdin, stdout or stderr.
-c=0: CPU shares (relative weight)
-cidfile="": Write the container ID to the file
-d=false: Detached mode: leave the container running in the background
-e=[]: Set environment variables
-h="": Container host name
-i=false: Keep stdin open even if not attached
-m=0: Memory limit (in bytes)
-n=true: Enable networking for this container
-p=[]: Map a network port to the container
-t=false: Allocate a pseudo-tty
-u="": Username or UID
-d=[]: Set custom dns servers for the container
-v=[]: Creates a new volume and mounts it at the specified path.
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container.
-b=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]
-entrypoint="": Overwrite the default entrypoint set by the image.
Examples
--------
.. code-block:: bash
docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
| This will create a container and print "test" to the console. The cidfile flag makes docker attempt to create a new file and write the container ID to it. If the file exists already, docker will return an error. Docker will close this file when docker run exits.

View file

@ -8,6 +8,8 @@
::
Usage: docker stop [OPTIONS] NAME
Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
Stop a running container
-t=10: Number of seconds to wait for the container to stop before killing it.

View file

@ -37,5 +37,6 @@ Contents:
start <command/start>
stop <command/stop>
tag <command/tag>
top <command/top>
version <command/version>
wait <command/wait>
wait <command/wait>

View file

@ -46,11 +46,13 @@ in a standard build environment.
You can run an interactive session in the newly built container:
::
docker run -i -t docker bash
To extract the binaries from the container:
::
docker run docker sh -c 'cat $(which docker)' > docker-build && chmod +x docker-build

View file

@ -2,8 +2,6 @@
:description: An overview of the Docker Documentation
:keywords: containers, lxc, concepts, explanation
.. _introduction:
Welcome
=======

View file

@ -51,7 +51,7 @@ For example:
.. code-block:: bash
# Run docker in daemon mode
sudo <path to>/docker -H 0.0.0.0:5555 &
sudo <path to>/docker -H 0.0.0.0:5555 -d &
# Download a base image
docker -H :5555 pull base
@ -61,7 +61,7 @@ on both tcp and a unix socket
.. code-block:: bash
# Run docker in daemon mode
sudo <path to>/docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock
sudo <path to>/docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d &
# Download a base image
docker pull base
# OR

View file

@ -1,25 +1,27 @@
:title: Docker Builder
:description: Docker Builder specifes a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Docker Builder, automation, image creation
:title: Dockerfiles for Images
:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Dockerfile, automation, image creation
==============
Docker Builder
==============
==================
Dockerfile Builder
==================
**Docker can act as a builder** and read instructions from a text
Dockerfile to automate the steps you would otherwise make manually to
create an image. Executing ``docker build`` will run your steps and
commit them along the way, giving you a final image.
.. contents:: Table of Contents
Docker Builder specifes a simple DSL which allows you to automate the steps you
would normally manually take to create an image. Docker Build will run your
steps and commit them along the way, giving you a final image.
1. Usage
========
To build an image from a source repository, create a description file called `Dockerfile`
at the root of your repository. This file will describe the steps to assemble
the image.
To build an image from a source repository, create a description file
called ``Dockerfile`` at the root of your repository. This file will
describe the steps to assemble the image.
Then call `docker build` with the path of your source repository as argument:
Then call ``docker build`` with the path of your source repository as
argument:
``docker build .``
@ -28,7 +30,7 @@ build succeeds:
``docker build -t shykes/myapp .``
Docker will run your steps one-by-one, committing the result if necessary,
Docker will run your steps one-by-one, committing the result if necessary,
before finally outputting the ID of your new image.
2. Format
@ -36,136 +38,170 @@ before finally outputting the ID of your new image.
The Dockerfile format is quite simple:
``instruction arguments``
::
The Instruction is not case-sensitive, however convention is for them to be
# Comment
INSTRUCTION arguments
Instructions are not case-sensitive; however, convention is for them to be
UPPERCASE in order to distinguish them from arguments more easily.
Dockerfiles are evaluated in order, therefore the first instruction must be
`FROM` in order to specify the base image from which you are building.
Docker evaluates the instructions in a Dockerfile in order. **The first
instruction must be `FROM`** in order to specify the base image from
which you are building.
Docker will ignore lines in Dockerfiles prefixed with "`#`", so you may add
comment lines. A comment marker in the rest of the line will be treated as an
argument.
Docker will ignore **comment lines** *beginning* with ``#``. A comment
marker anywhere in the rest of the line will be treated as an argument.
3. Instructions
===============
Docker builder comes with a set of instructions, described below.
Here is the set of instructions you can use in a ``Dockerfile`` for
building images.
3.1 FROM
--------
``FROM <image>``
The `FROM` instruction sets the base image for subsequent instructions. As such,
a valid Dockerfile must have it as its first instruction.
The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent
instructions. As such, a valid Dockerfile must have ``FROM`` as its
first instruction.
`FROM` can be included multiple times within a single Dockerfile in order to
create multiple images. Simply make a note of the last image id output by the
commit before each new `FROM` command.
``FROM`` must be the first non-comment instruction in the
``Dockerfile``.
``FROM`` can appear multiple times within a single Dockerfile in order
to create multiple images. Simply make a note of the last image id
output by the commit before each new ``FROM`` command.
3.2 MAINTAINER
--------------
``MAINTAINER <name>``
The `MAINTAINER` instruction allows you to set the Author field of the generated
images.
The ``MAINTAINER`` instruction allows you to set the *Author* field of
the generated images.
3.3 RUN
-------
``RUN <command>``
The `RUN` instruction will execute any commands on the current image and commit
the results. The resulting committed image will be used for the next step in the
Dockerfile.
The ``RUN`` instruction will execute any commands on the current image
and commit the results. The resulting committed image will be used for
the next step in the Dockerfile.
Layering `RUN` instructions and generating commits conforms to the
core concepts of Docker where commits are cheap and containers can be created
from any point in an image's history, much like source control.
Layering ``RUN`` instructions and generating commits conforms to the
core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source
control.
3.4 CMD
-------
``CMD <command>``
The `CMD` instruction sets the command to be executed when running the image.
This is functionally equivalent to running
`docker commit -run '{"Cmd": <command>}'` outside the builder.
The ``CMD`` instruction sets the command to be executed when running
the image. This is functionally equivalent to running ``docker commit
-run '{"Cmd": <command>}'`` outside the builder.
.. note::
Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
the result; `CMD` does not execute anything at build time, but specifies the
intended command for the image.
Don't confuse `RUN` with `CMD`. `RUN` actually runs a
command and commits the result; `CMD` does not execute anything at
build time, but specifies the intended command for the image.
3.5 EXPOSE
----------
``EXPOSE <port> [<port>...]``
The `EXPOSE` instruction sets ports to be publicly exposed when running the
image. This is functionally equivalent to running
`docker commit -run '{"PortSpecs": ["<port>", "<port2>"]}'` outside the builder.
The ``EXPOSE`` instruction sets ports to be publicly exposed when
running the image. This is functionally equivalent to running ``docker
commit -run '{"PortSpecs": ["<port>", "<port2>"]}'`` outside the
builder.
3.6 ENV
-------
``ENV <key> <value>``
The `ENV` instruction sets the environment variable `<key>` to the value
`<value>`. This value will be passed to all future ``RUN`` instructions. This is
functionally equivalent to prefixing the command with `<key>=<value>`
The ``ENV`` instruction sets the environment variable ``<key>`` to the
value ``<value>``. This value will be passed to all future ``RUN``
instructions. This is functionally equivalent to prefixing the command
with ``<key>=<value>``
.. note::
The environment variables will persist when a container is run from the resulting image.
The environment variables will persist when a container is run
from the resulting image.
3.7 ADD
-------
``ADD <src> <dest>``
The `ADD` instruction will copy new files from <src> and add them to the container's filesystem at path `<dest>`.
The ``ADD`` instruction will copy new files from <src> and add them to
the container's filesystem at path ``<dest>``.
`<src>` must be the path to a file or directory relative to the source directory being built (also called the
context of the build) or a remote file URL.
``<src>`` must be the path to a file or directory relative to the
source directory being built (also called the *context* of the build) or
a remote file URL.
`<dest>` is the path at which the source will be copied in the destination container.
``<dest>`` is the path at which the source will be copied in the
destination container.
The copy obeys the following rules:
If `<src>` is a directory, the entire directory is copied, including filesystem metadata.
* If ``<src>`` is a URL and ``<dest>`` does not end with a trailing slash,
then a file is downloaded from the URL and copied to ``<dest>``.
* If ``<src>`` is a URL and ``<dest>`` does end with a trailing slash,
then the filename is inferred from the URL and the file is downloaded to
``<dest>/<filename>``. For instance, ``ADD http://example.com/foobar /``
would create the file ``/foobar``. The URL must have a nontrivial path
so that an appropriate filename can be discovered in this case
(``http://example.com`` will not work).
* If ``<src>`` is a directory, the entire directory is copied,
including filesystem metadata.
* If ``<src>`` is a tar archive in a recognized compression format
(identity, gzip, bzip2 or xz), it is unpacked as a directory.
If `<src>` is a tar archive in a recognized compression format (identity, gzip, bzip2 or xz), it
is unpacked as a directory.
When a directory is copied or unpacked, it has the same behavior as
``tar -x``: the result is the union of
When a directory is copied or unpacked, it has the same behavior as 'tar -x': the result is the union of
a) whatever existed at the destination path and b) the contents of the source tree, with conflicts resolved
in favor of b on a file-by-file basis.
1. whatever existed at the destination path and
2. the contents of the source tree,
If `<src>` is any other kind of file, it is copied individually along with its metadata. In this case,
if `<dst>` ends with a trailing slash '/', it will be considered a directory and the contents of `<src>`
will be written at `<dst>/base(<src>)`.
If `<dst>` does not end with a trailing slash, it will be considered a regular file and the contents
of `<src>` will be written at `<dst>`.
with conflicts resolved in favor of 2) on a file-by-file basis.
If `<dest>` doesn't exist, it is created along with all missing directories in its path. All new
files and directories are created with mode 0700, uid and gid 0.
* If ``<src>`` is any other kind of file, it is copied individually
along with its metadata. In this case, if ``<dst>`` ends with a
trailing slash ``/``, it will be considered a directory and the
contents of ``<src>`` will be written at ``<dst>/base(<src>)``.
* If ``<dst>`` does not end with a trailing slash, it will be
considered a regular file and the contents of ``<src>`` will be
written at ``<dst>``.
* If ``<dest>`` doesn't exist, it is created along with all missing
directories in its path. All new files and directories are created
with mode 0700, uid and gid 0.
3.8 ENTRYPOINT
-------------
--------------
``ENTRYPOINT /bin/echo``
``ENTRYPOINT ["/bin/echo"]``
The `ENTRYPOINT` instruction adds an entry command that will not be overwritten when arguments are passed to docker run, unlike the behavior of `CMD`. This allows arguments to be passed to the entrypoint. i.e. `docker run <image> -d` will pass the "-d" argument to the entrypoint.
The ``ENTRYPOINT`` instruction adds an entry command that will not be
overwritten when arguments are passed to docker run, unlike the
behavior of ``CMD``. This allows arguments to be passed to the
entrypoint. i.e. ``docker run <image> -d`` will pass the "-d" argument
to the entrypoint.
3.9 VOLUME
----------
``VOLUME ["/data"]``
The `VOLUME` instruction will add one or more new volumes to any container created from the image.
The ``VOLUME`` instruction will add one or more new volumes to any
container created from the image.
4. Dockerfile Examples
======================
@ -175,14 +211,14 @@ The `VOLUME` instruction will add one or more new volumes to any container creat
# Nginx
#
# VERSION 0.0.1
FROM ubuntu
MAINTAINER Guillaume J. Charmes "guillaume@dotcloud.com"
# make sure the package repository is up to date
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get install -y inotify-tools nginx apache2 openssh-server
.. code-block:: bash
@ -190,12 +226,12 @@ The `VOLUME` instruction will add one or more new volumes to any container creat
# Firefox over VNC
#
# VERSION 0.3
FROM ubuntu
# make sure the package repository is up to date
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
# Install vnc, xvfb in order to create a 'fake' display and firefox
RUN apt-get install -y x11vnc xvfb firefox
RUN mkdir /.vnc
@ -203,7 +239,7 @@ The `VOLUME` instruction will add one or more new volumes to any container creat
RUN x11vnc -storepasswd 1234 ~/.vnc/passwd
# Autostart firefox (might not be the best way, but it does the trick)
RUN bash -c 'echo "firefox" >> /.bashrc'
EXPOSE 5900
CMD ["x11vnc", "-forever", "-usepw", "-create"]

View file

@ -7,21 +7,69 @@
Working with Repositories
=========================
A *repository* is a hosted collection of tagged :ref:`images
<image_def>` that together create the file system for a container. The
repository's name is a tag that indicates the provenance of the
repository, i.e. who created it and where the original copy is
located.
Top-level repositories and user repositories
--------------------------------------------
You can find one or more repositories hosted on a *registry*. There
can be an implicit or explicit host name as part of the repository
tag. The implicit registry is located at ``index.docker.io``, the home
of "top-level" repositories and the Central Index. This registry may
also include public "user" repositories.
Generally, there are two types of repositories: Top-level repositories
which are controlled by the people behind Docker, and user
repositories.
So Docker is not only a tool for creating and managing your own
:ref:`containers <container_def>` -- **Docker is also a tool for
sharing**. The Docker project provides a Central Registry to host
public repositories, namespaced by user, and a Central Index which
provides user authentication and search over all the public
repositories. You can host your own Registry too! Docker acts as a
client for these services via ``docker search, pull, login`` and
``push``.
* Top-level repositories can easily be recognized by not having a ``/`` (slash) in their name. These repositories can generally be trusted.
* User repositories always come in the form of ``<username>/<repo_name>``. This is what your published images will look like.
* User images are not checked, it is therefore up to you whether or not you trust the creator of this image.
Top-level, User, and Your Own Repositories
------------------------------------------
There are two types of public repositories: *top-level* repositories
which are controlled by the Docker team, and *user* repositories
created by individual contributors.
Find public images available on the index
-----------------------------------------
* Top-level repositories can easily be recognized by **not** having a
``/`` (slash) in their name. These repositories can generally be
trusted.
* User repositories always come in the form of
``<username>/<repo_name>``. This is what your published images will
look like if you push to the public Central Registry.
* Only the authenticated user can push to their *username* namespace
on the Central Registry.
* User images are not checked, it is therefore up to you whether or
not you trust the creator of this image.
Right now (version 0.5), private repositories are only possible by
hosting `your own registry
<https://github.com/dotcloud/docker-registry>`_. To push or pull to a
repository on your own registry, you must prefix the tag with the
address of the registry's host, like this:
.. code-block:: bash
# Tag to create a repository with the full registry location.
# The location (e.g. localhost.localdomain:5000) becomes
# a permanent part of the repository name
docker tag 0u812deadbeef localhost.localdomain:5000/repo_name
# Push the new repository to its home location on localhost
docker push localhost.localdomain:5000/repo_name
Once a repository has your registry's host name as part of the tag,
you can push and pull it like any other repository, but it will
**not** be searchable (or indexed at all) in the Central Index, and
there will be no user name checking performed. Your registry will
function completely independently from the Central Index.
Find public images available on the Central Index
-------------------------------------------------
Search by name, namespace or description
@ -37,68 +85,48 @@ Download them simply by their name
docker pull <value>
Very similarly you can search for and browse the index online on https://index.docker.io
Very similarly you can search for and browse the index online on
https://index.docker.io
Connecting to the repository
----------------------------
Connecting to the Central Registry
----------------------------------
You can create a user on the central docker repository online, or by running
You can create a user on the central Docker Index online, or by running
.. code-block:: bash
docker login
This will prompt you for a username, which will become a public
namespace for your public repositories.
If your username does not exist it will prompt you to also enter a password and your e-mail address. It will then
automatically log you in.
If your username does not exist it will prompt you to also enter a
password and your e-mail address. It will then automatically log you
in.
Committing a container to a named image
---------------------------------------
In order to commit to the repository it is required to have committed your container to an image with your namespace.
In order to commit to the repository it is required to have committed
your container to an image within your username namespace.
.. code-block:: bash
# for example docker commit $CONTAINER_ID dhrp/kickassapp
docker commit <container_id> <your username>/<some_name>
docker commit <container_id> <username>/<repo_name>
Pushing a container to the repository
-----------------------------------------
Pushing a container to its repository
-------------------------------------
In order to push an image to the repository you need to have committed your container to a named image (see above)
In order to push an image to its repository you need to have committed
your container to a named image (see above)
Now you can commit this image to the repository
.. code-block:: bash
# for example docker push dhrp/kickassapp
docker push <image-name>
Changing the server to connect to
----------------------------------
When you are running your own index and/or registry, You can change the server the docker client will connect to.
Variable
^^^^^^^^
.. code-block:: sh
DOCKER_INDEX_URL
Setting this environment variable on the docker server will change the URL docker index.
This address is used in commands such as ``docker login``, ``docker push`` and ``docker pull``.
The docker daemon doesn't need to be restarted for this parameter to take effect.
Example
^^^^^^^
.. code-block:: sh
docker -d &
export DOCKER_INDEX_URL="https://index.docker.io"
docker push <username>/<repo_name>

View file

@ -68,19 +68,18 @@
<div style="float: right" class="pull-right">
<ul class="nav">
<li id="nav-introduction"><a href="http://www.docker.io/">Introduction</a></li>
<li id="nav-introduction"><a href="http://www.docker.io/">Home</a></li>
<li id="nav-about"><a href="http://www.docker.io/">About</a></li>
<li id="nav-community"><a href="http://www.docker.io/">Community</a></li>
<li id="nav-gettingstarted"><a href="http://www.docker.io/gettingstarted/">Getting started</a></li>
<li id="nav-documentation" class="active"><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
<li id="nav-blog"><a href="http://blog.docker.io/">Blog</a></li>
<li id="nav-index"><a href="http://index.docker.io/" title="Docker Image Index, find images here">INDEX <img class="inline-icon" src="{{ pathto('_static/img/external-link-icon.png', 1) }}" title="external link"> </a></li>
</ul>
<!--<div class="social links" style="float: right; margin-top: 14px; margin-left: 12px">-->
<!--<a class="twitter" href="http://twitter.com/getdocker">Twitter</a>-->
<!--<a class="github" href="https://github.com/dotcloud/docker/">GitHub</a>-->
<!--</div>-->
</div>
<div style="margin-left: -12px; float: left;">
<a href="http://www.docker.io"><img style="margin-top: 12px; height: 38px" src="{{ pathto('_static/img/docker-letters-logo.gif', 1) }}"></a>
<a href="http://www.docker.io" title="Docker Homepage"><img style="margin-top: 0px; height: 60px; margin-left: 10px;" src="{{ pathto('_static/img/docker-top-logo.png', 1) }}"></a>
</div>
</div>
@ -96,7 +95,7 @@
<div class="pull-right" id="fork-us" style="margin-top: 16px; margin-right: 16px;">
<a href="http://github.com/dotcloud/docker/"><img src="{{ pathto('_static/img/fork-us.png', 1) }}"> Fork us on Github</a>
</div>
<h1 class="pageheader">DOCUMENTATION</h1>
<h1 class="pageheader"><a href="http://docs.docker.io/en/latest/" title="Documentation" style="color: white;">DOCUMENTATION</a></h1>
</div>
</div>

View file

@ -34,12 +34,12 @@ h4 {
.navbar .nav li a {
padding: 22px 15px 22px;
}
.navbar .brand {
padding: 13px 10px 13px 28px ;
}
.navbar-dotcloud .container {
border-bottom: 2px #000000 solid;
}
.inline-icon {
margin-bottom: 6px;
}
/*
* Responsive YouTube, Vimeo, Embed, and HTML5 Videos with CSS
* http://www.jonsuh.com
@ -82,7 +82,7 @@ h4 {
.btn-custom {
background-color: #292929 !important;
background-repeat: repeat-x;
filter: progid:dximagetransform.microsoft.gradient(startColorstr="#515151", endColorstr="#282828");
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr="#515151", endColorstr="#282828");
background-image: -khtml-gradient(linear, left top, left bottom, from(#515151), to(#282828));
background-image: -moz-linear-gradient(top, #515151, #282828);
background-image: -ms-linear-gradient(top, #515151, #282828);
@ -301,7 +301,7 @@ section.header {
height: 28px;
line-height: 28px;
background-color: #43484c;
filter: progid:dximagetransform.microsoft.gradient(gradientType=0, startColorstr='#FFFF6E56', endColorstr='#FFED4F35');
filter: progid:DXImageTransform.Microsoft.gradient(gradientType=0, startColorstr='#FFFF6E56', endColorstr='#FFED4F35');
background-image: -webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #747474), color-stop(100%, #43484c));
background-image: -webkit-linear-gradient(top, #747474 0%, #43484c 100%);
background-image: -moz-linear-gradient(top, #747474 0%, #43484c 100%);

View file

@ -53,13 +53,6 @@ h1, h2, h3, h4 {
padding: 22px 15px 22px;
}
}
.brand {
padding: 13px 10px 13px 28px ;
// padding-left: 30px;
}
background-color: white;
}
@ -67,6 +60,9 @@ h1, h2, h3, h4 {
border-bottom: 2px @black solid;
}
.inline-icon {
margin-bottom: 6px;
}
/*
* Responsive YouTube, Vimeo, Embed, and HTML5 Videos with CSS

Binary file not shown (image added, 3.3 KiB).

Binary file not shown (image added, 2.8 KiB).

hack/bootcamp/README.md (new file, 91 lines)
View file

@ -0,0 +1,91 @@
# Docker maintainer bootcamp
## Introduction: we need more maintainers
Docker is growing incredibly fast. At the time of writing, it has received over 200 contributions from 90 people,
and its API is used by dozens of 3rd-party tools. Over 1,000 issues have been opened. As the first production deployments
start going live, the growth will only accelerate.
Also at the time of writing, Docker has 3 full-time maintainers, and 7 part-time subsystem maintainers. If docker
is going to live up to the expectations, we need more than that.
This document describes a *bootcamp* to guide and train volunteers interested in helping the project, either with individual
contributions, maintainer work, or both.
This bootcamp is an experiment. If you decide to go through it, consider yourself an alpha-tester. You should expect quirks,
and report them to us as you encounter them to help us smooth out the process.
## How it works
The maintainer bootcamp is a 12-step program - one step for each of the maintainer's responsibilities. The aspiring maintainer must
validate all 12 steps by 1) studying each one, 2) practicing it, and 3) getting endorsed for it.
Steps are all equally important and can be validated in any order. Validating all 12 steps is a pre-requisite for becoming a core
maintainer, but even 1 step will make you a better contributor!
### List of steps
#### 1) Be a power user
Use docker daily, build cool things with it, know its quirks inside and out.
#### 2) Help users
Answer questions on irc, twitter, email, in person.
#### 3) Manage the bug tracker
Help triage tickets - ask the right questions, find duplicates, reference relevant resources, know when to close a ticket when necessary, take the time to go over older tickets.
#### 4) Improve the documentation
Follow the documentation from scratch regularly and make sure it is still up-to-date. Find and fix inconsistencies. Remove stale information. Find a frequently asked question that is not documented. Simplify the content and the form.
#### 5) Evangelize the principles of docker
Understand what the underlying goals and principle of docker are. Explain design decisions based on what docker is, and what it is not. When someone is not using docker, find how docker can be valuable to them. If they are using docker, find how they can use it better.
#### 6) Fix bugs
Self-explanatory. Contribute improvements to docker which solve defects. Bugfixes should be well-tested, and prioritized by impact to the user.
#### 7) Improve the testing infrastructure
Automated testing is complicated and should be perpetually improved. Invest time to improve the current tooling. Refactor existing tests, create new ones, make testing more accessible to developers, add new testing capabilities (integration tests, mocking, stress test...), improve integration between tests and documentation...
#### 8) Contribute features
Improve docker to do more things, or get better at doing the same things. Features should be well-tested, not break existing APIs, respect the project goals. They should make the user's life measurably better. Features should be discussed ahead of time to avoid wasting time and duplicating effort.
#### 9) Refactor internals
Improve docker to repay technical debt. Simplify code layout, improve performance, add missing comments, reduce the number of files and functions, rename functions and variables to be more readable, go over FIXMEs, etc.
#### 10) Review and merge contributions
Review pull requests in a timely manner, review code in detail and offer feedback. Keep a high bar without being pedantic. Share the load of testing and merging pull requests.
#### 11) Release
Manage a release of docker from beginning to end. Tests, final review, tags, builds, upload to mirrors, distro packaging, etc.
#### 12) Train other maintainers
Contribute to training other maintainers. Give advice, delegate work, help organize the bootcamp. This also means contribute to the maintainer's manual, look for ways to improve the project organization etc.
### How to study a step
### How to practice a step
### How to get endorsed for a step

View file

@ -13,6 +13,10 @@ lxc.utsname = {{.Id}}
{{end}}
#lxc.aa_profile = unconfined
{{if .Config.NetworkDisabled}}
# network is disabled (-n=false)
lxc.network.type = empty
{{else}}
# network configuration
lxc.network.type = veth
lxc.network.flags = up
@ -20,6 +24,7 @@ lxc.network.link = {{.NetworkSettings.Bridge}}
lxc.network.name = eth0
lxc.network.mtu = 1500
lxc.network.ipv4 = {{.NetworkSettings.IPAddress}}/{{.NetworkSettings.IPPrefixLen}}
{{end}}
# root filesystem
{{$ROOTFS := .RootfsPath}}
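A standalone sketch of how the network conditional above renders; the struct below is a stand-in for the real container object, which carries many more fields.
package main

import (
	"os"
	"text/template"
)

// fakeContainer mimics just the fields the template fragment dereferences.
type fakeContainer struct {
	Config struct{ NetworkDisabled bool }
}

const frag = `{{if .Config.NetworkDisabled}}
# network is disabled (-n=false)
lxc.network.type = empty
{{else}}
# network configuration
lxc.network.type = veth
{{end}}`

func main() {
	tmpl := template.Must(template.New("lxc").Parse(frag))
	var c fakeContainer
	c.Config.NetworkDisabled = true // what running with networking disabled ends up setting
	if err := tmpl.Execute(os.Stdout, &c); err != nil {
		panic(err) // with NetworkDisabled set, only the "lxc.network.type = empty" stanza is rendered
	}
}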

View file

@ -17,6 +17,7 @@ var NetworkBridgeIface string
const (
DefaultNetworkBridge = "docker0"
DisableNetworkBridge = "none"
portRangeStart = 49153
portRangeEnd = 65535
)
@ -111,10 +112,29 @@ func checkRouteOverlaps(dockerNetwork *net.IPNet) error {
return nil
}
// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`,
// and attempts to configure it with an address which doesn't conflict with any other interface on the host.
// If it can't find an address which doesn't conflict, it will return an error.
func CreateBridgeIface(ifaceName string) error {
// FIXME: try more IP ranges
// FIXME: try bigger ranges! /24 is too small.
addrs := []string{"172.16.42.1/24", "10.0.42.1/24", "192.168.42.1/24"}
addrs := []string{
// Here we don't follow the convention of using the 1st IP of the range for the gateway.
// This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges.
// In theory this shouldn't matter - in practice there's bound to be a few scripts relying
// on the internal addressing or other stupid things like that.
// They shouldn't, but hey, let's not break them unless we really have to.
"172.16.42.1/16",
"10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive
"10.1.42.1/16",
"10.42.42.1/16",
"172.16.42.1/24",
"172.16.43.1/24",
"172.16.44.1/24",
"10.0.42.1/24",
"10.0.43.1/24",
"192.168.42.1/24",
"192.168.43.1/24",
"192.168.44.1/24",
}
var ifaceAddr string
for _, addr := range addrs {
@ -453,10 +473,16 @@ type NetworkInterface struct {
manager *NetworkManager
extPorts []*Nat
disabled bool
}
// Allocate an external TCP port and map it to the interface
func (iface *NetworkInterface) AllocatePort(spec string) (*Nat, error) {
if iface.disabled {
return nil, fmt.Errorf("Trying to allocate port for interface %v, which is disabled", iface) // FIXME
}
nat, err := parseNat(spec)
if err != nil {
return nil, err
@ -552,6 +578,11 @@ func parseNat(spec string) (*Nat, error) {
// Release: Network cleanup - release all resources
func (iface *NetworkInterface) Release() {
if iface.disabled {
return
}
for _, nat := range iface.extPorts {
utils.Debugf("Unmaping %v/%v", nat.Proto, nat.Frontend)
if err := iface.manager.portMapper.Unmap(nat.Frontend, nat.Proto); err != nil {
@ -579,10 +610,17 @@ type NetworkManager struct {
tcpPortAllocator *PortAllocator
udpPortAllocator *PortAllocator
portMapper *PortMapper
disabled bool
}
// Allocate a network interface
func (manager *NetworkManager) Allocate() (*NetworkInterface, error) {
if manager.disabled {
return &NetworkInterface{disabled: true}, nil
}
ip, err := manager.ipAllocator.Acquire()
if err != nil {
return nil, err
@ -596,6 +634,14 @@ func (manager *NetworkManager) Allocate() (*NetworkInterface, error) {
}
func newNetworkManager(bridgeIface string) (*NetworkManager, error) {
if bridgeIface == DisableNetworkBridge {
manager := &NetworkManager{
disabled: true,
}
return manager, nil
}
addr, err := getIfaceAddr(bridgeIface)
if err != nil {
// If the iface is not found, try to create it

View file

@ -68,6 +68,7 @@ func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
from.CloseWrite()
}
}
to.CloseRead()
event <- written
}
utils.Debugf("Forwarding traffic between tcp/%v and tcp/%v", client.RemoteAddr(), backend.RemoteAddr())

View file

@ -98,6 +98,13 @@ func ResolveRepositoryName(reposName string) (string, string, error) {
return endpoint, reposName, err
}
// VersionInfo is used to model entities which have a version.
// It is basically a tuple with name and version.
type VersionInfo interface {
Name() string
Version() string
}
func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
for _, cookie := range c.Jar.Cookies(req.URL) {
req.AddCookie(cookie)
@ -105,6 +112,20 @@ func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
return c.Do(req)
}
// Set the user agent field in the header based on the versions provided
// in NewRegistry() and extra.
func (r *Registry) setUserAgent(req *http.Request, extra ...VersionInfo) {
if len(r.baseVersions)+len(extra) == 0 {
return
}
if len(extra) == 0 {
req.Header.Set("User-Agent", r.baseVersionsStr)
} else {
req.Header.Set("User-Agent", appendVersions(r.baseVersionsStr, extra...))
}
return
}
// Retrieve the history of a given image from the Registry.
// Return a list of the parent's json (requested image included)
func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
@ -113,6 +134,7 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s
return nil, err
}
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
r.setUserAgent(req)
res, err := r.client.Do(req)
if err != nil || res.StatusCode != 200 {
if res != nil {
@ -159,6 +181,7 @@ func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
}
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
r.setUserAgent(req)
res, err := r.client.Do(req)
if err != nil {
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
@ -186,6 +209,7 @@ func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) (
return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
}
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
r.setUserAgent(req)
res, err := r.client.Do(req)
if err != nil {
return nil, err
@ -206,6 +230,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
return nil, err
}
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
r.setUserAgent(req)
res, err := r.client.Do(req)
if err != nil {
return nil, err
@ -244,6 +269,7 @@ func (r *Registry) GetRepositoryData(indexEp, remote string) (*RepositoryData, e
req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
}
req.Header.Set("X-Docker-Token", "true")
r.setUserAgent(req)
res, err := r.client.Do(req)
if err != nil {
@ -307,6 +333,7 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis
req.Header.Add("Content-type", "application/json")
req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
req.Header.Set("X-Docker-Checksum", imgData.Checksum)
r.setUserAgent(req)
utils.Debugf("Setting checksum for %s: %s", imgData.ID, imgData.Checksum)
res, err := doWithCookies(r.client, req)
@ -341,6 +368,7 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr
req.ContentLength = -1
req.TransferEncoding = []string{"chunked"}
req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
r.setUserAgent(req)
res, err := doWithCookies(r.client, req)
if err != nil {
return fmt.Errorf("Failed to upload layer: %s", err)
@ -378,6 +406,7 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token
}
req.Header.Add("Content-type", "application/json")
req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
r.setUserAgent(req)
req.ContentLength = int64(len(revision))
res, err := doWithCookies(r.client, req)
if err != nil {
@ -410,6 +439,7 @@ func (r *Registry) PushImageJSONIndex(indexEp, remote string, imgList []*ImgData
req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
req.ContentLength = int64(len(imgListJSON))
req.Header.Set("X-Docker-Token", "true")
r.setUserAgent(req)
if validate {
req.Header["X-Docker-Endpoints"] = regs
}
@ -430,6 +460,7 @@ func (r *Registry) PushImageJSONIndex(indexEp, remote string, imgList []*ImgData
req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
req.ContentLength = int64(len(imgListJSON))
req.Header.Set("X-Docker-Token", "true")
r.setUserAgent(req)
if validate {
req.Header["X-Docker-Endpoints"] = regs
}
@ -536,11 +567,52 @@ type ImgData struct {
}
type Registry struct {
client *http.Client
authConfig *auth.AuthConfig
client *http.Client
authConfig *auth.AuthConfig
baseVersions []VersionInfo
baseVersionsStr string
}
func NewRegistry(root string, authConfig *auth.AuthConfig) (r *Registry, err error) {
func validVersion(version VersionInfo) bool {
stopChars := " \t\r\n/"
if strings.ContainsAny(version.Name(), stopChars) {
return false
}
if strings.ContainsAny(version.Version(), stopChars) {
return false
}
return true
}
// Convert versions to a string and append the string to the string base.
//
// Each VersionInfo will be converted to a string in the format of
// "product/version", where the "product" is get from the Name() method, while
// version is get from the Version() method. Several pieces of verson information
// will be concatinated and separated by space.
func appendVersions(base string, versions ...VersionInfo) string {
if len(versions) == 0 {
return base
}
var buf bytes.Buffer
if len(base) > 0 {
buf.Write([]byte(base))
}
for _, v := range versions {
if !validVersion(v) {
continue
}
buf.Write([]byte(v.Name()))
buf.Write([]byte("/"))
buf.Write([]byte(v.Version()))
buf.Write([]byte(" "))
}
return buf.String()
}
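For illustration, a self-contained mirror of this User-Agent mechanism; the version numbers below are placeholders, not values taken from this codebase.
package main

import (
	"fmt"
	"strings"
)

// Each component contributes a "name/version" token to the User-Agent.
type versionInfo struct{ name, version string }

func buildUserAgent(parts ...versionInfo) string {
	tokens := make([]string, 0, len(parts))
	for _, p := range parts {
		// Skip entries containing separators, like validVersion() does.
		if strings.ContainsAny(p.name+p.version, " \t\r\n/") {
			continue
		}
		tokens = append(tokens, p.name+"/"+p.version)
	}
	return strings.Join(tokens, " ")
}

func main() {
	ua := buildUserAgent(
		versionInfo{"docker", "0.5.0"},
		versionInfo{"go", "go1.1"},
		versionInfo{"kernel", "3.8.0"},
	)
	fmt.Println(ua) // docker/0.5.0 go/go1.1 kernel/3.8.0
}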
func NewRegistry(root string, authConfig *auth.AuthConfig, baseVersions ...VersionInfo) (r *Registry, err error) {
httpTransport := &http.Transport{
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
@ -553,5 +625,10 @@ func NewRegistry(root string, authConfig *auth.AuthConfig) (r *Registry, err err
},
}
r.client.Jar, err = cookiejar.New(nil)
return r, err
if err != nil {
return nil, err
}
r.baseVersions = baseVersions
r.baseVersionsStr = appendVersions("", baseVersions...)
return r, nil
}

View file

@ -167,12 +167,12 @@ func (runtime *Runtime) Register(container *Container) error {
return nil
}
func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst string) error {
func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return err
}
src.AddWriter(log)
src.AddWriter(log, stream)
return nil
}

View file

@ -5,7 +5,6 @@ import (
"fmt"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"log"
"net"
"os"
@ -18,12 +17,12 @@ import (
)
const (
unitTestImageName = "docker-test-image"
unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
unitTestNetworkBridge = "testdockbr0"
unitTestStoreBase = "/var/lib/docker/unit-tests"
testDaemonAddr = "127.0.0.1:4270"
testDaemonProto = "tcp"
unitTestImageName = "docker-test-image"
unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
unitTestNetworkBridge = "testdockbr0"
unitTestStoreBase = "/var/lib/docker/unit-tests"
testDaemonAddr = "127.0.0.1:4270"
testDaemonProto = "tcp"
)
var globalRuntime *Runtime
@ -247,36 +246,13 @@ func TestGet(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
builder := NewBuilder(runtime)
container1, err := builder.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container1)
container2, err := builder.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container2)
container3, err := builder.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container3)
if runtime.Get(container1.ID) != container1 {
@ -431,46 +407,14 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
}
func TestRestore(t *testing.T) {
root, err := ioutil.TempDir("", "docker-test")
if err != nil {
t.Fatal(err)
}
if err := os.Remove(root); err != nil {
t.Fatal(err)
}
if err := utils.CopyDirectory(unitTestStoreBase, root); err != nil {
t.Fatal(err)
}
runtime1, err := NewRuntimeFromDirectory(root, false)
if err != nil {
t.Fatal(err)
}
builder := NewBuilder(runtime1)
runtime1 := mkRuntime(t)
defer nuke(runtime1)
// Create a container with one instance of docker
container1, err := builder.Create(&Config{
Image: GetTestImage(runtime1).ID,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
defer runtime1.Destroy(container1)
// Create a second container meant to be killed
container2, err := builder.Create(&Config{
Image: GetTestImage(runtime1).ID,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
// Start the container non blocking
@ -505,7 +449,7 @@ func TestRestore(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime2, err := NewRuntimeFromDirectory(root, false)
runtime2, err := NewRuntimeFromDirectory(runtime1.root, false)
if err != nil {
t.Fatal(err)
}

server.go (155 lines changed)
View file

@ -2,6 +2,7 @@ package docker
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/auth"
@ -18,6 +19,7 @@ import (
"runtime"
"strings"
"sync"
"time"
)
func (srv *Server) DockerVersion() APIVersion {
@ -28,11 +30,53 @@ func (srv *Server) DockerVersion() APIVersion {
}
}
// simpleVersionInfo is a simple implementation of
// the interface VersionInfo, which is used
// to provide version information for some product,
// component, etc. It stores the product name and the version
// as strings and returns them on calls to Name() and Version().
type simpleVersionInfo struct {
name string
version string
}
func (v *simpleVersionInfo) Name() string {
return v.name
}
func (v *simpleVersionInfo) Version() string {
return v.version
}
// versionInfos() returns version information for:
// docker, go, git-commit (of docker) and the host's kernel.
//
// Such information will be used on calls to NewRegistry().
func (srv *Server) versionInfos() []registry.VersionInfo {
v := srv.DockerVersion()
ret := make([]registry.VersionInfo, 0, 4)
ret = append(ret, &simpleVersionInfo{"docker", v.Version})
if len(v.GoVersion) > 0 {
ret = append(ret, &simpleVersionInfo{"go", v.GoVersion})
}
if len(v.GitCommit) > 0 {
ret = append(ret, &simpleVersionInfo{"git-commit", v.GitCommit})
}
kernelVersion, err := utils.GetKernelVersion()
if err == nil {
ret = append(ret, &simpleVersionInfo{"kernel", kernelVersion.String()})
}
return ret
}
func (srv *Server) ContainerKill(name string) error {
if container := srv.runtime.Get(name); container != nil {
if err := container.Kill(); err != nil {
return fmt.Errorf("Error restarting container %s: %s", name, err)
return fmt.Errorf("Error killing container %s: %s", name, err)
}
srv.LogEvent("kill", name)
} else {
return fmt.Errorf("No such container: %s", name)
}
@ -51,13 +95,14 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error {
if _, err := io.Copy(out, data); err != nil {
return err
}
srv.LogEvent("export", name)
return nil
}
return fmt.Errorf("No such container: %s", name)
}
func (srv *Server) ImagesSearch(term string) ([]APISearch, error) {
r, err := registry.NewRegistry(srv.runtime.root, nil)
r, err := registry.NewRegistry(srv.runtime.root, nil, srv.versionInfos()...)
if err != nil {
return nil, err
}
@ -208,13 +253,14 @@ func (srv *Server) DockerInfo() *APIInfo {
imgcount = len(images)
}
return &APIInfo{
Containers: len(srv.runtime.List()),
Images: imgcount,
MemoryLimit: srv.runtime.capabilities.MemoryLimit,
SwapLimit: srv.runtime.capabilities.SwapLimit,
Debug: os.Getenv("DEBUG") != "",
NFd: utils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
Containers: len(srv.runtime.List()),
Images: imgcount,
MemoryLimit: srv.runtime.capabilities.MemoryLimit,
SwapLimit: srv.runtime.capabilities.SwapLimit,
Debug: os.Getenv("DEBUG") != "",
NFd: utils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
NEventsListener: len(srv.events),
}
}
@ -509,7 +555,7 @@ func (srv *Server) poolRemove(kind, key string) error {
}
func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
r, err := registry.NewRegistry(srv.runtime.root, authConfig)
r, err := registry.NewRegistry(srv.runtime.root, authConfig, srv.versionInfos()...)
if err != nil {
return err
}
@ -726,7 +772,7 @@ func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFo
out = utils.NewWriteFlusher(out)
img, err := srv.runtime.graph.Get(localName)
r, err2 := registry.NewRegistry(srv.runtime.root, authConfig)
r, err2 := registry.NewRegistry(srv.runtime.root, authConfig, srv.versionInfos()...)
if err2 != nil {
return err2
}
@ -812,6 +858,7 @@ func (srv *Server) ContainerCreate(config *Config) (string, error) {
}
return "", err
}
srv.LogEvent("create", container.ShortID())
return container.ShortID(), nil
}
@ -820,6 +867,7 @@ func (srv *Server) ContainerRestart(name string, t int) error {
if err := container.Restart(t); err != nil {
return fmt.Errorf("Error restarting container %s: %s", name, err)
}
srv.LogEvent("restart", name)
} else {
return fmt.Errorf("No such container: %s", name)
}
@ -839,6 +887,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume bool) error {
if err := srv.runtime.Destroy(container); err != nil {
return fmt.Errorf("Error destroying container %s: %s", name, err)
}
srv.LogEvent("destroy", name)
if removeVolume {
// Retrieve all volumes from all remaining containers
@ -873,7 +922,6 @@ func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
if len(srv.runtime.repositories.ByID()[id]) != 0 {
return ErrImageReferenced
}
// If the image is not referenced but has children, recurse into them
referenced := false
byParents, err := srv.runtime.graph.ByParent()
@ -906,6 +954,7 @@ func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
return err
}
*imgs = append(*imgs, APIRmi{Deleted: utils.TruncateID(id)})
srv.LogEvent("delete", utils.TruncateID(id))
return nil
}
return nil
@ -927,14 +976,29 @@ func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
}
func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, error) {
//Untag the current image
imgs := []APIRmi{}
//If deleting by id, check whether the id belongs to exactly one repository
if strings.Contains(img.ID, repoName) && tag == "" {
for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] {
parsedRepo := strings.Split(repoAndTag, ":")[0]
if strings.Contains(img.ID, repoName) {
repoName = parsedRepo
} else if repoName != parsedRepo {
// the id belongs to multiple repositories (e.g. base:latest and user:test);
// in that case, return without deleting anything (conflict)
return imgs, nil
}
}
}
//Untag the current image
tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
if err != nil {
return nil, err
}
if tagDeleted {
imgs = append(imgs, APIRmi{Untagged: img.ShortID()})
srv.LogEvent("untag", img.ShortID())
}
if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
if err := srv.deleteImageAndChildren(img.ID, &imgs); err != nil {
@ -1007,6 +1071,7 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
if err := container.Start(hostConfig); err != nil {
return fmt.Errorf("Error starting container %s: %s", name, err)
}
srv.LogEvent("start", name)
} else {
return fmt.Errorf("No such container: %s", name)
}
@ -1018,6 +1083,7 @@ func (srv *Server) ContainerStop(name string, t int) error {
if err := container.Stop(t); err != nil {
return fmt.Errorf("Error stopping container %s: %s", name, err)
}
srv.LogEvent("stop", name)
} else {
return fmt.Errorf("No such container: %s", name)
}
@ -1045,20 +1111,41 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std
}
//logs
if logs {
if stdout {
cLog, err := container.ReadLog("stdout")
if err != nil {
utils.Debugf("Error reading logs (stdout): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stdout): %s", err)
cLog, err := container.ReadLog("json")
if err != nil && os.IsNotExist(err) {
// Legacy logs
utils.Debugf("Old logs format")
if stdout {
cLog, err := container.ReadLog("stdout")
if err != nil {
utils.Debugf("Error reading logs (stdout): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stdout): %s", err)
}
}
}
if stderr {
cLog, err := container.ReadLog("stderr")
if err != nil {
utils.Debugf("Error reading logs (stderr): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stderr): %s", err)
if stderr {
cLog, err := container.ReadLog("stderr")
if err != nil {
utils.Debugf("Error reading logs (stderr): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stderr): %s", err)
}
}
} else if err != nil {
utils.Debugf("Error reading logs (json): %s", err)
} else {
dec := json.NewDecoder(cLog)
for {
var l utils.JSONLog
if err := dec.Decode(&l); err == io.EOF {
break
} else if err != nil {
utils.Debugf("Error streaming logs: %s", err)
break
}
if (l.Stream == "stdout" && stdout) || (l.Stream == "stderr" && stderr) {
fmt.Fprintf(out, "%s", l.Log)
}
}
}
}
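The new code path reads the container's "json" log and only falls back to the legacy per-stream files when that log does not exist. A small standalone sketch of the format is below; the encoder calls and the in-memory buffer are illustrative, only the JSONLog field layout comes from this change.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"time"
)

type JSONLog struct {
	Log     string    `json:"log,omitempty"`
	Stream  string    `json:"stream,omitempty"`
	Created time.Time `json:"time"`
}

func main() {
	var logFile bytes.Buffer
	// Writer side: one JSON object per complete line of container output.
	json.NewEncoder(&logFile).Encode(&JSONLog{Log: "hello\n", Stream: "stdout", Created: time.Now()})
	json.NewEncoder(&logFile).Encode(&JSONLog{Log: "oops\n", Stream: "stderr", Created: time.Now()})

	// Reader side: the same decode loop used above for `docker logs`.
	dec := json.NewDecoder(&logFile)
	for {
		var l JSONLog
		if err := dec.Decode(&l); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %s", l.Stream, l.Log)
	}
}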
@ -1130,15 +1217,31 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
enableCors: enableCors,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
listeners: make(map[string]chan utils.JSONMessage),
}
runtime.srv = srv
return srv, nil
}
func (srv *Server) LogEvent(action, id string) {
now := time.Now().Unix()
jm := utils.JSONMessage{Status: action, ID: id, Time: now}
srv.events = append(srv.events, jm)
for _, c := range srv.listeners {
select { // non blocking channel
case c <- jm:
default:
}
}
}
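A minimal sketch of a consumer for this event stream, modeled on the TestLogEvent added in this change; the listener key and the buffered channel are illustrative choices only. Because LogEvent uses a non-blocking send, events are dropped for listeners that are not ready, so a real consumer must keep receiving.

// Illustrative consumer: register a listener channel, then keep draining it.
// The key "cli-attach" and the buffer size are assumptions for this sketch.
listener := make(chan utils.JSONMessage, 16)
srv.Lock()
srv.listeners["cli-attach"] = listener
srv.Unlock()
go func() {
	for jm := range listener {
		jm.Display(os.Stdout) // prints "[time] id: status"
	}
}()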
type Server struct {
sync.Mutex
runtime *Runtime
enableCors bool
pullingPool map[string]struct{}
pushingPool map[string]struct{}
events []utils.JSONMessage
listeners map[string]chan utils.JSONMessage
}

View file

@ -1,6 +1,7 @@
package docker
import (
"github.com/dotcloud/docker/utils"
"testing"
"time"
)
@ -251,3 +252,40 @@ func TestPools(t *testing.T) {
t.Fatalf("Expected `Unkown pool type`")
}
}
func TestLogEvent(t *testing.T) {
runtime := mkRuntime(t)
srv := &Server{
runtime: runtime,
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid")
listener := make(chan utils.JSONMessage)
srv.Lock()
srv.listeners["test"] = listener
srv.Unlock()
srv.LogEvent("fakeaction2", "fakeid")
if len(srv.events) != 2 {
t.Fatalf("Expected 2 events, found %d", len(srv.events))
}
go func() {
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction3", "fakeid")
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction4", "fakeid")
}()
setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
for i := 2; i < 4; i++ {
event := <-listener
if event != srv.events[i] {
t.Fatalf("Event received it different than expected")
}
}
})
}

View file

@ -2,6 +2,7 @@ import os
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.timed import Nightly
from buildbot.changes import filter
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
@ -40,12 +41,16 @@ c['db'] = {'db_url':"sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)]
c['slavePortnum'] = PORT_MASTER
c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
c['schedulers'].append(SingleBranchScheduler(name="all",
change_filter=filter.ChangeFilter(branch='master'),treeStableTimer=None,
builderNames=[BUILDER_NAME]))
# Schedulers
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[BUILDER_NAME,
'coverage'])]
c['schedulers'] += [SingleBranchScheduler(name="all",
change_filter=filter.ChangeFilter(branch='master'), treeStableTimer=None,
builderNames=[BUILDER_NAME])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['coverage'],
hour=0, minute=30)]
# Builder
# Builders
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker',logEnviron=False,usePTY=True,
command=["sh", "-c", Interpolate("cd ..; rm -rf build; export GOPATH={0}; "
@ -53,6 +58,16 @@ factory.addStep(ShellCommand(description='Docker',logEnviron=False,usePTY=True,
"go test -v".format(BUILDER_PATH,GITHUB_DOCKER,DOCKER_BUILD_PATH))]))
c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
factory=factory)]
# Docker coverage test
coverage_cmd = ('GOPATH=`pwd` go get -d github.com/dotcloud/docker\n'
'GOPATH=`pwd` go get github.com/axw/gocov/gocov\n'
'sudo -E GOPATH=`pwd` ./bin/gocov test github.com/dotcloud/docker | '
'./bin/gocov report')
factory = BuildFactory()
factory.addStep(ShellCommand(description='Coverage',logEnviron=False,usePTY=True,
command=coverage_cmd))
c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'],
factory=factory)]
# Status
authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),

View file

@ -85,7 +85,7 @@ func MergeConfig(userConf, imageConf *Config) {
imageNat, _ := parseNat(imagePortSpec)
for _, userPortSpec := range userConf.PortSpecs {
userNat, _ := parseNat(userPortSpec)
if imageNat.Proto == userNat.Proto && imageNat.Frontend == userNat.Frontend {
if imageNat.Proto == userNat.Proto && imageNat.Backend == userNat.Backend {
found = true
}
}

View file

@ -248,30 +248,54 @@ func (r *bufReader) Close() error {
type WriteBroadcaster struct {
sync.Mutex
writers map[io.WriteCloser]struct{}
buf *bytes.Buffer
writers map[StreamWriter]bool
}
func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser) {
type StreamWriter struct {
wc io.WriteCloser
stream string
}
func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) {
w.Lock()
w.writers[writer] = struct{}{}
sw := StreamWriter{wc: writer, stream: stream}
w.writers[sw] = true
w.Unlock()
}
// FIXME: Is that function used?
// FIXME: This relies on the concrete writer type in use having an equality operator
func (w *WriteBroadcaster) RemoveWriter(writer io.WriteCloser) {
w.Lock()
delete(w.writers, writer)
w.Unlock()
type JSONLog struct {
Log string `json:"log,omitempty"`
Stream string `json:"stream,omitempty"`
Created time.Time `json:"time"`
}
func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
w.Lock()
defer w.Unlock()
for writer := range w.writers {
if n, err := writer.Write(p); err != nil || n != len(p) {
w.buf.Write(p)
for sw := range w.writers {
lp := p
if sw.stream != "" {
lp = nil
for {
line, err := w.buf.ReadString('\n')
if err != nil {
w.buf.Write([]byte(line))
break
}
b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now()})
if err != nil {
// On error, evict the writer
delete(w.writers, sw)
continue
}
lp = append(lp, b...)
}
}
if n, err := sw.wc.Write(lp); err != nil || n != len(lp) {
// On error, evict the writer
delete(w.writers, writer)
delete(w.writers, sw)
}
}
return len(p), nil
@ -280,15 +304,15 @@ func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
func (w *WriteBroadcaster) CloseWriters() error {
w.Lock()
defer w.Unlock()
for writer := range w.writers {
writer.Close()
for sw := range w.writers {
sw.wc.Close()
}
w.writers = make(map[io.WriteCloser]struct{})
w.writers = make(map[StreamWriter]bool)
return nil
}
func NewWriteBroadcaster() *WriteBroadcaster {
return &WriteBroadcaster{writers: make(map[io.WriteCloser]struct{})}
return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)}
}
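Illustrative usage of the reworked broadcaster: a writer registered with a stream name receives each complete line wrapped as a JSONLog object, while a writer registered with an empty stream keeps receiving the raw bytes. The two writer variables below are hypothetical io.WriteClosers and the rendered timestamp is elided.

wb := NewWriteBroadcaster()
wb.AddWriter(jsonLogFile, "stdout") // hypothetical io.WriteCloser backing the json log
wb.AddWriter(rawPipe, "")           // hypothetical io.WriteCloser for an attached client
wb.Write([]byte("hello\n"))
// jsonLogFile receives roughly: {"log":"hello\n","stream":"stdout","time":"..."}
// rawPipe receives the raw bytes:  hello\n
wb.CloseWriters()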
func GetTotalUsedFds() int {
@ -587,8 +611,27 @@ type JSONMessage struct {
Status string `json:"status,omitempty"`
Progress string `json:"progress,omitempty"`
Error string `json:"error,omitempty"`
ID string `json:"id,omitempty"`
Time int64 `json:"time,omitempty"`
}
func (jm *JSONMessage) Display(out io.Writer) error {
if jm.Time != 0 {
fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0))
}
if jm.Progress != "" {
fmt.Fprintf(out, "%s %s\r", jm.Status, jm.Progress)
} else if jm.Error != "" {
return fmt.Errorf(jm.Error)
} else if jm.ID != "" {
fmt.Fprintf(out, "%s: %s\n", jm.ID, jm.Status)
} else {
fmt.Fprintf(out, "%s\n", jm.Status)
}
return nil
}
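Display is the terminal-rendering half of the new event and progress messages. A minimal sketch of a reader loop follows, assuming body is an io.Reader carrying back-to-back JSONMessage objects (for example, a progress or event stream returned by the HTTP API); the function name is illustrative.

// Sketch: decode a stream of JSONMessage values and render each on a terminal.
func displayStream(body io.Reader) error {
	dec := json.NewDecoder(body)
	for {
		var jm JSONMessage
		if err := dec.Decode(&jm); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		if err := jm.Display(os.Stdout); err != nil {
			return err
		}
	}
	return nil
}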
type StreamFormatter struct {
json bool
used bool

View file

@ -60,9 +60,9 @@ func TestWriteBroadcaster(t *testing.T) {
// Test 1: Both bufferA and bufferB should contain "foo"
bufferA := &dummyWriter{}
writer.AddWriter(bufferA)
writer.AddWriter(bufferA, "")
bufferB := &dummyWriter{}
writer.AddWriter(bufferB)
writer.AddWriter(bufferB, "")
writer.Write([]byte("foo"))
if bufferA.String() != "foo" {
@ -76,7 +76,7 @@ func TestWriteBroadcaster(t *testing.T) {
// Test2: bufferA and bufferB should contain "foobar",
// while bufferC should only contain "bar"
bufferC := &dummyWriter{}
writer.AddWriter(bufferC)
writer.AddWriter(bufferC, "")
writer.Write([]byte("bar"))
if bufferA.String() != "foobar" {
@ -91,35 +91,22 @@ func TestWriteBroadcaster(t *testing.T) {
t.Errorf("Buffer contains %v", bufferC.String())
}
// Test3: Test removal
writer.RemoveWriter(bufferB)
writer.Write([]byte("42"))
if bufferA.String() != "foobar42" {
t.Errorf("Buffer contains %v", bufferA.String())
}
if bufferB.String() != "foobar" {
t.Errorf("Buffer contains %v", bufferB.String())
}
if bufferC.String() != "bar42" {
t.Errorf("Buffer contains %v", bufferC.String())
}
// Test4: Test eviction on failure
// Test3: Test eviction on failure
bufferA.failOnWrite = true
writer.Write([]byte("fail"))
if bufferA.String() != "foobar42" {
if bufferA.String() != "foobar" {
t.Errorf("Buffer contains %v", bufferA.String())
}
if bufferC.String() != "bar42fail" {
if bufferC.String() != "barfail" {
t.Errorf("Buffer contains %v", bufferC.String())
}
// Even though we reset the flag, no more writes should go in there
bufferA.failOnWrite = false
writer.Write([]byte("test"))
if bufferA.String() != "foobar42" {
if bufferA.String() != "foobar" {
t.Errorf("Buffer contains %v", bufferA.String())
}
if bufferC.String() != "bar42failtest" {
if bufferC.String() != "barfailtest" {
t.Errorf("Buffer contains %v", bufferC.String())
}
@ -141,7 +128,7 @@ func TestRaceWriteBroadcaster(t *testing.T) {
writer := NewWriteBroadcaster()
c := make(chan bool)
go func() {
writer.AddWriter(devNullCloser(0))
writer.AddWriter(devNullCloser(0), "")
c <- true
}()
writer.Write([]byte("hello"))

View file

@ -84,20 +84,28 @@ func readFile(src string, t *testing.T) (content string) {
}
// Create a test container from the given runtime `r` and run arguments `args`.
// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
// If the image name is "_" (eg. []string{"-i", "-t", "_", "bash"}), it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConfig) {
func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConfig, error) {
config, hostConfig, _, err := ParseRun(args, nil)
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
if err != nil {
t.Fatal(err)
return nil, nil, err
}
if config.Image == "_" {
config.Image = GetTestImage(r).ID
}
config.Image = GetTestImage(r).ID
c, err := NewBuilder(r).Create(config)
if err != nil {
t.Fatal(err)
return nil, nil, err
}
return c, hostConfig
return c, hostConfig, nil
}
// Create a test container, start it, wait for it to complete, destroy it,
@ -110,7 +118,10 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
t.Fatal(err)
}
}()
container, hostConfig := mkContainer(r, args, t)
container, hostConfig, err := mkContainer(r, args, t)
if err != nil {
return "", err
}
defer r.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
@ -202,7 +213,7 @@ func TestMergeConfig(t *testing.T) {
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"2222:3333", "3333:3333"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
@ -219,11 +230,11 @@ func TestMergeConfig(t *testing.T) {
}
if len(configUser.PortSpecs) != 3 {
t.Fatalf("Expected 3 portSpecs, 1111:1111, 2222:3333 and 3333:3333, found %d", len(configUser.PortSpecs))
t.Fatalf("Expected 3 portSpecs, 1111:1111, 3333:2222 and 3333:3333, found %d", len(configUser.PortSpecs))
}
for _, portSpecs := range configUser.PortSpecs {
if portSpecs != "1111:1111" && portSpecs != "2222:3333" && portSpecs != "3333:3333" {
t.Fatalf("Expected 1111:1111 or 2222:3333 or 3333:3333, found %s", portSpecs)
if portSpecs != "1111:1111" && portSpecs != "3333:2222" && portSpecs != "3333:3333" {
t.Fatalf("Expected 1111:1111 or 3333:2222 or 3333:3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
@ -248,3 +259,45 @@ func TestMergeConfig(t *testing.T) {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
}
}
func TestMergeConfigPublicPortNotHonored(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111", "2222"},
Env: []string{"VAR1=1", "VAR2=2"},
Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"1111:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
MergeConfig(configUser, configImage)
contains := func(a []string, expect string) bool {
for _, p := range a {
if p == expect {
return true
}
}
return false
}
if !contains(configUser.PortSpecs, "2222") {
t.Logf("Expected '2222' Ports: %v", configUser.PortSpecs)
t.Fail()
}
if !contains(configUser.PortSpecs, "1111:3333") {
t.Logf("Expected '1111:3333' Ports: %v", configUser.PortSpecs)
t.Fail()
}
}