Merge branch 'master' of github.com:docker/docker into kill

Docker-DCO-1.1-Signed-off-by: Dan Walsh <dwalsh@redhat.com> (github: rhatdan)
This commit is contained in:
Dan Walsh 2015-04-29 15:05:20 -04:00
commit d0a4f310ef
157 changed files with 2861 additions and 1733 deletions

View file

@ -409,6 +409,8 @@ made through a pull request.
"fredlf",
"james",
"moxiegirl",
"thaJeztah",
"jamtur01",
"spf13",
"sven"
]

View file

@ -96,20 +96,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
} else {
root := cmd.Arg(0)
if urlutil.IsGitURL(root) {
remoteURL := cmd.Arg(0)
if !urlutil.IsGitTransport(remoteURL) {
remoteURL = "https://" + remoteURL
}
root, err = ioutil.TempDir("", "docker-build-git")
root, err = utils.GitClone(root)
if err != nil {
return err
}
defer os.RemoveAll(root)
if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
}
}
if _, err := os.Stat(root); err != nil {
return err
@ -182,7 +173,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
includes = append(includes, ".dockerignore", *dockerfileName)
}
if err = utils.ValidateContextDirectory(root, excludes); err != nil {
if err := utils.ValidateContextDirectory(root, excludes); err != nil {
return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
}
options := &archive.TarOptions{

View file

@ -31,8 +31,7 @@ func (cli *DockerCli) CmdDiff(args ...string) error {
}
changes := []types.ContainerChange{}
err = json.NewDecoder(rdr).Decode(&changes)
if err != nil {
if err := json.NewDecoder(rdr).Decode(&changes); err != nil {
return err
}

View file

@ -3,7 +3,6 @@ package client
import (
"errors"
"io"
"net/url"
"os"
flag "github.com/docker/docker/pkg/mflag"
@ -34,19 +33,9 @@ func (cli *DockerCli) CmdExport(args ...string) error {
return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
}
if len(cmd.Args()) == 1 {
image := cmd.Arg(0)
if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil {
return err
}
} else {
v := url.Values{}
for _, arg := range cmd.Args() {
v.Add("names", arg)
}
if err := cli.stream("GET", "/containers/get?"+v.Encode(), nil, output, nil); err != nil {
return err
}
image := cmd.Arg(0)
if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil {
return err
}
return nil

View file

@ -30,8 +30,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
}
history := []types.ImageHistory{}
err = json.NewDecoder(rdr).Decode(&history)
if err != nil {
if err := json.NewDecoder(rdr).Decode(&history); err != nil {
return err
}

View file

@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"net/url"
"strings"
"text/tabwriter"
"time"
@ -18,74 +17,6 @@ import (
"github.com/docker/docker/utils"
)
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
// walkTree recursively renders an image tree rooted at each entry of images,
// using box-drawing connectors to show parent/child structure. byParent maps
// an image ID to its children; printNode is the per-image rendering callback
// (viz- or tree-flavoured).
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) walkTree(noTrunc bool, images []*types.Image, byParent map[string][]*types.Image, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *types.Image, prefix string)) {
	count := len(images)
	for idx, img := range images {
		// The final sibling (or a sole child) gets the corner connector and a
		// blank continuation; earlier siblings get a tee and a vertical bar.
		isLast := count <= 1 || idx+1 == count
		connector, continuation := "\u251C─", "\u2502 "
		if isLast {
			connector, continuation = "└─", " "
		}
		printNode(cli, noTrunc, img, prefix+connector)
		if children, exists := byParent[img.ID]; exists {
			cli.walkTree(noTrunc, children, byParent, prefix+continuation, printNode)
		}
	}
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
// printVizNode emits one image as graphviz "dot" statements on cli.out: an
// edge from its parent (or from the invisible "base" node when it has no
// parent) plus a labelled box node when the image carries real repo tags.
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printVizNode(noTrunc bool, image *types.Image, prefix string) {
	var (
		imageID  string
		parentID string
	)
	if noTrunc {
		imageID = image.ID
		parentID = image.ParentId
	} else {
		imageID = stringid.TruncateID(image.ID)
		parentID = stringid.TruncateID(image.ParentId)
	}
	if parentID == "" {
		// Root image: anchor it to the hidden "base" node so the graph stays connected.
		fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
	} else {
		fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
	}
	// Guard the slice access: an image with an empty RepoTags slice would
	// otherwise panic with an index-out-of-range on RepoTags[0].
	if len(image.RepoTags) > 0 && image.RepoTags[0] != "<none>:<none>" {
		fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
			imageID, imageID, strings.Join(image.RepoTags, "\\n"))
	}
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
// printTreeNode writes one line of the --tree rendering for an image: the
// tree prefix, the (optionally truncated) ID, its virtual size, and any repo
// tags it carries.
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printTreeNode(noTrunc bool, image *types.Image, prefix string) {
	var imageID string
	if noTrunc {
		imageID = image.ID
	} else {
		imageID = stringid.TruncateID(image.ID)
	}
	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.VirtualSize)))
	// Guard the slice access: an empty RepoTags slice would otherwise panic
	// with an index-out-of-range on RepoTags[0]; untagged images just end the line.
	if len(image.RepoTags) > 0 && image.RepoTags[0] != "<none>:<none>" {
		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", "))
	} else {
		fmt.Fprint(cli.out, "\n")
	}
}
// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified.
//
// Usage: docker images [OPTIONS] [REPOSITORY]
@ -95,9 +26,6 @@ func (cli *DockerCli) CmdImages(args ...string) error {
all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
flFilter := opts.NewListOpts(nil)
cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
@ -116,158 +44,83 @@ func (cli *DockerCli) CmdImages(args ...string) error {
}
matchName := cmd.Arg(0)
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
if *flViz || *flTree {
v := url.Values{
"all": []string{"1"},
}
if len(imageFilterArgs) > 0 {
filterJSON, err := filters.ToParam(imageFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJSON)
}
rdr, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil)
v := url.Values{}
if len(imageFilterArgs) > 0 {
filterJSON, err := filters.ToParam(imageFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJSON)
}
images := []types.Image{}
err = json.NewDecoder(rdr).Decode(&images)
if err != nil {
return err
}
if cmd.NArg() == 1 {
// FIXME rename this parameter, to not be confused with the filters flag
v.Set("filter", matchName)
}
if *all {
v.Set("all", "1")
}
var (
printNode func(cli *DockerCli, noTrunc bool, image *types.Image, prefix string)
startImage *types.Image
rdr, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil)
if err != nil {
return err
}
roots = []*types.Image{}
byParent = make(map[string][]*types.Image)
)
images := []types.Image{}
if err := json.NewDecoder(rdr).Decode(&images); err != nil {
return err
}
for _, image := range images {
if image.ParentId == "" {
roots = append(roots, &image)
} else {
if children, exists := byParent[image.ParentId]; exists {
children = append(children, &image)
} else {
byParent[image.ParentId] = []*types.Image{&image}
}
}
if matchName != "" {
if matchName == image.ID || matchName == stringid.TruncateID(image.ID) {
startImage = &image
}
for _, repotag := range image.RepoTags {
if repotag == matchName {
startImage = &image
}
}
}
}
if *flViz {
fmt.Fprintf(cli.out, "digraph docker {\n")
printNode = (*DockerCli).printVizNode
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
if *showDigests {
fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
} else {
printNode = (*DockerCli).printTreeNode
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
}
}
for _, image := range images {
ID := image.ID
if !*noTrunc {
ID = stringid.TruncateID(ID)
}
if startImage != nil {
root := []*types.Image{startImage}
cli.walkTree(*noTrunc, root, byParent, "", printNode)
} else if matchName == "" {
cli.walkTree(*noTrunc, roots, byParent, "", printNode)
}
if *flViz {
fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
}
} else {
v := url.Values{}
if len(imageFilterArgs) > 0 {
filterJSON, err := filters.ToParam(imageFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJSON)
repoTags := image.RepoTags
repoDigests := image.RepoDigests
if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
// dangling image - clear out either repoTags or repoDigests so we only show it once below
repoDigests = []string{}
}
if cmd.NArg() == 1 {
// FIXME rename this parameter, to not be confused with the filters flag
v.Set("filter", matchName)
}
if *all {
v.Set("all", "1")
}
rdr, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil)
if err != nil {
return err
}
images := []types.Image{}
err = json.NewDecoder(rdr).Decode(&images)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
if *showDigests {
fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
// combine the tags and digests lists
tagsAndDigests := append(repoTags, repoDigests...)
for _, repoAndRef := range tagsAndDigests {
repo, ref := parsers.ParseRepositoryTag(repoAndRef)
// default tag and digest to none - if there's a value, it'll be set below
tag := "<none>"
digest := "<none>"
if utils.DigestReference(ref) {
digest = ref
} else {
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
}
}
for _, image := range images {
ID := image.ID
if !*noTrunc {
ID = stringid.TruncateID(ID)
tag = ref
}
repoTags := image.RepoTags
repoDigests := image.RepoDigests
if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
// dangling image - clear out either repoTags or repoDigests so we only show it once below
repoDigests = []string{}
}
// combine the tags and digests lists
tagsAndDigests := append(repoTags, repoDigests...)
for _, repoAndRef := range tagsAndDigests {
repo, ref := parsers.ParseRepositoryTag(repoAndRef)
// default tag and digest to none - if there's a value, it'll be set below
tag := "<none>"
digest := "<none>"
if utils.DigestReference(ref) {
digest = ref
if !*quiet {
if *showDigests {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
} else {
tag = ref
}
if !*quiet {
if *showDigests {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
} else {
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
}
} else {
fmt.Fprintln(w, ID)
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
}
} else {
fmt.Fprintln(w, ID)
}
}
}
if !*quiet {
w.Flush()
}
if !*quiet {
w.Flush()
}
return nil
}

View file

@ -34,7 +34,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
}
indented := new(bytes.Buffer)
indented.WriteByte('[')
indented.WriteString("[\n")
status := 0
isImage := false

View file

@ -92,8 +92,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
}
containers := []types.Container{}
err = json.NewDecoder(rdr).Decode(&containers)
if err != nil {
if err := json.NewDecoder(rdr).Decode(&containers); err != nil {
return err
}

View file

@ -1,20 +1,19 @@
package client
import "fmt"
import (
"fmt"
flag "github.com/docker/docker/pkg/mflag"
)
// CmdRename renames a container.
//
// Usage: docker rename OLD_NAME NEW_NAME
func (cli *DockerCli) CmdRename(args ...string) error {
cmd := cli.Subcmd("rename", "OLD_NAME NEW_NAME", "Rename a container", true)
if err := cmd.Parse(args); err != nil {
return nil
}
cmd.Require(flag.Exact, 2)
cmd.ParseFlags(args, true)
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
oldName := cmd.Arg(0)
newName := cmd.Arg(1)

View file

@ -37,8 +37,7 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
} else {
dels := []types.ImageDelete{}
err = json.NewDecoder(rdr).Decode(&dels)
if err != nil {
if err := json.NewDecoder(rdr).Decode(&dels); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
continue

View file

@ -51,8 +51,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
}
results := ByStars{}
err = json.NewDecoder(rdr).Decode(&results)
if err != nil {
if err := json.NewDecoder(rdr).Decode(&results); err != nil {
return err
}

View file

@ -31,8 +31,7 @@ func (cli *DockerCli) CmdTop(args ...string) error {
}
procList := types.ContainerProcessList{}
err = json.NewDecoder(stream).Decode(&procList)
if err != nil {
if err := json.NewDecoder(stream).Decode(&procList); err != nil {
return err
}

View file

@ -107,8 +107,7 @@ func MatchesContentType(contentType, expectedType string) bool {
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700)
if err != nil {
if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
return nil, err
}
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)

View file

@ -277,8 +277,7 @@ func (s *Server) postContainersKill(eng *engine.Engine, version version.Version,
if vars == nil {
return fmt.Errorf("Missing parameter")
}
err := parseForm(r)
if err != nil {
if err := parseForm(r); err != nil {
return err
}
@ -289,7 +288,7 @@ func (s *Server) postContainersKill(eng *engine.Engine, version version.Version,
if sigStr := r.Form.Get("signal"); sigStr != "" {
// Check if we passed the signal as a number:
// The largest legal signal is 31, so let's parse on 5 bits
sig, err = strconv.ParseUint(sigStr, 10, 5)
sig, err := strconv.ParseUint(sigStr, 10, 5)
if err != nil {
// The signal is not a number, treat it as a string (either like
// "KILL" or like "SIGKILL")
@ -301,7 +300,7 @@ func (s *Server) postContainersKill(eng *engine.Engine, version version.Version,
}
}
if err = s.daemon.ContainerKill(name, sig); err != nil {
if err := s.daemon.ContainerKill(name, sig); err != nil {
return err
}
@ -406,15 +405,6 @@ func (s *Server) getImagesJSON(eng *engine.Engine, version version.Version, w ht
return writeJSON(w, http.StatusOK, legacyImages)
}
// getImagesViz serves the legacy /images/viz endpoint. Clients speaking an
// API version newer than 1.6 must render the graph themselves, so for them
// the endpoint answers 404; older clients are still handed to the engine.
func (s *Server) getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if !version.GreaterThan("1.6") {
		// Old clients keep the server-side rendering.
		eng.ServeHTTP(w, r)
		return nil
	}
	w.WriteHeader(http.StatusNotFound)
	return fmt.Errorf("This is now implemented in the client.")
}
func (s *Server) getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
@ -1589,7 +1579,6 @@ func createRouter(s *Server, eng *engine.Engine) *mux.Router {
"/info": s.getInfo,
"/version": s.getVersion,
"/images/json": s.getImagesJSON,
"/images/viz": s.getImagesViz,
"/images/search": s.getImagesSearch,
"/images/get": s.getImagesGet,
"/images/{name:.*}/get": s.getImagesGet,

View file

@ -32,7 +32,6 @@ import (
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/pkg/urlutil"
@ -148,8 +147,15 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp
// do the copy (e.g. hash value if cached). Don't actually do
// the copy until we've looked at all src files
for _, orig := range args[0 : len(args)-1] {
err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
if err != nil {
if err := calcCopyInfo(
b,
cmdName,
&copyInfos,
orig,
dest,
allowRemote,
allowDecompression,
); err != nil {
return err
}
}
@ -483,7 +489,7 @@ func (b *Builder) processImageFrom(img *imagepkg.Image) error {
fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
}
// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
onBuildTriggers := b.Config.OnBuild
b.Config.OnBuild = []string{}
@ -646,14 +652,12 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
err error
destExists = true
origPath = path.Join(b.contextPath, orig)
destPath = path.Join(container.RootfsPath(), dest)
destPath string
)
if destPath != container.RootfsPath() {
destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
if err != nil {
return err
}
destPath, err = container.GetResourcePath(dest)
if err != nil {
return err
}
// Preserve the trailing '/'

View file

@ -6,7 +6,6 @@ import (
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"sync"
@ -22,6 +21,7 @@ import (
"github.com/docker/docker/pkg/urlutil"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
)
// whitelist of commands allowed for a commit/import
@ -106,19 +106,12 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
if buildConfig.RemoteURL == "" {
context = ioutil.NopCloser(buildConfig.Context)
} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
if !urlutil.IsGitTransport(buildConfig.RemoteURL) {
buildConfig.RemoteURL = "https://" + buildConfig.RemoteURL
}
root, err := ioutil.TempDir("", "docker-build-git")
root, err := utils.GitClone(buildConfig.RemoteURL)
if err != nil {
return err
}
defer os.RemoveAll(root)
if output, err := exec.Command("git", "clone", "--recursive", buildConfig.RemoteURL, root).CombinedOutput(); err != nil {
return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
}
c, err := archive.Tar(root, archive.Uncompressed)
if err != nil {
return err

View file

@ -233,7 +233,7 @@ func parseString(rest string) (*Node, map[string]bool, error) {
// parseJSON converts JSON arrays to an AST.
func parseJSON(rest string) (*Node, map[string]bool, error) {
var myJson []interface{}
if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJson); err != nil {
return nil, nil, err
}

View file

@ -166,8 +166,7 @@ func (configFile *ConfigFile) Save() error {
return err
}
err = ioutil.WriteFile(configFile.filename, data, 0600)
if err != nil {
if err := ioutil.WriteFile(configFile.filename, data, 0600); err != nil {
return err
}

View file

@ -1151,6 +1151,7 @@ _docker() {
--dns
--dns-search
--exec-driver -e
--exec-opt
--fixed-cidr
--fixed-cidr-v6
--graph -g

View file

@ -51,6 +51,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the Docker runtime to use a specific exec driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set exec driver options'
complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)'
complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)'
complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode'

View file

@ -125,7 +125,7 @@ func main() {
err = devices.ResizePool(size)
if err != nil {
fmt.Println("Error resizeing pool: ", err)
fmt.Println("Error resizing pool: ", err)
os.Exit(1)
}

View file

@ -7,6 +7,7 @@ DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log}
DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid}
DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker}
DOCKER_OPTS=${DOCKER_OPTS:-}
UNSHARE_BINARY=${UNSHARE_BINARY:-/usr/bin/unshare}
start() {
checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE"
@ -16,11 +17,12 @@ start() {
ebegin "Starting docker daemon"
start-stop-daemon --start --background \
--exec "$DOCKER_BINARY" \
--exec "$UNSHARE_BINARY" \
--pidfile "$DOCKER_PIDFILE" \
--stdout "$DOCKER_LOGFILE" \
--stderr "$DOCKER_LOGFILE" \
-- -d -p "$DOCKER_PIDFILE" \
-- --mount \
-- "$DOCKER_BINARY" -d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS
eend $?
}

View file

@ -30,6 +30,7 @@ DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid
DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_OPTS=
DOCKER_DESC="Docker"
UNSHARE=${UNSHARE:-/usr/bin/unshare}
# Get lsb functions
. /lib/lsb/init-functions
@ -99,11 +100,11 @@ case "$1" in
log_begin_msg "Starting $DOCKER_DESC: $BASE"
start-stop-daemon --start --background \
--no-close \
--exec "$DOCKER" \
--exec "$UNSHARE" \
--pidfile "$DOCKER_SSD_PIDFILE" \
--make-pidfile \
-- \
-d -p "$DOCKER_PIDFILE" \
-- --mount \
-- "$DOCKER" -d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS \
>> "$DOCKER_LOGFILE" 2>&1
log_end_msg $?

View file

@ -37,7 +37,7 @@ script
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$DOCKER" -d $DOCKER_OPTS
exec unshare -m -- "$DOCKER" -d $DOCKER_OPTS
end script
# Don't emit "started" event until docker.sock is ready.

View file

@ -29,6 +29,7 @@ type Config struct {
GraphDriver string
GraphOptions []string
ExecDriver string
ExecOptions []string
Mtu int
SocketGroup string
EnableCors bool
@ -70,6 +71,7 @@ func (config *Config) InstallFlags() {
flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API")
opts.IPVar(&config.Bridge.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
opts.ListVar(&config.ExecOptions, []string{"-exec-opt"}, "Set exec driver options")
// FIXME: why the inconsistency between "hosts" and "sockets"?
opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use")
opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use")

View file

@ -149,8 +149,7 @@ func (container *Container) toDisk() error {
return err
}
err = ioutil.WriteFile(pth, data, 0666)
if err != nil {
if err := ioutil.WriteFile(pth, data, 0666); err != nil {
return err
}
@ -211,12 +210,37 @@ func (container *Container) LogEvent(action string) {
)
}
func (container *Container) getResourcePath(path string) (string, error) {
// Evaluates `path` in the scope of the container's basefs, with proper path
// sanitisation. Symlinks are all scoped to the basefs of the container, as
// though the container's basefs was `/`.
//
// The basefs of a container is the host-facing path which is bind-mounted as
// `/` inside the container. This method is essentially used to access a
// particular path inside the container as though you were a process in that
// container.
//
// NOTE: The returned path is *only* safely scoped inside the container's basefs
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetResourcePath(path string) (string, error) {
cleanPath := filepath.Join("/", path)
return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
}
func (container *Container) getRootResourcePath(path string) (string, error) {
// Evaluates `path` in the scope of the container's root, with proper path
// sanitisation. Symlinks are all scoped to the root of the container, as
// though the container's root was `/`.
//
// The root of a container is the host-facing configuration metadata directory.
// Only use this method to safely access the container's `container.json` or
// other metadata files. If in doubt, use container.GetResourcePath.
//
// NOTE: The returned path is *only* safely scoped inside the container's root
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetRootResourcePath(path string) (string, error) {
cleanPath := filepath.Join("/", path)
return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
}
@ -515,7 +539,7 @@ func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser {
}
func (container *Container) buildHostnameFile() error {
hostnamePath, err := container.getRootResourcePath("hostname")
hostnamePath, err := container.GetRootResourcePath("hostname")
if err != nil {
return err
}
@ -529,7 +553,7 @@ func (container *Container) buildHostnameFile() error {
func (container *Container) buildHostsFiles(IP string) error {
hostsPath, err := container.getRootResourcePath("hosts")
hostsPath, err := container.GetRootResourcePath("hosts")
if err != nil {
return err
}
@ -759,7 +783,7 @@ func (container *Container) Unpause() error {
func (container *Container) Kill() error {
if !container.IsRunning() {
return nil
return fmt.Errorf("Container %s is not running", container.ID)
}
// 1. Send SIGKILL
@ -895,7 +919,7 @@ func (container *Container) Unmount() error {
}
func (container *Container) logPath(name string) (string, error) {
return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name))
return container.GetRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
@ -907,11 +931,11 @@ func (container *Container) ReadLog(name string) (io.Reader, error) {
}
func (container *Container) hostConfigPath() (string, error) {
return container.getRootResourcePath("hostconfig.json")
return container.GetRootResourcePath("hostconfig.json")
}
func (container *Container) jsonPath() (string, error) {
return container.getRootResourcePath("config.json")
return container.GetRootResourcePath("config.json")
}
// This method must be exported to be used from the lxc template
@ -981,7 +1005,7 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
}
}()
basePath, err := container.getResourcePath(resource)
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
@ -1083,7 +1107,7 @@ func (container *Container) setupContainerDns() error {
if err != nil {
return err
}
container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
if err != nil {
return err
}
@ -1244,7 +1268,7 @@ func (container *Container) initializeNetworking() error {
return err
}
hostsPath, err := container.getRootResourcePath("hosts")
hostsPath, err := container.GetRootResourcePath("hosts")
if err != nil {
return err
}
@ -1375,7 +1399,7 @@ func (container *Container) setupWorkingDirectory() error {
if container.Config.WorkingDir != "" {
container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
pth, err := container.getResourcePath(container.Config.WorkingDir)
pth, err := container.GetResourcePath(container.Config.WorkingDir)
if err != nil {
return err
}
@ -1515,6 +1539,9 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
if err != nil {
return nil, err
}
if container == nc {
return nil, fmt.Errorf("cannot join own network")
}
if !nc.IsRunning() {
return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
}

View file

@ -225,7 +225,6 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
if container.IsRunning() {
logrus.Debugf("killing old running container %s", container.ID)
existingPid := container.Pid
container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
// We only have to handle this for lxc because the other drivers will ensure that
@ -237,11 +236,6 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
cmd := &execdriver.Command{
ID: container.ID,
}
var err error
cmd.ProcessConfig.Process, err = os.FindProcess(existingPid)
if err != nil {
logrus.Debugf("cannot find existing process for %d", existingPid)
}
daemon.execDriver.Terminate(cmd)
}
@ -829,7 +823,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
// Load storage driver
driver, err := graphdriver.New(config.Root, config.GraphOptions)
if err != nil {
return nil, fmt.Errorf("error intializing graphdriver: %v", err)
return nil, fmt.Errorf("error initializing graphdriver: %v", err)
}
logrus.Debugf("Using graph driver %s", driver)
// register cleanup for graph driver
@ -948,7 +942,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
sysInfo := sysinfo.New(false)
const runDir = "/var/run/docker"
ed, err := execdrivers.NewDriver(config.ExecDriver, runDir, config.Root, sysInitPath, sysInfo)
ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, runDir, config.Root, sysInitPath, sysInfo)
if err != nil {
return nil, err
}
@ -1187,8 +1181,7 @@ func tempDir(rootDir string) (string, error) {
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
}
err := os.MkdirAll(tmpDir, 0700)
return tmpDir, err
return tmpDir, os.MkdirAll(tmpDir, 0700)
}
func checkKernel() error {

View file

@ -129,6 +129,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
if err != nil && forceRemove {
daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID)
os.RemoveAll(container.root)
}
}()

View file

@ -214,8 +214,7 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
// the exitStatus) even after the cmd is done running.
go func() {
err := container.Exec(execConfig)
if err != nil {
if err := container.Exec(execConfig); err != nil {
execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
}
}()

View file

@ -10,7 +10,7 @@ import (
"github.com/docker/docker/pkg/sysinfo"
)
func NewDriver(name, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
switch name {
case "lxc":
// we want to give the lxc driver the full docker root because it needs
@ -18,7 +18,7 @@ func NewDriver(name, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (
// to be backwards compatible
return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor)
case "native":
return native.NewDriver(path.Join(root, "execdriver", "native"), initPath)
return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, options)
}
return nil, fmt.Errorf("unknown exec driver %s", name)
}

View file

@ -4,7 +4,6 @@ import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
@ -107,12 +106,13 @@ func getArgs() *InitArgs {
func setupEnv(args *InitArgs) error {
// Get env
var env []string
content, err := ioutil.ReadFile(".dockerenv")
dockerenv, err := os.Open(".dockerenv")
if err != nil {
return fmt.Errorf("Unable to load environment variables: %v", err)
}
if err := json.Unmarshal(content, &env); err != nil {
return fmt.Errorf("Unable to unmarshal environment variables: %v", err)
defer dockerenv.Close()
if err := json.NewDecoder(dockerenv).Decode(&env); err != nil {
return fmt.Errorf("Unable to decode environment variables: %v", err)
}
// Propagate the plugin-specific container env variable
env = append(env, "container="+os.Getenv("container"))

View file

@ -8,12 +8,14 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/reexec"
sysinfo "github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/term"
@ -39,7 +41,7 @@ type driver struct {
sync.Mutex
}
func NewDriver(root, initPath string) (*driver, error) {
func NewDriver(root, initPath string, options []string) (*driver, error) {
meminfo, err := sysinfo.ReadMemInfo()
if err != nil {
return nil, err
@ -52,11 +54,45 @@ func NewDriver(root, initPath string) (*driver, error) {
if err := apparmor.InstallDefaultProfile(); err != nil {
return nil, err
}
// choose cgroup manager
// this makes sure there are no breaking changes to people
// who upgrade from versions without native.cgroupdriver opt
cgm := libcontainer.Cgroupfs
if systemd.UseSystemd() {
cgm = libcontainer.SystemdCgroups
}
// parse the options
for _, option := range options {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case "native.cgroupdriver":
// override the default if they set options
switch val {
case "systemd":
if systemd.UseSystemd() {
cgm = libcontainer.SystemdCgroups
} else {
// warn them that they chose the wrong driver
logrus.Warn("You cannot use systemd as native.cgroupdriver, using cgroupfs instead")
}
case "cgroupfs":
cgm = libcontainer.Cgroupfs
default:
return nil, fmt.Errorf("Unknown native.cgroupdriver given %q. try cgroupfs or systemd", val)
}
default:
return nil, fmt.Errorf("Unknown option %s\n", key)
}
}
logrus.Debugf("Using %v as native.cgroupdriver", cgm)
f, err := libcontainer.New(
root,
cgm,

View file

@ -218,7 +218,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
}
defer file.Close()
if err = file.Truncate(size); err != nil {
if err := file.Truncate(size); err != nil {
return "", err
}
}
@ -697,7 +697,7 @@ func (devices *DeviceSet) setupBaseImage() error {
logrus.Debugf("Creating filesystem on base device-mapper thin volume")
if err = devices.activateDeviceIfNeeded(info); err != nil {
if err := devices.activateDeviceIfNeeded(info); err != nil {
return err
}
@ -706,7 +706,7 @@ func (devices *DeviceSet) setupBaseImage() error {
}
info.Initialized = true
if err = devices.saveMetadata(info); err != nil {
if err := devices.saveMetadata(info); err != nil {
info.Initialized = false
return err
}
@ -1099,14 +1099,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
// If we didn't just create the data or metadata image, we need to
// load the transaction id and migrate old metadata
if !createdLoopback {
if err = devices.initMetaData(); err != nil {
if err := devices.initMetaData(); err != nil {
return err
}
}
// Right now this loads only NextDeviceId. If there is more metadata
// down the line, we might have to move it earlier.
if err = devices.loadDeviceSetMetaData(); err != nil {
if err := devices.loadDeviceSetMetaData(); err != nil {
return err
}
@ -1528,8 +1528,7 @@ func (devices *DeviceSet) MetadataDevicePath() string {
func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
buf := new(syscall.Statfs_t)
err := syscall.Statfs(loopFile, buf)
if err != nil {
if err := syscall.Statfs(loopFile, buf); err != nil {
logrus.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err)
return 0, err
}

View file

@ -33,11 +33,15 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
if s, err := operatingsystem.GetOperatingSystem(); err == nil {
operatingSystem = s
}
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
meminfo, err := system.ReadMemInfo()

View file

@ -233,7 +233,7 @@ func InitDriver(config *Config) error {
// Configure iptables for link support
if config.EnableIptables {
if err := setupIPTables(addrv4, config.InterContainerCommunication, config.EnableIpMasq); err != nil {
logrus.Errorf("Error configuing iptables: %s", err)
logrus.Errorf("Error configuring iptables: %s", err)
return err
}
// call this on Firewalld reload
@ -355,7 +355,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
if !iptables.Exists(iptables.Filter, "FORWARD", dropArgs...) {
logrus.Debugf("Disable inter-container communication")
if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, dropArgs...)...); err != nil {
if output, err := iptables.Raw(append([]string{"-A", "FORWARD"}, dropArgs...)...); err != nil {
return fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
} else if len(output) != 0 {
return fmt.Errorf("Error disabling intercontainer communication: %s", output)
@ -366,7 +366,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
if !iptables.Exists(iptables.Filter, "FORWARD", acceptArgs...) {
logrus.Debugf("Enable inter-container communication")
if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, acceptArgs...)...); err != nil {
if output, err := iptables.Raw(append([]string{"-A", "FORWARD"}, acceptArgs...)...); err != nil {
return fmt.Errorf("Unable to allow intercontainer communication: %s", err)
} else if len(output) != 0 {
return fmt.Errorf("Error enabling intercontainer communication: %s", output)

View file

@ -601,7 +601,7 @@ func TestRegisterBadTwice(t *testing.T) {
Mask: []byte{255, 255, 255, 248},
}
if err := a.RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered {
t.Fatalf("Expecteded ErrNetworkAlreadyRegistered error, got %v", err)
t.Fatalf("Expected ErrNetworkAlreadyRegistered error, got %v", err)
}
}

View file

@ -76,22 +76,42 @@ func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
}
func (s *statsCollector) run() {
type publishersPair struct {
container *Container
publisher *pubsub.Publisher
}
// we cannot determine the capacity here.
// it will grow enough in first iteration
var pairs []publishersPair
for range time.Tick(s.interval) {
systemUsage, err := s.getSystemCpuUsage()
if err != nil {
logrus.Errorf("collecting system cpu usage: %v", err)
continue
}
// it does not make sense in the first iteration,
// but saves allocations in further iterations
pairs = pairs[:0]
s.m.Lock()
for container, publisher := range s.publishers {
systemUsage, err := s.getSystemCpuUsage()
if err != nil {
logrus.Errorf("collecting system cpu usage for %s: %v", container.ID, err)
continue
}
stats, err := container.Stats()
// copy pointers here to release the lock ASAP
pairs = append(pairs, publishersPair{container, publisher})
}
s.m.Unlock()
for _, pair := range pairs {
stats, err := pair.container.Stats()
if err != nil {
if err != execdriver.ErrNotRunning {
logrus.Errorf("collecting stats for %s: %v", container.ID, err)
logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
}
continue
}
stats.SystemUsage = systemUsage
publisher.Publish(stats)
pair.publisher.Publish(stats)
}
}
}

View file

@ -13,7 +13,6 @@ import (
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
)
type volumeMount struct {
@ -47,7 +46,7 @@ func (container *Container) createVolumes() error {
continue
}
realPath, err := container.getResourcePath(path)
realPath, err := container.GetResourcePath(path)
if err != nil {
return err
}
@ -314,21 +313,6 @@ func copyExistingContents(source, destination string) error {
return copyOwnership(source, destination)
}
// copyOwnership copies the permissions and uid:gid of the source file
// into the destination file
func copyOwnership(source, destination string) error {
stat, err := system.Stat(source)
if err != nil {
return err
}
if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil {
return err
}
return os.Chmod(destination, os.FileMode(stat.Mode()))
}
func (container *Container) mountVolumes() error {
for dest, source := range container.Volumes {
v := container.daemon.volumes.Get(source)
@ -336,7 +320,7 @@ func (container *Container) mountVolumes() error {
return fmt.Errorf("could not find volume for %s:%s, impossible to mount", source, dest)
}
destPath, err := container.getResourcePath(dest)
destPath, err := container.GetResourcePath(dest)
if err != nil {
return err
}
@ -347,7 +331,7 @@ func (container *Container) mountVolumes() error {
}
for _, mnt := range container.specialMounts() {
destPath, err := container.getResourcePath(mnt.Destination)
destPath, err := container.GetResourcePath(mnt.Destination)
if err != nil {
return err
}
@ -360,7 +344,7 @@ func (container *Container) mountVolumes() error {
func (container *Container) unmountVolumes() {
for dest := range container.Volumes {
destPath, err := container.getResourcePath(dest)
destPath, err := container.GetResourcePath(dest)
if err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue
@ -372,7 +356,7 @@ func (container *Container) unmountVolumes() {
}
for _, mnt := range container.specialMounts() {
destPath, err := container.getResourcePath(mnt.Destination)
destPath, err := container.GetResourcePath(mnt.Destination)
if err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue

24
daemon/volumes_linux.go Normal file
View file

@ -0,0 +1,24 @@
// +build !windows
package daemon
import (
"os"
"github.com/docker/docker/pkg/system"
)
// copyOwnership copies the permissions and uid:gid of the source file
// into the destination file
func copyOwnership(source, destination string) error {
stat, err := system.Stat(source)
if err != nil {
return err
}
if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil {
return err
}
return os.Chmod(destination, os.FileMode(stat.Mode()))
}

View file

@ -0,0 +1,8 @@
// +build windows
package daemon
// Not supported on Windows
func copyOwnership(source, destination string) error {
return nil
}

View file

@ -124,124 +124,165 @@ unix://[/path/to/socket] to use.
**-v**, **--version**=*true*|*false*
Print version information and quit. Default is false.
**--exec-opt**=[]
Set exec driver options. See EXEC DRIVER OPTIONS.
**--selinux-enabled**=*true*|*false*
Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
# COMMANDS
**docker-attach(1)**
**attach**
Attach to a running container
See **docker-attach(1)** for full documentation on the **attach** command.
**docker-build(1)**
**build**
Build an image from a Dockerfile
See **docker-build(1)** for full documentation on the **build** command.
**docker-commit(1)**
**commit**
Create a new image from a container's changes
See **docker-commit(1)** for full documentation on the **commit** command.
**docker-cp(1)**
**cp**
Copy files/folders from a container's filesystem to the host
See **docker-cp(1)** for full documentation on the **cp** command.
**docker-create(1)**
**create**
Create a new container
See **docker-create(1)** for full documentation on the **create** command.
**docker-diff(1)**
**diff**
Inspect changes on a container's filesystem
See **docker-diff(1)** for full documentation on the **diff** command.
**docker-events(1)**
**events**
Get real time events from the server
See **docker-events(1)** for full documentation on the **events** command.
**docker-exec(1)**
**exec**
Run a command in a running container
See **docker-exec(1)** for full documentation on the **exec** command.
**docker-export(1)**
**export**
Stream the contents of a container as a tar archive
See **docker-export(1)** for full documentation on the **export** command.
**docker-history(1)**
**history**
Show the history of an image
See **docker-history(1)** for full documentation on the **history** command.
**docker-images(1)**
**images**
List images
See **docker-images(1)** for full documentation on the **images** command.
**docker-import(1)**
**import**
Create a new filesystem image from the contents of a tarball
See **docker-import(1)** for full documentation on the **import** command.
**docker-info(1)**
**info**
Display system-wide information
See **docker-info(1)** for full documentation on the **info** command.
**docker-inspect(1)**
**inspect**
Return low-level information on a container or image
See **docker-inspect(1)** for full documentation on the **inspect** command.
**docker-kill(1)**
**kill**
Kill a running container (which includes the wrapper process and everything
inside it)
See **docker-kill(1)** for full documentation on the **kill** command.
**docker-load(1)**
**load**
Load an image from a tar archive
See **docker-load(1)** for full documentation on the **load** command.
**docker-login(1)**
**login**
Register or login to a Docker Registry
See **docker-login(1)** for full documentation on the **login** command.
**docker-logout(1)**
**logout**
Log the user out of a Docker Registry
See **docker-logout(1)** for full documentation on the **logout** command.
**docker-logs(1)**
**logs**
Fetch the logs of a container
See **docker-logs(1)** for full documentation on the **logs** command.
**docker-pause(1)**
**pause**
Pause all processes within a container
See **docker-pause(1)** for full documentation on the **pause** command.
**docker-port(1)**
**port**
Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
See **docker-port(1)** for full documentation on the **port** command.
**docker-ps(1)**
**ps**
List containers
See **docker-ps(1)** for full documentation on the **ps** command.
**docker-pull(1)**
**pull**
Pull an image or a repository from a Docker Registry
See **docker-pull(1)** for full documentation on the **pull** command.
**docker-push(1)**
**push**
Push an image or a repository to a Docker Registry
See **docker-push(1)** for full documentation on the **push** command.
**docker-restart(1)**
**restart**
Restart a running container
See **docker-restart(1)** for full documentation on the **restart** command.
**docker-rm(1)**
**rm**
Remove one or more containers
See **docker-rm(1)** for full documentation on the **rm** command.
**docker-rmi(1)**
**rmi**
Remove one or more images
See **docker-rmi(1)** for full documentation on the **rmi** command.
**docker-run(1)**
**run**
Run a command in a new container
See **docker-run(1)** for full documentation on the **run** command.
**docker-save(1)**
**save**
Save an image to a tar archive
See **docker-save(1)** for full documentation on the **save** command.
**docker-search(1)**
**search**
Search for an image in the Docker index
See **docker-search(1)** for full documentation on the **search** command.
**docker-start(1)**
**start**
Start a stopped container
See **docker-start(1)** for full documentation on the **start** command.
**docker-stats(1)**
**stats**
Display a live stream of one or more containers' resource usage statistics
See **docker-stats(1)** for full documentation on the **stats** command.
**docker-stop(1)**
**stop**
Stop a running container
See **docker-stop(1)** for full documentation on the **stop** command.
**docker-tag(1)**
**tag**
Tag an image into a repository
See **docker-tag(1)** for full documentation on the **tag** command.
**docker-top(1)**
**top**
Lookup the running processes of a container
See **docker-top(1)** for full documentation on the **top** command.
**docker-unpause(1)**
**unpause**
Unpause all processes within a container
See **docker-unpause(1)** for full documentation on the **unpause** command.
**docker-version(1)**
**version**
Show the Docker version information
See **docker-version(1)** for full documentation on the **version** command.
**docker-wait(1)**
**wait**
Block until a container stops, then print its exit code
See **docker-wait(1)** for full documentation on the **wait** command.
# STORAGE DRIVER OPTIONS
@ -319,6 +360,18 @@ for data and metadata:
--storage-opt dm.metadatadev=/dev/vdc \
--storage-opt dm.basesize=20G
# EXEC DRIVER OPTIONS
Use the **--exec-opt** flags to specify options to the exec-driver. The only
driver that accepts this flag is the *native* (libcontainer) driver. As a
result, you must also specify **-s=**native for this option to have effect. The
following is the only *native* option:
#### native.cgroupdriver
Specifies the management of the container's `cgroups`. You can specify
`cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the
system uses `cgroupfs`.
#### Client
For specific client examples please see the man page for the specific Docker
command. For example:

View file

@ -34,6 +34,7 @@ pages:
- ['installation/ubuntulinux.md', 'Installation', 'Ubuntu']
- ['installation/mac.md', 'Installation', 'Mac OS X']
- ['installation/windows.md', 'Installation', 'Microsoft Windows']
- ['installation/testing-windows-docker-client.md', 'Installation', 'Building and testing the Windows Docker client']
- ['installation/amazon.md', 'Installation', 'Amazon EC2']
- ['installation/archlinux.md', 'Installation', 'Arch Linux']
- ['installation/binaries.md', 'Installation', 'Binaries']
@ -76,7 +77,7 @@ pages:
- ['docker-hub/accounts.md', 'Docker Hub', 'Accounts']
- ['docker-hub/repos.md', 'Docker Hub', 'Repositories']
- ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds']
- ['docker-hub/official_repos.md', 'Docker Hub', 'Official repo guidelines']
- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repositories']
# Docker Hub Enterprise:
- ['docker-hub-enterprise/index.md', 'Docker Hub Enterprise', 'Overview' ]
@ -195,21 +196,21 @@ pages:
- ['terms/image.md', '**HIDDEN**']
# Project:
- ['project/index.md', '**HIDDEN**']
- ['project/who-written-for.md', 'Contribute', 'README first']
- ['project/software-required.md', 'Contribute', 'Get required software']
- ['project/set-up-git.md', 'Contribute', 'Configure Git for contributing']
- ['project/set-up-dev-env.md', 'Contribute', 'Work with a development container']
- ['project/test-and-docs.md', 'Contribute', 'Run tests and test documentation']
- ['project/make-a-contribution.md', 'Contribute', 'Understand contribution workflow']
- ['project/find-an-issue.md', 'Contribute', 'Find an issue']
- ['project/work-issue.md', 'Contribute', 'Work on an issue']
- ['project/create-pr.md', 'Contribute', 'Create a pull request']
- ['project/review-pr.md', 'Contribute', 'Participate in the PR review']
- ['project/advanced-contributing.md', 'Contribute', 'Advanced contributing']
- ['project/get-help.md', 'Contribute', 'Where to get help']
- ['project/coding-style.md', 'Contribute', 'Coding style guide']
- ['project/doc-style.md', 'Contribute', 'Documentation style guide']
- ['project/who-written-for.md', 'Contributor', 'README first']
- ['project/software-required.md', 'Contributor', 'Get required software for Linux or OS X']
- ['project/software-req-win.md', 'Contributor', 'Get required software for Windows']
- ['project/set-up-git.md', 'Contributor', 'Configure Git for contributing']
- ['project/set-up-dev-env.md', 'Contributor', 'Work with a development container']
- ['project/test-and-docs.md', 'Contributor', 'Run tests and test documentation']
- ['project/make-a-contribution.md', 'Contributor', 'Understand contribution workflow']
- ['project/find-an-issue.md', 'Contributor', 'Find an issue']
- ['project/work-issue.md', 'Contributor', 'Work on an issue']
- ['project/create-pr.md', 'Contributor', 'Create a pull request']
- ['project/review-pr.md', 'Contributor', 'Participate in the PR review']
- ['project/advanced-contributing.md', 'Contributor', 'Advanced contributing']
- ['project/get-help.md', 'Contributor', 'Where to get help']
- ['project/coding-style.md', 'Contributor', 'Coding style guide']
- ['project/doc-style.md', 'Contributor', 'Documentation style guide']

View file

@ -60,7 +60,7 @@ You might need to create the bus before you can add the ISO.
## 5. Add the new VDI image
In the settings for the Boot2Docker image in VirtualBox, remove the VMDK image
from the SATA contoller and add the VDI image.
from the SATA controller and add the VDI image.
<img src="/articles/b2d_volume_images/add_volume.png">

View file

@ -65,4 +65,4 @@ There are lots more resources available to help you write your 'Dockerfile`.
* There's a [complete guide to all the instructions](/reference/builder/) available for use in a `Dockerfile` in the reference section.
* To help you write a clear, readable, maintainable `Dockerfile`, we've also
written a [`Dockerfile` Best Practices guide](/articles/dockerfile_best-practices).
* If you're working on an Official Repo, be sure to check out the [Official Repo Guidelines](/docker-hub/official_repos/).
* If your goal is to create a new Official Repository, be sure to read up on Docker's [Official Repositories](/docker-hub/official_repos/).

View file

@ -172,7 +172,7 @@ will be stored (as a diff). See which images you already have using the
# Commit your container to a new named image
$ docker commit <container_id> <some_name>
# List your containers
# List your images
$ docker images
You now have an image state from which you can create new instances.

View file

@ -32,13 +32,14 @@ ephemeral as possible. By “ephemeral,” we mean that it can be stopped and
destroyed and a new one built and put in place with an absolute minimum of
set-up and configuration.
### Use [a .dockerignore file](https://docs.docker.com/reference/builder/#the-dockerignore-file)
### Use a .dockerignore file
For faster uploading and efficiency during `docker build`, you should use
a `.dockerignore` file to exclude files or directories from the build
context and final image. For example, unless`.git` is needed by your build
process or scripts, you should add it to `.dockerignore`, which can save many
megabytes worth of upload time.
In most cases, it's best to put each Dockerfile in an empty directory. Then,
add to that directory only the files needed for building the Dockerfile. To
increase the build's performance, you can exclude files and directories by
adding a `.dockerignore` file to that directory as well. This file supports
exclusion patterns similar to `.gitignore` files. For information on creating one,
see the [.dockerignore file](../../reference/builder/#dockerignore-file).
### Avoid installing unnecessary packages
@ -419,9 +420,9 @@ fail catastrophically if the new build's context is missing the resource being
added. Adding a separate tag, as recommended above, will help mitigate this by
allowing the `Dockerfile` author to make a choice.
## Examples for official repositories
## Examples for Official Repositories
These Official Repos have exemplary `Dockerfile`s:
These Official Repositories have exemplary `Dockerfile`s:
* [Go](https://registry.hub.docker.com/_/golang/)
* [Perl](https://registry.hub.docker.com/_/perl/)

View file

@ -576,7 +576,7 @@ As soon as the router wants to send an IPv6 packet to the first container it
will transmit a neighbor solicitation request, asking, who has
`2001:db8::c009`? But it will get no answer because no one on this subnet has
this address. The container with this address is hidden behind the Docker host.
The Docker host has to listen to neighbor solication requests for the container
The Docker host has to listen to neighbor solicitation requests for the container
address and send a response that itself is the device that is responsible for
the address. This is done by a Kernel feature called `NDP Proxy`. You can
enable it by executing

View file

@ -1,5 +1,5 @@
page_title: Using Puppet
page_description: Installating and using Puppet
page_description: Installing and using Puppet
page_keywords: puppet, installation, usage, docker, documentation
# Using Puppet

View file

@ -249,7 +249,7 @@ may still be utilized by Docker containers on supported kernels,
by directly using the clone syscall, or utilizing the 'unshare'
utility. Using this, some users may find it possible to drop
more capabilities from their process as user namespaces provide
an artifical capabilities set. Likewise, however, this artifical
an artificial capabilities set. Likewise, however, this artificial
capabilities set may require use of 'capsh' to restrict the
user-namespace capabilities set when using 'unshare'.

View file

@ -1,189 +1,106 @@
page_title: Guidelines for official repositories on Docker Hub
page_title: Official Repositories on Docker Hub
page_description: Guidelines for Official Repositories on Docker Hub
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, official, image, documentation
# Guidelines for creating and documenting official repositories
# Official Repositories on Docker Hub
## Introduction
The Docker [Official Repositories](http://registry.hub.docker.com/official) are
a curated set of Docker repositories that are promoted on Docker Hub and
supported by Docker, Inc. They are designed to:
You've been given the job of creating an image for an Official Repository
hosted on [Docker Hub Registry](https://registry.hub.docker.com/). These are
our guidelines for getting that task done. Even if you're not
planning to create an Official Repo, you can think of these guidelines as best
practices for image creation generally.
* Provide essential base OS repositories (for example,
[`ubuntu`](https://registry.hub.docker.com/_/ubuntu/),
[`centos`](https://registry.hub.docker.com/_/centos/)) that serve as the
starting point for the majority of users.
This document consists of two major sections:
* Provide drop-in solutions for popular programming language runtimes, data
stores, and other services, similar to what a Platform-as-a-Service (PAAS)
would offer.
* A list of expected files, resources and supporting items for your image,
along with best practices for creating those items
* Examples embodying those practices
* Exemplify [`Dockerfile` best practices](/articles/dockerfile_best-practices)
and provide clear documentation to serve as a reference for other `Dockerfile`
authors.
## Expected files and resources
* Ensure that security updates are applied in a timely manner. This is
particularly important as many Official Repositories are some of the most
popular on Docker Hub.
### A Git repository
* Provide a channel for software vendors to redistribute up-to-date and
supported versions of their products. Organization accounts on Docker Hub can
also serve this purpose, without the careful review or restrictions on what
can be published.
Your image needs to live in a Git repository, preferably on GitHub. (If you'd
like to use a different provider, please [contact us](mailto:feedback@docker.com)
directly.) Docker **strongly** recommends that this repo be publicly
accessible.
Docker, Inc. sponsors a dedicated team that is responsible for reviewing and
publishing all Official Repositories content. This team works in collaboration
with upstream software maintainers, security experts, and the broader Docker
community.
If the repo is private or has otherwise limited access, you must provide a
means of at least "read-only" access for both general users and for the
docker-library maintainers, who need access for review and building purposes.
While it is preferable to have upstream software authors maintaining their
corresponding Official Repositories, this is not a strict requirement. Creating
and maintaining images for Official Repositories is a public process. It takes
place openly on GitHub where participation is encouraged. Anyone can provide
feedback, contribute code, suggest process changes, or even propose a new
Official Repository.
### A Dockerfile
## Should I use Official Repositories?
Complete information on `Dockerfile`s can be found in the [Reference section](https://docs.docker.com/reference/builder/).
We also have a page discussing [best practices for writing `Dockerfile`s](/articles/dockerfile_best-practices).
Your `Dockerfile` should adhere to the following:
New Docker users are encouraged to use the Official Repositories in their
projects. These repositories have clear documentation, promote best practices,
and are designed for the most common use cases. Advanced users are encouraged to
review the Official Repositories as part of their `Dockerfile` learning process.
* It must be written either by using `FROM scratch` or be based on another,
established Official Image.
* It must follow `Dockerfile` best practices. These are discussed on the
[best practices page](/articles/dockerfile_best-practices). In addition,
Docker engineer Michael Crosby has some good tips for `Dockerfiles` in
this [blog post](http://crosbymichael.com/dockerfile-best-practices-take-2.html).
A common rationale for diverging from Official Repositories is to optimize for
image size. For instance, many of the programming language stack images contain
a complete build toolchain to support installation of modules that depend on
optimized code. An advanced user could build a custom image with just the
necessary pre-compiled libraries to save space.
While [`ONBUILD` triggers](https://docs.docker.com/reference/builder/#onbuild)
are not required, if you choose to use them you should:
A number of language stacks such as
[`python`](https://registry.hub.docker.com/_/python/) and
[`ruby`](https://registry.hub.docker.com/_/ruby/) have `-slim` tag variants
designed to fill the need for optimization. Even when these "slim" variants are
insufficient, it is still recommended to inherit from an Official Repository
base OS image to leverage the ongoing maintenance work, rather than duplicating
these efforts.
* Build both `ONBUILD` and non-`ONBUILD` images, with the `ONBUILD` image
built `FROM` the non-`ONBUILD` image.
* The `ONBUILD` image should be specifically tagged, for example, `ruby:
latest`and `ruby:onbuild`, or `ruby:2` and `ruby:2-onbuild`
## How can I get involved?
### A short description
All Official Repositories contain a **User Feedback** section in their
documentation which covers the details for that specific repository. In most
cases, the GitHub repository which contains the Dockerfiles for an Official
Repository also has an active issue tracker. General feedback and support
questions should be directed to `#docker-library` on Freenode IRC.
Include a brief description of your image (in plaintext). Only one description
is required; you don't need additional descriptions for each tag. The file
should also:
## How do I create a new Official Repository?
* Be named `README-short.txt`
* Reside in the repo for the "latest" tag
* Not exceed 100 characters
From a high level, an Official Repository starts out as a proposal in the form
of a set of GitHub pull requests. You'll find detailed and objective proposal
requirements in the following GitHub repositories:
### A logo
* [docker-library/official-images](https://github.com/docker-library/official-images)
Include a logo of your company or the product (png format preferred). Only one
logo is required; you don't need additional logo files for each tag. The logo
file should have the following characteristics:
* [docker-library/docs](https://github.com/docker-library/docs)
* Be named `logo.png`
* Should reside in the repo for the “latest” tag
* Should fit inside a 200px square, maximized in one dimension (preferably the
width)
* Square or wide (landscape) is preferred over tall (portrait), but exceptions
can be made based on the logo needed
The Official Repositories team, with help from community contributors, formally
review each proposal and provide feedback to the author. This initial review
process may require a bit of back and forth before the proposal is accepted.
### A long description
There are also subjective considerations during the review process. These
subjective concerns boil down to the basic question: "is this image generally
useful?" For example, the [`python`](https://registry.hub.docker.com/_/python/)
Official Repository is "generally useful" to the large Python developer
community, whereas an obscure text adventure game written in Python last week is
not.
Include a comprehensive description of your image (in Markdown format, GitHub
flavor preferred). Only one description is required; you dont need additional
descriptions for each tag. The file should also:
When a new proposal is accepted, the author becomes responsible for keeping
their images up-to-date and responding to user feedback. The Official
Repositories team becomes responsible for publishing the images and
documentation on Docker Hub. Updates to the Official Repository follow the same
pull request process, though with less review. The Official Repositories team
ultimately acts as a gatekeeper for all changes, which helps mitigate the risk
of quality and security issues from being introduced.
* Be named `README.md`
* Reside in the repo for the “latest” tag
* Be no longer than absolutely necessary, while still addressing all the
content requirements
In terms of content, the long description must include the following sections:
* Overview & links
* How-to/usage
* Issues & contributions
#### Overview and links
This section should provide:
* an overview of the software contained in the image, similar to the
introduction in a Wikipedia entry
* a selection of links to outside resources that help to describe the software
* a *mandatory* link to the `Dockerfile`
#### How-to/usage
A section that describes how to run and use the image, including common use
cases and example `Dockerfile`s (if applicable). Try to provide clear, step-by-
step instructions wherever possible.
#### Issues and contributions
In this section, point users to any resources that can help them contribute to
the project. Include contribution guidelines and any specific instructions
related to your development practices. Include a link to
[Dockers resources for contributors](https://docs.docker.com/contributing/contributing/).
Be sure to include contact info, handles, etc. for official maintainers.
Also include information letting users know where they can go for help and how
they can file issues with the repo. Point them to any specific IRC channels,
issue trackers, contacts, additional “how-to” information or other resources.
### License
Include a file, `LICENSE`, of any applicable license. Docker recommends using
the license of the software contained in the image, provided it allows Docker,
Inc. to legally build and distribute the image. Otherwise, Docker recommends
adopting the [Expat license](http://directory.fsf.org/wiki/License:Expat)
(a.k.a., the MIT or X11 license).
## Examples
Below are sample short and long description files for an imaginary image
containing Ruby on Rails.
### Short description
`README-short.txt`
`Ruby on Rails is an open-source application framework written in Ruby. It emphasizes best practices such as convention over configuration, active record pattern, and the model-view-controller pattern.`
### Long description
`README.md`
```markdown
# What is Ruby on Rails
Ruby on Rails, often simply referred to as Rails, is an open source web application framework which runs via the Ruby programming language. It is a full-stack framework: it allows creating pages and applications that gather information from the web server, talk to or query the database, and render templates out of the box. As a result, Rails features a routing system that is independent of the web server.
> [wikipedia.org/wiki/Ruby_on_Rails](https://en.wikipedia.org/wiki/Ruby_on_Rails)
# How to use this image
## Create a `Dockerfile` in your rails app project
FROM rails:onbuild
Put this file in the root of your app, next to the `Gemfile`.
This image includes multiple `ONBUILD` triggers so that should be all that you need for most applications. The build will `ADD . /usr/src/app`, `RUN bundle install`, `EXPOSE 3000`, and set the default command to `rails server`.
Then build and run the Docker image.
docker build -t my-rails-app .
docker run --name some-rails-app -d my-rails-app
Test it by visiting `http://container-ip:3000` in a browser. On the other hand, if you need access outside the host on port 8080:
docker run --name some-rails-app -p 8080:3000 -d my-rails-app
Then go to `http://localhost:8080` or `http://host-ip:8080` in a browser.
```
For more examples, take a look at these repos:
* [Go](https://github.com/docker-library/golang)
* [PostgreSQL](https://github.com/docker-library/postgres)
* [Buildpack-deps](https://github.com/docker-library/buildpack-deps)
* ["Hello World" minimal container](https://github.com/docker-library/hello-world)
* [Node](https://github.com/docker-library/node)
## Submit your repo
Once you've checked off everything in these guidelines, and are confident your
image is ready for primetime, please contact us at
[partners@docker.com](mailto:partners@docker.com) to have your project
considered for the Official Repos program.
> **Note**: If you are interested in proposing an Official Repository, but would
> like to discuss it with Docker, Inc. privately first, please send your
> inquiries to partners@docker.com. There is no fast-track or pay-for-status
> option.

View file

@ -51,10 +51,10 @@ private to public.
You can also collaborate on Docker Hub with organizations and groups.
You can read more about that [here](accounts/).
## Official repositories
## Official Repositories
The Docker Hub contains a number of [official
repositories](http://registry.hub.docker.com/official). These are
The Docker Hub contains a number of [Official
Repositories](http://registry.hub.docker.com/official). These are
certified repositories from vendors and contributors to Docker. They
contain Docker images from vendors like Canonical, Oracle, and Red Hat
that you can use to build applications and services.
@ -63,9 +63,9 @@ If you use Official Repositories you know you're using a supported,
optimized and up-to-date image to power your applications.
> **Note:**
> If you would like to contribute an official repository for your
> organization, product or team you can see more information
> [here](https://github.com/docker/stackbrew).
> If you would like to contribute an Official Repository for your
> organization, see [Official Repositories on Docker
> Hub](/docker-hub/official_repos) for more information.
## Private repositories

View file

@ -8,7 +8,7 @@ Docker is available in **openSUSE 12.3 and later**. Please note that due
to its current limitations Docker is able to run only **64 bit** architecture.
Docker is not part of the official repositories of openSUSE 12.3 and
openSUSE 13.1. Hence it is neccessary to add the [Virtualization
openSUSE 13.1. Hence it is necessary to add the [Virtualization
repository](https://build.opensuse.org/project/show/Virtualization) from
[OBS](https://build.opensuse.org/) to install the `docker` package.

View file

@ -28,9 +28,10 @@ To install the latest Debian package (may not be the latest Docker release):
To verify that everything has worked as expected:
$ sudo docker run -i -t ubuntu /bin/bash
$ sudo docker run --rm hello-world
Which should download the `ubuntu` image, and then start `bash` in a container.
This command downloads and runs the `hello-world` image in a container. When the
container runs, it prints an informational message. Then, it exits.
> **Note**:
> If you want to enable memory and swap accounting see

View file

@ -7,7 +7,7 @@ page_keywords: Docker, Docker documentation, requirements, linux, rhel
Docker is supported on the following versions of RHEL:
- [*Red Hat Enterprise Linux 7 (64-bit)*](#red-hat-enterprise-linux-7-installation)
- [*Red Hat Enterprise Linux 6.5 (64-bit)*](#red-hat-enterprise-linux-6.5-installation) or later
- [*Red Hat Enterprise Linux 6.6 (64-bit)*](#red-hat-enterprise-linux-66-installation) or later
## Kernel support
@ -41,14 +41,14 @@ Portal](https://access.redhat.com/).
Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
## Red Hat Enterprise Linux 6.5 installation
## Red Hat Enterprise Linux 6.6 installation
You will need **64 bit** [RHEL
6.5](https://access.redhat.com/site/articles/3078#RHEL6) or later, with
a RHEL 6 kernel version 2.6.32-431 or higher as this has specific kernel
fixes to allow Docker to work.
6.6](https://access.redhat.com/site/articles/3078#RHEL6) or later, with
a RHEL 6 kernel version 2.6.32-504.16.2 or higher as this has specific kernel
fixes to allow Docker to work. Related issues: [#9856](https://github.com/docker/docker/issues/9856).
Docker is available for **RHEL6.5** on EPEL. Please note that
Docker is available for **RHEL6.6** on EPEL. Please note that
this package is part of [Extra Packages for Enterprise Linux
(EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort to
create and maintain additional packages for the RHEL distribution.

View file

@ -67,7 +67,7 @@ The following provides greater detail on the process:
The design proposals are <a
href="https://github.com/docker/docker/pulls?q=is%3Aopen+is%3Apr+label%
3AProposal" target="_blank">all online in our GitHub pull requests</a>.
3Akind%2Fproposal" target="_blank">all online in our GitHub pull requests</a>.
3. Talk to the community about your idea.

View file

@ -77,7 +77,7 @@ Always rebase and squash your commits before making a pull request.
`git commit -s`
Make sure your message includes <a href="./set-up-git" target="_blank>your signature</a>.
Make sure your message includes <a href="./set-up-git" target="_blank>your signature</a>.
7. Force push any changes to your fork on GitHub.

Binary file not shown.

After

Width:  |  Height:  |  Size: 39 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 225 KiB

View file

@ -14,7 +14,7 @@ work on Docker. You need to understand and work with both the "bots" and the
"beings" to review your contribution.
## How we proces your review
## How we process your review
First to review your pull request is Gordon. Gordon is fast. He checks your
pull request (PR) for common problems like a missing signature. If Gordon finds a

View file

@ -46,9 +46,12 @@ target="_blank">docker/docker repository</a>.
that instead. You'll need to convert what you see in the guide to what is
appropriate to your tool.
5. Open a terminal window on your local host and change to your home directory. In Windows, you'll work in your Boot2Docker window instead of Powershell or cmd.
5. Open a terminal window on your local host and change to your home directory.
$ cd ~
In Windows, you'll work in your Boot2Docker window instead of Powershell or
a `cmd` window.
6. Create a `repos` directory.

View file

@ -0,0 +1,258 @@
page_title: Set up for development on Windows
page_description: How to set up a server to test Docker Windows client
page_keywords: development, inception, container, image Dockerfile, dependencies, Go, artifacts, windows
# Get the required software for Windows
This page explains how to get the software you need to use a Windows Server
2012 or Windows 8 machine for Docker development. Before you begin contributing
you must have:
- a GitHub account
- Git for Windows (msysGit)
- TDM-GCC, a compiler suite for Windows
- MinGW (tar and xz)
- Go language
> **Note**: This installation procedure refers to the `C:\` drive. If your system's main drive
is `D:\`, you'll need to substitute that where appropriate in these
instructions.
### Get a GitHub account
To contribute to the Docker project, you will need a <a
href="https://github.com" target="_blank">GitHub account</a>. A free account is
fine. All the Docker project repositories are public and visible to everyone.
You should also have some experience using both the GitHub application and `git`
on the command line.
## Install Git for Windows
Git for Windows includes several tools including msysGit, which is a build
environment. The environment contains the tools you need for development such as
Git and a Git Bash shell.
1. Browse to the [Git for Windows](https://msysgit.github.io/) download page.
2. Click **Download**.
Windows prompts you to save the file to your machine.
3. Run the saved file.
The system displays the **Git Setup** wizard.
4. Click the **Next** button to move through the wizard and accept all the defaults.
5. Click **Finish** when you are done.
## Installing TDM-GCC
TDM-GCC is a compiler suite for Windows. You'll use this suite to compile the
Docker Go code as you develop.
1. Browse to
[tdm-gcc download page](http://tdm-gcc.tdragon.net/download).
2. Click on the latest 64-bit version of the package.
Windows prompts you to save the file to your machine
3. Set up the suite by running the downloaded file.
The system opens the **TDM-GCC Setup** wizard.
4. Click **Create**.
5. Click the **Next** button to move through the wizard and accept all the defaults.
6. Click **Finish** when you are done.
## Installing MinGW (tar and xz)
MinGW is a minimalist port of the GNU Compiler Collection (GCC). In this
procedure, you first download and install the MinGW installation manager. Then,
you use the manager to install the `tar` and `xz` tools from the collection.
1. Browse to MinGW
[SourceForge](http://sourceforge.net/projects/mingw/).
2. Click **Download**.
Windows prompts you to save the file to your machine
3. Run the downloaded file.
The system opens the **MinGW Installation Manager Setup Tool**
4. Choose **Install** to install the MinGW Installation Manager.
5. Press **Continue**.
The system installs and then opens the MinGW Installation Manager.
6. Press **Continue** after the install completes to open the manager.
7. Select **All Packages > MSYS Base System** from the left hand menu.
The system displays the available packages.
8. Click on the **msys-tar bin** package and choose **Mark for Installation**.
9. Click on the **msys-xz bin** package and choose **Mark for Installation**.
10. Select **Installation > Apply Changes**, to install the selected packages.
The system displays the **Schedule of Pending Actions Dialog**.
![windows-mingw](/project/images/windows-mingw.png)
11. Press **Apply**
MinGW installs the packages for you.
12. Close the dialog and the MinGW Installation Manager.
## Set up your environment variables
You'll need to add the compiler to your `Path` environment variable.
1. Open the **Control Panel**.
2. Choose **System and Security > System**.
3. Click the **Advanced system settings** link in the sidebar.
The system opens the **System Properties** dialog.
3. Select the **Advanced** tab.
4. Click **Environment Variables**.
The system opens the **Environment Variables dialog** dialog.
5. Locate the **System variables** area and scroll to the **Path**
variable.
![windows-mingw](/project/images/path_variable.png)
6. Click **Edit** to edit the variable (you can also double-click it).
The system opens the **Edit System Variable** dialog.
7. Make sure the `Path` includes `C:\TDM-GCC64\bin`
![include gcc](/project/images/include_gcc.png)
If you don't see `C:\TDM-GCC64\bin`, add it.
8. Press **OK** to close this dialog.
9. Press **OK** twice to close out of the remaining dialogs.
## Install Go and cross-compile it
In this section, you install the Go language. Then, you build the source so that it can cross-compile for `linux/amd64` architectures.
1. Open [Go Language download](http://golang.org/dl/) page in your browser.
2. Locate and click the latest `.msi` installer.
The system prompts you to save the file.
3. Run the installer.
The system opens the **Go Programming Language Setup** dialog.
4. Select all the defaults to install.
5. Press **Finish** to close the installation dialog.
6. Start a command prompt.
7. Change to the Go `src` directory.
cd c:\Go\src
8. Set the following Go variables
c:\Go\src> set GOOS=linux
c:\Go\src> set GOARCH=amd64
9. Compile the source.
c:\Go\src> make.bat
Compiling the source also adds a number of variables to your Windows environment.
## Get the Docker repository
In this step, you start a Git `bash` terminal and get the Docker source code from
Github.
1. Locate the **Git Bash** program and start it.
Recall that **Git Bash** came with the Git for Windows installation. **Git
Bash**, just as it sounds, allows you to run a Bash terminal on Windows.
![Git Bash](/project/images/git_bash.png)
2. Change to the root directory.
$ cd /c/
3. Make a `gopath` directory.
$ mkdir gopath
4. Go get the `docker/docker` repository.
$ go.exe get github.com/docker/docker package github.com/docker/docker
imports github.com/docker/docker
imports github.com/docker/docker: no buildable Go source files in C:\gopath\src\github.com\docker\docker
In the next steps, you create environment variables for your Go paths.
5. Open the **Control Panel** on your system.
6. Choose **System and Security > System**.
7. Click the **Advanced system settings** link in the sidebar.
The system opens the **System Properties** dialog.
8. Select the **Advanced** tab.
9. Click **Environment Variables**.
The system opens the **Environment Variables dialog** dialog.
10. Locate the **System variables** area and scroll to the **Path**
variable.
11. Click **New**.
Now you are going to create some new variables. You'll create the paths themselves in a later procedure, but you can set the variables for them now.
12. Enter `GOPATH` for the **Variable Name**.
13. For the **Variable Value** enter the following:
C:\gopath;C:\gopath\src\github.com\docker\docker\vendor
14. Press **OK** to close this dialog.
The system adds `GOPATH` to the list of **System Variables**.
15. Press **OK** twice to close out of the remaining dialogs.
## Where to go next
In the next section, you'll [learn how to set up and configure Git for
contributing to Docker](/project/set-up-git/).

View file

@ -2,9 +2,10 @@ page_title: Get the required software
page_description: Describes the software required to contribute to Docker
page_keywords: GitHub account, repository, Docker, Git, Go, make,
# Get the required software
# Get the required software for Linux or OS X
Before you begin contributing you must have:
This page explains how to get the software you need to use a Linux or OS X
machine for Docker development. Before you begin contributing you must have:
* a GitHub account
* `git`

View file

@ -40,7 +40,7 @@ units each have unit tests and then, together, integration tests that test the
interface between the components. The `integration` and `integration-cli`
directories in the Docker repository contain integration test code.
Testing is its own speciality. If you aren't familiar with testing techniques,
Testing is its own specialty. If you aren't familiar with testing techniques,
there is a lot of information available to you on the Web. For now, you should
understand that, the Docker maintainers may ask you to write a new test or
change an existing one.
@ -230,6 +230,46 @@ with new memory settings.
6. Restart your container and try your test again.
## Testing just the Windows client
This explains how to test the Windows client on a Windows server set up as a
development environment. You'll use the **Git Bash** that came with the Git for
Windows installation. **Git Bash**, just as it sounds, allows you to run a Bash
terminal on Windows.
1. If you don't have one, start a Git Bash terminal.
![Git Bash](/project/images/git_bash.png)
2. Change to the `docker` source directory.
$ cd /c/gopath/src/github.com/docker/docker
3. Set `DOCKER_CLIENTONLY` as follows:
$ export DOCKER_CLIENTONLY=1
This ensures you are building only the client binary instead of both the
binary and the daemon.
4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your
machine's actual IP address, for example:
$ export DOCKER_TEST_HOST=tcp://263.124.23.200:2376
5. Make the binary and the test:
$ hack/make.sh binary test-integration-cli
Many tests are skipped on Windows for various reasons. You see which tests
were skipped by re-running the make and passing in the
`TESTFLAGS='-test.v'` value.
You can now choose to make changes to the Docker source or the tests. If you
make any changes just run these commands again.
## Build and test the documentation
The Docker documentation source files are under `docs/sources`. The content is

View file

@ -1047,7 +1047,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:

View file

@ -1053,7 +1053,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:

View file

@ -1115,7 +1115,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:

View file

@ -1104,7 +1104,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:

View file

@ -1114,7 +1114,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:

View file

@ -1261,7 +1261,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:
@ -1495,7 +1495,7 @@ Get a tarball containing all images and metadata for the repository specified
by `name`.
If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
(and its parents) are returned. If `name` is an image ID, similarly only tha
(and its parents) are returned. If `name` is an image ID, similarly only that
image (and its parents) are returned, but with the exclusion of the
'repositories' file in the tarball, as there were no image names referenced.

View file

@ -1262,7 +1262,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:
@ -1509,7 +1509,7 @@ Get a tarball containing all images and metadata for the repository specified
by `name`.
If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
(and its parents) are returned. If `name` is an image ID, similarly only tha
(and its parents) are returned. If `name` is an image ID, similarly only that
image (and its parents) are returned, but with the exclusion of the
'repositories' file in the tarball, as there were no image names referenced.

View file

@ -1140,7 +1140,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:
@ -1675,7 +1675,7 @@ Get a tarball containing all images and metadata for the repository specified
by `name`.
If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
(and its parents) are returned. If `name` is an image ID, similarly only tha
(and its parents) are returned. If `name` is an image ID, similarly only that
image (and its parents) are returned, but with the exclusion of the
'repositories' file in the tarball, as there were no image names referenced.

View file

@ -91,6 +91,7 @@ Query Parameters:
- **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters:
- exited=&lt;int&gt; -- containers with exit code of &lt;int&gt;
- status=(restarting|running|paused|exited)
- label=`key` or `key=value` of a container label
Status Codes:
@ -1191,6 +1192,7 @@ Query Parameters:
- **all** 1/True/true or 0/False/false, default false
- **filters** a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
- dangling=true
- label=`key` or `key=value` of an image label
### Build image from a Dockerfile
@ -1250,7 +1252,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:
@ -1791,7 +1793,7 @@ Get a tarball containing all images and metadata for the repository specified
by `name`.
If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
(and its parents) are returned. If `name` is an image ID, similarly only tha
(and its parents) are returned. If `name` is an image ID, similarly only that
image (and its parents) are returned, but with the exclusion of the
'repositories' file in the tarball, as there were no image names referenced.

View file

@ -91,6 +91,7 @@ Query Parameters:
- **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters:
- exited=&lt;int&gt; -- containers with exit code of &lt;int&gt;
- status=(restarting|running|paused|exited)
- label=`key` or `key=value` of a container label
Status Codes:
@ -1194,6 +1195,7 @@ Query Parameters:
- **all** 1/True/true or 0/False/false, default false
- **filters** a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
- dangling=true
- label=`key` or `key=value` of an image label
### Build image from a Dockerfile
@ -1253,7 +1255,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:
@ -1794,7 +1796,7 @@ Get a tarball containing all images and metadata for the repository specified
by `name`.
If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
(and its parents) are returned. If `name` is an image ID, similarly only tha
(and its parents) are returned. If `name` is an image ID, similarly only that
image (and its parents) are returned, but with the exclusion of the
'repositories' file in the tarball, as there were no image names referenced.

View file

@ -1052,7 +1052,7 @@ Query Parameters:
Request Headers:
- **Content-type** should be set to `"application/tar"`.
- **X-Registry-Config** base64-encoded ConfigFile objec
- **X-Registry-Config** base64-encoded ConfigFile object
Status Codes:

View file

@ -41,10 +41,11 @@ whole context must be transferred to the daemon. The Docker CLI reports
> repository, the entire contents of your hard drive will get sent to the daemon (and
> thus to the machine running the daemon). You probably don't want that.
In most cases, it's best to put each Dockerfile in an empty directory, and then add only
the files needed for building that Dockerfile to that directory. To further speed up the
build, you can exclude files and directories by adding a `.dockerignore` file to the same
directory.
In most cases, it's best to put each Dockerfile in an empty directory. Then,
only add the files needed for building the Dockerfile to the directory. To
increase the build's performance, you can exclude files and directories by
adding a `.dockerignore` file to the directory. For information about how to
create one, see the [`.dockerignore` file](#the-dockerignore-file) section on this page.
You can specify a repository and tag at which to save the new image if
the build succeeds:
@ -128,7 +129,7 @@ modifiers as specified below:
* `${variable:-word}` indicates that if `variable` is set then the result
will be that value. If `variable` is not set then `word` will be the result.
* `${variable:+word}` indiates that if `variable` is set then `word` will be
* `${variable:+word}` indicates that if `variable` is set then `word` will be
the result, otherwise the result is the empty string.
In all cases, `word` can be any string, including additional environment
@ -158,7 +159,7 @@ The instructions that handle environment variables in the `Dockerfile` are:
`ONBUILD` instructions are **NOT** supported for environment replacement, even
the instructions above.
Environment variable subtitution will use the same value for each variable
Environment variable substitution will use the same value for each variable
throughout the entire command. In other words, in this example:
ENV abc=hello
@ -169,43 +170,67 @@ will result in `def` having a value of `hello`, not `bye`. However,
`ghi` will have a value of `bye` because it is not part of the same command
that set `abc` to `bye`.
## The `.dockerignore` file
### .dockerignore file
If a file named `.dockerignore` exists in the source repository, then it
is interpreted as a newline-separated list of exclusion patterns.
Exclusion patterns match files or directories relative to the source repository
that will be excluded from the context. Globbing is done using Go's
If a file named `.dockerignore` exists in the root of `PATH`, then Docker
interprets it as a newline-separated list of exclusion patterns. Docker excludes
files or directories relative to `PATH` that match these exclusion patterns. If
there are any `.dockerignore` files in `PATH` subdirectories, Docker treats
them as normal files.
Filepaths in `.dockerignore` are absolute with the current directory as the
root. Wildcards are allowed but the search is not recursive. Globbing (file name
expansion) is done using Go's
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
> **Note**:
> The `.dockerignore` file can even be used to ignore the `Dockerfile` and
> `.dockerignore` files. This might be useful if you are copying files from
> the root of the build context into your new container but do not want to
> include the `Dockerfile` or `.dockerignore` files (e.g. `ADD . /someDir/`).
You can specify exceptions to exclusion rules. To do this, simply prefix a
pattern with an `!` (exclamation mark) in the same way you would in a
`.gitignore` file. Currently there is no support for regular expressions.
Formats like `[^temp*]` are ignored.
The following example shows the use of the `.dockerignore` file to exclude the
`.git` directory from the context. Its effect can be seen in the changed size of
the uploaded context.
The following is an example `.dockerignore` file:
```
*/temp*
*/*/temp*
temp?
*.md
!LICENSE.md
```
This file causes the following build behavior:
| Rule | Behavior |
|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `*/temp*` | Exclude all files with names starting with `temp` in any subdirectory below the root directory. For example, a file named `/somedir/temporary.txt` is ignored. |
| `*/*/temp*` | Exclude files starting with name `temp` from any subdirectory that is two levels below the root directory. For example, the file `/somedir/subdir/temporary.txt` is ignored. |
| `temp?` | Exclude the files that match the pattern in the root directory. For example, the files `tempa`, `tempb` in the root directory are ignored. |
| `*.md` | Exclude all Markdown files. |
| `!LICENSE.md` | Exception to the exclude all Markdown files is this file, `LICENSE.md`, include this file in the build. |
The placement of `!` exception rules influences the matching algorithm; the
last line of the `.dockerignore` that matches a particular file determines
whether it is included or excluded. In the above example, the `LICENSE.md` file
matches both the `*.md` and `!LICENSE.md` rule. If you reverse the lines in the
example:
```
*/temp*
*/*/temp*
temp?
!LICENSE.md
*.md
```
The build would exclude `LICENSE.md` because the last `*.md` rule adds all
Markdown files back onto the ignore list. The `!LICENSE.md` rule has no effect
because the subsequent `*.md` rule overrides it.
You can even use the `.dockerignore` file to ignore the `Dockerfile` and
`.dockerignore` files. This is useful if you are copying files from the root of
the build context into your new container but do not want to include the
`Dockerfile` or `.dockerignore` files (e.g. `ADD . /someDir/`).
$ docker build .
Uploading context 18.829 MB
Uploading context
Step 0 : FROM busybox
---> 769b9341d937
Step 1 : CMD echo Hello World
---> Using cache
---> 99cc1ad10469
Successfully built 99cc1ad10469
$ echo ".git" > .dockerignore
$ docker build .
Uploading context 6.76 MB
Uploading context
Step 0 : FROM busybox
---> 769b9341d937
Step 1 : CMD echo Hello World
---> Using cache
---> 99cc1ad10469
Successfully built 99cc1ad10469
## FROM
@ -299,7 +324,7 @@ The cache for `RUN` instructions can be invalidated by `ADD` instructions. See
the layers with `dirperm1` option. More details on `dirperm1` option can be
found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html)
If your system doesnt have support for `dirperm1`, the issue describes a workaround.
If your system doesn't have support for `dirperm1`, the issue describes a workaround.
## CMD
@ -368,7 +393,7 @@ default specified in `CMD`.
The `LABEL` instruction adds metadata to an image. A `LABEL` is a
key-value pair. To include spaces within a `LABEL` value, use quotes and
blackslashes as you would in command-line parsing.
backslashes as you would in command-line parsing.
LABEL "com.example.vendor"="ACME Incorporated"

View file

@ -442,7 +442,7 @@ Currently supported options are:
> Otherwise, set this flag for migrating existing Docker daemons to a
> daemon with a supported environment.
### Docker exec-driver option
### Docker execdriver option
The Docker daemon uses a specifically built `libcontainer` execution driver as its
interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`.
@ -452,6 +452,21 @@ https://linuxcontainers.org/) via the `lxc` execution driver, however, this is
not where the primary development of new functionality is taking place.
Add `-e lxc` to the daemon flags to use the `lxc` execution driver.
#### Options for the native execdriver
You can configure the `native` (libcontainer) execdriver using options specified
with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
single `native.cgroupdriver` option is available.
The `native.cgroupdriver` option specifies the management of the container's
cgroups. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and
it is not available, the system uses `cgroupfs`. By default, if no option is
specified, the execdriver first tries `systemd` and falls back to `cgroupfs`.
This example sets the execdriver to `cgroupfs`:
$ sudo docker -d --exec-opt native.cgroupdriver=cgroupfs
Setting this option applies to all containers the daemon launches.
### Daemon DNS options
@ -636,12 +651,13 @@ refer to any of the files in the context. For example, your build can use
an [*ADD*](/reference/builder/#add) instruction to reference a file in the
context.
The `URL` parameter can specify the location of a Git repository; in this
case, the repository is the context. The Git repository is recursively
cloned with its submodules. The system does a fresh `git clone -recursive`
in a temporary directory on your local host. Then, this clone is sent to
the Docker daemon as the context. Local clones give you the ability to
access private repositories using local user credentials, VPN's, and so forth.
The `URL` parameter can specify the location of a Git repository;
the repository acts as the build context. The system recursively clones the repository
and its submodules using a `git clone --depth 1 --recursive` command.
This command runs in a temporary directory on your local host.
After the command succeeds, the directory is sent to the Docker daemon as the context.
Local clones give you the ability to access private repositories using
local user credentials, VPNs, and so forth.
Instead of specifying a context, you can pass a single Dockerfile in the
`URL` or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`:
@ -652,6 +668,26 @@ If you use STDIN or specify a `URL`, the system places the contents into a
file called `Dockerfile`, and any `-f`, `--file` option is ignored. In this
scenario, there is no context.
By default the `docker build` command will look for a `Dockerfile` at the
root of the build context. The `-f`, `--file`, option lets you specify
the path to an alternative file to use instead. This is useful
in cases where the same set of files are used for multiple builds. The path
must be to a file within the build context. If a relative path is specified
then it must be relative to the current directory.
In most cases, it's best to put each Dockerfile in an empty directory. Then, add
to that directory only the files needed for building the Dockerfile. To increase
the build's performance, you can exclude files and directories by adding a
`.dockerignore` file to that directory as well. For information on creating one,
see the [.dockerignore file](../../reference/builder/#dockerignore-file).
If the Docker client loses connection to the daemon, the build is canceled.
This happens if you interrupt the Docker client with `ctrl-c` or if the Docker
client is killed for any reason.
> **Note:** Currently only the "run" phase of the build can be canceled
> (until pull cancellation is implemented).
### Return code
On a successful build, a return code of success `0` will be returned.
@ -672,55 +708,11 @@ INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13
$ echo $?
1
```
### .dockerignore file
If a file named `.dockerignore` exists in the root of `PATH` then it
is interpreted as a newline-separated list of exclusion patterns.
Exclusion patterns match files or directories relative to `PATH` that
will be excluded from the context. Globbing is done using Go's
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
Please note that `.dockerignore` files in other subdirectories are
considered as normal files. Filepaths in `.dockerignore` are absolute with
the current directory as the root. Wildcards are allowed but the search
is not recursive.
#### Example .dockerignore file
*/temp*
*/*/temp*
temp?
The first line above, `*/temp*`, ignores all files with names starting with
`temp` from any subdirectory below the root directory. For example, a file named
`/somedir/temporary.txt` would be ignored. The second line `*/*/temp*`, will
ignore files starting with name `temp` from any subdirectory that is two levels
below the root directory. For example, the file `/somedir/subdir/temporary.txt`
would get ignored in this case. The last line in the above example `temp?`
will ignore the files that match the pattern from the root directory.
For example, the files `tempa`, `tempb` are ignored from the root directory.
Currently there is no support for regular expressions. Formats
like `[^temp*]` are ignored.
By default the `docker build` command will look for a `Dockerfile` at the
root of the build context. The `-f`, `--file`, option lets you specify
the path to an alternative file to use instead. This is useful
in cases where the same set of files are used for multiple builds. The path
must be to a file within the build context. If a relative path is specified
then it must be relative to the current directory.
If the Docker client loses connection to the daemon, the build is canceled.
This happens if you interrupt the Docker client with `ctrl-c` or if the Docker
client is killed for any reason.
> **Note:** Currently only the "run" phase of the build can be canceled
> (until pull cancellation is implemented).
See also:
[*Dockerfile Reference*](/reference/builder).
#### Examples
### Examples
$ docker build .
Uploading context 10240 bytes
@ -789,7 +781,8 @@ affect the build cache.
This example shows the use of the `.dockerignore` file to exclude the `.git`
directory from the context. Its effect can be seen in the changed size of the
uploaded context.
uploaded context. The builder reference contains detailed information on
[creating a .dockerignore file](../../builder/#dockerignore-file).
$ docker build -t vieux/apache:2.0 .

View file

@ -380,7 +380,7 @@ This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600,
and so on until either the `on-failure` limit is hit, or when you `docker stop`
or `docker rm -f` the container.
If a container is succesfully restarted (the container is started and runs
If a container is successfully restarted (the container is started and runs
for at least 10 seconds), the delay is reset to its default value of 100 ms.
You can specify the maximum amount of times Docker will try to restart the

View file

@ -29,7 +29,7 @@ A Fully Qualified Image Name (FQIN) can be made up of 3 parts:
If you create a new repository which you want to share, you will need to
set at least the `user_name`, as the `default` blank `user_name` prefix is
reserved for official Docker images.
reserved for [Official Repositories](/docker-hub/official_repos).
For more information see [*Working with
Repositories*](/userguide/dockerrepos/#working-with-the-repository)

View file

@ -131,11 +131,11 @@ term `sinatra`.
We can see we've returned a lot of images that use the term `sinatra`. We've
returned a list of image names, descriptions, Stars (which measure the social
popularity of images - if a user likes an image then they can "star" it), and
the Official and Automated build statuses. Official repositories are built and
maintained by the [Stackbrew](https://github.com/docker/stackbrew) project,
and Automated repositories are [Automated Builds](
/userguide/dockerrepos/#automated-builds) that allow you to validate the source
and content of an image.
the Official and Automated build statuses.
[Official Repositories](/docker-hub/official_repos) are a carefully curated set
of Docker repositories supported by Docker, Inc. Automated repositories are
[Automated Builds](/userguide/dockerrepos/#automated-builds) that allow you to
validate the source and content of an image.
We've reviewed the images available to use and we decided to use the
`training/sinatra` image. So far we've seen two types of images repositories,

View file

@ -51,12 +51,12 @@ name, user name, or description:
tianon/centos CentOS 5 and 6, created using rinse instea... 21
...
There you can see two example results: `centos` and
`tianon/centos`. The second result shows that it comes from
the public repository of a user, named `tianon/`, while the first result,
`centos`, doesn't explicitly list a repository which means that it comes from the
trusted top-level namespace. The `/` character separates a user's
repository from the image name.
There you can see two example results: `centos` and `tianon/centos`. The second
result shows that it comes from the public repository of a user, named
`tianon/`, while the first result, `centos`, doesn't explicitly list a
repository which means that it comes from the trusted top-level namespace for
[Official Repositories](/docker-hub/official_repos). The `/` character separates
a user's repository from the image name.
Once you've found the image you want, you can download it with `docker pull <imagename>`:

View file

@ -182,7 +182,7 @@ func TestInputAddEmpty(t *testing.T) {
t.Fatal(err)
}
if len(data) > 0 {
t.Fatalf("Read from empty input shoul yield no data")
t.Fatalf("Read from empty input should yield no data")
}
}

View file

@ -83,8 +83,15 @@ func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error {
}
// write repositories, if there is something to write
if len(rootRepoMap) > 0 {
rootRepoJson, _ := json.Marshal(rootRepoMap)
if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
f, err := os.OpenFile(path.Join(tempdir, "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
f.Close()
return err
}
if err := json.NewEncoder(f).Encode(rootRepoMap); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
} else {

View file

@ -348,9 +348,8 @@ func (graph *Graph) Delete(name string) error {
tmp, err := graph.Mktemp("")
graph.idIndex.Delete(id)
if err == nil {
err = os.Rename(graph.ImageRoot(id), tmp)
// On err make tmp point to old dir and cleanup unused tmp dir
if err != nil {
if err := os.Rename(graph.ImageRoot(id), tmp); err != nil {
// On err make tmp point to old dir and cleanup unused tmp dir
os.RemoveAll(tmp)
tmp = graph.ImageRoot(id)
}

View file

@ -58,22 +58,26 @@ func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error {
}
}
repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
if err == nil {
repositories := map[string]Repository{}
if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
reposJSONFile, err := os.Open(path.Join(tmpImageDir, "repo", "repositories"))
if err != nil {
if !os.IsNotExist(err) {
return err
}
return nil
}
defer reposJSONFile.Close()
for imageName, tagMap := range repositories {
for tag, address := range tagMap {
if err := s.SetLoad(imageName, tag, address, true, outStream); err != nil {
return err
}
repositories := map[string]Repository{}
if err := json.NewDecoder(reposJSONFile).Decode(&repositories); err != nil {
return err
}
for imageName, tagMap := range repositories {
for tag, address := range tagMap {
if err := s.SetLoad(imageName, tag, address, true, outStream); err != nil {
return err
}
}
} else if !os.IsNotExist(err) {
return err
}
return nil

View file

@ -537,8 +537,7 @@ func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *regis
di.err <- downloadFunc(di)
}(&downloads[i])
} else {
err := downloadFunc(&downloads[i])
if err != nil {
if err := downloadFunc(&downloads[i]); err != nil {
return false, err
}
}
@ -548,8 +547,7 @@ func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *regis
for i := len(downloads) - 1; i >= 0; i-- {
d := &downloads[i]
if d.err != nil {
err := <-d.err
if err != nil {
if err := <-d.err; err != nil {
return false, err
}
}

View file

@ -367,8 +367,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o
logrus.Debugf("Pushing layer: %s", layer.ID)
if layer.Config != nil && metadata.Image != layer.ID {
err = runconfig.Merge(&metadata, layer.Config)
if err != nil {
if err := runconfig.Merge(&metadata, layer.Config); err != nil {
return err
}
}

View file

@ -115,11 +115,12 @@ func (store *TagStore) save() error {
}
func (store *TagStore) reload() error {
jsonData, err := ioutil.ReadFile(store.path)
f, err := os.Open(store.path)
if err != nil {
return err
}
if err := json.Unmarshal(jsonData, store); err != nil {
defer f.Close()
if err := json.NewDecoder(f).Decode(&store); err != nil {
return err
}
return nil

View file

@ -268,8 +268,7 @@ func NewImgJSON(src []byte) (*Image, error) {
func ValidateID(id string) error {
validHex := regexp.MustCompile(`^([a-f0-9]{64})$`)
if ok := validHex.MatchString(id); !ok {
err := fmt.Errorf("image ID '%s' is invalid", id)
return err
return fmt.Errorf("image ID '%s' is invalid", id)
}
return nil
}

View file

@ -24,13 +24,58 @@ func (s *TimerSuite) TearDownTest(c *check.C) {
fmt.Printf("%-60s%.2f\n", c.TestName(), time.Since(s.start).Seconds())
}
func init() {
check.Suite(&DockerSuite{})
}
type DockerSuite struct {
TimerSuite
}
func (s *DockerSuite) TearDownTest(c *check.C) {
deleteAllContainers()
deleteAllImages()
s.TimerSuite.TearDownTest(c)
}
var _ = check.Suite(&DockerSuite{})
func init() {
check.Suite(&DockerRegistrySuite{
ds: &DockerSuite{},
})
}
type DockerRegistrySuite struct {
ds *DockerSuite
reg *testRegistryV2
}
func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
s.reg = setupRegistry(c)
s.ds.SetUpTest(c)
}
func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
s.reg.Close()
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerDaemonSuite{
ds: &DockerSuite{},
})
}
type DockerDaemonSuite struct {
ds *DockerSuite
d *Daemon
}
func (s *DockerDaemonSuite) SetUpTest(c *check.C) {
s.d = NewDaemon(c)
s.ds.SetUpTest(c)
}
func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
s.d.Stop()
s.ds.TearDownTest(c)
}

View file

@ -40,24 +40,38 @@ func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) {
expected := []byte("hello")
actual := make([]byte, len(expected))
outChan := make(chan string)
outChan := make(chan error)
go func() {
if _, err := ws.Read(actual); err != nil {
c.Fatal(err)
}
outChan <- "done"
_, err := ws.Read(actual)
outChan <- err
close(outChan)
}()
inChan := make(chan string)
inChan := make(chan error)
go func() {
if _, err := ws.Write(expected); err != nil {
c.Fatal(err)
}
inChan <- "done"
_, err := ws.Write(expected)
inChan <- err
close(inChan)
}()
<-inChan
<-outChan
select {
case err := <-inChan:
if err != nil {
c.Fatal(err)
}
case <-time.After(5 * time.Second):
c.Fatal("Timeout writing to ws")
}
select {
case err := <-outChan:
if err != nil {
c.Fatal(err)
}
case <-time.After(5 * time.Second):
c.Fatal("Timeout reading from ws")
}
if !bytes.Equal(expected, actual) {
c.Fatal("Expected output on websocket to match input")

View file

@ -176,7 +176,7 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
c.Fatal(out, err)
}
name := "testing"
name := "TestContainerApiStartDupVolumeBinds"
config := map[string]interface{}{
"Image": "busybox",
"Volumes": map[string]struct{}{volPath: {}},
@ -260,15 +260,14 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) {
c.Fatalf("Error on container creation: %v, output: %q", err, out)
}
type b struct {
body []byte
err error
status int
body []byte
err error
}
bc := make(chan b, 1)
go func() {
status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil)
c.Assert(status, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
bc <- b{body, err}
bc <- b{status, body, err}
}()
// allow some time to stream the stats from the container
@ -283,9 +282,8 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) {
case <-time.After(2 * time.Second):
c.Fatal("stream was not closed after container was removed")
case sr := <-bc:
if sr.err != nil {
c.Fatal(sr.err)
}
c.Assert(sr.err, check.IsNil)
c.Assert(sr.status, check.Equals, http.StatusOK)
dec := json.NewDecoder(bytes.NewBuffer(sr.body))
var s *types.Stats
@ -297,6 +295,7 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) {
}
func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) {
// TODO: this test does nothing because we are c.Assert'ing in goroutine
var (
name = "statscontainer"
runCmd = exec.Command(dockerBinary, "create", "--name", name, "busybox", "top")
@ -339,8 +338,8 @@ func (s *DockerSuite) TestBuildApiDockerfilePath(c *check.C) {
c.Fatalf("failed to close tar archive: %v", err)
}
status, body, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar")
c.Assert(status, check.Equals, http.StatusInternalServerError)
res, body, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar")
c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
c.Assert(err, check.IsNil)
out, err := readBody(body)
@ -365,8 +364,8 @@ RUN find /tmp/`,
}
defer server.Close()
status, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json")
c.Assert(status, check.Equals, http.StatusOK)
res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json")
c.Assert(res.StatusCode, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
buf, err := readBody(body)
@ -393,8 +392,8 @@ RUN echo from dockerfile`,
}
defer git.Close()
status, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
c.Assert(status, check.Equals, http.StatusOK)
res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
c.Assert(res.StatusCode, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
buf, err := readBody(body)
@ -421,8 +420,8 @@ RUN echo from Dockerfile`,
defer git.Close()
// Make sure it tries to 'dockerfile' query param value
status, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json")
c.Assert(status, check.Equals, http.StatusOK)
res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json")
c.Assert(res.StatusCode, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
buf, err := readBody(body)
@ -450,8 +449,8 @@ RUN echo from dockerfile`,
defer git.Close()
// Make sure it tries to 'dockerfile' query param value
status, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
c.Assert(status, check.Equals, http.StatusOK)
res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
c.Assert(res.StatusCode, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
buf, err := readBody(body)
@ -483,8 +482,8 @@ func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) {
c.Fatalf("failed to close tar archive: %v", err)
}
status, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar")
c.Assert(status, check.Equals, http.StatusInternalServerError)
res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar")
c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
c.Assert(err, check.IsNil)
out, err := readBody(body)
@ -614,14 +613,14 @@ func (s *DockerSuite) TestContainerApiTop(c *check.C) {
}
func (s *DockerSuite) TestContainerApiCommit(c *check.C) {
out, err := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", "touch /test").CombinedOutput()
cName := "testapicommit"
out, err := exec.Command(dockerBinary, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test").CombinedOutput()
if err != nil {
c.Fatal(err, out)
}
id := strings.TrimSpace(string(out))
name := "testcommit" + stringid.GenerateRandomID()
status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+id, nil)
name := "TestContainerApiCommit"
status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil)
c.Assert(status, check.Equals, http.StatusCreated)
c.Assert(err, check.IsNil)
@ -632,7 +631,6 @@ func (s *DockerSuite) TestContainerApiCommit(c *check.C) {
if err := json.Unmarshal(b, &img); err != nil {
c.Fatal(err)
}
defer deleteImages(img.Id)
cmd, err := inspectField(img.Id, "Config.Cmd")
if err != nil {
@ -644,7 +642,7 @@ func (s *DockerSuite) TestContainerApiCommit(c *check.C) {
// sanity check, make sure the image is what we think it is
out, err = exec.Command(dockerBinary, "run", img.Id, "ls", "/test").CombinedOutput()
if err != nil {
c.Fatalf("error checking commited image: %v - %q", err, string(out))
c.Fatalf("error checking committed image: %v - %q", err, string(out))
}
}
@ -721,7 +719,7 @@ func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) {
"Image": "busybox",
}
create := func(ct string) (int, io.ReadCloser, error) {
create := func(ct string) (*http.Response, io.ReadCloser, error) {
jsonData := bytes.NewBuffer(nil)
if err := json.NewEncoder(jsonData).Encode(config); err != nil {
c.Fatal(err)
@ -730,21 +728,21 @@ func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) {
}
// Try with no content-type
status, body, err := create("")
c.Assert(status, check.Equals, http.StatusInternalServerError)
res, body, err := create("")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
body.Close()
// Try with wrong content-type
status, body, err = create("application/xml")
c.Assert(status, check.Equals, http.StatusInternalServerError)
res, body, err = create("application/xml")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
body.Close()
// now application/json
status, body, err = create("application/json")
c.Assert(status, check.Equals, http.StatusCreated)
res, body, err = create("application/json")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusCreated)
body.Close()
}
@ -775,8 +773,8 @@ func (s *DockerSuite) TestContainerApiPostCreateNull(c *check.C) {
"NetworkDisabled":false,
"OnBuild":null}`
status, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
c.Assert(status, check.Equals, http.StatusCreated)
res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
c.Assert(res.StatusCode, check.Equals, http.StatusCreated)
c.Assert(err, check.IsNil)
b, err := readBody(body)
@ -809,13 +807,13 @@ func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) {
"Memory": 524287
}`
status, body, _ := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
res, body, _ := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
b, err2 := readBody(body)
if err2 != nil {
c.Fatal(err2)
}
c.Assert(status, check.Equals, http.StatusInternalServerError)
c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true)
}
@ -832,12 +830,31 @@ func (s *DockerSuite) TestStartWithTooLowMemoryLimit(c *check.C) {
"Memory": 524287
}`
status, body, _ := sockRequestRaw("POST", "/containers/"+containerID+"/start", strings.NewReader(config), "application/json")
res, body, _ := sockRequestRaw("POST", "/containers/"+containerID+"/start", strings.NewReader(config), "application/json")
b, err2 := readBody(body)
if err2 != nil {
c.Fatal(err2)
}
c.Assert(status, check.Equals, http.StatusInternalServerError)
c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true)
}
func (s *DockerSuite) TestContainerApiRename(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "--name", "TestContainerApiRename", "-d", "busybox", "sh")
out, _, err := runCommandWithOutput(runCmd)
c.Assert(err, check.IsNil)
containerID := strings.TrimSpace(out)
newName := "TestContainerApiRenameNew"
statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil)
// 204 No Content is expected, not 200
c.Assert(statusCode, check.Equals, http.StatusNoContent)
c.Assert(err, check.IsNil)
name, err := inspectField(containerID, "Name")
if name != "/"+newName {
c.Fatalf("Failed to rename container, expected %v, got %v. Container rename API failed", newName, name)
}
}

View file

@ -30,7 +30,6 @@ func (s *DockerSuite) TestApiImagesFilter(c *check.C) {
name := "utest:tag1"
name2 := "utest/docker:tag2"
name3 := "utest:5000/docker:tag3"
defer deleteImages(name, name2, name3)
for _, n := range []string{name, name2, name3} {
if out, err := exec.Command(dockerBinary, "tag", "busybox", n).CombinedOutput(); err != nil {
c.Fatal(err, out)
@ -74,11 +73,10 @@ func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) {
c.Fatal(err)
}
id := strings.TrimSpace(out)
defer deleteImages("saveandload")
status, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "")
c.Assert(status, check.Equals, http.StatusOK)
res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusOK)
defer body.Close()
@ -86,9 +84,9 @@ func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) {
c.Fatal(err, out)
}
status, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar")
c.Assert(status, check.Equals, http.StatusOK)
res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusOK)
defer loadBody.Close()

View file

@ -1,28 +1,46 @@
package main
import (
"bufio"
"bytes"
"fmt"
"net/http"
"os/exec"
"strings"
"time"
"github.com/go-check/check"
)
func (s *DockerSuite) TestLogsApiWithStdout(c *check.C) {
name := "logs_test"
runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "bin/sh", "-c", "sleep 10 && echo "+name)
if out, _, err := runCommandWithOutput(runCmd); err != nil {
c.Fatal(out, err)
out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done")
id := strings.TrimSpace(out)
if err := waitRun(id); err != nil {
c.Fatal(err)
}
status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", name), nil)
c.Assert(status, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
type logOut struct {
out string
res *http.Response
err error
}
chLog := make(chan logOut)
if !bytes.Contains(body, []byte(name)) {
c.Fatalf("Expected %s, got %s", name, string(body[:]))
go func() {
res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id), nil, "")
out, _ := bufio.NewReader(body).ReadString('\n')
chLog <- logOut{strings.TrimSpace(out), res, err}
}()
select {
case l := <-chLog:
c.Assert(l.err, check.IsNil)
c.Assert(l.res.StatusCode, check.Equals, http.StatusOK)
if !strings.HasSuffix(l.out, "hello") {
c.Fatalf("expected log output to container 'hello', but it does not")
}
case <-time.After(2 * time.Second):
c.Fatal("timeout waiting for logs to exit")
}
}

View file

@ -2,6 +2,7 @@ package main
import (
"bufio"
"fmt"
"io"
"os/exec"
"strings"
@ -89,7 +90,6 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) {
}
func (s *DockerSuite) TestAttachTtyWithoutStdin(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
@ -108,29 +108,32 @@ func (s *DockerSuite) TestAttachTtyWithoutStdin(c *check.C) {
}
}()
done := make(chan struct{})
done := make(chan error)
go func() {
defer close(done)
cmd := exec.Command(dockerBinary, "attach", id)
if _, err := cmd.StdinPipe(); err != nil {
c.Fatal(err)
done <- err
return
}
expected := "cannot enable tty mode"
if out, _, err := runCommandWithOutput(cmd); err == nil {
c.Fatal("attach should have failed")
done <- fmt.Errorf("attach should have failed")
return
} else if !strings.Contains(out, expected) {
c.Fatalf("attach failed with error %q: expected %q", out, expected)
done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected)
return
}
}()
select {
case <-done:
case err := <-done:
c.Assert(err, check.IsNil)
case <-time.After(attachWait):
c.Fatal("attach is running but should have failed")
}
}
func (s *DockerSuite) TestAttachDisconnect(c *check.C) {
@ -161,7 +164,7 @@ func (s *DockerSuite) TestAttachDisconnect(c *check.C) {
c.Fatal(err)
}
if strings.TrimSpace(out) != "hello" {
c.Fatalf("exepected 'hello', got %q", out)
c.Fatalf("expected 'hello', got %q", out)
}
if err := stdin.Close(); err != nil {
@ -174,7 +177,7 @@ func (s *DockerSuite) TestAttachDisconnect(c *check.C) {
c.Fatal(err)
}
if running != "true" {
c.Fatal("exepected container to still be running")
c.Fatal("expected container to still be running")
}
}

View file

@ -27,14 +27,14 @@ func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) {
c.Fatal(err)
}
done := make(chan struct{})
errChan := make(chan error)
go func() {
defer close(done)
defer close(errChan)
_, tty, err := pty.Open()
if err != nil {
c.Fatalf("could not open pty: %v", err)
errChan <- err
return
}
attachCmd := exec.Command(dockerBinary, "attach", id)
attachCmd.Stdin = tty
@ -42,7 +42,8 @@ func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) {
attachCmd.Stderr = tty
if err := attachCmd.Run(); err != nil {
c.Fatalf("attach returned error %s", err)
errChan <- err
return
}
}()
@ -51,7 +52,8 @@ func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) {
c.Fatalf("error thrown while waiting for container: %s, %v", out, err)
}
select {
case <-done:
case err := <-errChan:
c.Assert(err, check.IsNil)
case <-time.After(attachWait):
c.Fatal("timed out without attach returning")
}
@ -71,12 +73,10 @@ func (s *DockerSuite) TestAttachAfterDetach(c *check.C) {
cmd.Stdout = tty
cmd.Stderr = tty
detached := make(chan struct{})
errChan := make(chan error)
go func() {
if err := cmd.Run(); err != nil {
c.Fatalf("attach returned error %s", err)
}
close(detached)
errChan <- cmd.Run()
close(errChan)
}()
time.Sleep(500 * time.Millisecond)
@ -87,7 +87,12 @@ func (s *DockerSuite) TestAttachAfterDetach(c *check.C) {
time.Sleep(100 * time.Millisecond)
cpty.Write([]byte{17})
<-detached
select {
case err := <-errChan:
c.Assert(err, check.IsNil)
case <-time.After(5 * time.Second):
c.Fatal("timeout while detaching")
}
cpty, tty, err = pty.Open()
if err != nil {
@ -119,9 +124,7 @@ func (s *DockerSuite) TestAttachAfterDetach(c *check.C) {
select {
case err := <-readErr:
if err != nil {
c.Fatal(err)
}
c.Assert(err, check.IsNil)
case <-time.After(2 * time.Second):
c.Fatal("timeout waiting for attach read")
}
@ -172,7 +175,7 @@ func (s *DockerSuite) TestAttachDetach(c *check.C) {
c.Fatal(err)
}
if strings.TrimSpace(out) != "hello" {
c.Fatalf("exepected 'hello', got %q", out)
c.Fatalf("expected 'hello', got %q", out)
}
// escape sequence
@ -195,7 +198,7 @@ func (s *DockerSuite) TestAttachDetach(c *check.C) {
c.Fatal(err)
}
if running != "true" {
c.Fatal("exepected container to still be running")
c.Fatal("expected container to still be running")
}
go func() {
@ -243,7 +246,7 @@ func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) {
c.Fatal(err)
}
if strings.TrimSpace(out) != "hello" {
c.Fatalf("exepected 'hello', got %q", out)
c.Fatalf("expected 'hello', got %q", out)
}
// escape sequence
@ -266,7 +269,7 @@ func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) {
c.Fatal(err)
}
if running != "true" {
c.Fatal("exepected container to still be running")
c.Fatal("expected container to still be running")
}
go func() {

File diff suppressed because it is too large Load diff

View file

@ -33,7 +33,6 @@ func setupImageWithTag(tag string) (string, error) {
if out, _, err := runCommandWithOutput(cmd); err != nil {
return "", fmt.Errorf("image tagging failed: %s, %v", out, err)
}
defer deleteImages(repoAndTag)
// delete the container as we don't need it any more
if err := deleteContainer(containerName); err != nil {
@ -63,9 +62,7 @@ func setupImageWithTag(tag string) (string, error) {
return pushDigest, nil
}
func (s *DockerSuite) TestPullByTagDisplaysDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
pushDigest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -77,7 +74,6 @@ func (s *DockerSuite) TestPullByTagDisplaysDigest(c *check.C) {
if err != nil {
c.Fatalf("error pulling by tag: %s, %v", out, err)
}
defer deleteImages(repoName)
// the pull output includes "Digest: <digest>", so find that
matches := digestRegex.FindStringSubmatch(out)
@ -90,12 +86,9 @@ func (s *DockerSuite) TestPullByTagDisplaysDigest(c *check.C) {
if pushDigest != pullDigest {
c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
}
}
func (s *DockerSuite) TestPullByDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
pushDigest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -108,7 +101,6 @@ func (s *DockerSuite) TestPullByDigest(c *check.C) {
if err != nil {
c.Fatalf("error pulling by digest: %s, %v", out, err)
}
defer deleteImages(imageReference)
// the pull output includes "Digest: <digest>", so find that
matches := digestRegex.FindStringSubmatch(out)
@ -121,12 +113,9 @@ func (s *DockerSuite) TestPullByDigest(c *check.C) {
if pushDigest != pullDigest {
c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
}
}
func (s *DockerSuite) TestCreateByDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
pushDigest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -148,12 +137,9 @@ func (s *DockerSuite) TestCreateByDigest(c *check.C) {
if res != imageReference {
c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
}
}
func (s *DockerSuite) TestRunByDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) {
pushDigest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -184,12 +170,9 @@ func (s *DockerSuite) TestRunByDigest(c *check.C) {
if res != imageReference {
c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
}
}
func (s *DockerSuite) TestRemoveImageByDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) {
digest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -220,12 +203,9 @@ func (s *DockerSuite) TestRemoveImageByDigest(c *check.C) {
} else if !strings.Contains(err.Error(), "No such image") {
c.Fatalf("expected 'No such image' output, got %v", err)
}
}
func (s *DockerSuite) TestBuildByDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) {
digest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -248,7 +228,6 @@ func (s *DockerSuite) TestBuildByDigest(c *check.C) {
// do the build
name := "buildbydigest"
defer deleteImages(name)
_, err = buildImage(name, fmt.Sprintf(
`FROM %s
CMD ["/bin/echo", "Hello World"]`, imageReference),
@ -266,12 +245,9 @@ func (s *DockerSuite) TestBuildByDigest(c *check.C) {
if res != imageID {
c.Fatalf("Image %s, expected %s", res, imageID)
}
}
func (s *DockerSuite) TestTagByDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) {
digest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -306,12 +282,9 @@ func (s *DockerSuite) TestTagByDigest(c *check.C) {
if tagID != expectedID {
c.Fatalf("expected image id %q, got %q", expectedID, tagID)
}
}
func (s *DockerSuite) TestListImagesWithoutDigests(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) {
digest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -338,9 +311,7 @@ func (s *DockerSuite) TestListImagesWithoutDigests(c *check.C) {
}
func (s *DockerSuite) TestListImagesWithDigests(c *check.C) {
defer setupRegistry(c)()
defer deleteImages(repoName+":tag1", repoName+":tag2")
func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) {
// setup image1
digest1, err := setupImageWithTag("tag1")
@ -348,7 +319,6 @@ func (s *DockerSuite) TestListImagesWithDigests(c *check.C) {
c.Fatalf("error setting up image: %v", err)
}
imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1)
defer deleteImages(imageReference1)
c.Logf("imageReference1 = %s", imageReference1)
// pull image1 by digest
@ -377,7 +347,6 @@ func (s *DockerSuite) TestListImagesWithDigests(c *check.C) {
c.Fatalf("error setting up image: %v", err)
}
imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2)
defer deleteImages(imageReference2)
c.Logf("imageReference2 = %s", imageReference2)
// pull image1 by digest
@ -489,12 +458,9 @@ func (s *DockerSuite) TestListImagesWithDigests(c *check.C) {
if !busyboxRe.MatchString(out) {
c.Fatalf("expected %q: %s", busyboxRe.String(), out)
}
}
func (s *DockerSuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) {
defer setupRegistry(c)()
func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) {
pushDigest, err := setupImage()
if err != nil {
c.Fatalf("error setting up image: %v", err)
@ -508,7 +474,6 @@ func (s *DockerSuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) {
c.Fatalf("error pulling by digest: %s, %v", out, err)
}
// just in case...
defer deleteImages(imageReference)
imageID, err := inspectField(imageReference, ".Id")
if err != nil {
@ -519,5 +484,4 @@ func (s *DockerSuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) {
if _, err := runCommand(cmd); err != nil {
c.Fatalf("error deleting image by id: %v", err)
}
}

Some files were not shown because too many files have changed in this diff Show more