diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le index ec01544997..f2f0c28e2a 100644 --- a/Dockerfile.ppc64le +++ b/Dockerfile.ppc64le @@ -168,7 +168,7 @@ RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc diff --git a/MAINTAINERS b/MAINTAINERS index 3bb742080a..d3fcbfe7db 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -35,6 +35,7 @@ "estesp", "icecrime", "jhowardmsft", + "justincormack", "lk4d4", "mavenugo", "mhbauer", @@ -204,6 +205,11 @@ Email = "jess@linux.com" GitHub = "jfrazelle" + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + [people.lk4d4] Name = "Alexander Morozov" Email = "lk4d4@docker.com" diff --git a/api/client/attach.go b/api/client/attach.go index 1506dc662c..aae639378d 100644 --- a/api/client/attach.go +++ b/api/client/attach.go @@ -27,7 +27,9 @@ func (cli *DockerCli) CmdAttach(args ...string) error { cmd.ParseFlags(args, true) - c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0)) + ctx := context.Background() + + c, err := cli.client.ContainerInspect(ctx, cmd.Arg(0)) if err != nil { return err } @@ -64,11 +66,11 @@ func (cli *DockerCli) CmdAttach(args ...string) error { } if *proxy && !c.Config.Tty { - sigc := cli.forwardAllSignals(container) + sigc := cli.forwardAllSignals(ctx, container) defer signal.StopCatch(sigc) } - resp, errAttach := cli.client.ContainerAttach(context.Background(), container, options) + resp, errAttach := cli.client.ContainerAttach(ctx, container, options) if errAttach != nil && errAttach != httputil.ErrPersistEOF { // ContainerAttach returns an ErrPersistEOF (connection closed) // means server met an error and put it in Hijacked connection @@ -83,15 +85,15 @@ func (cli *DockerCli) CmdAttach(args ...string) error { // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially // resize it, then go back to normal. Without this, every attach after the first will // require the user to manually resize or hit enter. - cli.resizeTtyTo(cmd.Arg(0), height+1, width+1, false) + cli.resizeTtyTo(ctx, cmd.Arg(0), height+1, width+1, false) // After the above resizing occurs, the call to monitorTtySize below will handle resetting back // to the actual size. 
- if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + if err := cli.monitorTtySize(ctx, cmd.Arg(0), false); err != nil { logrus.Debugf("Error monitoring TTY size: %s", err) } } - if err := cli.holdHijackedConnection(context.Background(), c.Config.Tty, in, cli.out, cli.err, resp); err != nil { + if err := cli.holdHijackedConnection(ctx, c.Config.Tty, in, cli.out, cli.err, resp); err != nil { return err } @@ -99,7 +101,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { return errAttach } - _, status, err := getExitCode(cli, container) + _, status, err := cli.getExitCode(ctx, container) if err != nil { return err } diff --git a/api/client/build.go b/api/client/build.go index 5b6fb152a9..237f13b87f 100644 --- a/api/client/build.go +++ b/api/client/build.go @@ -32,7 +32,7 @@ import ( "github.com/docker/go-units" ) -type translatorFunc func(reference.NamedTagged) (reference.Canonical, error) +type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) // CmdBuild builds a new image from the source code at a given path. // @@ -77,8 +77,8 @@ func (cli *DockerCli) CmdBuild(args ...string) error { cmd.ParseFlags(args, true) var ( - ctx io.ReadCloser - err error + buildCtx io.ReadCloser + err error ) specifiedContext := cmd.Arg(0) @@ -100,11 +100,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error { switch { case specifiedContext == "-": - ctx, relDockerfile, err = builder.GetContextFromReader(cli.in, *dockerfileName) + buildCtx, relDockerfile, err = builder.GetContextFromReader(cli.in, *dockerfileName) case urlutil.IsGitURL(specifiedContext): tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, *dockerfileName) case urlutil.IsURL(specifiedContext): - ctx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, *dockerfileName) + buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, *dockerfileName) default: contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, *dockerfileName) } @@ -121,7 +121,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { contextDir = tempDir } - if ctx == nil { + if buildCtx == nil { // And canonicalize dockerfile name to a platform-independent one relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) if err != nil { @@ -159,7 +159,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { includes = append(includes, ".dockerignore", relDockerfile) } - ctx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: excludes, IncludeFiles: includes, @@ -169,17 +169,19 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } } + ctx := context.Background() + var resolvedTags []*resolvedTag if isTrusted() { // Wrap the tar archive to replace the Dockerfile entry with the rewritten // Dockerfile which uses trusted pulls. 
- ctx = replaceDockerfileTarWrapper(ctx, relDockerfile, cli.trustedReference, &resolvedTags) + buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, cli.trustedReference, &resolvedTags) } // Setup an upload progress bar progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) - var body io.Reader = progress.NewProgressReader(ctx, progressOutput, 0, "", "Sending build context to Docker daemon") + var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") var memory int64 if *flMemoryString != "" { @@ -235,7 +237,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), } - response, err := cli.client.ImageBuild(context.Background(), body, options) + response, err := cli.client.ImageBuild(ctx, body, options) if err != nil { return err } @@ -271,7 +273,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // Since the build was successful, now we must tag any of the resolved // images from the above Dockerfile rewrite. for _, resolved := range resolvedTags { - if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil { + if err := cli.tagTrusted(ctx, resolved.digestRef, resolved.tagRef); err != nil { return err } } @@ -303,7 +305,7 @@ type resolvedTag struct { // "FROM " instructions to a digest reference. `translator` is a // function that takes a repository name and tag reference and returns a // trusted digest reference. -func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { +func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { scanner := bufio.NewScanner(dockerfile) buf := bytes.NewBuffer(nil) @@ -320,7 +322,7 @@ func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (new } ref = reference.WithDefaultTag(ref) if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() { - trustedRef, err := translator(ref) + trustedRef, err := translator(ctx, ref) if err != nil { return nil, nil, err } @@ -346,7 +348,7 @@ func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (new // replaces the entry with the given Dockerfile name with the contents of the // new Dockerfile. Returns a new tar archive stream with the replaced // Dockerfile. -func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { +func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) @@ -373,7 +375,7 @@ func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName st // generated from a directory on the local filesystem, the // Dockerfile will only appear once in the archive. 
var newDockerfile []byte - newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(content, translator) + newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) if err != nil { pipeWriter.CloseWithError(err) return diff --git a/api/client/cp.go b/api/client/cp.go index 571cb748e7..ae3c7149f3 100644 --- a/api/client/cp.go +++ b/api/client/cp.go @@ -81,11 +81,13 @@ func (cli *DockerCli) CmdCp(args ...string) error { followLink: *followLink, } + ctx := context.Background() + switch direction { case fromContainer: - return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam) + return cli.copyFromContainer(ctx, srcContainer, srcPath, dstPath, cpParam) case toContainer: - return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam) + return cli.copyToContainer(ctx, srcPath, dstContainer, dstPath, cpParam) case acrossContainers: // Copying between containers isn't supported. return fmt.Errorf("copying between containers is not supported") @@ -126,8 +128,8 @@ func splitCpArg(arg string) (container, path string) { return parts[0], parts[1] } -func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) { - return cli.client.ContainerStatPath(context.Background(), containerName, path) +func (cli *DockerCli) statContainerPath(ctx context.Context, containerName, path string) (types.ContainerPathStat, error) { + return cli.client.ContainerStatPath(ctx, containerName, path) } func resolveLocalPath(localPath string) (absPath string, err error) { @@ -138,7 +140,7 @@ func resolveLocalPath(localPath string) (absPath string, err error) { return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil } -func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { +func (cli *DockerCli) copyFromContainer(ctx context.Context, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { if dstPath != "-" { // Get an absolute destination path. dstPath, err = resolveLocalPath(dstPath) @@ -150,7 +152,7 @@ func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, c // if client requests to follow symbol link, then must decide target file to be copied var rebaseName string if cpParam.followLink { - srcStat, err := cli.statContainerPath(srcContainer, srcPath) + srcStat, err := cli.statContainerPath(ctx, srcContainer, srcPath) // If the destination is a symbolic link, we should follow it. if err == nil && srcStat.Mode&os.ModeSymlink != 0 { @@ -167,7 +169,7 @@ func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, c } - content, stat, err := cli.client.CopyFromContainer(context.Background(), srcContainer, srcPath) + content, stat, err := cli.client.CopyFromContainer(ctx, srcContainer, srcPath) if err != nil { return err } @@ -199,7 +201,7 @@ func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, c return archive.CopyTo(preArchive, srcInfo, dstPath) } -func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { +func (cli *DockerCli) copyToContainer(ctx context.Context, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { if srcPath != "-" { // Get an absolute source path. srcPath, err = resolveLocalPath(srcPath) @@ -215,7 +217,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP // Prepare destination copy info by stat-ing the container path. 
dstInfo := archive.CopyInfo{Path: dstPath} - dstStat, err := cli.statContainerPath(dstContainer, dstPath) + dstStat, err := cli.statContainerPath(ctx, dstContainer, dstPath) // If the destination is a symbolic link, we should evaluate it. if err == nil && dstStat.Mode&os.ModeSymlink != 0 { @@ -227,7 +229,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP } dstInfo.Path = linkTarget - dstStat, err = cli.statContainerPath(dstContainer, linkTarget) + dstStat, err = cli.statContainerPath(ctx, dstContainer, linkTarget) } // Ignore any error and assume that the parent directory of the destination @@ -291,5 +293,5 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP AllowOverwriteDirWithFile: false, } - return cli.client.CopyToContainer(context.Background(), dstContainer, resolvedDstPath, content, options) + return cli.client.CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) } diff --git a/api/client/create.go b/api/client/create.go index 4c770e9221..18903152e0 100644 --- a/api/client/create.go +++ b/api/client/create.go @@ -19,7 +19,7 @@ import ( networktypes "github.com/docker/engine-api/types/network" ) -func (cli *DockerCli) pullImage(image string, out io.Writer) error { +func (cli *DockerCli) pullImage(ctx context.Context, image string, out io.Writer) error { ref, err := reference.ParseNamed(image) if err != nil { return err @@ -31,7 +31,7 @@ func (cli *DockerCli) pullImage(image string, out io.Writer) error { return err } - authConfig := cli.resolveAuthConfig(repoInfo.Index) + authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index) encodedAuth, err := encodeAuthToBase64(authConfig) if err != nil { return err @@ -41,7 +41,7 @@ func (cli *DockerCli) pullImage(image string, out io.Writer) error { RegistryAuth: encodedAuth, } - responseBody, err := cli.client.ImageCreate(context.Background(), image, options) + responseBody, err := cli.client.ImageCreate(ctx, image, options) if err != nil { return err } @@ -69,7 +69,7 @@ func newCIDFile(path string) (*cidFile, error) { return &cidFile{path: path, file: f}, nil } -func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { +func (cli *DockerCli) createContainer(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { var containerIDFile *cidFile if cidfile != "" { var err error @@ -89,7 +89,7 @@ func (cli *DockerCli) createContainer(config *container.Config, hostConfig *cont if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() { var err error - trustedRef, err = cli.trustedReference(ref) + trustedRef, err = cli.trustedReference(ctx, ref) if err != nil { return nil, err } @@ -98,7 +98,7 @@ func (cli *DockerCli) createContainer(config *container.Config, hostConfig *cont } //create the container - response, err := cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + response, err := cli.client.ContainerCreate(ctx, config, hostConfig, networkingConfig, name) //if image not found try to pull it if err != nil { @@ -106,17 +106,17 @@ func (cli *DockerCli) createContainer(config *container.Config, hostConfig *cont fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String()) // we don't want to write to stdout anything apart 
from container.ID - if err = cli.pullImage(config.Image, cli.err); err != nil { + if err = cli.pullImage(ctx, config.Image, cli.err); err != nil { return nil, err } if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { - if err := cli.tagTrusted(trustedRef, ref); err != nil { + if err := cli.tagTrusted(ctx, trustedRef, ref); err != nil { return nil, err } } // Retry var retryErr error - response, retryErr = cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + response, retryErr = cli.client.ContainerCreate(ctx, config, hostConfig, networkingConfig, name) if retryErr != nil { return nil, retryErr } @@ -158,7 +158,7 @@ func (cli *DockerCli) CmdCreate(args ...string) error { cmd.Usage() return nil } - response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + response, err := cli.createContainer(context.Background(), config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) if err != nil { return err } diff --git a/api/client/exec.go b/api/client/exec.go index 2202324661..dd74780898 100644 --- a/api/client/exec.go +++ b/api/client/exec.go @@ -34,7 +34,9 @@ func (cli *DockerCli) CmdExec(args ...string) error { // Send client escape keys execConfig.DetachKeys = cli.configFile.DetachKeys - response, err := cli.client.ContainerExecCreate(context.Background(), container, *execConfig) + ctx := context.Background() + + response, err := cli.client.ContainerExecCreate(ctx, container, *execConfig) if err != nil { return err } @@ -56,7 +58,7 @@ func (cli *DockerCli) CmdExec(args ...string) error { Tty: execConfig.Tty, } - if err := cli.client.ContainerExecStart(context.Background(), execID, execStartCheck); err != nil { + if err := cli.client.ContainerExecStart(ctx, execID, execStartCheck); err != nil { return err } // For now don't print this - wait for when we support exec wait() @@ -85,17 +87,17 @@ func (cli *DockerCli) CmdExec(args ...string) error { } } - resp, err := cli.client.ContainerExecAttach(context.Background(), execID, *execConfig) + resp, err := cli.client.ContainerExecAttach(ctx, execID, *execConfig) if err != nil { return err } defer resp.Close() errCh = promise.Go(func() error { - return cli.holdHijackedConnection(context.Background(), execConfig.Tty, in, out, stderr, resp) + return cli.holdHijackedConnection(ctx, execConfig.Tty, in, out, stderr, resp) }) if execConfig.Tty && cli.isTerminalIn { - if err := cli.monitorTtySize(execID, true); err != nil { + if err := cli.monitorTtySize(ctx, execID, true); err != nil { fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) } } @@ -106,7 +108,7 @@ func (cli *DockerCli) CmdExec(args ...string) error { } var status int - if _, status, err = getExecExitCode(cli, execID); err != nil { + if _, status, err = cli.getExecExitCode(ctx, execID); err != nil { return err } diff --git a/api/client/export.go b/api/client/export.go index a1d3ebe749..7b7bb3f2c7 100644 --- a/api/client/export.go +++ b/api/client/export.go @@ -38,5 +38,4 @@ func (cli *DockerCli) CmdExport(args ...string) error { } return copyToFile(*outfile, responseBody) - } diff --git a/api/client/inspect.go b/api/client/inspect.go index 2e97a5aaa8..c55cbc738f 100644 --- a/api/client/inspect.go +++ b/api/client/inspect.go @@ -28,38 +28,40 @@ func (cli *DockerCli) CmdInspect(args ...string) error { return fmt.Errorf("%q is not a valid value for --type", *inspectType) } + ctx := context.Background() + var elementSearcher inspectSearcher switch *inspectType { 
case "container": - elementSearcher = cli.inspectContainers(*size) + elementSearcher = cli.inspectContainers(ctx, *size) case "image": - elementSearcher = cli.inspectImages(*size) + elementSearcher = cli.inspectImages(ctx, *size) default: - elementSearcher = cli.inspectAll(*size) + elementSearcher = cli.inspectAll(ctx, *size) } return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher) } -func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher { +func (cli *DockerCli) inspectContainers(ctx context.Context, getSize bool) inspectSearcher { return func(ref string) (interface{}, []byte, error) { - return cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + return cli.client.ContainerInspectWithRaw(ctx, ref, getSize) } } -func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher { +func (cli *DockerCli) inspectImages(ctx context.Context, getSize bool) inspectSearcher { return func(ref string) (interface{}, []byte, error) { - return cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + return cli.client.ImageInspectWithRaw(ctx, ref, getSize) } } -func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher { +func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspectSearcher { return func(ref string) (interface{}, []byte, error) { - c, rawContainer, err := cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + c, rawContainer, err := cli.client.ContainerInspectWithRaw(ctx, ref, getSize) if err != nil { // Search for image with that id if a container doesn't exist. if client.IsErrContainerNotFound(err) { - i, rawImage, err := cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + i, rawImage, err := cli.client.ImageInspectWithRaw(ctx, ref, getSize) if err != nil { if client.IsErrImageNotFound(err) { return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref) diff --git a/api/client/login.go b/api/client/login.go index 0bba8d2909..df963db530 100644 --- a/api/client/login.go +++ b/api/client/login.go @@ -40,12 +40,14 @@ func (cli *DockerCli) CmdLogin(args ...string) error { cli.in = os.Stdin } + ctx := context.Background() + var serverAddress string var isDefaultRegistry bool if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) } else { - serverAddress = cli.electAuthServer() + serverAddress = cli.electAuthServer(ctx) isDefaultRegistry = true } @@ -54,7 +56,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error { return err } - response, err := cli.client.RegistryLogin(context.Background(), authConfig) + response, err := cli.client.RegistryLogin(ctx, authConfig) if err != nil { return err } diff --git a/api/client/logout.go b/api/client/logout.go index b5ff59ddd2..ce3043bd02 100644 --- a/api/client/logout.go +++ b/api/client/logout.go @@ -3,6 +3,8 @@ package client import ( "fmt" + "golang.org/x/net/context" + Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) @@ -22,7 +24,7 @@ func (cli *DockerCli) CmdLogout(args ...string) error { if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) } else { - serverAddress = cli.electAuthServer() + serverAddress = cli.electAuthServer(context.Background()) } // check if we're logged in based on the records in the config file diff --git a/api/client/logs.go b/api/client/logs.go index 25c9004c70..c4e6bc8d56 100644 --- a/api/client/logs.go +++ b/api/client/logs.go @@ -33,7 +33,9 @@ func (cli *DockerCli) CmdLogs(args ...string) error { name := cmd.Arg(0) - c, err := 
cli.client.ContainerInspect(context.Background(), name) + ctx := context.Background() + + c, err := cli.client.ContainerInspect(ctx, name) if err != nil { return err } @@ -51,7 +53,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error { Tail: *tail, Details: *details, } - responseBody, err := cli.client.ContainerLogs(context.Background(), name, options) + responseBody, err := cli.client.ContainerLogs(ctx, name, options) if err != nil { return err } diff --git a/api/client/network.go b/api/client/network.go index 6eaf21662e..5966743c18 100644 --- a/api/client/network.go +++ b/api/client/network.go @@ -104,9 +104,11 @@ func (cli *DockerCli) CmdNetworkRm(args ...string) error { return err } + ctx := context.Background() + status := 0 for _, net := range cmd.Args() { - if err := cli.client.NetworkRemove(context.Background(), net); err != nil { + if err := cli.client.NetworkRemove(ctx, net); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue @@ -239,8 +241,10 @@ func (cli *DockerCli) CmdNetworkInspect(args ...string) error { return err } + ctx := context.Background() + inspectSearcher := func(name string) (interface{}, []byte, error) { - i, err := cli.client.NetworkInspect(context.Background(), name) + i, err := cli.client.NetworkInspect(ctx, name) return i, nil, err } diff --git a/api/client/pause.go b/api/client/pause.go index ffba1c9a2b..186f785569 100644 --- a/api/client/pause.go +++ b/api/client/pause.go @@ -19,9 +19,11 @@ func (cli *DockerCli) CmdPause(args ...string) error { cmd.ParseFlags(args, true) + ctx := context.Background() + var errs []string for _, name := range cmd.Args() { - if err := cli.client.ContainerPause(context.Background(), name); err != nil { + if err := cli.client.ContainerPause(ctx, name); err != nil { errs = append(errs, err.Error()) } else { fmt.Fprintf(cli.out, "%s\n", name) diff --git a/api/client/pull.go b/api/client/pull.go index 78de9c9791..d618dce905 100644 --- a/api/client/pull.go +++ b/api/client/pull.go @@ -55,18 +55,20 @@ func (cli *DockerCli) CmdPull(args ...string) error { return err } - authConfig := cli.resolveAuthConfig(repoInfo.Index) + ctx := context.Background() + + authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") if isTrusted() && !registryRef.HasDigest() { // Check if tag is digest - return cli.trustedPull(repoInfo, registryRef, authConfig, requestPrivilege) + return cli.trustedPull(ctx, repoInfo, registryRef, authConfig, requestPrivilege) } - return cli.imagePullPrivileged(authConfig, distributionRef.String(), requestPrivilege, *allTags) + return cli.imagePullPrivileged(ctx, authConfig, distributionRef.String(), requestPrivilege, *allTags) } -func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { +func (cli *DockerCli) imagePullPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { encodedAuth, err := encodeAuthToBase64(authConfig) if err != nil { @@ -78,7 +80,7 @@ func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, ref strin All: all, } - responseBody, err := cli.client.ImagePull(context.Background(), ref, options) + responseBody, err := cli.client.ImagePull(ctx, ref, options) if err != nil { return err } diff --git a/api/client/push.go b/api/client/push.go index 0d631002e8..903f85f766 100644 --- a/api/client/push.go +++ b/api/client/push.go @@ 
-33,15 +33,18 @@ func (cli *DockerCli) CmdPush(args ...string) error { if err != nil { return err } - // Resolve the Auth config relevant for this server - authConfig := cli.resolveAuthConfig(repoInfo.Index) + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") + if isTrusted() { - return cli.trustedPush(repoInfo, ref, authConfig, requestPrivilege) + return cli.trustedPush(ctx, repoInfo, ref, authConfig, requestPrivilege) } - responseBody, err := cli.imagePushPrivileged(authConfig, ref.String(), requestPrivilege) + responseBody, err := cli.imagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege) if err != nil { return err } @@ -51,7 +54,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) } -func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { +func (cli *DockerCli) imagePushPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { encodedAuth, err := encodeAuthToBase64(authConfig) if err != nil { return nil, err @@ -61,5 +64,5 @@ func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, ref strin PrivilegeFunc: requestPrivilege, } - return cli.client.ImagePush(context.Background(), ref, options) + return cli.client.ImagePush(ctx, ref, options) } diff --git a/api/client/rm.go b/api/client/rm.go index 7b9c0a4723..77d70d5159 100644 --- a/api/client/rm.go +++ b/api/client/rm.go @@ -23,6 +23,8 @@ func (cli *DockerCli) CmdRm(args ...string) error { cmd.ParseFlags(args, true) + ctx := context.Background() + var errs []string for _, name := range cmd.Args() { if name == "" { @@ -30,7 +32,7 @@ func (cli *DockerCli) CmdRm(args ...string) error { } name = strings.Trim(name, "/") - if err := cli.removeContainer(name, *v, *link, *force); err != nil { + if err := cli.removeContainer(ctx, name, *v, *link, *force); err != nil { errs = append(errs, err.Error()) } else { fmt.Fprintf(cli.out, "%s\n", name) @@ -42,13 +44,13 @@ func (cli *DockerCli) CmdRm(args ...string) error { return nil } -func (cli *DockerCli) removeContainer(container string, removeVolumes, removeLinks, force bool) error { +func (cli *DockerCli) removeContainer(ctx context.Context, container string, removeVolumes, removeLinks, force bool) error { options := types.ContainerRemoveOptions{ RemoveVolumes: removeVolumes, RemoveLinks: removeLinks, Force: force, } - if err := cli.client.ContainerRemove(context.Background(), container, options); err != nil { + if err := cli.client.ContainerRemove(ctx, container, options); err != nil { return err } return nil diff --git a/api/client/rmi.go b/api/client/rmi.go index e6bb3d4fc8..65e5e13a17 100644 --- a/api/client/rmi.go +++ b/api/client/rmi.go @@ -31,6 +31,8 @@ func (cli *DockerCli) CmdRmi(args ...string) error { v.Set("noprune", "1") } + ctx := context.Background() + var errs []string for _, image := range cmd.Args() { options := types.ImageRemoveOptions{ @@ -38,7 +40,7 @@ func (cli *DockerCli) CmdRmi(args ...string) error { PruneChildren: !*noprune, } - dels, err := cli.client.ImageRemove(context.Background(), image, options) + dels, err := cli.client.ImageRemove(ctx, image, options) if err != nil { errs = 
append(errs, err.Error()) } else { diff --git a/api/client/run.go b/api/client/run.go index f0db9bf8b1..6af7cd2a11 100644 --- a/api/client/run.go +++ b/api/client/run.go @@ -147,20 +147,20 @@ func (cli *DockerCli) CmdRun(args ...string) error { hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() } - createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + ctx, cancelFun := context.WithCancel(context.Background()) + + createResponse, err := cli.createContainer(ctx, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) if err != nil { cmd.ReportError(err.Error(), true) return runStartContainerErr(err) } if sigProxy { - sigc := cli.forwardAllSignals(createResponse.ID) + sigc := cli.forwardAllSignals(ctx, createResponse.ID) defer signal.StopCatch(sigc) } var ( waitDisplayID chan struct{} errCh chan error - cancelFun context.CancelFunc - ctx context.Context ) if !config.AttachStdout && !config.AttachStderr { // Make this asynchronous to allow the client to write to stdin before having to read the ID @@ -205,7 +205,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { DetachKeys: cli.configFile.DetachKeys, } - resp, errAttach := cli.client.ContainerAttach(context.Background(), createResponse.ID, options) + resp, errAttach := cli.client.ContainerAttach(ctx, createResponse.ID, options) if errAttach != nil && errAttach != httputil.ErrPersistEOF { // ContainerAttach returns an ErrPersistEOF (connection closed) // means server met an error and put it in Hijacked connection @@ -214,7 +214,6 @@ func (cli *DockerCli) CmdRun(args ...string) error { } defer resp.Close() - ctx, cancelFun = context.WithCancel(context.Background()) errCh = promise.Go(func() error { errHijack := cli.holdHijackedConnection(ctx, config.Tty, in, out, stderr, resp) if errHijack == nil { @@ -226,14 +225,16 @@ func (cli *DockerCli) CmdRun(args ...string) error { if *flAutoRemove { defer func() { - if err := cli.removeContainer(createResponse.ID, true, false, true); err != nil { + // Explicitly not sharing the context as it could be "Done" (by calling cancelFun) + // and thus the container would not be removed. + if err := cli.removeContainer(context.Background(), createResponse.ID, true, false, true); err != nil { fmt.Fprintf(cli.err, "%v\n", err) } }() } //start the container - if err := cli.client.ContainerStart(context.Background(), createResponse.ID); err != nil { + if err := cli.client.ContainerStart(ctx, createResponse.ID); err != nil { // If we have holdHijackedConnection, we should notify // holdHijackedConnection we are going to exit and wait // to avoid the terminal are not restored. 
@@ -247,7 +248,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { - if err := cli.monitorTtySize(createResponse.ID, false); err != nil { + if err := cli.monitorTtySize(ctx, createResponse.ID, false); err != nil { fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) } } @@ -272,23 +273,23 @@ func (cli *DockerCli) CmdRun(args ...string) error { if *flAutoRemove { // Autoremove: wait for the container to finish, retrieve // the exit code and remove the container - if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + if status, err = cli.client.ContainerWait(ctx, createResponse.ID); err != nil { return runStartContainerErr(err) } - if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + if _, status, err = cli.getExitCode(ctx, createResponse.ID); err != nil { return err } } else { // No Autoremove: Simply retrieve the exit code if !config.Tty { // In non-TTY mode, we can't detach, so we must wait for container exit - if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + if status, err = cli.client.ContainerWait(ctx, createResponse.ID); err != nil { return err } } else { // In TTY mode, there is a race: if the process dies too slowly, the state could // be updated after the getExitCode call and result in the wrong exit code being reported - if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + if _, status, err = cli.getExitCode(ctx, createResponse.ID); err != nil { return err } } diff --git a/api/client/search.go b/api/client/search.go index 5ee6b8b652..d00d5a3d01 100644 --- a/api/client/search.go +++ b/api/client/search.go @@ -10,10 +10,12 @@ import ( "golang.org/x/net/context" Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/registry" "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" registrytypes "github.com/docker/engine-api/types/registry" ) @@ -21,14 +23,32 @@ import ( // // Usage: docker search [OPTIONS] TERM func (cli *DockerCli) CmdSearch(args ...string) error { + var ( + err error + + filterArgs = filters.NewArgs() + + flFilter = opts.NewListOpts(nil) + ) + cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") - automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") - stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars") + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + // Deprecated since Docker 1.12 in favor of "--filter" + automated := cmd.Bool([]string{"#-automated"}, false, "Only show automated builds - DEPRECATED") + stars := cmd.Uint([]string{"s", "#-stars"}, 0, "Only displays with at least x stars - DEPRECATED") + cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) + for _, f := range flFilter.GetAll() { + if filterArgs, err = filters.ParseFlag(f, filterArgs); err != nil { + return err + } + } + name := cmd.Arg(0) v := url.Values{} v.Set("term", name) @@ -38,7 +58,9 @@ func (cli *DockerCli) CmdSearch(args ...string) error { return err } - authConfig := cli.resolveAuthConfig(indexInfo) + ctx := context.Background() + + authConfig := 
cli.resolveAuthConfig(ctx, indexInfo) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") encodedAuth, err := encodeAuthToBase64(authConfig) @@ -49,9 +71,10 @@ func (cli *DockerCli) CmdSearch(args ...string) error { options := types.ImageSearchOptions{ RegistryAuth: encodedAuth, PrivilegeFunc: requestPrivilege, + Filters: filterArgs, } - unorderedResults, err := cli.client.ImageSearch(context.Background(), name, options) + unorderedResults, err := cli.client.ImageSearch(ctx, name, options) if err != nil { return err } @@ -62,6 +85,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error { w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") for _, res := range results { + // --automated and -s, --stars are deprecated since Docker 1.12 if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) { continue } diff --git a/api/client/start.go b/api/client/start.go index db0f77804c..594212671b 100644 --- a/api/client/start.go +++ b/api/client/start.go @@ -17,7 +17,7 @@ import ( "github.com/docker/engine-api/types" ) -func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { +func (cli *DockerCli) forwardAllSignals(ctx context.Context, cid string) chan os.Signal { sigc := make(chan os.Signal, 128) signal.CatchAll(sigc) go func() { @@ -37,7 +37,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { continue } - if err := cli.client.ContainerKill(context.Background(), cid, sig); err != nil { + if err := cli.client.ContainerKill(ctx, cid, sig); err != nil { logrus.Debugf("Error sending signal: %s", err) } } @@ -57,6 +57,8 @@ func (cli *DockerCli) CmdStart(args ...string) error { cmd.ParseFlags(args, true) + ctx, cancelFun := context.WithCancel(context.Background()) + if *attach || *openStdin { // We're going to attach to a container. // 1. Ensure we only have one container. @@ -66,13 +68,13 @@ func (cli *DockerCli) CmdStart(args ...string) error { // 2. Attach to the container. container := cmd.Arg(0) - c, err := cli.client.ContainerInspect(context.Background(), container) + c, err := cli.client.ContainerInspect(ctx, container) if err != nil { return err } if !c.Config.Tty { - sigc := cli.forwardAllSignals(container) + sigc := cli.forwardAllSignals(ctx, container) defer signal.StopCatch(sigc) } @@ -94,7 +96,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { in = cli.in } - resp, errAttach := cli.client.ContainerAttach(context.Background(), container, options) + resp, errAttach := cli.client.ContainerAttach(ctx, container, options) if errAttach != nil && errAttach != httputil.ErrPersistEOF { // ContainerAttach return an ErrPersistEOF (connection closed) // means server met an error and put it in Hijacked connection @@ -102,7 +104,6 @@ func (cli *DockerCli) CmdStart(args ...string) error { return errAttach } defer resp.Close() - ctx, cancelFun := context.WithCancel(context.Background()) cErr := promise.Go(func() error { errHijack := cli.holdHijackedConnection(ctx, c.Config.Tty, in, cli.out, cli.err, resp) if errHijack == nil { @@ -112,7 +113,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { }) // 3. Start the container. - if err := cli.client.ContainerStart(context.Background(), container); err != nil { + if err := cli.client.ContainerStart(ctx, container); err != nil { cancelFun() <-cErr return err @@ -120,14 +121,14 @@ func (cli *DockerCli) CmdStart(args ...string) error { // 4. Wait for attachment to break. 
if c.Config.Tty && cli.isTerminalOut { - if err := cli.monitorTtySize(container, false); err != nil { + if err := cli.monitorTtySize(ctx, container, false); err != nil { fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) } } if attchErr := <-cErr; attchErr != nil { return attchErr } - _, status, err := getExitCode(cli, container) + _, status, err := cli.getExitCode(ctx, container) if err != nil { return err } @@ -137,16 +138,16 @@ func (cli *DockerCli) CmdStart(args ...string) error { } else { // We're not going to attach to anything. // Start as many containers as we want. - return cli.startContainersWithoutAttachments(cmd.Args()) + return cli.startContainersWithoutAttachments(ctx, cmd.Args()) } return nil } -func (cli *DockerCli) startContainersWithoutAttachments(containers []string) error { +func (cli *DockerCli) startContainersWithoutAttachments(ctx context.Context, containers []string) error { var failedContainers []string for _, container := range containers { - if err := cli.client.ContainerStart(context.Background(), container); err != nil { + if err := cli.client.ContainerStart(ctx, container); err != nil { fmt.Fprintf(cli.err, "%s\n", err) failedContainers = append(failedContainers, container) } else { diff --git a/api/client/stats.go b/api/client/stats.go index 943f189936..39b0361ea4 100644 --- a/api/client/stats.go +++ b/api/client/stats.go @@ -33,6 +33,8 @@ func (cli *DockerCli) CmdStats(args ...string) error { showAll := len(names) == 0 closeChan := make(chan error) + ctx := context.Background() + // monitorContainerEvents watches for container creation and removal (only // used when calling `docker stats` without arguments). monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { @@ -41,7 +43,7 @@ func (cli *DockerCli) CmdStats(args ...string) error { options := types.EventsOptions{ Filters: f, } - resBody, err := cli.client.Events(context.Background(), options) + resBody, err := cli.client.Events(ctx, options) // Whether we successfully subscribed to events or not, we can now // unblock the main goroutine. 
close(started) @@ -71,7 +73,7 @@ func (cli *DockerCli) CmdStats(args ...string) error { options := types.ContainerListOptions{ All: *all, } - cs, err := cli.client.ContainerList(context.Background(), options) + cs, err := cli.client.ContainerList(ctx, options) if err != nil { closeChan <- err } @@ -79,7 +81,7 @@ func (cli *DockerCli) CmdStats(args ...string) error { s := &containerStats{Name: container.ID[:12]} if cStats.add(s) { waitFirst.Add(1) - go s.Collect(cli.client, !*noStream, waitFirst) + go s.Collect(ctx, cli.client, !*noStream, waitFirst) } } } @@ -96,7 +98,7 @@ func (cli *DockerCli) CmdStats(args ...string) error { s := &containerStats{Name: e.ID[:12]} if cStats.add(s) { waitFirst.Add(1) - go s.Collect(cli.client, !*noStream, waitFirst) + go s.Collect(ctx, cli.client, !*noStream, waitFirst) } } }) @@ -105,7 +107,7 @@ func (cli *DockerCli) CmdStats(args ...string) error { s := &containerStats{Name: e.ID[:12]} if cStats.add(s) { waitFirst.Add(1) - go s.Collect(cli.client, !*noStream, waitFirst) + go s.Collect(ctx, cli.client, !*noStream, waitFirst) } }) @@ -131,7 +133,7 @@ func (cli *DockerCli) CmdStats(args ...string) error { s := &containerStats{Name: name} if cStats.add(s) { waitFirst.Add(1) - go s.Collect(cli.client, !*noStream, waitFirst) + go s.Collect(ctx, cli.client, !*noStream, waitFirst) } } diff --git a/api/client/stats_helpers.go b/api/client/stats_helpers.go index f757d9e527..21f676b808 100644 --- a/api/client/stats_helpers.go +++ b/api/client/stats_helpers.go @@ -63,7 +63,7 @@ func (s *stats) isKnownContainer(cid string) (int, bool) { return -1, false } -func (s *containerStats) Collect(cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { +func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { logrus.Debugf("collecting stats for %s", s.Name) var ( getFirst bool @@ -80,7 +80,7 @@ func (s *containerStats) Collect(cli client.APIClient, streamStats bool, waitFir } }() - responseBody, err := cli.ContainerStats(context.Background(), s.Name, streamStats) + responseBody, err := cli.ContainerStats(ctx, s.Name, streamStats) if err != nil { s.mu.Lock() s.err = err diff --git a/api/client/stop.go b/api/client/stop.go index 23d53447dd..7f2fc38960 100644 --- a/api/client/stop.go +++ b/api/client/stop.go @@ -22,9 +22,11 @@ func (cli *DockerCli) CmdStop(args ...string) error { cmd.ParseFlags(args, true) + ctx := context.Background() + var errs []string for _, name := range cmd.Args() { - if err := cli.client.ContainerStop(context.Background(), name, *nSeconds); err != nil { + if err := cli.client.ContainerStop(ctx, name, *nSeconds); err != nil { errs = append(errs, err.Error()) } else { fmt.Fprintf(cli.out, "%s\n", name) diff --git a/api/client/trust.go b/api/client/trust.go index 952bea5144..987a050989 100644 --- a/api/client/trust.go +++ b/api/client/trust.go @@ -229,14 +229,14 @@ func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { } } -func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Canonical, error) { +func (cli *DockerCli) trustedReference(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { repoInfo, err := registry.ParseRepositoryInfo(ref) if err != nil { return nil, err } // Resolve the Auth config relevant for this server - authConfig := cli.resolveAuthConfig(repoInfo.Index) + authConfig := cli.resolveAuthConfig(ctx, repoInfo.Index) notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull") if err != nil 
{ @@ -262,14 +262,14 @@ func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Can return reference.WithDigest(ref, r.digest) } -func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.NamedTagged) error { +func (cli *DockerCli) tagTrusted(ctx context.Context, trustedRef reference.Canonical, ref reference.NamedTagged) error { fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String()) options := types.ImageTagOptions{ Force: true, } - return cli.client.ImageTag(context.Background(), trustedRef.String(), ref.String(), options) + return cli.client.ImageTag(ctx, trustedRef.String(), ref.String(), options) } func notaryError(repoName string, err error) error { @@ -302,7 +302,7 @@ func notaryError(repoName string, err error) error { return err } -func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { +func (cli *DockerCli) trustedPull(ctx context.Context, repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { var refs []target notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull") @@ -364,7 +364,7 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr if err != nil { return err } - if err := cli.imagePullPrivileged(authConfig, ref.String(), requestPrivilege, false); err != nil { + if err := cli.imagePullPrivileged(ctx, authConfig, ref.String(), requestPrivilege, false); err != nil { return err } @@ -378,7 +378,7 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr if err != nil { return err } - if err := cli.tagTrusted(trustedRef, tagged); err != nil { + if err := cli.tagTrusted(ctx, trustedRef, tagged); err != nil { return err } } @@ -386,8 +386,8 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr return nil } -func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - responseBody, err := cli.imagePushPrivileged(authConfig, ref.String(), requestPrivilege) +func (cli *DockerCli) trustedPush(ctx context.Context, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := cli.imagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege) if err != nil { return err } diff --git a/api/client/unpause.go b/api/client/unpause.go index b8630b1f99..c53a1e1120 100644 --- a/api/client/unpause.go +++ b/api/client/unpause.go @@ -19,9 +19,11 @@ func (cli *DockerCli) CmdUnpause(args ...string) error { cmd.ParseFlags(args, true) + ctx := context.Background() + var errs []string for _, name := range cmd.Args() { - if err := cli.client.ContainerUnpause(context.Background(), name); err != nil { + if err := cli.client.ContainerUnpause(ctx, name); err != nil { errs = append(errs, err.Error()) } else { fmt.Fprintf(cli.out, "%s\n", name) diff --git a/api/client/update.go b/api/client/update.go index a2f9e53470..fede892073 100644 --- a/api/client/update.go +++ b/api/client/update.go @@ -99,10 +99,13 @@ func (cli *DockerCli) CmdUpdate(args ...string) error { RestartPolicy: restartPolicy, } + ctx := context.Background() + names := cmd.Args() var errs []string + for _, name := range names { - if err := 
cli.client.ContainerUpdate(context.Background(), name, updateConfig); err != nil { + if err := cli.client.ContainerUpdate(ctx, name, updateConfig); err != nil { errs = append(errs, err.Error()) } else { fmt.Fprintf(cli.out, "%s\n", name) diff --git a/api/client/utils.go b/api/client/utils.go index e9ed915821..fe057856ee 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -23,13 +23,13 @@ import ( registrytypes "github.com/docker/engine-api/types/registry" ) -func (cli *DockerCli) electAuthServer() string { +func (cli *DockerCli) electAuthServer(ctx context.Context) string { // The daemon `/info` endpoint informs us of the default registry being // used. This is essential in cross-platforms environment, where for // example a Linux client might be interacting with a Windows daemon, hence // the default registry URL might be Windows specific. serverAddress := registry.IndexServer - if info, err := cli.client.Info(context.Background()); err != nil { + if info, err := cli.client.Info(ctx); err != nil { fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) } else { serverAddress = info.IndexServerAddress @@ -58,12 +58,12 @@ func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes. } } -func (cli *DockerCli) resizeTty(id string, isExec bool) { +func (cli *DockerCli) resizeTty(ctx context.Context, id string, isExec bool) { height, width := cli.getTtySize() - cli.resizeTtyTo(id, height, width, isExec) + cli.resizeTtyTo(ctx, id, height, width, isExec) } -func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) { +func (cli *DockerCli) resizeTtyTo(ctx context.Context, id string, height, width int, isExec bool) { if height == 0 && width == 0 { return } @@ -75,9 +75,9 @@ func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) { var err error if isExec { - err = cli.client.ContainerExecResize(context.Background(), id, options) + err = cli.client.ContainerExecResize(ctx, id, options) } else { - err = cli.client.ContainerResize(context.Background(), id, options) + err = cli.client.ContainerResize(ctx, id, options) } if err != nil { @@ -87,8 +87,8 @@ func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) { // getExitCode perform an inspect on the container. It returns // the running state and the exit code. -func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { - c, err := cli.client.ContainerInspect(context.Background(), containerID) +func (cli *DockerCli) getExitCode(ctx context.Context, containerID string) (bool, int, error) { + c, err := cli.client.ContainerInspect(ctx, containerID) if err != nil { // If we can't connect, then the daemon probably died. if err != client.ErrConnectionFailed { @@ -102,8 +102,8 @@ func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { // getExecExitCode perform an inspect on the exec command. It returns // the running state and the exit code. -func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { - resp, err := cli.client.ContainerExecInspect(context.Background(), execID) +func (cli *DockerCli) getExecExitCode(ctx context.Context, execID string) (bool, int, error) { + resp, err := cli.client.ContainerExecInspect(ctx, execID) if err != nil { // If we can't connect, then the daemon probably died. 
if err != client.ErrConnectionFailed { @@ -115,8 +115,8 @@ func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { return resp.Running, resp.ExitCode, nil } -func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { - cli.resizeTty(id, isExec) +func (cli *DockerCli) monitorTtySize(ctx context.Context, id string, isExec bool) error { + cli.resizeTty(ctx, id, isExec) if runtime.GOOS == "windows" { go func() { @@ -126,7 +126,7 @@ func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { h, w := cli.getTtySize() if prevW != w || prevH != h { - cli.resizeTty(id, isExec) + cli.resizeTty(ctx, id, isExec) } prevH = h prevW = w @@ -137,7 +137,7 @@ func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { gosignal.Notify(sigchan, signal.SIGWINCH) go func() { for range sigchan { - cli.resizeTty(id, isExec) + cli.resizeTty(ctx, id, isExec) } }() } @@ -185,10 +185,10 @@ func copyToFile(outfile string, r io.Reader) error { // resolveAuthConfig is like registry.ResolveAuthConfig, but if using the // default index, it uses the default index name for the daemon's platform, // not the client's platform. -func (cli *DockerCli) resolveAuthConfig(index *registrytypes.IndexInfo) types.AuthConfig { +func (cli *DockerCli) resolveAuthConfig(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { configKey := index.Name if index.Official { - configKey = cli.electAuthServer() + configKey = cli.electAuthServer(ctx) } a, _ := getCredentials(cli.configFile, configKey) diff --git a/api/client/volume.go b/api/client/volume.go index 37e623fbd3..9681dc3c68 100644 --- a/api/client/volume.go +++ b/api/client/volume.go @@ -110,8 +110,10 @@ func (cli *DockerCli) CmdVolumeInspect(args ...string) error { return nil } + ctx := context.Background() + inspectSearcher := func(name string) (interface{}, []byte, error) { - i, err := cli.client.VolumeInspect(context.Background(), name) + i, err := cli.client.VolumeInspect(ctx, name) return i, nil, err } @@ -161,8 +163,10 @@ func (cli *DockerCli) CmdVolumeRm(args ...string) error { var status = 0 + ctx := context.Background() + for _, name := range cmd.Args() { - if err := cli.client.VolumeRemove(context.Background(), name); err != nil { + if err := cli.client.VolumeRemove(ctx, name); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue diff --git a/api/client/wait.go b/api/client/wait.go index 609cd3be42..02a65d8660 100644 --- a/api/client/wait.go +++ b/api/client/wait.go @@ -21,9 +21,11 @@ func (cli *DockerCli) CmdWait(args ...string) error { cmd.ParseFlags(args, true) + ctx := context.Background() + var errs []string for _, name := range cmd.Args() { - status, err := cli.client.ContainerWait(context.Background(), name) + status, err := cli.client.ContainerWait(ctx, name) if err != nil { errs = append(errs, err.Error()) } else { diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 7e5a6b6436..cc854a2768 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -39,5 +39,5 @@ type importExportBackend interface { type registryBackend interface { PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - SearchRegistryForImages(ctx context.Context, term string, authConfig *types.AuthConfig, metaHeaders 
map[string][]string) (*registry.SearchResults, error) + SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index b9168bc89d..3b8180f214 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -301,7 +301,7 @@ func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter headers[k] = v } } - query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("term"), config, headers) + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), config, headers) if err != nil { return err } diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go index facf0f6310..1de7dc2465 100644 --- a/builder/dockerfile/dispatchers.go +++ b/builder/dockerfile/dispatchers.go @@ -266,7 +266,7 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str return err } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", b.runConfig.WorkingDir)) } // RUN some command yo diff --git a/builder/dockerfile/evaluator_test.go b/builder/dockerfile/evaluator_test.go index c1c9425087..ac61dba943 100644 --- a/builder/dockerfile/evaluator_test.go +++ b/builder/dockerfile/evaluator_test.go @@ -3,6 +3,7 @@ package dockerfile import ( "io/ioutil" "os" + "path/filepath" "strings" "testing" @@ -16,6 +17,7 @@ import ( type dispatchTestCase struct { name, dockerfile, expectedError string + files map[string]string } func init() { @@ -34,21 +36,97 @@ func initDispatchTestCases() []dispatchTestCase { name: "ONBUILD forbidden FROM", dockerfile: "ONBUILD FROM scratch", expectedError: "FROM isn't allowed as an ONBUILD trigger", + files: nil, }, { name: "ONBUILD forbidden MAINTAINER", dockerfile: "ONBUILD MAINTAINER docker.io", expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", + files: nil, }, { name: "ARG two arguments", dockerfile: "ARG foo bar", expectedError: "ARG requires exactly one argument definition", + files: nil, }, { name: "MAINTAINER unknown flag", dockerfile: "MAINTAINER --boo joe@example.com", expectedError: "Unknown flag: boo", + files: nil, + }, + { + name: "ADD multiple files to file", + dockerfile: "ADD file1.txt file2.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON ADD multiple files to file", + dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard ADD multiple files to file", + dockerfile: "ADD file*.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard JSON ADD multiple files to file", + dockerfile: `ADD ["file*.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", 
"file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file", + dockerfile: "COPY file1.txt file2.txt test", + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON COPY multiple files to file", + dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "ADD multiple files to file with whitespace", + dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file with whitespace", + dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY wildcard no files", + dockerfile: `COPY file*.txt /tmp/`, + expectedError: "No source files were specified", + files: nil, + }, + { + name: "COPY url", + dockerfile: `COPY https://index.docker.io/robots.txt /`, + expectedError: "Source can't be a URL for COPY", + files: nil, + }, + { + name: "Chaining ONBUILD", + dockerfile: `ONBUILD ONBUILD RUN touch foobar`, + expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", + files: nil, + }, + { + name: "Invalid instruction", + dockerfile: `foo bar`, + expectedError: "Unknown instruction: FOO", + files: nil, }} return dispatchTestCases @@ -66,6 +144,10 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") defer cleanup() + for filename, content := range testCase.files { + createTestTempFile(t, contextDir, filename, content, 0777) + } + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) if err != nil { @@ -132,3 +214,16 @@ func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { } } } + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. 
+// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index 6cfb5374de..2f26265618 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -618,25 +618,8 @@ func (b *Builder) readDockerfile() error { } } - f, err := b.context.Open(b.options.Dockerfile) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) - } - return err - } - if f, ok := f.(*os.File); ok { - // ignoring error because Open already succeeded - fi, err := f.Stat() - if err != nil { - return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) - } - if fi.Size() == 0 { - return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) - } - } - b.dockerfile, err = parser.Parse(f) - f.Close() + err := b.parseDockerfile() + if err != nil { return err } @@ -655,6 +638,33 @@ func (b *Builder) readDockerfile() error { return nil } +func (b *Builder) parseDockerfile() error { + f, err := b.context.Open(b.options.Dockerfile) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) + } + return err + } + defer f.Close() + if f, ok := f.(*os.File); ok { + // ignoring error because Open already succeeded + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) + } + } + b.dockerfile, err = parser.Parse(f) + if err != nil { + return err + } + + return nil +} + // determine if build arg is part of built-in args or user // defined args in Dockerfile at any point in time. 
func (b *Builder) isBuildArgAllowed(arg string) bool { diff --git a/builder/dockerfile/internals_test.go b/builder/dockerfile/internals_test.go new file mode 100644 index 0000000000..8cd723f514 --- /dev/null +++ b/builder/dockerfile/internals_test.go @@ -0,0 +1,55 @@ +package dockerfile + +import ( + "strings" + "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/engine-api/types" +) + +func TestEmptyDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := builder.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + options := &types.ImageBuildOptions{} + + b := &Builder{options: options, context: context} + + err = b.readDockerfile() + + if err == nil { + t.Fatalf("No error when executing test for empty Dockerfile") + } + + if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") { + t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", "The Dockerfile (Dockerfile) cannot be empty", err.Error()) + } +} diff --git a/cli/cli.go b/cli/cli.go index 12649df6da..f6d48d6fac 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -39,12 +39,7 @@ func New(handlers ...Handler) *Cli { return cli } -// initErr is an error returned upon initialization of a handler implementing Initializer. -type initErr struct{ error } - -func (err initErr) Error() string { - return err.Error() -} +var errCommandNotFound = errors.New("command not found") func (cli *Cli) command(args ...string) (func(...string) error, error) { for _, c := range cli.handlers { @@ -54,35 +49,36 @@ func (cli *Cli) command(args ...string) (func(...string) error, error) { if cmd := c.Command(strings.Join(args, " ")); cmd != nil { if ci, ok := c.(Initializer); ok { if err := ci.Initialize(); err != nil { - return nil, initErr{err} + return nil, err } } return cmd, nil } } - return nil, errors.New("command not found") + return nil, errCommandNotFound } // Run executes the specified command. func (cli *Cli) Run(args ...string) error { if len(args) > 1 { command, err := cli.command(args[:2]...) - switch err := err.(type) { - case nil: + if err == nil { return command(args[2:]...) - case initErr: - return err.error + } + if err != errCommandNotFound { + return err } } if len(args) > 0 { command, err := cli.command(args[0]) - switch err := err.(type) { - case nil: - return command(args[1:]...) - case initErr: - return err.error + if err != nil { + if err == errCommandNotFound { + cli.noSuchCommand(args[0]) + return nil + } + return err } - cli.noSuchCommand(args[0]) + return command(args[1:]...) } return cli.CmdHelp() } @@ -110,24 +106,25 @@ func (cli *Cli) Command(name string) func(...string) error { func (cli *Cli) CmdHelp(args ...string) error { if len(args) > 1 { command, err := cli.command(args[:2]...) 
- switch err := err.(type) { - case nil: + if err == nil { command("--help") return nil - case initErr: - return err.error + } + if err != errCommandNotFound { + return err } } if len(args) > 0 { command, err := cli.command(args[0]) - switch err := err.(type) { - case nil: - command("--help") - return nil - case initErr: - return err.error + if err != nil { + if err == errCommandNotFound { + cli.noSuchCommand(args[0]) + return nil + } + return err } - cli.noSuchCommand(args[0]) + command("--help") + return nil } if cli.Usage == nil { diff --git a/cliconfig/config.go b/cliconfig/config.go index a5a1303943..9d5df0ac4b 100644 --- a/cliconfig/config.go +++ b/cliconfig/config.go @@ -16,11 +16,6 @@ const ( ConfigFileName = "config.json" configFileDir = ".docker" oldConfigfile = ".dockercfg" - - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. - defaultIndexserver = "https://index.docker.io/v1/" ) var ( diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index bf40fd81b8..3df9400a98 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -228,10 +228,11 @@ func (cli *DaemonCli) start() (err error) { if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") } - l, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) if err != nil { return err } + ls = wrapListeners(proto, ls) // If we're binding to a TCP port, make sure that a container doesn't try to use it. if proto == "tcp" { if err := allocateDaemonPort(addr); err != nil { @@ -239,7 +240,7 @@ func (cli *DaemonCli) start() (err error) { } } logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - api.Accept(protoAddrParts[1], l...) + api.Accept(protoAddrParts[1], ls...) } if err := migrateKey(); err != nil { diff --git a/cmd/dockerd/daemon_solaris.go b/cmd/dockerd/daemon_solaris.go new file mode 100644 index 0000000000..a0f4908601 --- /dev/null +++ b/cmd/dockerd/daemon_solaris.go @@ -0,0 +1,74 @@ +// +build solaris + +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +const defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir() string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. 
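+// On Solaris this is currently left as a no-op.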
+func (cli *DaemonCli) setupConfigReloadTrap() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{} + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +func allocateDaemonPort(addr string) error { + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go index dd6709908c..7096701fb7 100644 --- a/cmd/dockerd/daemon_unix.go +++ b/cmd/dockerd/daemon_unix.go @@ -1,4 +1,4 @@ -// +build !windows +// +build !windows,!solaris package main @@ -11,6 +11,7 @@ import ( "strconv" "syscall" + "github.com/docker/docker/cmd/dockerd/hack" "github.com/docker/docker/daemon" "github.com/docker/docker/libcontainerd" "github.com/docker/docker/pkg/system" @@ -111,3 +112,17 @@ func allocateDaemonPort(addr string) error { // notifyShutdown is called after the daemon shuts down but before the process exits. func notifyShutdown(err error) { } + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + if os.Getenv("DOCKER_HTTP_HOST_COMPAT") != "" { + switch proto { + case "unix": + ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + case "fd": + for i := range ls { + ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + } + } + } + return ls +} diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go index 9801e13a7e..9772f2b2ef 100644 --- a/cmd/dockerd/daemon_windows.go +++ b/cmd/dockerd/daemon_windows.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "net" "os" "syscall" @@ -75,3 +76,7 @@ func (cli *DaemonCli) getLibcontainerdRoot() string { func allocateDaemonPort(addr string) error { return nil } + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/cmd/dockerd/hack/malformed_host_override.go b/cmd/dockerd/hack/malformed_host_override.go new file mode 100644 index 0000000000..888d20a61a --- /dev/null +++ b/cmd/dockerd/hack/malformed_host_override.go @@ -0,0 +1,116 @@ +// +build !windows + +package hack + +import "net" + +// MalformedHostHeaderOverride is a wrapper to be able +// to overcome the 400 Bad request coming from old docker +// clients that send an invalid Host header. +type MalformedHostHeaderOverride struct { + net.Listener +} + +// MalformedHostHeaderOverrideConn wraps the underlying unix +// connection and keeps track of the first read from http.Server +// which just reads the headers. +type MalformedHostHeaderOverrideConn struct { + net.Conn + first bool +} + +var closeConnHeader = []byte("\r\nConnection: close\r") + +// Read reads the first *read* request from http.Server to inspect +// the Host header. If the Host starts with / then we're talking to +// an old docker client which send an invalid Host header. To not +// error out in http.Server we rewrite the first bytes of the request +// to sanitize the Host header itself. +// In case we're not dealing with old docker clients the data is just passed +// to the server w/o modification. 
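+// For example (mirroring the test cases below), a first read that starts with
+//   GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n
+// is rewritten in place to
+//   GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n
+// before http.Server parses it; all subsequent reads are passed through untouched.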
+func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) { + // http.Server uses a 4k buffer + if l.first && len(b) == 4096 { + // This keeps track of the first read from http.Server which just reads + // the headers + l.first = false + // The first read of the connection by http.Server is done limited to + // DefaultMaxHeaderBytes (usually 1 << 20) + 4096. + // Here we do the first read which gets us all the http headers to + // be inspected and modified below. + c, err := l.Conn.Read(b) + if err != nil { + return c, err + } + + var ( + start, end int + firstLineFeed = -1 + buf []byte + ) + for i, bb := range b[:c] { + if bb == '\n' && firstLineFeed == -1 { + firstLineFeed = i + } + if bb != '\n' { + continue + } + if b[i+1] != 'H' { + continue + } + if b[i+2] != 'o' { + continue + } + if b[i+3] != 's' { + continue + } + if b[i+4] != 't' { + continue + } + if b[i+5] != ':' { + continue + } + if b[i+6] != ' ' { + continue + } + if b[i+7] != '/' { + continue + } + // ensure clients other than the docker clients do not get this hack + if i != firstLineFeed { + return c, nil + } + start = i + 7 + // now find where the value ends + for ii, bbb := range b[start:c] { + if bbb == '\n' { + end = start + ii + break + } + } + buf = make([]byte, 0, c+len(closeConnHeader)-(end-start)) + // strip the value of the host header and + // inject `Connection: close` to ensure we don't reuse this connection + buf = append(buf, b[:start]...) + buf = append(buf, closeConnHeader...) + buf = append(buf, b[end:c]...) + copy(b, buf) + break + } + if len(buf) == 0 { + return c, nil + } + return len(buf), nil + } + return l.Conn.Read(b) +} + +// Accept makes the listener accepts connections and wraps the connection +// in a MalformedHostHeaderOverrideConn initilizing first to true. +func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return c, err + } + return &MalformedHostHeaderOverrideConn{c, true}, nil +} diff --git a/cmd/dockerd/hack/malformed_host_override_test.go b/cmd/dockerd/hack/malformed_host_override_test.go new file mode 100644 index 0000000000..443da3b34c --- /dev/null +++ b/cmd/dockerd/hack/malformed_host_override_test.go @@ -0,0 +1,115 @@ +// +build !windows + +package hack + +import ( + "bytes" + "io" + "net" + "strings" + "testing" +) + +func TestHeaderOverrideHack(t *testing.T) { + client, srv := net.Pipe() + tests := [][2][]byte{ + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! 
" + strings.Repeat("test", 15000)), + }, + { + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + }, + } + l := MalformedHostHeaderOverrideConn{client, true} + read := make([]byte, 4096) + + for _, pair := range tests { + go func(x []byte) { + srv.Write(x) + }(pair[0]) + n, err := l.Read(read) + if err != nil && err != io.EOF { + t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) + } + if !bytes.Equal(read[:n], pair[1][:n]) { + t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) + } + l.first = true + // clean out the slice + read = read[:0] + } + srv.Close() + l.Close() +} + +func BenchmarkWithHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + l := MalformedHostHeaderOverrideConn{client, true} + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + l.first = true // make sure each subsequent run uses the hack parsing + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if n, err := l.Read(read); err != nil && err != io.EOF { + b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) + } + } + } + l.Close() + <-done +} + +func BenchmarkNoHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if _, err := client.Read(read); err != nil && err != io.EOF { + b.Fatal(err) + } + } + } + client.Close() + <-done +} diff --git a/container/container_solaris.go b/container/container_solaris.go new file mode 100644 index 0000000000..ca02d8ea89 --- /dev/null +++ b/container/container_solaris.go @@ -0,0 +1,95 @@ +// +build solaris + +package container + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types/container" +) + +// Container holds fields specific to the Solaris implementation. See +// CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // fields below here are platform specific. + HostnamePath string + HostsPath string + ResolvConfPath string +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { + return nil +} + +func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { + return volumeMounts, nil +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + return true +} + +// NetworkMounts returns the list of network mounts. 
+func (container *Container) NetworkMounts() []Mount { + var mount []Mount + return mount +} + +// CopyImagePathContent copies files in destination to the volume. +func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + return nil +} + +// UnmountIpcMounts unmount Ipc related mounts. +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// UpdateContainer updates configuration of a container +func (container *Container) UpdateContainer(hostConfig *container.HostConfig) error { + return nil +} + +// UnmountVolumes explicitly unmounts volumes from the container. +func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { + return nil +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() []Mount { + var mounts []Mount + return mounts +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + return nil +} + +// canMountFS determines if the file system for the container +// can be mounted locally. A no-op on non-Windows platforms +func (container *Container) canMountFS() bool { + return true +} diff --git a/container/state_solaris.go b/container/state_solaris.go new file mode 100644 index 0000000000..02802a02a4 --- /dev/null +++ b/container/state_solaris.go @@ -0,0 +1,7 @@ +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. 
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCode = exitStatus.ExitCode +} diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index f26ff80244..29fcf687f7 100644 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -966,6 +966,11 @@ _docker_events() { __docker_complete_containers_all return ;; + daemon) + local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') + COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) + return + ;; event) COMPREPLY=( $( compgen -W " attach @@ -987,6 +992,7 @@ _docker_events() { pause pull push + reload rename resize restart @@ -1012,7 +1018,7 @@ _docker_events() { return ;; type) - COMPREPLY=( $( compgen -W "container image network volume" -- "${cur##*=}" ) ) + COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) return ;; volume) @@ -1024,7 +1030,7 @@ _docker_events() { case "$prev" in --filter|-f) - COMPREPLY=( $( compgen -S = -W "container event image label network type volume" -- "$cur" ) ) + COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) __docker_nospace return ;; @@ -1907,15 +1913,29 @@ _docker_save() { } _docker_search() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + is-automated) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + is-official) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + esac + case "$prev" in - --stars|-s) + --filter|-f) + COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) + __docker_nospace return ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "--automated --help --no-trunc --stars -s" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--filter --help --no-trunc" -- "$cur" ) ) ;; esac } diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index 5862768ff3..2132fe8722 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -311,6 +311,54 @@ __docker_complete_ps_filters() { return ret } +__docker_complete_search_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('is-automated' 'is-official' 'stars') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (is-automated|is-official) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_images_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('dangling' 'label') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (dangling) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + __docker_network_complete_ls_filters() { [[ $PREFIX = -* ]] && return 1 integer ret=1 @@ -929,11 +977,17 @@ __docker_subcommand() { $opts_help \ "($help -a --all)"{-a,--all}"[Show all images]" \ "($help)--digests[Show digests]" \ - "($help)*"{-f=,--filter=}"[Filter values]:filter: " \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ "($help)--format[Pretty-print containers using a Go template]:format: " \ 
"($help)--no-trunc[Do not truncate output]" \ "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ "($help -): :__docker_repositories" && ret=0 + + case $state in + (filter-options) + __docker_complete_images_filters && ret=0 + ;; + esac ;; (import) _arguments $(__docker_arguments) \ @@ -1126,10 +1180,15 @@ __docker_subcommand() { (search) _arguments $(__docker_arguments) \ $opts_help \ - "($help)--automated[Only show automated builds]" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ "($help)--no-trunc[Do not truncate output]" \ - "($help -s --stars)"{-s=,--stars=}"[Only display with at least X stars]:stars:(0 10 100 1000)" \ "($help -):term: " && ret=0 + + case $state in + (filter-options) + __docker_complete_search_filters && ret=0 + ;; + esac ;; (start) _arguments $(__docker_arguments) \ diff --git a/daemon/config.go b/daemon/config.go index 2d29dcc6d2..f16026d311 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -88,7 +88,7 @@ type CommonConfig struct { Root string `json:"graph,omitempty"` SocketGroup string `json:"group,omitempty"` TrustKeyPath string `json:"-"` - CorsHeaders string `json:"api-cors-headers,omitempty"` + CorsHeaders string `json:"api-cors-header,omitempty"` EnableCors bool `json:"api-enable-cors,omitempty"` // ClusterStore is the storage backend used for the cluster information. It is used by both diff --git a/daemon/config_solaris.go b/daemon/config_solaris.go new file mode 100644 index 0000000000..69b98165d5 --- /dev/null +++ b/daemon/config_solaris.go @@ -0,0 +1,39 @@ +package daemon + +import ( + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExec = "zones" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // Fields below here are platform specific. + ExecRoot string `json:"exec-root,omitempty"` +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. 
+func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(cmd, usageFn) + + // Then platform-specific install flags + config.attachExperimentalFlags(cmd, usageFn) +} diff --git a/daemon/config_unix.go b/daemon/config_unix.go index 441c349c8e..f7a0ea599b 100644 --- a/daemon/config_unix.go +++ b/daemon/config_unix.go @@ -41,7 +41,7 @@ type bridgeConfig struct { EnableIPv6 bool `json:"ipv6,omitempty"` EnableIPTables bool `json:"iptables,omitempty"` EnableIPForward bool `json:"ip-forward,omitempty"` - EnableIPMasq bool `json:"ip-mask,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` EnableUserlandProxy bool `json:"userland-proxy,omitempty"` DefaultIP net.IP `json:"ip,omitempty"` IP string `json:"bip,omitempty"` diff --git a/daemon/container_operations_solaris.go b/daemon/container_operations_solaris.go new file mode 100644 index 0000000000..b98faaae16 --- /dev/null +++ b/daemon/container_operations_solaris.go @@ -0,0 +1,50 @@ +// +build solaris + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +// ConnectToNetwork connects a container to a network +func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { + return fmt.Errorf("Solaris does not support connecting a running container to a network") +} + +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + return 0, 0 +} + +// DisconnectFromNetwork disconnects a container from the network +func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { + return fmt.Errorf("Solaris does not support disconnecting a running container from a network") +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func isLinkable(child *container.Container) bool { + return false +} diff --git a/daemon/create.go b/daemon/create.go index eea3a1351d..188e6b5b38 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -9,6 +9,7 @@ import ( "github.com/docker/docker/layer" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" volumestore "github.com/docker/docker/volume/store" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" @@ -122,6 +123,9 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *containe if params.NetworkingConfig != nil { endpointsConfigs = params.NetworkingConfig.EndpointsConfig } + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. 
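+	// (SetDefaultNetModeIfBlank is expected to fill in the default network mode when
+	// an older client left NetworkMode empty, before the network settings are updated below.)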
+ container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { return nil, err diff --git a/daemon/daemon.go b/daemon/daemon.go index 524aeb8b60..aeaf8c8ed0 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -6,6 +6,7 @@ package daemon import ( + "encoding/json" "fmt" "io" "io/ioutil" @@ -15,6 +16,7 @@ import ( "path/filepath" "regexp" "runtime" + "strconv" "strings" "sync" "syscall" @@ -23,7 +25,6 @@ import ( "github.com/Sirupsen/logrus" containerd "github.com/docker/containerd/api/grpc/types" "github.com/docker/docker/api" - "github.com/docker/docker/builder" "github.com/docker/docker/container" "github.com/docker/docker/daemon/events" "github.com/docker/docker/daemon/exec" @@ -40,7 +41,6 @@ import ( "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" - "github.com/docker/docker/image/tarexport" "github.com/docker/docker/layer" "github.com/docker/docker/libcontainerd" "github.com/docker/docker/migrate/v1" @@ -64,6 +64,7 @@ import ( volumedrivers "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" "github.com/docker/docker/volume/store" + "github.com/docker/engine-api/types/filters" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork" nwconfig "github.com/docker/libnetwork/config" @@ -78,15 +79,6 @@ var ( errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") ) -// ErrImageDoesNotExist is error returned when no image can be found for a reference. -type ErrImageDoesNotExist struct { - RefOrID string -} - -func (e ErrImageDoesNotExist) Error() string { - return fmt.Sprintf("no such id: %s", e.RefOrID) -} - // Daemon holds information about the Docker daemon. type Daemon struct { ID string @@ -286,11 +278,6 @@ func (daemon *Daemon) restore() error { defer wg.Done() rm := c.RestartManager(false) if c.IsRunning() || c.IsPaused() { - // Fix activityCount such that graph mounts can be unmounted later - if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil { - logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err) - return - } if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil { logrus.Errorf("Failed to restore with containerd: %q", err) return @@ -808,7 +795,7 @@ func NewDaemon(config *Config, registryService *registry.Service, containerdRemo sysInfo := sysinfo.New(false) // Check if Devices cgroup is mounted, it is hard requirement for container security, // on Linux/FreeBSD. - if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled { + if runtime.GOOS != "windows" && runtime.GOOS != "solaris" && !sysInfo.CgroupDevicesEnabled { return nil, fmt.Errorf("Devices cgroup isn't mounted") } @@ -1006,221 +993,6 @@ func isBrokenPipe(e error) bool { return e == syscall.EPIPE } -// ExportImage exports a list of images to the given output stream. The -// exported images are archived into a tar when written to the output -// stream. All images with the given tag and all versions containing -// the same tag are exported. names is the set of tags to export, and -// outStream is the writer which the images are written to. 
-func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) - return imageExporter.Save(names, outStream) -} - -// LookupImage looks up an image by name and returns it as an ImageInspect -// structure. -func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, fmt.Errorf("No such image: %s", name) - } - - refs := daemon.referenceStore.References(img.ID()) - repoTags := []string{} - repoDigests := []string{} - for _, ref := range refs { - switch ref.(type) { - case reference.NamedTagged: - repoTags = append(repoTags, ref.String()) - case reference.Canonical: - repoDigests = append(repoDigests, ref.String()) - } - } - - var size int64 - var layerMetadata map[string]string - layerID := img.RootFS.ChainID() - if layerID != "" { - l, err := daemon.layerStore.Get(layerID) - if err != nil { - return nil, err - } - defer layer.ReleaseAndLog(daemon.layerStore, l) - size, err = l.Size() - if err != nil { - return nil, err - } - - layerMetadata, err = l.Metadata() - if err != nil { - return nil, err - } - } - - comment := img.Comment - if len(comment) == 0 && len(img.History) > 0 { - comment = img.History[len(img.History)-1].Comment - } - - imageInspect := &types.ImageInspect{ - ID: img.ID().String(), - RepoTags: repoTags, - RepoDigests: repoDigests, - Parent: img.Parent.String(), - Comment: comment, - Created: img.Created.Format(time.RFC3339Nano), - Container: img.Container, - ContainerConfig: &img.ContainerConfig, - DockerVersion: img.DockerVersion, - Author: img.Author, - Config: img.Config, - Architecture: img.Architecture, - Os: img.OS, - Size: size, - VirtualSize: size, // TODO: field unused, deprecate - RootFS: rootFSToAPIType(img.RootFS), - } - - imageInspect.GraphDriver.Name = daemon.GraphDriverName() - - imageInspect.GraphDriver.Data = layerMetadata - - return imageInspect, nil -} - -// LoadImage uploads a set of images into the repository. This is the -// complement of ImageExport. The input stream is an uncompressed tar -// ball containing images and metadata. -func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) - return imageExporter.Load(inTar, outStream, quiet) -} - -// ImageHistory returns a slice of ImageHistory structures for the specified image -// name by walking the image lineage. -func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - - history := []*types.ImageHistory{} - - layerCounter := 0 - rootFS := *img.RootFS - rootFS.DiffIDs = nil - - for _, h := range img.History { - var layerSize int64 - - if !h.EmptyLayer { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, fmt.Errorf("too many non-empty layers in History section") - } - - rootFS.Append(img.RootFS.DiffIDs[layerCounter]) - l, err := daemon.layerStore.Get(rootFS.ChainID()) - if err != nil { - return nil, err - } - layerSize, err = l.DiffSize() - layer.ReleaseAndLog(daemon.layerStore, l) - if err != nil { - return nil, err - } - - layerCounter++ - } - - history = append([]*types.ImageHistory{{ - ID: "", - Created: h.Created.Unix(), - CreatedBy: h.CreatedBy, - Comment: h.Comment, - Size: layerSize, - }}, history...) 
- } - - // Fill in image IDs and tags - histImg := img - id := img.ID() - for _, h := range history { - h.ID = id.String() - - var tags []string - for _, r := range daemon.referenceStore.References(id) { - if _, ok := r.(reference.NamedTagged); ok { - tags = append(tags, r.String()) - } - } - - h.Tags = tags - - id = histImg.Parent - if id == "" { - break - } - histImg, err = daemon.GetImage(id.String()) - if err != nil { - break - } - } - - return history, nil -} - -// GetImageID returns an image ID corresponding to the image referred to by -// refOrID. -func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { - id, ref, err := reference.ParseIDOrReference(refOrID) - if err != nil { - return "", err - } - if id != "" { - if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { - return "", ErrImageDoesNotExist{refOrID} - } - return image.ID(id), nil - } - - if id, err := daemon.referenceStore.Get(ref); err == nil { - return id, nil - } - if tagged, ok := ref.(reference.NamedTagged); ok { - if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { - for _, namedRef := range daemon.referenceStore.References(id) { - if namedRef.Name() == ref.Name() { - return id, nil - } - } - } - } - - // Search based on ID - if id, err := daemon.imageStore.Search(refOrID); err == nil { - return id, nil - } - - return "", ErrImageDoesNotExist{refOrID} -} - -// GetImage returns an image corresponding to the image referred to by refOrID. -func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { - imgID, err := daemon.GetImageID(refOrID) - if err != nil { - return nil, err - } - return daemon.imageStore.Get(imgID) -} - -// GetImageOnBuild looks up a Docker image referenced by `name`. -func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - return img, nil -} - // GraphDriverName returns the name of the graph driver used by the layer.Store func (daemon *Daemon) GraphDriverName() string { return daemon.layerStore.DriverName() @@ -1241,57 +1013,6 @@ func (daemon *Daemon) GetRemappedUIDGID() (int, int) { return uid, gid } -// GetCachedImage returns the most recent created image that is a child -// of the image with imgID, that had the same config when it was -// created. nil is returned if a child cannot be found. An error is -// returned if the parent image cannot be found. -func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { - // Loop on the children of the given image and check the config - getMatch := func(siblings []image.ID) (*image.Image, error) { - var match *image.Image - for _, id := range siblings { - img, err := daemon.imageStore.Get(id) - if err != nil { - return nil, fmt.Errorf("unable to find image %q", id) - } - - if runconfig.Compare(&img.ContainerConfig, config) { - // check for the most up to date match - if match == nil || match.Created.Before(img.Created) { - match = img - } - } - } - return match, nil - } - - // In this case, this is `FROM scratch`, which isn't an actual image. 
- if imgID == "" { - images := daemon.imageStore.Map() - var siblings []image.ID - for id, img := range images { - if img.Parent == imgID { - siblings = append(siblings, id) - } - } - return getMatch(siblings) - } - - // find match from child images - siblings := daemon.imageStore.Children(imgID) - return getMatch(siblings) -} - -// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` -// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. -func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) { - cache, err := daemon.GetCachedImage(image.ID(imgID), cfg) - if cache == nil || err != nil { - return "", err - } - return cache.ID().String(), nil -} - // tempDir returns the default directory to use for temporary files. func tempDir(rootDir string, rootUID, rootGID int) (string, error) { var tmpDir string @@ -1427,12 +1148,85 @@ func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *ty return daemon.RegistryService.Auth(authConfig, dockerversion.DockerUserAgent(ctx)) } +var acceptedSearchFilterTags = map[string]bool{ + "is-automated": true, + "is-official": true, + "stars": true, +} + // SearchRegistryForImages queries the registry for images matching // term. authConfig is used to login. -func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, term string, +func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, authConfig *types.AuthConfig, headers map[string][]string) (*registrytypes.SearchResults, error) { - return daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers) + + searchFilters, err := filters.FromParam(filtersArgs) + if err != nil { + return nil, err + } + if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { + return nil, err + } + + unfilteredResult, err := daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers) + if err != nil { + return nil, err + } + + var isAutomated, isOfficial bool + var hasStarFilter = 0 + if searchFilters.Include("is-automated") { + if searchFilters.ExactMatch("is-automated", "true") { + isAutomated = true + } else if !searchFilters.ExactMatch("is-automated", "false") { + return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) + } + } + if searchFilters.Include("is-official") { + if searchFilters.ExactMatch("is-official", "true") { + isOfficial = true + } else if !searchFilters.ExactMatch("is-official", "false") { + return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) + } + } + if searchFilters.Include("stars") { + hasStars := searchFilters.Get("stars") + for _, hasStar := range hasStars { + iHasStar, err := strconv.Atoi(hasStar) + if err != nil { + return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) + } + if iHasStar > hasStarFilter { + hasStarFilter = iHasStar + } + } + } + + filteredResults := []registrytypes.SearchResult{} + for _, result := range unfilteredResult.Results { + if searchFilters.Include("is-automated") { + if isAutomated != result.IsAutomated { + continue + } + } + if searchFilters.Include("is-official") { + if isOfficial != result.IsOfficial { + continue + } + } + if searchFilters.Include("stars") { + if result.StarCount < hasStarFilter { + continue + } + } + filteredResults = append(filteredResults, result) + } + + return ®istrytypes.SearchResults{ + Query: 
unfilteredResult.Query, + NumResults: len(filteredResults), + Results: filteredResults, + }, nil } // IsShuttingDown tells whether the daemon is shutting down or not @@ -1539,6 +1333,11 @@ func (daemon *Daemon) initDiscovery(config *Config) error { func (daemon *Daemon) Reload(config *Config) error { daemon.configStore.reloadLock.Lock() defer daemon.configStore.reloadLock.Unlock() + + if err := daemon.reloadClusterDiscovery(config); err != nil { + return err + } + if config.IsValueSet("labels") { daemon.configStore.Labels = config.Labels } @@ -1572,7 +1371,28 @@ func (daemon *Daemon) Reload(config *Config) error { daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) } - return daemon.reloadClusterDiscovery(config) + // We emit daemon reload event here with updatable configurations + attributes := map[string]string{} + attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) + attributes["cluster-store"] = daemon.configStore.ClusterStore + if daemon.configStore.ClusterOpts != nil { + opts, _ := json.Marshal(daemon.configStore.ClusterOpts) + attributes["cluster-store-opts"] = string(opts) + } else { + attributes["cluster-store-opts"] = "{}" + } + attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise + if daemon.configStore.Labels != nil { + labels, _ := json.Marshal(daemon.configStore.Labels) + attributes["labels"] = string(labels) + } else { + attributes["labels"] = "[]" + } + attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) + attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) + daemon.LogDaemonEventWithAttributes("reload", attributes) + + return nil } func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { diff --git a/daemon/daemon_solaris.go b/daemon/daemon_solaris.go new file mode 100644 index 0000000000..b43ec68481 --- /dev/null +++ b/daemon/daemon_solaris.go @@ -0,0 +1,159 @@ +// +build solaris,cgo + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" +) + +//#include +import "C" + +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + solarisMinCPUShares = 1 + solarisMaxCPUShares = 65535 +) + +func (daemon *Daemon) cleanupMountsByID(id string) error { + return nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + return nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + return nil +} + +// setupInitLayer populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. 
+func setupInitLayer(initLayer string, rootUID, rootGID int) error { + return nil +} + +func checkKernel() error { + // solaris can rely upon checkSystem() below, we don't skew kernel versions + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + return nil +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + return warnings, nil +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + // checkSystem validates platform-specific requirements + return nil +} + +func checkSystem() error { + // check OS version for compatibility, ensure running in global zone + var err error + var id C.zoneid_t + + if id, err = C.getzoneid(); err != nil { + return fmt.Errorf("Exiting. Error getting zone id: %+v", err) + } + if int(id) != 0 { + return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") + } + + v, err := kernel.GetKernelVersion() + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { + return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String()) + } + return err +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) { + return nil, nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return nil +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { + // Solaris has no custom images to register + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + return nil, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. 
This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + return types.RootFS{} +} diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go index 5e342feb4f..2392165002 100644 --- a/daemon/daemon_unix.go +++ b/daemon/daemon_unix.go @@ -488,7 +488,9 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) } - if sysInfo.IPv4ForwardingDisabled { + + // ip-forwarding does not affect container with '--net=host' (or '--net=none') + if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") } diff --git a/daemon/daemon_unsupported.go b/daemon/daemon_unsupported.go index 987528f476..cb1acf63d6 100644 --- a/daemon/daemon_unsupported.go +++ b/daemon/daemon_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd,!windows +// +build !linux,!freebsd,!windows,!solaris package daemon diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go index 5fd5d96537..1f9429accf 100644 --- a/daemon/daemon_windows.go +++ b/daemon/daemon_windows.go @@ -470,6 +470,10 @@ func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") + // On client SKUs, default to Hyper-V + if system.IsWindowsClient() { + daemon.defaultIsolation = containertypes.Isolation("hyperv") + } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { @@ -485,6 +489,12 @@ func (daemon *Daemon) setDefaultIsolation() error { if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } + if containertypes.Isolation(val).IsProcess() { + if system.IsWindowsClient() { + return fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + daemon.defaultIsolation = containertypes.Isolation("process") + } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go index fef1bd776b..eed8222f79 100644 --- a/daemon/debugtrap_unsupported.go +++ b/daemon/debugtrap_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!darwin,!freebsd,!windows +// +build !linux,!darwin,!freebsd,!windows,!solaris package daemon diff --git a/daemon/events.go b/daemon/events.go index 8ee04aa21a..7fb8cd29ca 100644 --- a/daemon/events.go +++ b/daemon/events.go @@ -80,6 +80,20 @@ func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, actio daemon.EventsService.Log(action, events.NetworkEventType, actor) } +// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. 
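+// The daemon's name is included in the attributes when it can be determined. Reload(),
+// for instance, uses this to emit a "reload" event whose attributes carry the updatable
+// settings: debug, labels, cluster-store, cluster-store-opts, cluster-advertise,
+// max-concurrent-downloads and max-concurrent-uploads.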
+func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { + if daemon.EventsService != nil { + if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + attributes["name"] = info.Name + } + actor := events.Actor{ + ID: daemon.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.DaemonEventType, actor) + } +} + // SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { ef := daemonevents.NewFilter(filter) diff --git a/daemon/events/filter.go b/daemon/events/filter.go index 8936e371d2..b38c034c9f 100644 --- a/daemon/events/filter.go +++ b/daemon/events/filter.go @@ -20,6 +20,7 @@ func NewFilter(filter filters.Args) *Filter { func (ef *Filter) Include(ev events.Message) bool { return ef.filter.ExactMatch("event", ev.Action) && ef.filter.ExactMatch("type", ev.Type) && + ef.matchDaemon(ev) && ef.matchContainer(ev) && ef.matchVolume(ev) && ef.matchNetwork(ev) && @@ -34,6 +35,10 @@ func (ef *Filter) matchLabels(attributes map[string]string) bool { return ef.filter.MatchKVList("label", attributes) } +func (ef *Filter) matchDaemon(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.DaemonEventType) +} + func (ef *Filter) matchContainer(ev events.Message) bool { return ef.fuzzyMatchName(ev, events.ContainerEventType) } diff --git a/daemon/exec_solaris.go b/daemon/exec_solaris.go new file mode 100644 index 0000000000..7003355d91 --- /dev/null +++ b/daemon/exec_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + return nil +} diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 83380e1143..044351b7ee 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -70,6 +70,7 @@ type Driver struct { root string uidMaps []idtools.IDMap gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter pathCacheLock sync.Mutex pathCache map[string]string } @@ -108,6 +109,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap uidMaps: uidMaps, gidMaps: gidMaps, pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) @@ -320,6 +322,9 @@ func (a *Driver) Get(id, mountLabel string) (string, error) { m = a.getMountpoint(id) } } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data @@ -344,6 +349,9 @@ func (a *Driver) Put(id string) error { a.pathCache[id] = m } a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } err := a.unmount(m) if err != nil { diff --git a/daemon/graphdriver/counter.go b/daemon/graphdriver/counter.go index 572fc9be47..5ea604f5b6 100644 --- a/daemon/graphdriver/counter.go +++ b/daemon/graphdriver/counter.go @@ -2,31 +2,66 @@ package graphdriver import "sync" +type minfo struct { + check bool + count int +} + // RefCounter is a generic counter for use by graphdriver Get/Put calls type RefCounter struct { - counts 
map[string]int - mu sync.Mutex + counts map[string]*minfo + mu sync.Mutex + checker Checker } // NewRefCounter returns a new RefCounter -func NewRefCounter() *RefCounter { - return &RefCounter{counts: make(map[string]int)} +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } } // Increment increaes the ref count for the given id and returns the current count -func (c *RefCounter) Increment(id string) int { +func (c *RefCounter) Increment(path string) int { c.mu.Lock() - c.counts[id]++ - count := c.counts[id] + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count++ c.mu.Unlock() - return count + return m.count } // Decrement decreases the ref count for the given id and returns the current count -func (c *RefCounter) Decrement(id string) int { +func (c *RefCounter) Decrement(path string) int { c.mu.Lock() - c.counts[id]-- - count := c.counts[id] + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count-- c.mu.Unlock() - return count + return m.count } diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 7cd90e924a..38fa3ece70 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -47,7 +47,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap home: home, uidMaps: uidMaps, gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(), + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil @@ -160,35 +160,35 @@ func (d *Driver) Remove(id string) error { // Get mounts a device with given id into the root filesystem func (d *Driver) Get(id, mountLabel string) (string, error) { mp := path.Join(d.home, "mnt", id) - if count := d.ctr.Increment(id); count > 1 { + if count := d.ctr.Increment(mp); count > 1 { return mp, nil } uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { - d.ctr.Decrement(id) + d.ctr.Decrement(mp) return "", err } // Create the target directories if they don't exist if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(id) + d.ctr.Decrement(mp) return "", err } if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(id) + d.ctr.Decrement(mp) return "", err } // Mount the device if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { - d.ctr.Decrement(id) + d.ctr.Decrement(mp) return "", err } rootFs := path.Join(mp, "rootfs") if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(id) + d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) return "", err } @@ -198,7 +198,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Create an "id" file with the container/image id in it to help reconstruct 
this in case // of later problems if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { - d.ctr.Decrement(id) + d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) return "", err } @@ -209,10 +209,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Put unmounts a device and removes it. func (d *Driver) Put(id string) error { - if count := d.ctr.Decrement(id); count > 0 { + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { return nil } - mp := path.Join(d.home, "mnt", id) err := d.DeviceSet.UnmountDevice(id, mp) if err != nil { logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index 495bac2cf5..79f6789f99 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -113,6 +113,12 @@ type FileGetCloser interface { Close() error } +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + func init() { drivers = make(map[string]InitFunc) } diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go index 2ab20b01a9..70b2ce22f1 100644 --- a/daemon/graphdriver/driver_linux.go +++ b/daemon/graphdriver/driver_linux.go @@ -5,6 +5,8 @@ package graphdriver import ( "path/filepath" "syscall" + + "github.com/docker/docker/pkg/mount" ) const ( @@ -89,6 +91,36 @@ func GetFSMagic(rootpath string) (FsMagic, error) { return FsMagic(buf.Type), nil } +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf syscall.Statfs_t diff --git a/daemon/graphdriver/driver_solaris.go b/daemon/graphdriver/driver_solaris.go new file mode 100644 index 0000000000..29719ffa4f --- /dev/null +++ b/daemon/graphdriver/driver_solaris.go @@ -0,0 +1,65 @@ +// +build solaris,cgo + +package graphdriver + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "unsafe" + + log "github.com/Sirupsen/logrus" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. 
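The fs-magic checker added in driver_linux.go above decides whether a path is already mounted by comparing the filesystem magic reported by statfs with the value the driver expects, and the new Solaris Mounted below does the same with statvfs and the f_basetype string. The following is a minimal, standalone Linux sketch of that check; the constant values mirror the graphdriver FsMagic constants, while the function and variable names are illustrative only.

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
)

// Filesystem magic numbers, matching the graphdriver FsMagic constants.
const (
	fsMagicAufs    = 0x61756673
	fsMagicOverlay = 0x794c7630
)

// mountedAs reports whether path currently sits on a filesystem with the
// given magic number; this is the check behind the fs-magic Checker.
func mountedAs(magic int64, path string) (bool, error) {
	var buf syscall.Statfs_t
	if err := syscall.Statfs(path, &buf); err != nil {
		return false, err
	}
	return int64(buf.Type) == magic, nil
}

func main() {
	mounted, err := mountedAs(fsMagicOverlay, "/var/lib/docker/overlay")
	if err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Println("mounted as overlay:", mounted)
}
```

A checker built this way is cheap enough to run on the first reference to every mountpoint, which is how the ref counters in this patch use it.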
+func GetFSMagic(rootpath string) (FsMagic, error) { + return 0, nil +} + +// Mounted checks if the given path is mounted as the fs type +//Solaris supports only ZFS for now +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + + cs := C.CString(filepath.Dir(mountPath)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + C.free(unsafe.Pointer(buf)) + return false, ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return true, nil +} diff --git a/daemon/graphdriver/driver_unsupported.go b/daemon/graphdriver/driver_unsupported.go index b3f6857309..4a875608b0 100644 --- a/daemon/graphdriver/driver_unsupported.go +++ b/daemon/graphdriver/driver_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd +// +build !linux,!windows,!freebsd,!solaris package graphdriver diff --git a/daemon/graphdriver/driver_windows.go b/daemon/graphdriver/driver_windows.go index 6c09affae3..ffd30c2950 100644 --- a/daemon/graphdriver/driver_windows.go +++ b/daemon/graphdriver/driver_windows.go @@ -4,8 +4,6 @@ var ( // Slice of drivers that should be used in order priority = []string{ "windowsfilter", - "windowsdiff", - "vfs", } ) diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go index a03a5acea6..56325ae7bc 100644 --- a/daemon/graphdriver/overlay/overlay.go +++ b/daemon/graphdriver/overlay/overlay.go @@ -9,7 +9,6 @@ import ( "os" "os/exec" "path" - "sync" "syscall" "github.com/Sirupsen/logrus" @@ -92,12 +91,10 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea // Driver contains information about the home directory and the list of active mounts that are created using this driver. type Driver struct { - home string - pathCacheLock sync.Mutex - pathCache map[string]string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter } func init() { @@ -141,11 +138,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap } d := &Driver{ - home: home, - pathCache: make(map[string]string), - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(), + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), } return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil @@ -328,110 +324,64 @@ func (d *Driver) Remove(id string) error { if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { return err } - d.pathCacheLock.Lock() - delete(d.pathCache, id) - d.pathCacheLock.Unlock() return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
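Several drivers in this patch (aufs, overlay, devmapper, zfs and the Windows filter driver) now follow the same pattern: the RefCounter is keyed by the mountpoint path and constructed with a Checker, so the first time a path is seen the counter asks whether it is already mounted and, if so, starts with one extra reference. The sketch below is a simplified, self-contained version of that idea rather than the real daemon/graphdriver package; the alwaysMounted checker stands in for NewFsChecker or NewDefaultChecker.

```go
package main

import (
	"fmt"
	"sync"
)

// Checker reports whether a path is currently mounted; the real
// implementations consult the statfs magic or /proc/mountinfo.
type Checker interface {
	IsMounted(path string) bool
}

type minfo struct {
	checked bool
	count   int
}

// refCounter is a simplified version of the patched graphdriver.RefCounter.
type refCounter struct {
	mu      sync.Mutex
	checker Checker
	counts  map[string]*minfo
}

func newRefCounter(c Checker) *refCounter {
	return &refCounter{checker: c, counts: make(map[string]*minfo)}
}

// seed gives a path one extra reference the first time it is seen if the
// checker says it is already mounted, so an existing mount is never torn
// down by a later Put.
func (r *refCounter) seed(m *minfo, path string) {
	if !m.checked {
		m.checked = true
		if r.checker.IsMounted(path) {
			m.count++
		}
	}
}

func (r *refCounter) get(path string) *minfo {
	m := r.counts[path]
	if m == nil {
		m = &minfo{}
		r.counts[path] = m
	}
	return m
}

// Increment bumps the reference count for a mountpoint and returns it.
func (r *refCounter) Increment(path string) int {
	r.mu.Lock()
	defer r.mu.Unlock()
	m := r.get(path)
	r.seed(m, path)
	m.count++
	return m.count
}

// Decrement drops one reference and returns the remaining count.
func (r *refCounter) Decrement(path string) int {
	r.mu.Lock()
	defer r.mu.Unlock()
	m := r.get(path)
	r.seed(m, path)
	m.count--
	return m.count
}

// alwaysMounted pretends every path is already mounted, standing in for
// NewFsChecker or NewDefaultChecker from the patch.
type alwaysMounted struct{}

func (alwaysMounted) IsMounted(string) bool { return true }

func main() {
	ctr := newRefCounter(alwaysMounted{})
	p := "/var/lib/docker/aufs/mnt/example"
	fmt.Println(ctr.Increment(p)) // 2: one seeded reference plus this caller
	fmt.Println(ctr.Decrement(p)) // 1: the pre-existing mount stays protected
}
```

Seeding the count this way keeps a mount that predates the daemon (for example after a restart) from being unmounted by the first Put that would otherwise reach zero.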
-func (d *Driver) Get(id string, mountLabel string) (string, error) { +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return "", err } - // If id has a root, just return it rootDir := path.Join(dir, "root") if _, err := os.Stat(rootDir); err == nil { - d.pathCacheLock.Lock() - d.pathCache[id] = rootDir - d.pathCacheLock.Unlock() return rootDir, nil } - + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) if err != nil { return "", err } - lowerDir := path.Join(d.dir(string(lowerID)), "root") - upperDir := path.Join(dir, "upper") - workDir := path.Join(dir, "work") - mergedDir := path.Join(dir, "merged") - - if count := d.ctr.Increment(id); count > 1 { - return mergedDir, nil - } - - opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) - - // if it's mounted already, just return - mounted, err := d.mounted(mergedDir) - if err != nil { - d.ctr.Decrement(id) - return "", err - } - if mounted { - d.ctr.Decrement(id) - return mergedDir, nil - } - + var ( + lowerDir = path.Join(d.dir(string(lowerID)), "root") + upperDir = path.Join(dir, "upper") + workDir = path.Join(dir, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + ) if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { - d.ctr.Decrement(id) return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { - d.ctr.Decrement(id) - syscall.Unmount(mergedDir, 0) return "", err } - if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - d.ctr.Decrement(id) - syscall.Unmount(mergedDir, 0) return "", err } - - d.pathCacheLock.Lock() - d.pathCache[id] = mergedDir - d.pathCacheLock.Unlock() - return mergedDir, nil } -func (d *Driver) mounted(dir string) (bool, error) { - return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir) -} - // Put unmounts the mount path created for the give id. 
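The rewritten overlay Get above takes a reference on the merged directory before doing any real work, and a deferred cleanup makes every later error path release that reference and unmount only when nobody else holds it; the Put shown just below is the mirror image, unmounting only when the last reference is dropped. Here is a compact, runnable sketch of that control flow, with stand-in mount and unmount functions and a toy counter in place of graphdriver.RefCounter.

```go
package main

import "fmt"

// counter is a toy stand-in for graphdriver.RefCounter, just enough to show
// the control flow; the real one is mutex-protected and checker-seeded.
type counter map[string]int

func (c counter) Increment(p string) int { c[p]++; return c[p] }
func (c counter) Decrement(p string) int { c[p]--; return c[p] }

// mount and unmount stand in for syscall.Mount and syscall.Unmount so the
// sketch runs anywhere.
func mount(target string) error   { fmt.Println("mount", target); return nil }
func unmount(target string) error { fmt.Println("unmount", target); return nil }

// get mirrors the rewritten overlay Get: take a reference first, then rely on
// a deferred cleanup so any later error drops the reference and unmounts
// once nobody else is using the directory.
func get(ctr counter, mergedDir string) (s string, err error) {
	if count := ctr.Increment(mergedDir); count > 1 {
		return mergedDir, nil // another caller already mounted it
	}
	defer func() {
		if err != nil {
			if c := ctr.Decrement(mergedDir); c <= 0 {
				unmount(mergedDir)
			}
		}
	}()
	if err := mount(mergedDir); err != nil {
		return "", err
	}
	// ...chown the work directory, etc.; returning an error here would
	// trigger the deferred cleanup above.
	return mergedDir, nil
}

// put mirrors Put: only the caller that drops the last reference unmounts.
func put(ctr counter, mergedDir string) error {
	if count := ctr.Decrement(mergedDir); count > 0 {
		return nil
	}
	return unmount(mergedDir)
}

func main() {
	ctr := counter{}
	dir := "/var/lib/docker/overlay/example/merged" // illustrative path
	if _, err := get(ctr, dir); err != nil {
		fmt.Println("get failed:", err)
		return
	}
	get(ctr, dir) // second user: no new mount
	put(ctr, dir) // still referenced: no unmount
	put(ctr, dir) // last reference gone: unmount happens
}
```

Keying the counter by the merged directory rather than the layer id means Increment and Decrement always agree on the same string, which is also why the devmapper, zfs and Windows drivers in this patch switch from ids to mountpoints.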
func (d *Driver) Put(id string) error { - if count := d.ctr.Decrement(id); count > 0 { + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - d.pathCacheLock.Lock() - mountpoint, exists := d.pathCache[id] - d.pathCacheLock.Unlock() - - if !exists { - logrus.Debugf("Put on a non-mounted device %s", id) - // but it might be still here - if d.Exists(id) { - mountpoint = path.Join(d.dir(id), "merged") - } - - d.pathCacheLock.Lock() - d.pathCache[id] = mountpoint - d.pathCacheLock.Unlock() - } - - if mounted, err := d.mounted(mountpoint); mounted || err != nil { - if err = syscall.Unmount(mountpoint, 0); err != nil { - logrus.Debugf("Failed to unmount %s overlay: %v", id, err) - } - return err + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) } return nil } diff --git a/daemon/graphdriver/register/register_zfs.go b/daemon/graphdriver/register/register_zfs.go index 8c31c415f4..8f34e35537 100644 --- a/daemon/graphdriver/register/register_zfs.go +++ b/daemon/graphdriver/register/register_zfs.go @@ -1,4 +1,4 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris package register diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go index 622babdc0e..99330d62a1 100644 --- a/daemon/graphdriver/windows/windows.go +++ b/daemon/graphdriver/windows/windows.go @@ -15,6 +15,7 @@ import ( "path/filepath" "strconv" "strings" + "sync" "syscall" "time" "unsafe" @@ -26,7 +27,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/longpath" @@ -35,28 +35,33 @@ import ( "github.com/vbatts/tar-split/tar/storage" ) +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + // init registers the windows graph drivers to the register. func init() { graphdriver.Register("windowsfilter", InitFilter) - graphdriver.Register("windowsdiff", InitDiff) reexec.Register("docker-windows-write-layer", writeLayer) } -const ( - // diffDriver is an hcsshim driver type - diffDriver = iota - // filterDriver is an hcsshim driver type - filterDriver -) +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} // Driver represents a windows graph driver. type Driver struct { // info stores the shim driver information info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string } -var _ graphdriver.DiffGetterDriver = &Driver{} - func isTP5OrOlder() bool { return system.GetOSVersion().Build <= 14300 } @@ -69,32 +74,15 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) HomeDir: home, Flavour: filterDriver, }, - } - return d, nil -} - -// InitDiff returns a new Windows differencing disk driver. 
-func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - logrus.Debugf("WindowsGraphDriver InitDiff at %s", home) - d := &Driver{ - info: hcsshim.DriverInfo{ - HomeDir: home, - Flavour: diffDriver, - }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), } return d, nil } // String returns the string representation of a driver. func (d *Driver) String() string { - switch d.info.Flavour { - case diffDriver: - return "windowsdiff" - case filterDriver: - return "windowsfilter" - default: - return "Unknown driver flavour" - } + return "Windows filter storage driver" } // Status returns the status of the driver. @@ -238,17 +226,23 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { if err != nil { return "", err } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } // Getting the layer paths must be done outside of the lock. layerChain, err := d.getLayerChain(rID) if err != nil { + d.ctr.Decrement(rID) return "", err } if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) return "", err } if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } @@ -257,11 +251,15 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) if err != nil { + d.ctr.Decrement(rID) if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } return "", err } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() // If the layer has a mount path, use that. Otherwise, use the // folder path. @@ -282,6 +280,12 @@ func (d *Driver) Put(id string) error { if err != nil { return err } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + delete(d.cache, rID) + d.cacheMu.Unlock() if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { return err @@ -390,20 +394,6 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { // new layer in bytes. 
// The layer should not be mounted when calling this function func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { - if d.info.Flavour == diffDriver { - start := time.Now().UTC() - logrus.Debugf("WindowsGraphDriver ApplyDiff: Start untar layer") - destination := d.dir(id) - destination = filepath.Dir(destination) - size, err := chrootarchive.ApplyUncompressedLayer(destination, diff, nil) - if err != nil { - return 0, err - } - logrus.Debugf("WindowsGraphDriver ApplyDiff: Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - return size, nil - } - var layerChain []string if parent != "" { rPId, err := d.resolveID(parent) diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go index ffde8a545f..b06ccbe476 100644 --- a/daemon/graphdriver/zfs/zfs.go +++ b/daemon/graphdriver/zfs/zfs.go @@ -1,4 +1,4 @@ -// +build linux freebsd +// +build linux freebsd solaris package zfs @@ -105,7 +105,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri filesystemsCache: filesystemsCache, uidMaps: uidMaps, gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(), + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } @@ -307,7 +307,7 @@ func (d *Driver) Remove(id string) error { // Get returns the mountpoint for the given id after creating the target directories if necessary. func (d *Driver) Get(id, mountLabel string) (string, error) { mountpoint := d.mountPath(id) - if count := d.ctr.Increment(id); count > 1 { + if count := d.ctr.Increment(mountpoint); count > 1 { return mountpoint, nil } @@ -317,17 +317,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { - d.ctr.Decrement(id) + d.ctr.Decrement(mountpoint) return "", err } // Create the target directories if they don't exist if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { - d.ctr.Decrement(id) + d.ctr.Decrement(mountpoint) return "", err } if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { - d.ctr.Decrement(id) + d.ctr.Decrement(mountpoint) return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) } @@ -335,7 +335,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // permissions instead of the remapped root uid:gid (if user namespaces are enabled): if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { mount.Unmount(mountpoint) - d.ctr.Decrement(id) + d.ctr.Decrement(mountpoint) return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) } @@ -344,10 +344,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Put removes the existing mountpoint for the given id if it exists. 
func (d *Driver) Put(id string) error { - if count := d.ctr.Decrement(id); count > 0 { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - mountpoint := d.mountPath(id) mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) if err != nil || !mounted { return err diff --git a/daemon/graphdriver/zfs/zfs_solaris.go b/daemon/graphdriver/zfs/zfs_solaris.go new file mode 100644 index 0000000000..0bf6c3d071 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_solaris.go @@ -0,0 +1,59 @@ +// +build solaris,cgo + +package zfs + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "strings" + "unsafe" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + + cs := C.CString(filepath.Dir(rootdir)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + C.free(unsafe.Pointer(buf)) + return graphdriver.ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return nil +} + +/* rootfs is introduced to comply with the OCI spec +which states that root filesystem must be mounted at /rootfs/ instead of / +*/ +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") + } + + return filepath.Join(id[:maxlen], "rootfs", "root") +} diff --git a/daemon/graphdriver/zfs/zfs_unsupported.go b/daemon/graphdriver/zfs/zfs_unsupported.go index 643b169bc5..ce8daadaf6 100644 --- a/daemon/graphdriver/zfs/zfs_unsupported.go +++ b/daemon/graphdriver/zfs/zfs_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd +// +build !linux,!freebsd,!solaris package zfs diff --git a/daemon/image.go b/daemon/image.go new file mode 100644 index 0000000000..9a3fa1aeaa --- /dev/null +++ b/daemon/image.go @@ -0,0 +1,124 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + containertypes "github.com/docker/engine-api/types/container" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. 
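The new GetImageID below resolves a user-supplied string in a fixed order: a full image ID first, then an exact reference from the reference store, then a tag or ID-prefix search. The toy, map-backed sketch here only illustrates that ordering; the image ID and repository name are invented, and the real method goes through the daemon's image and reference stores rather than plain maps.

```go
package main

import (
	"fmt"
	"strings"
)

// Toy lookup tables; the image ID and repository name are invented.
var (
	images = map[string]bool{
		"sha256:0e5574283393c9f3e08e737e5b1bd4a42b64b1d231966e5fbe3b8a136bbdb507": true,
	}
	references = map[string]string{
		"busybox:latest": "sha256:0e5574283393c9f3e08e737e5b1bd4a42b64b1d231966e5fbe3b8a136bbdb507",
	}
)

// resolve follows the same order as the new GetImageID: a full image ID
// wins, then an exact reference, then an unambiguous ID prefix.
func resolve(refOrID string) (string, error) {
	if images[refOrID] {
		return refOrID, nil
	}
	if id, ok := references[refOrID]; ok {
		return id, nil
	}
	var match string
	for id := range images {
		if strings.HasPrefix(strings.TrimPrefix(id, "sha256:"), refOrID) {
			if match != "" {
				return "", fmt.Errorf("ambiguous id: %s", refOrID)
			}
			match = id
		}
	}
	if match != "" {
		return match, nil
	}
	return "", fmt.Errorf("no such id: %s", refOrID)
}

func main() {
	fmt.Println(resolve("busybox:latest"))
	fmt.Println(resolve("0e5574"))
	fmt.Println(resolve("doesnotexist"))
}
```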
+func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + id, ref, err := reference.ParseIDOrReference(refOrID) + if err != nil { + return "", err + } + if id != "" { + if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } + return image.ID(id), nil + } + + if id, err := daemon.referenceStore.Get(ref); err == nil { + return id, nil + } + if tagged, ok := ref.(reference.NamedTagged); ok { + if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { + for _, namedRef := range daemon.referenceStore.References(id) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. +func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} + +// GetImageOnBuild looks up a Docker image referenced by `name`. +func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + return img, nil +} + +// GetCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. +func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) +} + +// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` +// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. +func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) { + cache, err := daemon.GetCachedImage(image.ID(imgID), cfg) + if cache == nil || err != nil { + return "", err + } + return cache.ID().String(), nil +} diff --git a/daemon/image_exporter.go b/daemon/image_exporter.go new file mode 100644 index 0000000000..95d1d3dcdb --- /dev/null +++ b/daemon/image_exporter.go @@ -0,0 +1,25 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/image/tarexport" +) + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. 
All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Save(names, outStream) +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Load(inTar, outStream, quiet) +} diff --git a/daemon/image_history.go b/daemon/image_history.go new file mode 100644 index 0000000000..05140d3685 --- /dev/null +++ b/daemon/image_history.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) + } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + + return history, nil +} diff --git a/daemon/image_inspect.go b/daemon/image_inspect.go new file mode 100644 index 0000000000..5b0022688e --- /dev/null +++ b/daemon/image_inspect.go @@ -0,0 +1,81 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. 
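ImageHistory above recovers each layer's size by appending the image's diff IDs, one per non-empty history entry, to a copy of the RootFS and asking the layer store for the layer at the resulting ChainID. The chain ID itself is just a running digest over the ordered diff IDs; the sketch below computes it directly, following the usual definition (the sample diff IDs are placeholders).

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID computes a layer chain identifier from an ordered list of diff
// IDs: the first chain ID equals the first diff ID, and each subsequent one
// is sha256 over "<previous chain ID> <next diff ID>".
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, diff := range diffIDs[1:] {
		id = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id+" "+diff)))
	}
	return id
}

func main() {
	// Placeholder diff IDs, only to show the shape of the computation.
	diffIDs := []string{
		"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
		"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
	}
	fmt.Println(chainID(diffIDs))
}
```

The v1.1 image specification added at the end of this patch spells out the same derivation for layer chain IDs.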
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, ref.String()) + case reference.Canonical: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} diff --git a/daemon/inspect_solaris.go b/daemon/inspect_solaris.go new file mode 100644 index 0000000000..2e49bef3a3 --- /dev/null +++ b/daemon/inspect_solaris.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/engine-api/types" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +// containerInspectPre120 get containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { + return daemon.containerInspectCurrent(name, false) +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/daemon/inspect_unix.go b/daemon/inspect_unix.go index 6033c02dd7..9d72d145df 100644 --- a/daemon/inspect_unix.go +++ b/daemon/inspect_unix.go @@ -1,4 +1,4 @@ -// +build !windows +// +build !windows,!solaris package daemon diff --git a/daemon/list_unix.go b/daemon/list_unix.go index 8dccbe4e89..91c9caccf4 100644 --- a/daemon/list_unix.go +++ b/daemon/list_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd +// +build linux freebsd solaris package daemon diff --git a/daemon/monitor_solaris.go b/daemon/monitor_solaris.go new file mode 100644 index 0000000000..5ccfada76a --- /dev/null +++ b/daemon/monitor_solaris.go @@ -0,0 +1,18 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/daemon/oci_solaris.go b/daemon/oci_solaris.go new file mode 100644 index 0000000000..05eca21169 --- /dev/null +++ b/daemon/oci_solaris.go @@ -0,0 +1,12 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/oci" +) + +func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { + s := oci.DefaultSpec() + return (*libcontainerd.Spec)(&s), nil +} diff --git a/daemon/seccomp_linux.go b/daemon/seccomp_linux.go index 659a15decd..34ffcb5975 100644 --- a/daemon/seccomp_linux.go +++ b/daemon/seccomp_linux.go @@ -35,7 +35,7 @@ func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { return err } } else { - profile, err = seccomp.GetDefaultProfile() + profile, err = seccomp.GetDefaultProfile(rs) if err != nil { return err } diff --git a/daemon/stats_collector_solaris.go b/daemon/stats_collector_solaris.go new file mode 100644 index 0000000000..9cf9f0a94e --- /dev/null +++ b/daemon/stats_collector_solaris.go @@ -0,0 +1,34 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "time" +) + +// newStatsCollector returns a new statsCollector for collection stats +// for a registered container at the specified interval. The collector allows +// non-running containers to be added and will start processing stats when +// they are started. 
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + return &statsCollector{} +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + return nil +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { +} diff --git a/daemon/stats_collector_unix.go b/daemon/stats_collector_unix.go index 1f016322df..f66dc2c3c7 100644 --- a/daemon/stats_collector_unix.go +++ b/daemon/stats_collector_unix.go @@ -1,4 +1,4 @@ -// +build !windows +// +build !windows,!solaris package daemon diff --git a/daemon/update_solaris.go b/daemon/update_solaris.go new file mode 100644 index 0000000000..848adae9d2 --- /dev/null +++ b/daemon/update_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/libcontainerd" + "github.com/docker/engine-api/types/container" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/distribution/xfer/download_test.go b/distribution/xfer/download_test.go index 5a38e3f038..330882f24f 100644 --- a/distribution/xfer/download_test.go +++ b/distribution/xfer/download_test.go @@ -121,10 +121,6 @@ func (ls *mockLayerStore) GetMountID(string) (string, error) { return "", errors.New("not implemented") } -func (ls *mockLayerStore) ReinitRWLayer(layer.RWLayer) error { - return errors.New("not implemented") -} - func (ls *mockLayerStore) Cleanup() error { return nil } diff --git a/docs/Makefile b/docs/Makefile index 70aa7b171c..2e23c59307 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -23,9 +23,8 @@ HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER HUGO_BIND_IP=0.0.0.0 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) - +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE diff --git a/docs/breaking_changes.md b/docs/breaking_changes.md index 6d34b9f802..ac3a863057 100644 --- a/docs/breaking_changes.md +++ b/docs/breaking_changes.md @@ -22,6 +22,13 @@ Unfortunately, Docker is a fast moving project, and newly introduced features may sometime introduce breaking changes and/or incompatibilities. This page documents these by Engine version. +# Engine 1.12 + +Docker clients <= 1.9.2 used an invalid Host header when making request to the +daemon. Docker 1.12 is built using golang 1.6 which is now checking the validity +of the Host header and as such clients <= 1.9.2 can't talk anymore to the daemon. +[An environment variable was added to overcome this issue.](reference/commandline/dockerd.md#miscellaneous-options) + # Engine 1.10 There were two breaking changes in the 1.10 release. 
diff --git a/docs/deprecated.md b/docs/deprecated.md index df87118478..d80f24e729 100644 --- a/docs/deprecated.md +++ b/docs/deprecated.md @@ -58,6 +58,15 @@ defining it at container creation (`POST /containers/create`). The `docker ps --before` and `docker ps --since` options are deprecated. Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. +### Docker search 'automated' and 'stars' options + +**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Removed In Release: v1.14** + +The `docker search --automated` and `docker search --stars` options are deprecated. +Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead. + ### Command line short variant options **Deprecated In Release: v1.9** diff --git a/docs/reference/api/docker_remote_api.md b/docs/reference/api/docker_remote_api.md index 3e6f197451..626485effa 100644 --- a/docs/reference/api/docker_remote_api.md +++ b/docs/reference/api/docker_remote_api.md @@ -118,6 +118,9 @@ This section lists each version from latest to oldest. Each listing includes a * `POST /containers/create` now takes `MaximumIOps` and `MaximumIOBps` fields. Windows daemon only. * `POST /containers/create` now returns a HTTP 400 "bad parameter" message if no command is specified (instead of a HTTP 500 "server error") +* `GET /images/search` now takes a `filters` query parameter. +* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded. +* `GET /events` now supports filtering by daemon name or ID. ### v1.23 API changes diff --git a/docs/reference/api/docker_remote_api_v1.24.md b/docs/reference/api/docker_remote_api_v1.24.md index aa47ef7f2a..b7d67d0f29 100644 --- a/docs/reference/api/docker_remote_api_v1.24.md +++ b/docs/reference/api/docker_remote_api_v1.24.md @@ -2133,6 +2133,10 @@ Search for an image on [Docker Hub](https://hub.docker.com). Query Parameters: - **term** – term to search +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` Status Codes: @@ -2416,6 +2420,10 @@ Docker networks report the following events: create, connect, disconnect, destroy +Docker daemon report the following event: + + reload + **Example request**: GET /events?since=1374067924 @@ -2585,9 +2593,10 @@ Query Parameters: - `event=`; -- event to filter - `image=`; -- image to filter - `label=`; -- image and container label to filter - - `type=`; -- either `container` or `image` or `volume` or `network` + - `type=`; -- either `container` or `image` or `volume` or `network` or `daemon` - `volume=`; -- volume to filter - `network=`; -- network to filter + - `daemon=`; -- daemon name or id to filter Status Codes: diff --git a/docs/reference/builder.md b/docs/reference/builder.md index 49e33cc1c9..6ed7fe661c 100644 --- a/docs/reference/builder.md +++ b/docs/reference/builder.md @@ -362,6 +362,15 @@ RUN /bin/bash -c 'source $HOME/.bashrc ; echo $HOME' > `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. > If you want shell processing then either use the *shell* form or execute > a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. +> +> **Note**: +> In the *JSON* form, it is necessary to escape backslashes. This is +> particularly relevant on Windows where the backslash is the path seperator. 
+> The following line would otherwise be treated as *shell* form due to not +> being valid JSON, and fail in an unexpected way: +> `RUN ["c:\windows\system32\tasklist.exe"]` +> The correct syntax for this example is: +> `RUN ["c:\\windows\\system32\\tasklist.exe"]` The cache for `RUN` instructions isn't invalidated automatically during the next build. The cache for an instruction like diff --git a/docs/reference/commandline/build.md b/docs/reference/commandline/build.md index eb1bb426b7..ea820b5d96 100644 --- a/docs/reference/commandline/build.md +++ b/docs/reference/commandline/build.md @@ -225,7 +225,8 @@ uploaded context. The builder reference contains detailed information on $ docker build -t vieux/apache:2.0 . This will build like the previous example, but it will then tag the resulting -image. The repository name will be `vieux/apache` and the tag will be `2.0` +image. The repository name will be `vieux/apache` and the tag will be `2.0`. +[Read more about valid tags](tag.md). You can apply multiple tags to an image. For example, you can apply the `latest` tag to a newly built image and add another tag that references a specific @@ -298,6 +299,9 @@ accessed like regular environment variables in the `RUN` instruction of the Dockerfile. Also, these values don't persist in the intermediate or final images like `ENV` values do. +Using this flag will not alter the output you see when the `ARG` lines from the +Dockerfile are echoed during the build process. + For detailed information on using `ARG` and `ENV` instructions, see the [Dockerfile reference](../builder.md). diff --git a/docs/reference/commandline/commit.md b/docs/reference/commandline/commit.md index df64e957ac..ad25f64c84 100644 --- a/docs/reference/commandline/commit.md +++ b/docs/reference/commandline/commit.md @@ -24,6 +24,7 @@ It can be useful to commit a container's file changes or settings into a new image. This allows you debug a container by running an interactive shell, or to export a working dataset to another server. Generally, it is better to use Dockerfiles to manage your images in a documented and maintainable way. +[Read more about valid image names and tags](tag.md). The commit operation will not include any data contained in volumes mounted inside the container. diff --git a/docs/reference/commandline/cp.md b/docs/reference/commandline/cp.md index 841aeb36e0..8afabc95bc 100644 --- a/docs/reference/commandline/cp.md +++ b/docs/reference/commandline/cp.md @@ -81,7 +81,17 @@ you must be explicit with a relative or absolute path, for example: `/path/to/file:name.txt` or `./file:name.txt` It is not possible to copy certain system files such as resources under -`/proc`, `/sys`, `/dev`, and mounts created by the user in the container. +`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by +the user in the container. However, you can still copy such files by manually +running `tar` in `docker exec`. For example (consider `SRC_PATH` and `DEST_PATH` +are directories): + + $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + +or + + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - + Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. 
The command extracts the content of the tar to the `DEST_PATH` in container's diff --git a/docs/reference/commandline/create.md b/docs/reference/commandline/create.md index 3e4024374c..03d3779237 100644 --- a/docs/reference/commandline/create.md +++ b/docs/reference/commandline/create.md @@ -164,7 +164,8 @@ Linux namespaces. On Microsoft Windows, you can specify these values: | Value | Description | |-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the +daemon is running on Windows server, or `hyperv` if running on Windows client. | | `process` | Namespace isolation only. | | `hyperv` | Hyper-V hypervisor partition-based isolation. | diff --git a/docs/reference/commandline/dockerd.md b/docs/reference/commandline/dockerd.md index c3caa10fcd..2b90df9277 100644 --- a/docs/reference/commandline/dockerd.md +++ b/docs/reference/commandline/dockerd.md @@ -527,8 +527,9 @@ can specify default container isolation technology with this, for example: $ dockerd --exec-opt isolation=hyperv -Will make `hyperv` the default isolation technology on Windows, without specifying -isolation value on daemon start, Windows isolation technology will default to `process`. +Will make `hyperv` the default isolation technology on Windows. If no isolation +value is specified on daemon start, on Windows client, the default is +`hyperv`, and on Windows server, the default is `process`. ## Daemon DNS options @@ -849,6 +850,19 @@ set like this: export DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 +Docker clients <= 1.9.2 used an invalid Host header when making request to the +daemon. Docker 1.12 is built using golang 1.6 which is now checking the validity +of the Host header and as such clients <= 1.9.2 can't talk anymore to the daemon. +Docker supports overcoming this issue via a Docker daemon +environment variable. In case you are seeing this error when contacting the +daemon: + + Error response from daemon: 400 Bad Request: malformed Host header + +The `DOCKER_HTTP_HOST_COMPAT` can be set like this: + + DOCKER_HTTP_HOST_COMPAT=1 /usr/local/bin/dockerd ... 
+ ## Default cgroup parent @@ -925,7 +939,7 @@ This is a full example of the allowed configuration options in the file: "tlscacert": "", "tlscert": "", "tlskey": "", - "api-cors-headers": "", + "api-cors-header": "", "selinux-enabled": false, "userns-remap": "", "group": "", @@ -934,7 +948,7 @@ This is a full example of the allowed configuration options in the file: "ipv6": false, "iptables": false, "ip-forward": false, - "ip-mask": false, + "ip-masq": false, "userland-proxy": false, "ip": "0.0.0.0", "bridge": "", diff --git a/docs/reference/commandline/events.md b/docs/reference/commandline/events.md index 0abfdf4377..a958a3a1ed 100644 --- a/docs/reference/commandline/events.md +++ b/docs/reference/commandline/events.md @@ -35,6 +35,10 @@ Docker networks report the following events: create, connect, disconnect, destroy +Docker daemon report the following events: + + reload + The `--since` and `--until` parameters can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine’s time. If you do not provide the `--since` option, @@ -68,9 +72,10 @@ The currently supported filters are: * event (`event=`) * image (`image=`) * label (`label=` or `label==`) -* type (`type=`) +* type (`type=`) * volume (`volume=`) * network (`network=`) +* daemon (`daemon=`) ## Examples diff --git a/docs/reference/commandline/push.md b/docs/reference/commandline/push.md index 81091b1430..1e617865df 100644 --- a/docs/reference/commandline/push.md +++ b/docs/reference/commandline/push.md @@ -19,6 +19,7 @@ parent = "smn_cli" Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) registry or to a self-hosted one. +[Read more about valid image names and tags](tag.md). Killing the `docker push` process, for example by pressing `CTRL-c` while it is running in a terminal, will terminate the push operation. diff --git a/docs/reference/commandline/run.md b/docs/reference/commandline/run.md index 1e4f089931..6b66c394a8 100644 --- a/docs/reference/commandline/run.md +++ b/docs/reference/commandline/run.md @@ -618,14 +618,16 @@ On Microsoft Windows, can take any of these values: | `process` | Namespace isolation only. | | `hyperv` | Hyper-V hypervisor partition-based isolation. | -In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: - +On Windows, the default isolation for client is `hyperv`, and for server is +`process`. 
Therefore when running on Windows server without a `daemon` option +set, these two commands are equivalent: ``` $ docker run -d --isolation default busybox top $ docker run -d --isolation process busybox top ``` -If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: +If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, +if running on Windows server, any of these commands also result in `hyperv` isolation: ``` $ docker run -d --isolation default busybox top diff --git a/docs/reference/commandline/search.md b/docs/reference/commandline/search.md index 893895e214..8bca98f0c9 100644 --- a/docs/reference/commandline/search.md +++ b/docs/reference/commandline/search.md @@ -14,10 +14,12 @@ parent = "smn_cli" Search the Docker Hub for images - --automated Only show automated builds + --filter=[] Filter output based on these conditions: + - is-automated=(true|false) + - is-official=(true|false) + - stars= - image has at least 'number' stars --help Print usage --no-trunc Don't truncate output - -s, --stars=0 Only displays with at least x stars Search [Docker Hub](https://hub.docker.com) for images @@ -61,29 +63,6 @@ This example displays images with a name containing 'busybox': scottabernethy/busybox 0 [OK] marclop/busybox-solr -### Search images by name and number of stars (-s, --stars) - -This example displays images with a name containing 'busybox' and at -least 3 stars: - - $ docker search --stars=3 busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - busybox Busybox base image. 325 [OK] - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] - - -### Search automated images (--automated) - -This example displays images with a name containing 'busybox', at -least 3 stars and are automated builds: - - $ docker search --stars=3 --automated busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] - - ### Display non-truncated description (--no-trunc) This example displays images with a name containing 'busybox', @@ -95,3 +74,48 @@ at least 3 stars and the description isn't truncated in the output: progrium/busybox 50 [OK] radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK] +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* stars (int - number of stars the image has) +* is-automated (true|false) - is the image automated or not +* is-official (true|false) - is the image official or not + + +### stars + +This example displays images with a name containing 'busybox' and at +least 3 stars: + + $ docker search --filter stars=3 busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 325 [OK] + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + + +### is-automated + +This example displays images with a name containing 'busybox' +and are automated builds: + + $ docker search --filter is-automated busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 
8 [OK] + +### is-official + +This example displays images with a name containing 'busybox', at least +3 stars and are official builds: + + $ docker search --filter "is-automated=true" --filter "stars=3" busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + + diff --git a/docs/reference/commandline/tag.md b/docs/reference/commandline/tag.md index cd104e8c6a..e8d802a834 100644 --- a/docs/reference/commandline/tag.md +++ b/docs/reference/commandline/tag.md @@ -10,11 +10,57 @@ parent = "smn_cli" # tag - Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] + Usage: docker tag [OPTIONS] NAME[:TAG] NAME[:TAG] Tag an image into a repository --help Print usage +An image name is made up of slash-separated name components, optionally prefixed +by a registry hostname. The hostname must comply with standard DNS rules, but +may not contain underscores. If a hostname is present, it may optionally be +followed by a port number in the format `:8080`. If not present, the command +uses Docker's public registry located at `registry-1.docker.io` by default. Name +components may contain lowercase characters, digits and separators. A separator +is defined as a period, one or two underscores, or one or more dashes. A name +component may not start or end with a separator. + +A tag name may contain lowercase and uppercase characters, digits, underscores, +periods and dashes. A tag name may not start with a period or a dash and may +contain a maximum of 128 characters. + You can group your images together using names and tags, and then upload them to [*Share Images via Repositories*](../../userguide/containers/dockerrepos.md#contributing-to-docker-hub). + +# Examples + +## Tagging an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + + docker tag 0e5574283393 fedora/httpd:version1.0 + +## Tagging an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + + docker tag httpd fedora/httpd:version1.0 + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +## Tagging an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + + docker tag httpd:test fedora/httpd:version1.0.test + +## Tagging an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). + + docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 diff --git a/docs/reference/run.md b/docs/reference/run.md index 190b9b7601..edf1f023e5 100644 --- a/docs/reference/run.md +++ b/docs/reference/run.md @@ -630,15 +630,12 @@ with the same logic -- if the original volume was specified with a name it will You can override the default labeling scheme for each container by specifying -the `--security-opt` flag. For example, you can specify the MCS/MLS level, a -requirement for MLS systems. Specifying the level in the following command +the `--security-opt` flag. Specifying the level in the following command allows you to share the same content between containers. 
$ docker run --security-opt label=level:s0:c100,c200 -it fedora bash -An MLS example might be: - - $ docker run --security-opt label=level:TopSecret -it rhel7 bash +> **Note**: Automatic translation of MLS labels is not currently supported. To disable the security labeling for this container versus running with the `--permissive` flag, use the following command: @@ -1089,14 +1086,6 @@ one can use this flag: --privileged=false: Give extended privileges to this container --device=[]: Allows you to run devices inside the container without the --privileged flag. -> **Note:** -> With Docker 1.10 and greater, the default seccomp profile will also block -> syscalls, regardless of `--cap-add` passed to the container. We recommend in -> these cases to create your own custom seccomp profile based off our -> [default](https://github.com/docker/docker/blob/master/profiles/seccomp/default.json). -> Or if you don't want to run with the default seccomp profile, you can pass -> `--security-opt=seccomp=unconfined` on run. - By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. This is because by default a container is not allowed to access any devices, but a @@ -1214,6 +1203,11 @@ To mount a FUSE based filesystem, you need to combine both `--cap-add` and -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore .... +The default seccomp profile will adjust to the selected capabilities, in order to allow +use of facilities allowed by the capabilities, so you should not have to adjust this, +since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be necessary +to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding +capabilities. ## Logging drivers (--log-driver) @@ -1451,7 +1445,7 @@ The `host-src` can either be an absolute path or a `name` value. If you supply an absolute path for the `host-dir`, Docker bind-mounts to the path you specify. If you supply a `name`, Docker creates a named volume by that `name`. -A `name` value must start with start with an alphanumeric character, +A `name` value must start with an alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). An absolute path starts with a `/` (forward slash). diff --git a/docs/userguide/networking/default_network/container-communication.md b/docs/userguide/networking/default_network/container-communication.md index 6b93681ab6..1446260cb3 100644 --- a/docs/userguide/networking/default_network/container-communication.md +++ b/docs/userguide/networking/default_network/container-communication.md @@ -37,6 +37,9 @@ or to turn it on manually: net.ipv4.conf.all.forwarding = 1 ``` +> **Note**: this setting does not affect containers that use the host +> network stack (`--net=host`). + Many using Docker will want `ip_forward` to be on, to at least make communication _possible_ between containers and the wider world. May also be needed for inter-container communication if you are in a multiple bridge setup. diff --git a/docs/userguide/networking/dockernetworks.md b/docs/userguide/networking/dockernetworks.md index 465470e737..22b8b346a3 100644 --- a/docs/userguide/networking/dockernetworks.md +++ b/docs/userguide/networking/dockernetworks.md @@ -57,7 +57,7 @@ docker0 Link encap:Ethernet HWaddr 02:42:47:bc:3a:eb RX bytes:1100 (1.1 KB) TX bytes:648 (648.0 B) ``` -The `none` network adds a container to a container-specific network stack. That container lacks a network interface. 
Attaching to such a container and looking at it's stack you see this: +The `none` network adds a container to a container-specific network stack. That container lacks a network interface. Attaching to such a container and looking at its stack you see this: ``` $ docker attach nonenetcontainer diff --git a/hack/make.sh b/hack/make.sh index e103a1aad8..3624de0e16 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -93,7 +93,7 @@ if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; the git status --porcelain --untracked-files=no echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" fi - ! BUILDTIME=$(date --rfc-3339 ns | sed -e 's/ /T/') &> /dev/null + ! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') &> /dev/null if [ -z $BUILDTIME ]; then # If using bash 3.1 which doesn't support --rfc-3389, eg Windows CI BUILDTIME=$(date -u) @@ -113,6 +113,12 @@ if [ "$AUTO_GOPATH" ]; then mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" export GOPATH="${PWD}/.gopath:${PWD}/vendor" + + if [ "$(go env GOOS)" = 'solaris' ]; then + # sys/unix is installed outside the standard library on solaris + # TODO need to allow for version change, need to get version from go + export GOPATH="${GOPATH}:/usr/lib/gocode/1.5" + fi fi if [ ! "$GOPATH" ]; then diff --git a/hack/vendor.sh b/hack/vendor.sh index b041665556..8e5ceeb7f9 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -1,10 +1,46 @@ #!/usr/bin/env bash set -e +# this script is used to update vendored dependencies +# +# Usage: +# vendor.sh revendor all dependencies +# vendor.sh github.com/docker/engine-api revendor only the engine-api dependency. +# vendor.sh github.com/docker/engine-api v0.3.3 vendor only engine-api at the specified tag/commit. +# vendor.sh git github.com/docker/engine-api v0.3.3 is the same but specifies the VCS for cases where the VCS is something else than git +# vendor.sh git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git vendor only golang.org/x/sys downloading from the specified URL + cd "$(dirname "$BASH_SOURCE")/.." -rm -rf vendor/ source 'hack/.vendor-helpers.sh' +case $# in +0) + rm -rf vendor/ + ;; +# If user passed arguments to the script +1) + eval "$(grep -E "^clone [^ ]+ $1" "$0")" + clean + exit 0 + ;; +2) + rm -rf "vendor/src/$1" + clone git "$1" "$2" + clean + exit 0 + ;; +[34]) + rm -rf "vendor/src/$2" + clone "$@" + clean + exit 0 + ;; +*) + >&2 echo "error: unexpected parameters" + exit 1 + ;; +esac + # the following lines are in sorted order, FYI clone git github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 clone git github.com/Microsoft/hcsshim v0.2.2 diff --git a/image/spec/v1.1.md b/image/spec/v1.1.md new file mode 100644 index 0000000000..0b7d5395f6 --- /dev/null +++ b/image/spec/v1.1.md @@ -0,0 +1,639 @@ +# Docker Image Specification v1.1.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +This version of the image specification was adopted starting in Docker 1.10. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers
+ with a single identifier. This is called a ChainID. For a
+ single layer (or the layer at the bottom of a stack), the
+ ChainID is equal to the layer's DiffID.
+ Otherwise the ChainID is given by the formula:
+ ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)).
+ A worked sketch of this computation follows this terminology section.
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is
+ represented as a hexadecimal encoding of 256 bits, e.g.,
+ sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9.
+ Since the configuration JSON that gets hashed references hashes of each
+ layer in the image, this formulation of the ImageID makes images
+ content-addressable.
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z_0-9]. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component
+ before :). For example, in an image tagged with the name
+ my-app:3.1.4, my-app is the Repository
+ component of the name. A repository name is made up of slash-separated
+ name components, optionally prefixed by a DNS hostname. The hostname
+ must comply with standard DNS rules, but may not contain
+ _ characters. If a hostname is present, it may optionally
+ be followed by a port number in the format :8080.
+ Name components may contain lowercase characters, digits, and
+ separators. A separator is defined as a period, one or two underscores,
+ or one or more dashes. A name component may not start or end with
+ a separator.
+
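+
+ For illustration, here is a minimal Go sketch (not part of the
+ specification) of the DiffID, ChainID and ImageID calculations described
+ above. The function names and the layer.tar file name are illustrative,
+ and it assumes the ChainID formula concatenates the textual IDs including
+ their sha256: prefix.
+
+```
+package main
+
+import (
+    "crypto/sha256"
+    "fmt"
+    "io"
+    "os"
+)
+
+// diffID returns the DiffID of an *uncompressed* layer tar stream:
+// the SHA256 digest of the tar bytes, in sha256:<hex> form.
+func diffID(layerTar io.Reader) (string, error) {
+    h := sha256.New()
+    if _, err := io.Copy(h, layerTar); err != nil {
+        return "", err
+    }
+    return fmt.Sprintf("sha256:%x", h.Sum(nil)), nil
+}
+
+// chainID folds a stack of DiffIDs (bottom-most first) into a ChainID using
+// ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)).
+func chainID(diffIDs []string) string {
+    if len(diffIDs) == 0 {
+        return ""
+    }
+    id := diffIDs[0]
+    for _, d := range diffIDs[1:] {
+        id = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id+" "+d)))
+    }
+    return id
+}
+
+// imageID returns the ImageID: the SHA256 digest of the configuration JSON.
+func imageID(configJSON []byte) string {
+    return fmt.Sprintf("sha256:%x", sha256.Sum256(configJSON))
+}
+
+func main() {
+    f, err := os.Open("layer.tar") // hypothetical uncompressed layer archive
+    if err != nil {
+        panic(err)
+    }
+    defer f.Close()
+
+    d, err := diffID(f)
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println("DiffID: ", d)
+    fmt.Println("ChainID:", chainID([]string{d}))
+}
+```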
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • amd64
  • arm
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • freebsd
  • linux
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • uid
  • user:group
  • uid:gid
  • uid:group
  • user:gid
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image.
+ This JSON structure value is unusual because it is a direct
+ JSON serialization of the Go type
+ map[string]struct{} and is represented in JSON as
+ an object mapping its keys to an empty object (a Go sketch of this
+ serialization appears after these field descriptions). Here is an
+ example:
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ Its keys can be in the format of:
+
  • "port/tcp"
  • "port/udp"
  • "port"
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {}
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the + image. This makes the image config hash depend on the filesystem hash. + rootfs has two subkeys: + +
    +
  • type is usually set to layers. There is also a Windows-specific value
    layers+base that allows a base layer to be specified in a field of
    rootfs called base_layer.
  • diff_ids is an array of layer content hashes (DiffIDs), in order from
    bottom-most to top-most.
+
+ Here is an example rootfs section:
"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}
+
+
+ history struct +
+
+ history is an array of objects describing the history of + each layer. The array is ordered from bottom-most layer to top-most + layer. The object has the following fields. + +
    +
  • created: Creation time, expressed as an ISO-8601 formatted combined
    date and time
  • author: The author of the build point
  • created_by: The command which created the layer
  • comment: A custom message set when creating the layer
  • empty_layer: This field is used to mark if the history item created a
    filesystem diff. It is set to true if this history item doesn't
    correspond to an actual layer in the rootfs section (for example, a
    command like ENV which results in no change to the filesystem).
+
+ Here is an example history section:
"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]
+
+
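+
+ As noted for the ExposedPorts and Volumes fields above, those values are
+ direct JSON serializations of the Go type map[string]struct{}. The
+ following minimal sketch is illustrative only (the config struct and its
+ JSON tags are assumptions, not part of the specification); it shows how
+ such a map marshals to an object whose keys map to empty objects:
+
+```
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+)
+
+// config mirrors only the two unusual fields discussed above.
+type config struct {
+    ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+    Volumes      map[string]struct{} `json:"Volumes,omitempty"`
+}
+
+func main() {
+    c := config{
+        ExposedPorts: map[string]struct{}{
+            "8080/tcp": {},
+            "53/udp":   {},
+        },
+        Volumes: map[string]struct{}{
+            "/var/my-app-data/": {},
+        },
+    }
+
+    out, err := json.MarshalIndent(c, "", "  ")
+    if err != nil {
+        panic(err)
+    }
+    // Each key maps to an empty JSON object, e.g. "8080/tcp": {}
+    fmt.Println(string(out))
+}
+```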
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. 
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatiblity. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/integration-cli/daemon.go b/integration-cli/daemon.go index 35ca0b081a..ca85888ed4 100644 --- a/integration-cli/daemon.go +++ b/integration-cli/daemon.go @@ -486,3 +486,8 @@ func (d *Daemon) findContainerIP(id string) string { } return strings.Trim(out, " \r\n'") } + +func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) { + buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...) 
+ return runCommandWithOutput(buildCmd) +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index f48366c19f..2c81e18d42 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -862,138 +862,6 @@ RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); } } -func (s *DockerSuite) TestBuildAddMultipleFilesToFile(c *check.C) { - name := "testaddmultiplefilestofile" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD file1.txt file2.txt test - `, - map[string]string{ - "file1.txt": "test1", - "file2.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - -func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFile(c *check.C) { - name := "testjsonaddmultiplefilestofile" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD ["file1.txt", "file2.txt", "test"] - `, - map[string]string{ - "file1.txt": "test1", - "file2.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - -func (s *DockerSuite) TestBuildAddMultipleFilesToFileWild(c *check.C) { - name := "testaddmultiplefilestofilewild" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD file*.txt test - `, - map[string]string{ - "file1.txt": "test1", - "file2.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - -func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFileWild(c *check.C) { - name := "testjsonaddmultiplefilestofilewild" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD ["file*.txt", "test"] - `, - map[string]string{ - "file1.txt": "test1", - "file2.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - -func (s *DockerSuite) TestBuildCopyMultipleFilesToFile(c *check.C) { - name := "testcopymultiplefilestofile" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - COPY file1.txt file2.txt test - `, - map[string]string{ - "file1.txt": "test1", - "file2.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - 
c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - -func (s *DockerSuite) TestBuildJSONCopyMultipleFilesToFile(c *check.C) { - name := "testjsoncopymultiplefilestofile" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - COPY ["file1.txt", "file2.txt", "test"] - `, - map[string]string{ - "file1.txt": "test1", - "file2.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) // Not currently passing on Windows name := "testaddfilewithwhitespace" @@ -1066,48 +934,6 @@ RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, } } -func (s *DockerSuite) TestBuildAddMultipleFilesToFileWithWhitespace(c *check.C) { - name := "testaddmultiplefilestofilewithwhitespace" - ctx, err := fakeContext(`FROM busybox - ADD [ "test file1", "test file2", "test" ] - `, - map[string]string{ - "test file1": "test1", - "test file2": "test2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - -func (s *DockerSuite) TestBuildCopyMultipleFilesToFileWithWhitespace(c *check.C) { - name := "testcopymultiplefilestofilewithwhitespace" - ctx, err := fakeContext(`FROM busybox - COPY [ "test file1", "test file2", "test" ] - `, - map[string]string{ - "test file1": "test1", - "test file2": "test2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" - if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) - } - -} - func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet name := "testcopywildcard" @@ -1159,26 +985,6 @@ func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { } -func (s *DockerSuite) TestBuildCopyWildcardNoFind(c *check.C) { - name := "testcopywildcardnofind" - ctx, err := fakeContext(`FROM busybox - COPY file*.txt /tmp/ - `, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err == nil { - c.Fatal("should have failed to find a file") - } - if !strings.Contains(err.Error(), "No source files were specified") { - c.Fatalf("Wrong error %v, must be about no source files", err) - } - -} - func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { name := "testcopywildcardinname" ctx, err := fakeContext(`FROM busybox @@ -1580,17 +1386,6 @@ COPY . 
/`, } } -func (s *DockerSuite) TestBuildCopyDisallowRemote(c *check.C) { - name := "testcopydisallowremote" - - _, out, err := buildImageWithOut(name, `FROM `+minimalBaseImage()+` -COPY https://index.docker.io/robots.txt /`, - true) - if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") { - c.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out) - } -} - func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { testRequires(c, DaemonIsLinux) // Not currently working on Windows @@ -3289,18 +3084,6 @@ func (s *DockerSuite) TestBuildFails(c *check.C) { } } -func (s *DockerSuite) TestBuildFailsDockerfileEmpty(c *check.C) { - name := "testbuildfails" - _, err := buildImage(name, ``, true) - if err != nil { - if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") { - c.Fatalf("Wrong error %v, must be about empty Dockerfile", err) - } - } else { - c.Fatal("Error must not be nil") - } -} - func (s *DockerSuite) TestBuildOnBuild(c *check.C) { name := "testbuildonbuild" _, err := buildImage(name, @@ -3319,21 +3102,6 @@ func (s *DockerSuite) TestBuildOnBuild(c *check.C) { } } -func (s *DockerSuite) TestBuildOnBuildForbiddenChained(c *check.C) { - name := "testbuildonbuildforbiddenchained" - _, err := buildImage(name, - `FROM busybox - ONBUILD ONBUILD RUN touch foobar`, - true) - if err != nil { - if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { - c.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) - } - } else { - c.Fatal("Error must not be nil") - } -} - // gh #2446 func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { testRequires(c, DaemonIsLinux) @@ -4564,16 +4332,6 @@ func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { } -func (s *DockerSuite) TestBuildErrorInvalidInstruction(c *check.C) { - name := "testbuildignoreinvalidinstruction" - - out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true) - if err == nil { - c.Fatalf("Should have failed: %s", out) - } - -} - func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { if _, err := buildImage("parent", ` diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index c3fa3edd01..f4a352ec06 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -1616,35 +1616,6 @@ func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out)) } -// os.Kill should kill daemon ungracefully, leaving behind container mounts. -// A subsequent daemon restart shoud clean up said mounts. -func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonKill(c *check.C) { - testRequires(c, NotExperimentalDaemon) - c.Assert(s.d.StartWithBusybox(), check.IsNil) - - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - id := strings.TrimSpace(out) - c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) - mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - - // container mounts should exist even after daemon has crashed. 
- comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) - - // restart daemon. - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } - - // Now, container mounts should be gone. - mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) -} - func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { if err := s.d.StartWithBusybox(); err != nil { t.Fatal(err) @@ -2349,3 +2320,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) } + +func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { + err := s.d.Start("-b=none", "--iptables=false") + c.Assert(err, check.IsNil) + s.d.c.Logf("dockerBinary %s", dockerBinary) + out, code, err := s.d.buildImageWithOut("busyboxs", + `FROM busybox + RUN cat /etc/hosts`, false) + comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) +} diff --git a/integration-cli/docker_cli_events_unix_test.go b/integration-cli/docker_cli_events_unix_test.go index 8ba67f1f67..49c7533a4b 100644 --- a/integration-cli/docker_cli_events_unix_test.go +++ b/integration-cli/docker_cli_events_unix_test.go @@ -9,6 +9,7 @@ import ( "os" "os/exec" "strings" + "syscall" "time" "unicode" @@ -366,3 +367,99 @@ func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) { c.Assert(events[0], checker.Contains, "test-event-network-local") c.Assert(events[0], checker.Contains, "type=bridge") } + +func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, 
labels=[\"bar=foo\"], max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s)", daemonID, daemonName)) +} + +func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=container") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) +} diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go index e178062909..4872c2b909 100644 --- a/integration-cli/docker_cli_run_unix_test.go +++ b/integration-cli/docker_cli_run_unix_test.go @@ -948,10 +948,10 @@ func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) var group sync.WaitGroup - group.Add(4) - errChan := make(chan error, 4) + group.Add(11) + errChan := make(chan error, 11) go func() { - out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test") + out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test") if err == nil || !strings.Contains(out, "Operation not permitted") { errChan <- fmt.Errorf("expected Operation not permitted, got: %s", out) } @@ -959,13 +959,69 @@ func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) { }() go func() { - out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello") + out, _, err := dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test") if err == nil || !strings.Contains(out, "Operation not permitted") { errChan <- fmt.Errorf("expected Operation not permitted, got: %s", out) } group.Done() }() + go func() { + out, 
_, err := dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + errChan <- fmt.Errorf("expected No such file or directory, got: %s", out) + } + group.Done() + }() + + go func() { + out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + errChan <- fmt.Errorf("expected No such file or directory, got: %s", out) + } + group.Done() + }() + + go func() { + out, _, err := dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + errChan <- fmt.Errorf("expected No such file or directory, got: %s", out) + } + group.Done() + }() + + go func() { + out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0") + if err == nil || !strings.Contains(out, "Operation not permitted") { + errChan <- fmt.Errorf("expected Operation not permitted, got: %s", out) + } + group.Done() + }() + + go func() { + out, _, err := dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1") + if err != nil || !strings.Contains(out, "hello1") { + errChan <- fmt.Errorf("expected hello1, got: %s, %v", out, err) + } + group.Done() + }() + + go func() { + out, _, err := dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2") + if err != nil || !strings.Contains(out, "hello2") { + errChan <- fmt.Errorf("expected hello2, got: %s, %v", out, err) + } + group.Done() + }() + + go func() { + out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3") + if err != nil || !strings.Contains(out, "hello3") { + errChan <- fmt.Errorf("expected hello3, got: %s, %v", out, err) + } + group.Done() + }() + go func() { out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test") if err == nil || !strings.Contains(out, "No such file or directory") { @@ -975,9 +1031,9 @@ func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) { }() go func() { - out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello") - if err != nil || !strings.Contains(out, "hello") { - errChan <- fmt.Errorf("expected hello, got: %s, %v", out, err) + out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4") + if err != nil || !strings.Contains(out, "hello4") { + errChan <- fmt.Errorf("expected hello4, got: %s, %v", out, err) } group.Done() }() diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go index dfab81044a..a93d657265 100644 --- a/integration-cli/docker_cli_search_test.go +++ b/integration-cli/docker_cli_search_test.go @@ -16,34 +16,78 @@ func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { } func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { - out, _, err := dockerCmdWithError("search", "--stars=a", "busybox") + out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, 
_, err = dockerCmdWithError("search", "-f", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "is-automated=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + // -s --stars deprecated since Docker 1.13 + out, _, err = dockerCmdWithError("search", "--stars=a", "busybox") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "invalid value", check.Commentf("couldn't find the invalid value warning")) + // -s --stars deprecated since Docker 1.13 out, _, err = dockerCmdWithError("search", "-s=-1", "busybox") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "invalid value", check.Commentf("couldn't find the invalid value warning")) } func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { - testRequires(c, Network) + testRequires(c, Network, DaemonIsLinux) out, _ := dockerCmd(c, "search", "--help") c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM") outSearchCmd, _ := dockerCmd(c, "search", "busybox") outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox") + c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The no-trunc option can't take effect.")) - outSearchCmdautomated, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. + outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") for i := range outSearchCmdautomatedSlice { - c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", out)) + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) } - outSearchCmdStars, _ := dockerCmd(c, "search", "-s=2", "busybox") + outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n") + for i := range outSearchCmdNotOfficialSlice { + c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)) + } + + outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image. 
+ outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n") + c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 line, 1 carriage return + c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("The busybox is an OFFICIAL image: %s", outSearchCmdNotOfficial)) + + outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox") c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)) + dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox") + + // --automated deprecated since Docker 1.13 + outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. + outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n") + for i := range outSearchCmdautomatedSlice1 { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + // -s --stars deprecated since Docker 1.13 + outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1)) + + // -s --stars deprecated since Docker 1.13 dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") } diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 7e30860d92..c2d3fcb153 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -930,7 +930,15 @@ func getContainerState(c *check.C, id string) (int, bool, error) { } func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { - args := []string{"build", "-t", name} + return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) +} + +func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { + args := []string{} + if host != "" { + args = append(args, "--host", host) + } + args = append(args, "build", "-t", name) if !useCache { args = append(args, "--no-cache") } diff --git a/layer/layer.go b/layer/layer.go index 5100fe2dee..5d3b8c672a 100644 --- a/layer/layer.go +++ b/layer/layer.go @@ -174,7 +174,6 @@ type Store interface { CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) GetRWLayer(id string) (RWLayer, error) GetMountID(id string) (string, error) - ReinitRWLayer(l RWLayer) error ReleaseRWLayer(RWLayer) ([]Metadata, error) Cleanup() error diff --git a/layer/layer_store.go b/layer/layer_store.go index f18aff2145..8c3d0a4911 100644 --- a/layer/layer_store.go +++ b/layer/layer_store.go @@ -495,25 +495,6 @@ func (ls *layerStore) GetMountID(id string) (string, error) { return mount.mountID, nil } -// ReinitRWLayer reinitializes a given mount to the layerstore, specifically -// initializing the usage count. It should strictly only be used in the -// daemon's restore path to restore state of live containers. 
-func (ls *layerStore) ReinitRWLayer(l RWLayer) error { - ls.mountL.Lock() - defer ls.mountL.Unlock() - - m, ok := ls.mounts[l.Name()] - if !ok { - return ErrMountDoesNotExist - } - - if err := m.incActivityCount(l); err != nil { - return err - } - - return nil -} - func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { ls.mountL.Lock() defer ls.mountL.Unlock() diff --git a/layer/layer_test.go b/layer/layer_test.go index 85687cebb4..8e6817c96a 100644 --- a/layer/layer_test.go +++ b/layer/layer_test.go @@ -174,10 +174,7 @@ func getCachedLayer(l Layer) *roLayer { } func getMountLayer(l RWLayer) *mountedLayer { - if rl, ok := l.(*referencedRWLayer); ok { - return rl.mountedLayer - } - return l.(*mountedLayer) + return l.(*referencedRWLayer).mountedLayer } func createMetadata(layers ...Layer) []Metadata { @@ -400,14 +397,11 @@ func TestStoreRestore(t *testing.T) { if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { t.Fatal(err) } - assertActivityCount(t, m, 1) if err := m.Unmount(); err != nil { t.Fatal(err) } - assertActivityCount(t, m, 0) - ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) if err != nil { t.Fatal(err) @@ -438,20 +432,15 @@ func TestStoreRestore(t *testing.T) { t.Fatalf("Unexpected path %s, expected %s", mountPath, path) } - assertActivityCount(t, m2, 1) - if mountPath, err := m2.Mount(""); err != nil { t.Fatal(err) } else if path != mountPath { t.Fatalf("Unexpected path %s, expected %s", mountPath, path) } - assertActivityCount(t, m2, 2) if err := m2.Unmount(); err != nil { t.Fatal(err) } - assertActivityCount(t, m2, 1) - b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) if err != nil { t.Fatal(err) @@ -464,8 +453,6 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } - assertActivityCount(t, m2, 0) - if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { t.Fatal(err) } else if len(metadata) != 0 { @@ -674,13 +661,6 @@ func assertReferences(t *testing.T, references ...Layer) { } } -func assertActivityCount(t *testing.T, l RWLayer, expected int) { - rl := l.(*referencedRWLayer) - if rl.activityCount != expected { - t.Fatalf("Unexpected activity count %d, expected %d", rl.activityCount, expected) - } -} - func TestRegisterExistingLayer(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() diff --git a/layer/layer_unix.go b/layer/layer_unix.go index d77e2fc66e..776b78ac02 100644 --- a/layer/layer_unix.go +++ b/layer/layer_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd darwin openbsd +// +build linux freebsd darwin openbsd solaris package layer diff --git a/layer/migration_test.go b/layer/migration_test.go index 7e2e2a6489..50ea6407bb 100644 --- a/layer/migration_test.go +++ b/layer/migration_test.go @@ -380,8 +380,6 @@ func TestMountMigration(t *testing.T) { Kind: archive.ChangeAdd, }) - assertActivityCount(t, rwLayer1, 1) - if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil { t.Fatal("Expected error creating mount with same name") } else if err != ErrMountNameConflict { @@ -401,16 +399,10 @@ func TestMountMigration(t *testing.T) { t.Fatal(err) } - assertActivityCount(t, rwLayer2, 1) - assertActivityCount(t, rwLayer1, 1) - if _, err := rwLayer2.Mount(""); err != nil { t.Fatal(err) } - assertActivityCount(t, rwLayer2, 2) - assertActivityCount(t, rwLayer1, 1) - if metadata, err := ls.Release(layer1); err != nil { t.Fatal(err) } else if len(metadata) > 0 { @@ -420,8 +412,6 @@ func TestMountMigration(t 
*testing.T) { if err := rwLayer1.Unmount(); err != nil { t.Fatal(err) } - assertActivityCount(t, rwLayer2, 2) - assertActivityCount(t, rwLayer1, 0) if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil { t.Fatal(err) @@ -430,9 +420,6 @@ func TestMountMigration(t *testing.T) { if err := rwLayer2.Unmount(); err != nil { t.Fatal(err) } - if _, err := ls.ReleaseRWLayer(rwLayer2); err == nil { - t.Fatal("Expected error deleting active mount") - } if err := rwLayer2.Unmount(); err != nil { t.Fatal(err) } diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go index 5a07fd08ea..add33d9f19 100644 --- a/layer/mounted_layer.go +++ b/layer/mounted_layer.go @@ -2,7 +2,6 @@ package layer import ( "io" - "sync" "github.com/docker/docker/pkg/archive" ) @@ -50,14 +49,6 @@ func (ml *mountedLayer) Parent() Layer { return nil } -func (ml *mountedLayer) Mount(mountLabel string) (string, error) { - return ml.layerStore.driver.Get(ml.mountID, mountLabel) -} - -func (ml *mountedLayer) Unmount() error { - return ml.layerStore.driver.Put(ml.mountID) -} - func (ml *mountedLayer) Size() (int64, error) { return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) } @@ -83,106 +74,30 @@ func (ml *mountedLayer) hasReferences() bool { return len(ml.references) > 0 } -func (ml *mountedLayer) incActivityCount(ref RWLayer) error { - rl, ok := ml.references[ref] - if !ok { - return ErrLayerNotRetained - } - - if err := rl.acquire(); err != nil { - return err - } - return nil -} - func (ml *mountedLayer) deleteReference(ref RWLayer) error { - rl, ok := ml.references[ref] - if !ok { + if _, ok := ml.references[ref]; !ok { return ErrLayerNotRetained } - - if err := rl.release(); err != nil { - return err - } delete(ml.references, ref) - return nil } func (ml *mountedLayer) retakeReference(r RWLayer) { if ref, ok := r.(*referencedRWLayer); ok { - ref.activityCount = 0 ml.references[ref] = ref } } type referencedRWLayer struct { *mountedLayer - - activityL sync.Mutex - activityCount int -} - -func (rl *referencedRWLayer) acquire() error { - rl.activityL.Lock() - defer rl.activityL.Unlock() - - rl.activityCount++ - - return nil -} - -func (rl *referencedRWLayer) release() error { - rl.activityL.Lock() - defer rl.activityL.Unlock() - - if rl.activityCount > 0 { - return ErrActiveMount - } - - rl.activityCount = -1 - - return nil } func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { - rl.activityL.Lock() - defer rl.activityL.Unlock() - - if rl.activityCount == -1 { - return "", ErrLayerNotRetained - } - - if rl.activityCount > 0 { - rl.activityCount++ - return rl.path, nil - } - - m, err := rl.mountedLayer.Mount(mountLabel) - if err == nil { - rl.activityCount++ - rl.path = m - } - return m, err + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) } // Unmount decrements the activity count and unmounts the underlying layer // Callers should only call `Unmount` once per call to `Mount`, even on error. 
func (rl *referencedRWLayer) Unmount() error { - rl.activityL.Lock() - defer rl.activityL.Unlock() - - if rl.activityCount == 0 { - return ErrNotMounted - } - if rl.activityCount == -1 { - return ErrLayerNotRetained - } - - rl.activityCount-- - if rl.activityCount > 0 { - return nil - } - - return rl.mountedLayer.Unmount() + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) } diff --git a/libcontainerd/client_linux.go b/libcontainerd/client_linux.go index 6422eb619e..165597b9a6 100644 --- a/libcontainerd/client_linux.go +++ b/libcontainerd/client_linux.go @@ -13,7 +13,7 @@ import ( containerd "github.com/docker/containerd/api/grpc/types" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" - "github.com/opencontainers/specs/specs-go" + specs "github.com/opencontainers/specs/specs-go" "golang.org/x/net/context" ) @@ -380,6 +380,81 @@ func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { return w } +func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is already active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) + container.systemPid = systemPid(cont) + + var terminal bool + for _, p := range cont.Processes { + if p.Pid == InitFriendlyName { + terminal = p.Terminal + } + } + + iopipe, err := container.openFifos(terminal) + if err != nil { + return err + } + + if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil { + return err + } + + clnt.appendContainer(container) + + err = clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateRestore, + Pid: container.systemPid, + }}) + + if err != nil { + return err + } + + if event, ok := clnt.remote.pastEvents[containerID]; ok { + // This should only be a pause or resume event + if event.Type == StatePause || event.Type == StateResume { + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: event.Type, + Pid: container.systemPid, + }}) + } + + logrus.Warnf("unexpected backlog event: %#v", event) + } + + return nil +} + +func (clnt *client) Restore(containerID string, options ...CreateOption) error { + cont, err := clnt.getContainerdContainer(containerID) + if err == nil && cont.Status != "stopped" { + if err := clnt.restore(cont, options...); err != nil { + logrus.Errorf("error restoring %s: %v", containerID, err) + } + return nil + } + return clnt.setExited(containerID) +} + type exitNotifier struct { id string client *client diff --git a/libcontainerd/client_liverestore_linux.go b/libcontainerd/client_liverestore_linux.go deleted file mode 100644 index 2d6c2b257f..0000000000 --- a/libcontainerd/client_liverestore_linux.go +++ /dev/null @@ -1,85 +0,0 @@ -// +build experimental - -package libcontainerd - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" -) - -func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) { - clnt.lock(cont.Id) - defer clnt.unlock(cont.Id) - - logrus.Debugf("restore container %s state %s", cont.Id, cont.Status) - - containerID := cont.Id - if _, err := 
clnt.getContainer(containerID); err == nil { - return fmt.Errorf("container %s is already active", containerID) - } - - defer func() { - if err != nil { - clnt.deleteContainer(cont.Id) - } - }() - - container := clnt.newContainer(cont.BundlePath, options...) - container.systemPid = systemPid(cont) - - var terminal bool - for _, p := range cont.Processes { - if p.Pid == InitFriendlyName { - terminal = p.Terminal - } - } - - iopipe, err := container.openFifos(terminal) - if err != nil { - return err - } - - if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil { - return err - } - - clnt.appendContainer(container) - - err = clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateRestore, - Pid: container.systemPid, - }}) - - if err != nil { - return err - } - - if event, ok := clnt.remote.pastEvents[containerID]; ok { - // This should only be a pause or resume event - if event.Type == StatePause || event.Type == StateResume { - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: event.Type, - Pid: container.systemPid, - }}) - } - - logrus.Warnf("unexpected backlog event: %#v", event) - } - - return nil -} - -func (clnt *client) Restore(containerID string, options ...CreateOption) error { - cont, err := clnt.getContainerdContainer(containerID) - if err == nil && cont.Status != "stopped" { - if err := clnt.restore(cont, options...); err != nil { - logrus.Errorf("error restoring %s: %v", containerID, err) - } - return nil - } - return clnt.setExited(containerID) -} diff --git a/libcontainerd/client_shutdownrestore_linux.go b/libcontainerd/client_shutdownrestore_linux.go deleted file mode 100644 index 52ea2a6180..0000000000 --- a/libcontainerd/client_shutdownrestore_linux.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !experimental - -package libcontainerd - -import ( - "syscall" - "time" - - "github.com/Sirupsen/logrus" -) - -func (clnt *client) Restore(containerID string, options ...CreateOption) error { - w := clnt.getOrCreateExitNotifier(containerID) - defer w.close() - cont, err := clnt.getContainerdContainer(containerID) - if err == nil && cont.Status != "stopped" { - clnt.lock(cont.Id) - container := clnt.newContainer(cont.BundlePath) - container.systemPid = systemPid(cont) - clnt.appendContainer(container) - clnt.unlock(cont.Id) - - if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { - logrus.Errorf("error sending sigterm to %v: %v", containerID, err) - } - select { - case <-time.After(10 * time.Second): - if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { - logrus.Errorf("error sending sigkill to %v: %v", containerID, err) - } - select { - case <-time.After(2 * time.Second): - case <-w.wait(): - return nil - } - case <-w.wait(): - return nil - } - } - return clnt.setExited(containerID) -} diff --git a/libcontainerd/client_solaris.go b/libcontainerd/client_solaris.go new file mode 100644 index 0000000000..ea8c5e1828 --- /dev/null +++ b/libcontainerd/client_solaris.go @@ -0,0 +1,56 @@ +package libcontainerd + +type client struct { + clientCommon + + // Platform specific properties below here. 
+} + +func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Process) error { + return nil +} + +func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) { + return nil +} + +func (clnt *client) Signal(containerID string, sig int) error { + return nil +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + return nil +} + +func (clnt *client) Pause(containerID string) error { + return nil +} + +func (clnt *client) Resume(containerID string) error { + return nil +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + return nil, nil +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error { + return nil +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + return nil, nil +} + +// Summary returns a summary of the processes running in a container. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +// UpdateResources updates resources for a running container. +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Solaris + // but we should return nil for enabling updating container + return nil +} diff --git a/libcontainerd/client_windows.go b/libcontainerd/client_windows.go index fac9a35683..ddf8543e33 100644 --- a/libcontainerd/client_windows.go +++ b/libcontainerd/client_windows.go @@ -291,6 +291,9 @@ func (clnt *client) AddProcess(containerID, processFriendlyName string, procToAd return err } + // TEMP: Work around Windows BS/DEL behavior. + iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, procToAdd.Terminal) + // Convert io.ReadClosers to io.Readers if stdout != nil { iopipe.Stdout = openReaderFromPipe(stdout) diff --git a/libcontainerd/container_solaris.go b/libcontainerd/container_solaris.go new file mode 100644 index 0000000000..24ab1de03b --- /dev/null +++ b/libcontainerd/container_solaris.go @@ -0,0 +1,5 @@ +package libcontainerd + +type container struct { + containerCommon +} diff --git a/libcontainerd/container_windows.go b/libcontainerd/container_windows.go index ec35746b5c..e1f64a1704 100644 --- a/libcontainerd/container_windows.go +++ b/libcontainerd/container_windows.go @@ -102,6 +102,9 @@ func (ctr *container) start() error { } ctr.startedAt = time.Now() + // TEMP: Work around Windows BS/DEL behavior. + iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, ctr.ociSpec.Process.Terminal) + // Convert io.ReadClosers to io.Readers if stdout != nil { iopipe.Stdout = openReaderFromPipe(stdout) diff --git a/libcontainerd/process_solaris.go b/libcontainerd/process_solaris.go new file mode 100644 index 0000000000..2ee9b25662 --- /dev/null +++ b/libcontainerd/process_solaris.go @@ -0,0 +1,6 @@ +package libcontainerd + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon +} diff --git a/libcontainerd/process_windows.go b/libcontainerd/process_windows.go index 0371aec91f..12593bba67 100644 --- a/libcontainerd/process_windows.go +++ b/libcontainerd/process_windows.go @@ -2,6 +2,8 @@ package libcontainerd import ( "io" + + "github.com/docker/docker/pkg/system" ) // process keeps the state for both main container process and exec process. 
@@ -25,3 +27,33 @@ func openReaderFromPipe(p io.ReadCloser) io.Reader { }() return r } + +// fixStdinBackspaceBehavior works around a bug in Windows before build 14350 +// where it interpreted DEL as VK_DELETE instead of as VK_BACK. This replaces +// DEL with BS to work around this. +func fixStdinBackspaceBehavior(w io.WriteCloser, tty bool) io.WriteCloser { + if !tty || system.GetOSVersion().Build >= 14350 { + return w + } + return &delToBsWriter{w} +} + +type delToBsWriter struct { + io.WriteCloser +} + +func (w *delToBsWriter) Write(b []byte) (int, error) { + const ( + backspace = 0x8 + del = 0x7f + ) + bc := make([]byte, len(b)) + for i, c := range b { + if c == del { + bc[i] = backspace + } else { + bc[i] = c + } + } + return w.WriteCloser.Write(bc) +} diff --git a/libcontainerd/remote_solaris.go b/libcontainerd/remote_solaris.go new file mode 100644 index 0000000000..bd115485ad --- /dev/null +++ b/libcontainerd/remote_solaris.go @@ -0,0 +1,25 @@ +package libcontainerd + +import "github.com/docker/docker/pkg/locker" + +type remote struct { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + } + return c, nil +} + +func (r *remote) Cleanup() { +} + +// New creates a fresh instance of libcontainerd remote. +func New(_ string, _ ...RemoteOption) (Remote, error) { + return &remote{}, nil +} diff --git a/libcontainerd/types_solaris.go b/libcontainerd/types_solaris.go new file mode 100644 index 0000000000..637e54300f --- /dev/null +++ b/libcontainerd/types_solaris.go @@ -0,0 +1,38 @@ +package libcontainerd + +import ( + "github.com/opencontainers/specs/specs-go" +) + +// Spec is the base configuration for the container. It specifies platform +// independent configuration. This information must be included when the +// bundle is packaged for distribution. +type Spec specs.Spec + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` +} + +// Stats contains a stats properties from containerd. +type Stats struct{} + +// Summary container a container summary from containerd +type Summary struct{} + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo +} + +// User specifies Solaris specific user and group information for the container's +// main process. +type User specs.User + +// Resources defines updatable container resource values. +type Resources struct{} diff --git a/man/docker-build.1.md b/man/docker-build.1.md index 69d78cd9a3..b654e2d922 100644 --- a/man/docker-build.1.md +++ b/man/docker-build.1.md @@ -91,7 +91,9 @@ set as the **URL**, the repository is cloned locally and then sent as the contex Remove intermediate containers after a successful build. The default is *true*. **-t**, **--tag**="" - Repository names (and optionally with tags) to be applied to the resulting image in case of success. + Repository names (and optionally with tags) to be applied to the resulting + image in case of success. Refer to **docker-tag(1)** for more information + about valid tag names. 
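For illustration, a fully qualified tag passed to `-t` may include a registry host, a port, and a multi-component repository name (the registry host below is a hypothetical example):

    $ docker build -t registry.example.com:5000/team/webapp:1.0 .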
**-m**, **--memory**=*MEMORY* Memory limit diff --git a/man/docker-commit.1.md b/man/docker-commit.1.md index 5912d3636d..d8a4cf8387 100644 --- a/man/docker-commit.1.md +++ b/man/docker-commit.1.md @@ -16,7 +16,8 @@ CONTAINER [REPOSITORY[:TAG]] # DESCRIPTION Create a new image from an existing container specified by name or container ID. The new image will contain the contents of the -container filesystem, *excluding* any data volumes. +container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)** +for more information about valid image and tag names. While the `docker commit` command is a convenient way of extending an existing image, you should prefer the use of a Dockerfile and `docker diff --git a/man/docker-cp.1.md b/man/docker-cp.1.md index 84d64c2688..949d60bb8b 100644 --- a/man/docker-cp.1.md +++ b/man/docker-cp.1.md @@ -78,7 +78,16 @@ you must be explicit with a relative or absolute path, for example: `/path/to/file:name.txt` or `./file:name.txt` It is not possible to copy certain system files such as resources under -`/proc`, `/sys`, `/dev`, and mounts created by the user in the container. +`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container. +However, you can still copy such files by manually running `tar` in `docker exec`. +For example (consider `SRC_PATH` and `DEST_PATH` are directories): + + $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + +or + + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - + Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. The command extracts the content of the tar to the `DEST_PATH` in container's diff --git a/man/docker-create.1.md b/man/docker-create.1.md index d48e1ac3c4..e630d5fdcd 100644 --- a/man/docker-create.1.md +++ b/man/docker-create.1.md @@ -197,7 +197,9 @@ two memory nodes. 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. **--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. **--kernel-memory**="" Kernel memory limit (format: `[]`, where unit = b, k, m or g) diff --git a/man/docker-events.1.md b/man/docker-events.1.md index 6a1a3649ff..15a5d516a5 100644 --- a/man/docker-events.1.md +++ b/man/docker-events.1.md @@ -47,7 +47,7 @@ Docker networks report the following events: The `--since` and `--until` parameters can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the client machine’s time. If you do not provide the `--since` option, +relative to the client machine's time. If you do not provide the `--since` option, the command returns only new and/or live events. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local diff --git a/man/docker-logs.1.md b/man/docker-logs.1.md index db23a0f137..e70f796e28 100644 --- a/man/docker-logs.1.md +++ b/man/docker-logs.1.md @@ -21,7 +21,7 @@ any logs at the time you execute docker logs). 
The **docker logs --follow** command combines commands **docker logs** and **docker attach**. It will first return all logs from the beginning and -then continue streaming new output from the container’s stdout and stderr. +then continue streaming new output from the container's stdout and stderr. **Warning**: This command works only for the **json-file** or **journald** logging drivers. @@ -46,7 +46,7 @@ logging drivers. Output the specified number of lines at the end of logs (defaults to all logs) The `--since` option can be Unix timestamps, date formatted timestamps, or Go -duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine’s +duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's time. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be diff --git a/man/docker-push.1.md b/man/docker-push.1.md index 1b487a0d55..4919489a99 100644 --- a/man/docker-push.1.md +++ b/man/docker-push.1.md @@ -13,7 +13,8 @@ NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] This command pushes an image or a repository to a registry. If you do not specify a `REGISTRY_HOST`, the command uses Docker's public registry located at -`registry-1.docker.io` by default. +`registry-1.docker.io` by default. Refer to **docker-tag(1)** for more +information about valid image and tag names. # OPTIONS **--help** diff --git a/man/docker-run.1.md b/man/docker-run.1.md index 6de2f2d70e..fb810d32f9 100644 --- a/man/docker-run.1.md +++ b/man/docker-run.1.md @@ -103,7 +103,7 @@ pull** IMAGE, before it starts the container from that image. In foreground mode (the default when **-d** is not specified), **docker run** can start the process in the container -and attach the console to the process’s standard input, output, and standard +and attach the console to the process's standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The **-a** option can be set for each of stdin, stdout, and stderr. @@ -297,7 +297,9 @@ redirection on the host system. 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. **--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. **-l**, **--label**=[] Set metadata on the container (e.g., --label com.example.key=value) @@ -735,7 +737,7 @@ This should list the message sent to logger. If you do not specify -a then Docker will attach everything (stdin,stdout,stderr) . You can specify to which of the three standard streams (stdin, stdout, stderr) -you’d like to connect instead, as in: +you'd like to connect instead, as in: # docker run -a stdin -a stdout -i -t fedora /bin/bash @@ -849,7 +851,7 @@ If a container is connected to the default bridge network and `linked` with other containers, then the container's `/etc/hosts` file is updated with the linked container's name. 
-> **Note** Since Docker may live update the container’s `/etc/hosts` file, there +> **Note** Since Docker may live update the container's `/etc/hosts` file, there may be situations when processes inside the container can end up reading an empty or incomplete `/etc/hosts` file. In most cases, retrying the read again should fix the problem. diff --git a/man/docker-search.1.md b/man/docker-search.1.md index a95c023773..c1728f548c 100644 --- a/man/docker-search.1.md +++ b/man/docker-search.1.md @@ -6,10 +6,9 @@ docker-search - Search the Docker Hub for images # SYNOPSIS **docker search** -[**--automated**] +[**-f**|**--filter**[=*[]*]] [**--help**] [**--no-trunc**] -[**-s**|**--stars**[=*0*]] TERM # DESCRIPTION @@ -21,8 +20,12 @@ of stars awarded, whether the image is official, and whether it is automated. *Note* - Search queries will only return up to 25 results # OPTIONS -**--automated**=*true*|*false* - Only show automated builds. The default is *false*. + +**-f**, **--filter**=[] + Filter output based on these conditions: + - stars= + - is-automated=(true|false) + - is-official=(true|false) **--help** Print usage statement @@ -30,9 +33,6 @@ of stars awarded, whether the image is official, and whether it is automated. **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. -**-s**, **--stars**=*X* - Only displays with at least X stars. The default is zero. - # EXAMPLES ## Search Docker Hub for ranked images @@ -40,7 +40,7 @@ of stars awarded, whether the image is official, and whether it is automated. Search a registry for the term 'fedora' and only display those images ranked 3 or higher: - $ docker search -s 3 fedora + $ docker search --filter=stars=3 fedora NAME DESCRIPTION STARS OFFICIAL AUTOMATED mattdm/fedora A basic Fedora image corresponding roughly... 50 fedora (Semi) Official Fedora base image. 38 @@ -52,7 +52,7 @@ ranked 3 or higher: Search Docker Hub for the term 'fedora' and only display automated images ranked 1 or higher: - $ docker search --automated -s 1 fedora + $ docker search --filter=is-automated=true --filter=stars=1 fedora NAME DESCRIPTION STARS OFFICIAL AUTOMATED goldmann/wildfly A WildFly application server running on a ... 3 [OK] tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] @@ -62,4 +62,5 @@ April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 +April 2016, updated by Vincent Demeester diff --git a/man/docker-tag.1.md b/man/docker-tag.1.md index 68c90b765c..9bb252aef0 100644 --- a/man/docker-tag.1.md +++ b/man/docker-tag.1.md @@ -7,44 +7,59 @@ docker-tag - Tag an image into a repository # SYNOPSIS **docker tag** [**--help**] -IMAGE[:TAG] [REGISTRY_HOST/][USERNAME/]NAME[:TAG] +NAME[:TAG] NAME[:TAG] # DESCRIPTION Assigns a new alias to an image in a registry. An alias refers to the entire image name including the optional `TAG` after the ':'. -If you do not specify a `REGISTRY_HOST`, the command uses Docker's public -registry located at `registry-1.docker.io` by default. - # "OPTIONS" **--help** Print usage statement. -**REGISTRY_HOST** - The hostname of the registry if required. This may also include the port -separated by a ':' - -**USERNAME** - The username or other qualifying identifier for the image. - **NAME** - The image name. + The image name which is made up of slash-separated name components, + optionally prefixed by a registry hostname. 
The hostname must comply with + standard DNS rules, but may not contain underscores. If a hostname is + present, it may optionally be followed by a port number in the format + `:8080`. If not present, the command uses Docker's public registry located at + `registry-1.docker.io` by default. Name components may contain lowercase + characters, digits and separators. A separator is defined as a period, one or + two underscores, or one or more dashes. A name component may not start or end + with a separator. **TAG** - The tag you are assigning to the image. Though this is arbitrary it is -recommended to be used for a version to distinguish images with the same name. -Also, for consistency tags should only include a-z0-9-_. . -Note that here TAG is a part of the overall name or "tag". + The tag assigned to the image to version and distinguish images with the same + name. The tag name may contain lowercase and uppercase characters, digits, + underscores, periods and dashes. A tag name may not start with a period or a + dash and may contain a maximum of 128 characters. # EXAMPLES -## Giving an image a new alias +## Tagging an image referenced by ID -Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and -tagging it into the "fedora" repository with "version1.0": +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": docker tag 0e5574283393 fedora/httpd:version1.0 +## Tagging an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + + docker tag httpd fedora/httpd:version1.0 + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +## Tagging an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + + docker tag httpd:test fedora/httpd:version1.0.test + ## Tagging an image for a private repository To push an image to a private registry and not the central Docker diff --git a/man/dockerd.8.md b/man/dockerd.8.md index 122609ee19..9f980310e4 100644 --- a/man/dockerd.8.md +++ b/man/dockerd.8.md @@ -39,6 +39,7 @@ dockerd - Enable daemon mode [**--ip-masq**[=*true*]] [**--iptables**[=*true*]] [**--ipv6**] +[**--isolation**[=*default*]] [**-l**|**--log-level**[=*info*]] [**--label**[=*[]*]] [**--log-driver**[=*json-file*]] @@ -183,6 +184,11 @@ unix://[/path/to/socket] to use. **--ipv6**=*true*|*false* Enable IPv6 support. Default is false. Docker will create an IPv6-enabled bridge with address fe80::1 which will allow you to create IPv6-enabled containers. Use together with `--fixed-cidr-v6` to provide globally routable IPv6 addresses. IPv6 forwarding will be enabled if not used with `--ip-forward=false`. This may collide with your host's current IPv6 settings. For more information please consult the documentation about "Advanced Networking - IPv6". +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. + **-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" Set the logging level. Default is `info`. 
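Returning to the image-name grammar documented in **docker-tag(1)** above: the rules lend themselves to a quick sanity check. The following is a minimal Go sketch that approximates those rules with regular expressions; the patterns are assumptions derived from the man page prose, not the expressions Docker's reference package actually uses to validate names.

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // Illustrative patterns derived from the docker-tag(1) prose above
    // (approximations for explanation only, not Docker's implementation).
    var (
    	// One name component: lowercase letters and digits, joined by a period,
    	// one or two underscores, or one or more dashes, and never starting or
    	// ending with a separator.
    	component = `[a-z0-9]+(?:(?:\.|_{1,2}|-+)[a-z0-9]+)*`

    	// An optional DNS-style registry host (no underscores) with an optional
    	// port, followed by slash-separated name components.
    	nameRe = regexp.MustCompile(`^(?:[a-zA-Z0-9.-]+(?::[0-9]+)?/)?` + component + `(?:/` + component + `)*$`)

    	// A tag: at most 128 characters, not starting with a period or a dash.
    	tagRe = regexp.MustCompile(`^[A-Za-z0-9_][A-Za-z0-9_.-]{0,127}$`)
    )

    func main() {
    	fmt.Println(nameRe.MatchString("registry.example.com:5000/fedora/httpd")) // true
    	fmt.Println(tagRe.MatchString("version1.0"))                              // true
    	fmt.Println(tagRe.MatchString(".hidden"))                                 // false: starts with a period
    }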
diff --git a/oci/defaults_solaris.go b/oci/defaults_solaris.go new file mode 100644 index 0000000000..f3ed5c9c77 --- /dev/null +++ b/oci/defaults_solaris.go @@ -0,0 +1,11 @@ +package oci + +import ( + "github.com/opencontainers/specs/specs-go" +) + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{} + return s +} diff --git a/pkg/directory/directory_unix.go b/pkg/directory/directory_unix.go index dbebdd3c2c..b43c79fb85 100644 --- a/pkg/directory/directory_unix.go +++ b/pkg/directory/directory_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd +// +build linux freebsd solaris package directory diff --git a/pkg/fileutils/fileutils_solaris.go b/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 0000000000..0f2cb7ab93 --- /dev/null +++ b/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/pkg/integration/utils_test.go b/pkg/integration/utils_test.go index d166489e62..8ebdb142d8 100644 --- a/pkg/integration/utils_test.go +++ b/pkg/integration/utils_test.go @@ -14,30 +14,32 @@ import ( ) func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") + var lsCmd *exec.Cmd + if runtime.GOOS != "windows" { + lsCmd = exec.Command("ls") + } else { + lsCmd = exec.Command("cmd", "/c", "dir") } - lsCmd := exec.Command("ls") - lsCmd.Start() - // Wait for it to finish - err := lsCmd.Wait() + err := lsCmd.Run() if IsKilled(err) { t.Fatalf("Expected the ls command to not be killed, was.") } } func TestIsKilledTrueWithKilledProcess(t *testing.T) { - // TODO Windows: Using golang 1.5.3, this seems to hit - // a bug in go where Process.Kill() causes a panic. - // Needs further investigation @jhowardmsft - if runtime.GOOS == "windows" { - t.SkipNow() + var longCmd *exec.Cmd + if runtime.GOOS != "windows" { + longCmd = exec.Command("top") + } else { + longCmd = exec.Command("powershell", "while ($true) { sleep 1 }") } - longCmd := exec.Command("top") + // Start a command - longCmd.Start() + err := longCmd.Start() + if err != nil { + t.Fatal(err) + } // Capture the error when *dying* done := make(chan error, 1) go func() { @@ -46,7 +48,7 @@ func TestIsKilledTrueWithKilledProcess(t *testing.T) { // Then kill it longCmd.Process.Kill() // Get the error - err := <-done + err = <-done if !IsKilled(err) { t.Fatalf("Expected the command to be killed, was not.") } diff --git a/pkg/listeners/listeners_solaris.go b/pkg/listeners/listeners_solaris.go new file mode 100644 index 0000000000..ff833e3741 --- /dev/null +++ b/pkg/listeners/listeners_solaris.go @@ -0,0 +1,31 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. 
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} diff --git a/pkg/listeners/listeners_unix.go b/pkg/listeners/listeners_unix.go index fddd3a8ed8..1bcae7aa3e 100644 --- a/pkg/listeners/listeners_unix.go +++ b/pkg/listeners/listeners_unix.go @@ -1,4 +1,4 @@ -// +build !windows +// +build !windows,!solaris package listeners diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go index a90d3d1151..5564f7b3cd 100644 --- a/pkg/mount/flags_unsupported.go +++ b/pkg/mount/flags_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd freebsd,!cgo +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo package mount diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go index ed7216e5c0..66ac4bf472 100644 --- a/pkg/mount/mount.go +++ b/pkg/mount/mount.go @@ -9,8 +9,8 @@ func GetMounts() ([]*Info, error) { return parseMountTable() } -// Mounted looks at /proc/self/mountinfo to determine of the specified -// mountpoint has been mounted +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. func Mounted(mountpoint string) (bool, error) { entries, err := parseMountTable() if err != nil { diff --git a/pkg/mount/mounter_solaris.go b/pkg/mount/mounter_solaris.go new file mode 100644 index 0000000000..c684aa81fc --- /dev/null +++ b/pkg/mount/mounter_solaris.go @@ -0,0 +1,33 @@ +// +build solaris,cgo + +package mount + +import ( + "golang.org/x/sys/unix" + "unsafe" +) + +// #include <stdlib.h> +// #include <stdio.h> +// #include <sys/mount.h> +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go index eb93365eb7..a2a3bb457f 100644 --- a/pkg/mount/mounter_unsupported.go +++ b/pkg/mount/mounter_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd freebsd,!cgo +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo package mount diff --git a/pkg/mount/mountinfo_solaris.go b/pkg/mount/mountinfo_solaris.go new file mode 100644 index 0000000000..ad9ab57f8b --- /dev/null +++ b/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include <stdio.h> +#include <sys/mnttab.h> +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret
== 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/pkg/mount/mountinfo_unsupported.go b/pkg/mount/mountinfo_unsupported.go index b8d9aa5c73..7fbcf19214 100644 --- a/pkg/mount/mountinfo_unsupported.go +++ b/pkg/mount/mountinfo_unsupported.go @@ -1,4 +1,4 @@ -// +build !windows,!linux,!freebsd freebsd,!cgo +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo package mount diff --git a/pkg/parsers/kernel/uname_solaris.go b/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 0000000000..49370bd3dd --- /dev/null +++ b/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/pkg/parsers/kernel/uname_unsupported.go b/pkg/parsers/kernel/uname_unsupported.go index 79c66b3228..1da3f239fa 100644 --- a/pkg/parsers/kernel/uname_unsupported.go +++ b/pkg/parsers/kernel/uname_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux +// +build !linux,!solaris package kernel diff --git a/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/pkg/parsers/operatingsystem/operatingsystem_solaris.go new file mode 100644 index 0000000000..d08ad14860 --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package operatingsystem + +/* +#include <zone.h> +*/ +import "C" + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var etcOsRelease = "/etc/release" + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("\n")); i >= 0 { + b = bytes.Trim(b[:i], " ") + return string(b), nil + } + return "", errors.New("release not found") +} + +// IsContainerized returns true if we are running inside a container.
+func IsContainerized() (bool, error) { + if C.getzoneid() != 0 { + return true, nil + } + return false, nil +} diff --git a/pkg/platform/architecture_freebsd.go b/pkg/platform/architecture_freebsd.go deleted file mode 100644 index 5849eccc5a..0000000000 --- a/pkg/platform/architecture_freebsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package platform - -import ( - "os/exec" -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) -func runtimeArchitecture() (string, error) { - cmd := exec.Command("uname", "-m") - machine, err := cmd.Output() - if err != nil { - return "", err - } - return string(machine), nil -} diff --git a/pkg/platform/architecture_unix.go b/pkg/platform/architecture_unix.go new file mode 100644 index 0000000000..21e6a26fe2 --- /dev/null +++ b/pkg/platform/architecture_unix.go @@ -0,0 +1,18 @@ +// +build freebsd solaris + +package platform + +import ( + "os/exec" + "strings" +) + +// runtimeArchitecture get the name of the current architecture (i86pc, sun4v) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("/usr/bin/uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(machine)), nil +} diff --git a/pkg/reexec/command_freebsd.go b/pkg/reexec/command_unix.go similarity index 94% rename from pkg/reexec/command_freebsd.go rename to pkg/reexec/command_unix.go index c7f797a5fa..b70edcb316 100644 --- a/pkg/reexec/command_freebsd.go +++ b/pkg/reexec/command_unix.go @@ -1,4 +1,4 @@ -// +build freebsd +// +build freebsd solaris package reexec diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go index ad4ea38ebb..9aed004e86 100644 --- a/pkg/reexec/command_unsupported.go +++ b/pkg/reexec/command_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd +// +build !linux,!windows,!freebsd,!solaris package reexec diff --git a/pkg/signal/signal_solaris.go b/pkg/signal/signal_solaris.go new file mode 100644 index 0000000000..89576b9e3b --- /dev/null +++ b/pkg/signal/signal_solaris.go @@ -0,0 +1,42 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Solaris signals. 
+// SIGINFO and SIGTHR not defined for Solaris +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go index 161ba27397..c592d37dfe 100644 --- a/pkg/signal/signal_unsupported.go +++ b/pkg/signal/signal_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!darwin,!freebsd,!windows +// +build !linux,!darwin,!freebsd,!windows,!solaris package signal diff --git a/pkg/sysinfo/sysinfo_solaris.go b/pkg/sysinfo/sysinfo_solaris.go new file mode 100644 index 0000000000..75a9c9bb2d --- /dev/null +++ b/pkg/sysinfo/sysinfo_solaris.go @@ -0,0 +1,119 @@ +// +build solaris,cgo + +package sysinfo + +import ( + "bytes" + "os/exec" + "strconv" + "strings" +) + +/* +#cgo LDFLAGS: -llgrp +#include <unistd.h> +#include <stdlib.h> +#include <sys/lgrp_user.h> +int getLgrpCount() { + lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE; + uint_t nlgrps; + + if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) { + return -1; + } + nlgrps = lgrp_nlgrps(lgrpcookie); + return nlgrps; +} +*/ +import "C" + +// IsCPUSharesAvailable returns whether CPUShares setting is supported. +// We need FSS to be set as default scheduling class to support CPU Shares +func IsCPUSharesAvailable() bool { + cmd := exec.Command("/usr/sbin/dispadmin", "-d") + outBuf := new(bytes.Buffer) + errBuf := new(bytes.Buffer) + cmd.Stderr = errBuf + cmd.Stdout = outBuf + + if err := cmd.Run(); err != nil { + return false + } + return (strings.Contains(outBuf.String(), "FSS")) +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. +//NOTE Solaris: If we change the below capabilities be sure +// to update verifyPlatformContainerSettings() in daemon_solaris.go +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + sysInfo.cgroupMemInfo = setCgroupMem(quiet) + sysInfo.cgroupCPUInfo = setCgroupCPU(quiet) + sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet) + sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet) + + sysInfo.IPv4ForwardingDisabled = false + + sysInfo.AppArmor = false + + return sysInfo +} + +// setCgroupMem reads the memory information for Solaris. +func setCgroupMem(quiet bool) cgroupMemInfo { + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: true, + MemoryReservation: false, + OomKillDisable: false, + MemorySwappiness: false, + KernelMemory: false, + } +} + +// setCgroupCPU reads the cpu information for Solaris. +func setCgroupCPU(quiet bool) cgroupCPUInfo { + + return cgroupCPUInfo{ + CPUShares: true, + CPUCfsPeriod: false, + CPUCfsQuota: true, + } +} + +// blkio switches are not supported in Solaris.
+func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { + + return cgroupBlkioInfo{ + BlkioWeight: false, + BlkioWeightDevice: false, + } +} + +// setCgroupCPUsetInfo reads the cpuset information for Solaris. +func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: getCPUCount(), + Mems: getLgrpCount(), + } +} + +func getCPUCount() string { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + if ncpus <= 0 { + return "" + } + return strconv.FormatInt(int64(ncpus), 16) +} + +func getLgrpCount() string { + nlgrps := C.getLgrpCount() + if nlgrps <= 0 { + return "" + } + return strconv.FormatInt(int64(nlgrps), 16) +} diff --git a/pkg/system/meminfo_solaris.go b/pkg/system/meminfo_solaris.go new file mode 100644 index 0000000000..313c601b12 --- /dev/null +++ b/pkg/system/meminfo_solaris.go @@ -0,0 +1,128 @@ +// +build solaris,cgo + +package system + +import ( + "fmt" + "unsafe" +) + +// #cgo LDFLAGS: -lkstat +// #include +// #include +// #include +// #include +// #include +// #include +// struct swaptable *allocSwaptable(int num) { +// struct swaptable *st; +// struct swapent *swapent; +// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); +// swapent = st->swt_ent; +// for (int i = 0; i < num; i++,swapent++) { +// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); +// } +// st->swt_n = num; +// return st; +//} +// void freeSwaptable (struct swaptable *st) { +// struct swapent *swapent = st->swt_ent; +// for (int i = 0; i < st->swt_n; i++,swapent++) { +// free(swapent->ste_path); +// } +// free(st); +// } +// swapent_t getSwapEnt(swapent_t *ent, int i) { +// return ent[i]; +// } +// int64_t getPpKernel() { +// int64_t pp_kernel = 0; +// kstat_ctl_t *ksc; +// kstat_t *ks; +// kstat_named_t *knp; +// kid_t kid; +// +// if ((ksc = kstat_open()) == NULL) { +// return -1; +// } +// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { +// return -1; +// } +// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || +// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { +// return -1; +// } +// switch (knp->data_type) { +// case KSTAT_DATA_UINT64: +// pp_kernel = knp->value.ui64; +// break; +// case KSTAT_DATA_UINT32: +// pp_kernel = knp->value.ui32; +// break; +// } +// pp_kernel *= sysconf(_SC_PAGESIZE); +// return (pp_kernel > 0 ? pp_kernel : -1); +// } +import "C" + +// Get the system memory info using sysconf same as prtconf +func getTotalMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_PHYS_PAGES) + return int64(pagesize * npages) +} + +func getFreeMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_AVPHYS_PAGES) + return int64(pagesize * npages) +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("Error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/pkg/system/meminfo_unsupported.go b/pkg/system/meminfo_unsupported.go index 82ddd30c1b..3ce019dffd 100644 --- a/pkg/system/meminfo_unsupported.go +++ b/pkg/system/meminfo_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows +// +build !linux,!windows,!solaris package system diff --git a/pkg/system/stat_solaris.go b/pkg/system/stat_solaris.go index b01d08acfe..0216985a25 100644 --- a/pkg/system/stat_solaris.go +++ b/pkg/system/stat_solaris.go @@ -15,3 +15,20 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtim}, nil } + +// FromStatT loads a system.StatT from a syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/pkg/system/syscall_windows.go b/pkg/system/syscall_windows.go index ef596f343f..f5f2d56941 100644 --- a/pkg/system/syscall_windows.go +++ b/pkg/system/syscall_windows.go @@ -3,10 +3,13 @@ package system import ( "syscall" "unsafe" + + "github.com/Sirupsen/logrus" ) var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") ) // OSVersion is a wrapper for Windows version information @@ -18,6 +21,21 @@ type OSVersion struct { Build uint16 } +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + // GetOSVersion gets the operating system version on Windows. Note that // docker.exe must be manifested to get the correct version information. 
func GetOSVersion() OSVersion { @@ -34,6 +52,18 @@ func GetOSVersion() OSVersion { return osv } +// IsWindowsClient returns true if the SKU is client +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + // Unmount is a platform-specific helper function to call // the unmount syscall. Not supported on Windows func Unmount(dest string) error { diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go index cd21b5fc2b..9bc52a8c65 100644 --- a/pkg/term/term_windows.go +++ b/pkg/term/term_windows.go @@ -83,11 +83,13 @@ func useNativeConsole() bool { return false } - // TODO Windows. The native emulator still has issues which - // mean it shouldn't be enabled for everyone. Change this next line to true - // to change the default to "enable if available". In the meantime, users - // can still try it out by using USE_NATIVE_CONSOLE env variable. - return false + // Must have a post-TP5 RS1 build of Windows Server 2016/Windows 10 for + // the native console to be usable. + if osv.Build < 14350 { + return false + } + + return true } // getNativeConsole returns the console modes ('state') for the native Windows console diff --git a/profiles/seccomp/default.json b/profiles/seccomp/default.json index dc67c7ac37..212ff49942 100755 --- a/profiles/seccomp/default.json +++ b/profiles/seccomp/default.json @@ -26,11 +26,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "arch_prctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, { "name": "bind", "action": "SCMP_ACT_ALLOW", @@ -61,21 +56,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "chown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chroot", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, { "name": "clock_getres", "action": "SCMP_ACT_ALLOW", @@ -91,18 +71,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "clone", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 2080505856, - "valueTwo": 0, - "op": "SCMP_CMP_MASKED_EQ" - } - ] - }, { "name": "close", "action": "SCMP_ACT_ALLOW", @@ -223,11 +191,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "fanotify_init", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, { "name": "fanotify_mark", "action": "SCMP_ACT_ALLOW", @@ -248,21 +211,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "fchown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchownat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, { "name": "fcntl", "action": "SCMP_ACT_ALLOW", @@ -608,16 +556,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "lchown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lchown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, { "name": "lgetxattr", "action": "SCMP_ACT_ALLOW", @@ -1164,11 +1102,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "setdomainname", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, { "name": "setfsgid", "action": "SCMP_ACT_ALLOW", @@ -1209,11 +1142,6 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, - { - "name": "sethostname", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, { "name": "setitimer", 
"action": "SCMP_ACT_ALLOW", @@ -1364,6 +1292,11 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, + { + "name": "socketcall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, { "name": "socketpair", "action": "SCMP_ACT_ALLOW", @@ -1579,23 +1512,70 @@ "action": "SCMP_ACT_ALLOW", "args": [] }, + { + "name": "arch_prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, { "name": "modify_ldt", "action": "SCMP_ACT_ALLOW", "args": [] }, { - "name": "breakpoint", + "name": "chown", "action": "SCMP_ACT_ALLOW", "args": [] }, { - "name": "cacheflush", + "name": "chown32", "action": "SCMP_ACT_ALLOW", "args": [] }, { - "name": "set_tls", + "name": "fchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchownat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chroot", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "fchown", "action": "SCMP_ACT_ALLOW", "args": [] } diff --git a/profiles/seccomp/generate.go b/profiles/seccomp/generate.go index bf56594765..059370bffe 100644 --- a/profiles/seccomp/generate.go +++ b/profiles/seccomp/generate.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" + "github.com/docker/docker/oci" "github.com/docker/docker/profiles/seccomp" ) @@ -20,8 +21,10 @@ func main() { } f := filepath.Join(wd, "default.json") + rs := oci.DefaultSpec() + // write the default profile to the file - b, err := json.MarshalIndent(seccomp.DefaultProfile, "", "\t") + b, err := json.MarshalIndent(seccomp.DefaultProfile(&rs), "", "\t") if err != nil { panic(err) } diff --git a/profiles/seccomp/seccomp.go b/profiles/seccomp/seccomp.go index 0718d8401e..7a58e2c521 100644 --- a/profiles/seccomp/seccomp.go +++ b/profiles/seccomp/seccomp.go @@ -13,8 +13,8 @@ import ( //go:generate go run -tags 'seccomp' generate.go // GetDefaultProfile returns the default seccomp profile. -func GetDefaultProfile() (*specs.Seccomp, error) { - return setupSeccomp(DefaultProfile) +func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { + return setupSeccomp(DefaultProfile(rs)) } // LoadProfile takes a file path and decodes the seccomp profile. diff --git a/profiles/seccomp/seccomp_default.go b/profiles/seccomp/seccomp_default.go index 9471edfcc3..a088412351 100644 --- a/profiles/seccomp/seccomp_default.go +++ b/profiles/seccomp/seccomp_default.go @@ -6,6 +6,7 @@ import ( "syscall" "github.com/docker/engine-api/types" + "github.com/opencontainers/specs/specs-go" libseccomp "github.com/seccomp/libseccomp-golang" ) @@ -34,10 +35,9 @@ func arches() []types.Arch { } // DefaultProfile defines the whitelist for the default seccomp profile. 
-var DefaultProfile = &types.Seccomp{ - DefaultAction: types.ActErrno, - Architectures: arches(), - Syscalls: []*types.Syscall{ +func DefaultProfile(rs *specs.Spec) *types.Seccomp { + + syscalls := []*types.Syscall{ { Name: "accept", Action: types.ActAllow, @@ -58,11 +58,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "arch_prctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, { Name: "bind", Action: types.ActAllow, @@ -93,21 +88,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "chown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chroot", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, { Name: "clock_getres", Action: types.ActAllow, @@ -123,18 +103,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "clone", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, - ValueTwo: 0, - Op: types.OpMaskedEqual, - }, - }, - }, { Name: "close", Action: types.ActAllow, @@ -255,11 +223,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "fanotify_init", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, { Name: "fanotify_mark", Action: types.ActAllow, @@ -280,21 +243,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "fchown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchownat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, { Name: "fcntl", Action: types.ActAllow, @@ -640,16 +588,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "lchown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lchown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, { Name: "lgetxattr", Action: types.ActAllow, @@ -1193,11 +1131,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "setdomainname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, { Name: "setfsgid", Action: types.ActAllow, @@ -1238,11 +1171,6 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - { - Name: "sethostname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, { Name: "setitimer", Action: types.ActAllow, @@ -1393,6 +1321,11 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, + { + Name: "socketcall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, { Name: "socketpair", Action: types.ActAllow, @@ -1608,27 +1541,332 @@ var DefaultProfile = &types.Seccomp{ Action: types.ActAllow, Args: []*types.Arg{}, }, - // i386 specific syscalls - { - Name: "modify_ldt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - // arm specific syscalls - { - Name: "breakpoint", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "cacheflush", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_tls", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }, + } + + var arch string + var native, err = libseccomp.GetNativeArch() + if err == nil { + arch = native.String() + } 
+	switch arch {
+	case "arm", "arm64":
+		syscalls = append(syscalls, []*types.Syscall{
+			{
+				Name:   "breakpoint",
+				Action: types.ActAllow,
+				Args:   []*types.Arg{},
+			},
+			{
+				Name:   "cacheflush",
+				Action: types.ActAllow,
+				Args:   []*types.Arg{},
+			},
+			{
+				Name:   "set_tls",
+				Action: types.ActAllow,
+				Args:   []*types.Arg{},
+			},
+		}...)
+	case "amd64", "x32":
+		syscalls = append(syscalls, []*types.Syscall{
+			{
+				Name:   "arch_prctl",
+				Action: types.ActAllow,
+				Args:   []*types.Arg{},
+			},
+		}...)
+		fallthrough
+	case "x86":
+		syscalls = append(syscalls, []*types.Syscall{
+			{
+				Name:   "modify_ldt",
+				Action: types.ActAllow,
+				Args:   []*types.Arg{},
+			},
+		}...)
+	}
+
+	capSysAdmin := false
+
+	var cap string
+	for _, cap = range rs.Process.Capabilities {
+		switch cap {
+		case "CAP_CHOWN":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "chown",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "chown32",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "fchown",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "fchown32",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "fchownat",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "lchown",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "lchown32",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_DAC_READ_SEARCH":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "name_to_handle_at",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "open_by_handle_at",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_IPC_LOCK":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "mlock",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "mlock2",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "mlockall",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_ADMIN":
+			capSysAdmin = true
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "bpf",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "clone",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "fanotify_init",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "lookup_dcookie",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "mount",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "perf_event_open",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "setdomainname",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "sethostname",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "setns",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "umount",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "umount2",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "unshare",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_BOOT":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "reboot",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_CHROOT":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "chroot",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
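+		// Loading, unloading and querying kernel modules is only allowed when
+		// the container has been granted CAP_SYS_MODULE.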
+		case "CAP_SYS_MODULE":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "delete_module",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "init_module",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "finit_module",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "query_module",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_PACCT":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "acct",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_PTRACE":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "kcmp",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "process_vm_readv",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "process_vm_writev",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "ptrace",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_RAWIO":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "iopl",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "ioperm",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_TIME":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "settimeofday",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "stime",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+				{
+					Name:   "adjtimex",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		case "CAP_SYS_TTY_CONFIG":
+			syscalls = append(syscalls, []*types.Syscall{
+				{
+					Name:   "vhangup",
+					Action: types.ActAllow,
+					Args:   []*types.Arg{},
+				},
+			}...)
+		}
+	}
+
+	if !capSysAdmin {
+		syscalls = append(syscalls, []*types.Syscall{
+			{
+				Name:   "clone",
+				Action: types.ActAllow,
+				Args: []*types.Arg{
+					{
+						Index:    0,
+						Value:    syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
+						ValueTwo: 0,
+						Op:       types.OpMaskedEqual,
+					},
+				},
+			},
+		}...)
+	}
+
+	// We need some additional syscalls in this case see #22252
+	if !rs.Process.NoNewPrivileges {
+		syscalls = append(syscalls, []*types.Syscall{
+			{
+				Name:   "fchown",
+				Action: types.ActAllow,
+				Args:   []*types.Arg{},
+			},
+		}...)
+	}
+
+	return &types.Seccomp{
+		DefaultAction: types.ActErrno,
+		Architectures: arches(),
+		Syscalls:      syscalls,
+	}
 }
diff --git a/profiles/seccomp/seccomp_unsupported.go b/profiles/seccomp/seccomp_unsupported.go
index 649632920a..ec7399cd01 100644
--- a/profiles/seccomp/seccomp_unsupported.go
+++ b/profiles/seccomp/seccomp_unsupported.go
@@ -2,9 +2,12 @@
 
 package seccomp
 
-import "github.com/docker/engine-api/types"
-
-var (
-	// DefaultProfile is a nil pointer on unsupported systems.
-	DefaultProfile *types.Seccomp
+import (
+	"github.com/docker/engine-api/types"
+	"github.com/opencontainers/specs/specs-go"
 )
+
+// DefaultProfile returns a nil pointer on unsupported systems.
+func DefaultProfile(rs *specs.Spec) *types.Seccomp {
+	return nil
+}
diff --git a/runconfig/hostconfig_solaris.go b/runconfig/hostconfig_solaris.go
new file mode 100644
index 0000000000..6bcee7d111
--- /dev/null
+++ b/runconfig/hostconfig_solaris.go
@@ -0,0 +1,48 @@
+package runconfig
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/engine-api/types/container"
+)
+
+// DefaultDaemonNetworkMode returns the default network stack the daemon should
+// use.
+func DefaultDaemonNetworkMode() container.NetworkMode {
+	return container.NetworkMode("default")
+}
+
+// IsPreDefinedNetwork indicates if a network is predefined by the daemon
+func IsPreDefinedNetwork(network string) bool {
+	return false
+}
+
+// ValidateNetMode ensures that the various combinations of requested
+// network settings are valid.
+func ValidateNetMode(c *container.Config, hc *container.HostConfig) error {
+	// We may not be passed a host config, such as in the case of docker commit
+	if hc == nil {
+		return nil
+	}
+	parts := strings.Split(string(hc.NetworkMode), ":")
+	switch mode := parts[0]; mode {
+	case "default", "none":
+	default:
+		return fmt.Errorf("invalid --net: %s", hc.NetworkMode)
+	}
+	return nil
+}
+
+// ValidateIsolation performs platform specific validation of the
+// isolation level in the hostconfig structure.
+// This setting is currently discarded for Solaris so this is a no-op.
+func ValidateIsolation(hc *container.HostConfig) error {
+	return nil
+}
+
+// ValidateQoS performs platform specific validation of the QoS settings
+// a disk can be limited by either Bps or IOps, but not both.
+func ValidateQoS(hc *container.HostConfig) error {
+	return nil
+}
diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go
index 97806d6da5..9f5e289e2c 100644
--- a/runconfig/hostconfig_unix.go
+++ b/runconfig/hostconfig_unix.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!solaris
 
 package runconfig
 
diff --git a/volume/local/local_unix.go b/volume/local/local_unix.go
index 2e63777a19..0d288528e1 100644
--- a/volume/local/local_unix.go
+++ b/volume/local/local_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd
+// +build linux freebsd solaris
 
 // Package local provides the default implementation for volumes. It
 // is used to mount data volume containers and directories local to
diff --git a/volume/store/store_unix.go b/volume/store/store_unix.go
index 319c541d60..8ebc1f20c7 100644
--- a/volume/store/store_unix.go
+++ b/volume/store/store_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd
+// +build linux freebsd solaris
 
 package store
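For reviewers, a minimal sketch (not part of this patch) of how the reworked profile generator could be exercised on its own. The import paths and field layout mirror the ones used above; the program itself is illustrative only and assumes a Linux/seccomp build of the profiles/seccomp package.

	package main

	import (
		"fmt"

		"github.com/docker/docker/profiles/seccomp"
		"github.com/opencontainers/specs/specs-go"
	)

	func main() {
		// A spec that only grants CAP_CHOWN: the generated profile should include
		// the chown family of syscalls while the CAP_SYS_ADMIN-gated calls
		// (mount, setns, unshare, ...) stay at the default errno action.
		spec := &specs.Spec{}
		spec.Process.Capabilities = []string{"CAP_CHOWN"}
		spec.Process.NoNewPrivileges = true

		profile := seccomp.DefaultProfile(spec)
		fmt.Printf("default action: %s, %d syscall rules\n",
			profile.DefaultAction, len(profile.Syscalls))
	}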