Remove cmd/docker and other directories under cli/ in accordance with the new Moby project scope

Starting with this commit, integration tests should no longer rely on the docker CLI; they should be API tests instead. For the existing tests, the scripts will use a frozen version of the docker CLI with DOCKER_API_VERSION pinned to 1.30, which should keep CI green at all times.

To help contributors develop and test manually with a modified docker CLI, this commit also adds a DOCKER_CLI_PATH environment variable to the Makefile. It allows setting the path of a custom CLI binary that is made available inside the development container and used to run the integration tests.

Signed-off-by: Arnaud Porterie (icecrime) <arnaud.porterie@docker.com>
Signed-off-by: Tibor Vass <tibor@docker.com>
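As an illustration of what "API tests instead" means in practice, the sketch below talks to the daemon through github.com/docker/docker/client rather than shelling out to a docker binary. It is an illustrative example, not part of this commit; the test name and package are hypothetical.

    package integration

    import (
        "testing"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // TestContainerListAPI exercises the Engine API directly instead of invoking
    // the docker CLI. NewEnvClient honours DOCKER_HOST and DOCKER_API_VERSION,
    // so the frozen API version used by CI (1.30) applies here as well.
    func TestContainerListAPI(t *testing.T) {
        apiClient, err := client.NewEnvClient()
        if err != nil {
            t.Fatal(err)
        }

        containers, err := apiClient.ContainerList(context.Background(), types.ContainerListOptions{All: true})
        if err != nil {
            t.Fatal(err)
        }
        t.Logf("daemon reports %d containers", len(containers))
    }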
parent d624f9a7b0
commit 32915b1d0a
401 changed files with 139 additions and 40750 deletions
@@ -241,11 +241,12 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
     hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7
 # See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
 
-# Install tomlv, vndr, runc, containerd, tini, docker-proxy
+# Install tomlv, vndr, runc, containerd, tini, docker-proxy dockercli
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata dockercli
+ENV PATH=/usr/local/cli:$PATH
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
@@ -192,7 +192,8 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+ENV PATH=/usr/local/cli:$PATH
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
@@ -173,7 +173,8 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+ENV PATH=/usr/local/cli:$PATH
 
 ENTRYPOINT ["hack/dind"]
 
@@ -179,7 +179,8 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+ENV PATH=/usr/local/cli:$PATH
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
@@ -172,7 +172,8 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+ENV PATH=/usr/local/cli:$PATH
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
@@ -64,7 +64,8 @@ ENV CGO_LDFLAGS -L/lib
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh runc containerd tini proxy
+RUN /tmp/install-binaries.sh runc containerd tini proxy dockercli
+ENV PATH=/usr/local/cli:$PATH
 
 ENV AUTO_GOPATH 1
 WORKDIR /usr/src/docker
Makefile (12 changed lines)
@@ -7,7 +7,7 @@ DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMEN
 export DOCKER_INCREMENTAL_BINARY
 
 # get OS/Arch of docker engine
-DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}')
+DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
 DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
 
 DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
@@ -24,6 +24,7 @@ DOCKER_ENVS := \
 	-e DOCKER_BUILD_ARGS \
 	-e DOCKER_BUILD_GOGC \
 	-e DOCKER_BUILD_PKGS \
+	-e DOCKER_CLI_PATH \
 	-e DOCKER_DEBUG \
 	-e DOCKER_EXPERIMENTAL \
 	-e DOCKER_GITCOMMIT \
@@ -63,7 +64,8 @@ PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64
 PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
 PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
 DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
-DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE)
+DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
+DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI)
 
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
@@ -79,6 +81,11 @@ SWAGGER_DOCS_PORT ?= 9000
 INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
 INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
 
+define \n
+
+
+endef
+
 # if this session isn't interactive, then we don't want to allocate a
 # TTY, which would fail, but if it is interactive, we do want to attach
 # so that the user can send e.g. ^C through.
@@ -98,6 +105,7 @@ binary: build ## build the linux binaries
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary
 
 build: bundles init-go-pkg-cache
+	$(warning The docker client CLI has moved to github.com/docker/cli. By default, it is built from the git sha specified in hack/dockerfile/binaries-commits. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
 	docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
 
 bundles:
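The Dockerfile changes above install the frozen dockercli under /usr/local/cli and put that directory first on PATH, while the new DOCKER_MOUNT_CLI rule bind-mounts the directory containing $(DOCKER_CLI_PATH) over the same location, so a custom binary transparently shadows the frozen one. A small illustrative sketch (not part of the commit; the helper name is hypothetical) of how a test fixture could resolve whichever binary wins:

    package fixtures

    import "os/exec"

    // dockerCliBinary resolves the docker CLI used inside the development
    // container. Because /usr/local/cli is first on PATH, this finds either the
    // frozen CLI installed by install-binaries.sh or a custom binary that was
    // bind-mounted over the same directory via DOCKER_CLI_PATH.
    func dockerCliBinary() (string, error) {
        return exec.LookPath("docker")
    }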
cli/cli.go (new file, 25 lines)
@@ -0,0 +1,25 @@
+package cli
+
+import (
+    "os"
+    "path/filepath"
+
+    "github.com/docker/docker/pkg/homedir"
+)
+
+var (
+    configDir     = os.Getenv("DOCKER_CONFIG")
+    configFileDir = ".docker"
+)
+
+// ConfigurationDir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable.
+// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves
+func ConfigurationDir() string {
+    return configDir
+}
+
+func init() {
+    if configDir == "" {
+        configDir = filepath.Join(homedir.Get(), configFileDir)
+    }
+}
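A hypothetical caller such as cmd/dockerd (which the TODO above refers to) would consume the helper roughly as follows; this is a usage sketch, not code from the diff:

    package main

    import (
        "fmt"

        "github.com/docker/docker/cli"
    )

    func main() {
        // Resolves to $DOCKER_CONFIG when set, otherwise to $HOME/.docker
        // (see the init function above).
        fmt.Println("configuration directory:", cli.ConfigurationDir())
    }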
@@ -1,70 +0,0 @@
-package bundlefile
-
-import (
-    "encoding/json"
-    "io"
-
-    "github.com/pkg/errors"
-)
-
-// Bundlefile stores the contents of a bundlefile
-type Bundlefile struct {
-    Version  string
-    Services map[string]Service
-}
-
-// Service is a service from a bundlefile
-type Service struct {
-    Image      string
-    Command    []string          `json:",omitempty"`
-    Args       []string          `json:",omitempty"`
-    Env        []string          `json:",omitempty"`
-    Labels     map[string]string `json:",omitempty"`
-    Ports      []Port            `json:",omitempty"`
-    WorkingDir *string           `json:",omitempty"`
-    User       *string           `json:",omitempty"`
-    Networks   []string          `json:",omitempty"`
-}
-
-// Port is a port as defined in a bundlefile
-type Port struct {
-    Protocol string
-    Port     uint32
-}
-
-// LoadFile loads a bundlefile from a path to the file
-func LoadFile(reader io.Reader) (*Bundlefile, error) {
-    bundlefile := &Bundlefile{}
-
-    decoder := json.NewDecoder(reader)
-    if err := decoder.Decode(bundlefile); err != nil {
-        switch jsonErr := err.(type) {
-        case *json.SyntaxError:
-            return nil, errors.Errorf(
-                "JSON syntax error at byte %v: %s",
-                jsonErr.Offset,
-                jsonErr.Error())
-        case *json.UnmarshalTypeError:
-            return nil, errors.Errorf(
-                "Unexpected type at byte %v. Expected %s but received %s.",
-                jsonErr.Offset,
-                jsonErr.Type,
-                jsonErr.Value)
-        }
-        return nil, err
-    }
-
-    return bundlefile, nil
-}
-
-// Print writes the contents of the bundlefile to the output writer
-// as human readable json
-func Print(out io.Writer, bundle *Bundlefile) error {
-    bytes, err := json.MarshalIndent(*bundle, "", "    ")
-    if err != nil {
-        return err
-    }
-
-    _, err = out.Write(bytes)
-    return err
-}
@@ -1,77 +0,0 @@
-package bundlefile
-
-import (
-    "bytes"
-    "strings"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-)
-
-func TestLoadFileV01Success(t *testing.T) {
-    reader := strings.NewReader(`{
-        "Version": "0.1",
-        "Services": {
-            "redis": {
-                "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce",
-                "Networks": ["default"]
-            },
-            "web": {
-                "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d",
-                "Networks": ["default"],
-                "User": "web"
-            }
-        }
-    }`)
-
-    bundle, err := LoadFile(reader)
-    assert.NoError(t, err)
-    assert.Equal(t, "0.1", bundle.Version)
-    assert.Len(t, bundle.Services, 2)
-}
-
-func TestLoadFileSyntaxError(t *testing.T) {
-    reader := strings.NewReader(`{
-        "Version": "0.1",
-        "Services": unquoted string
-    }`)
-
-    _, err := LoadFile(reader)
-    assert.EqualError(t, err, "JSON syntax error at byte 37: invalid character 'u' looking for beginning of value")
-}
-
-func TestLoadFileTypeError(t *testing.T) {
-    reader := strings.NewReader(`{
-        "Version": "0.1",
-        "Services": {
-            "web": {
-                "Image": "redis",
-                "Networks": "none"
-            }
-        }
-    }`)
-
-    _, err := LoadFile(reader)
-    assert.EqualError(t, err, "Unexpected type at byte 94. Expected []string but received string.")
-}
-
-func TestPrint(t *testing.T) {
-    var buffer bytes.Buffer
-    bundle := &Bundlefile{
-        Version: "0.1",
-        Services: map[string]Service{
-            "web": {
-                Image:   "image",
-                Command: []string{"echo", "something"},
-            },
-        },
-    }
-    assert.NoError(t, Print(&buffer, bundle))
-    output := buffer.String()
-    assert.Contains(t, output, "\"Image\": \"image\"")
-    assert.Contains(t, output,
-        `"Command": [
-            "echo",
-            "something"
-        ]`)
-}
@@ -1,24 +0,0 @@
-package checkpoint
-
-import (
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/cli/command"
-    "github.com/spf13/cobra"
-)
-
-// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental)
-func NewCheckpointCommand(dockerCli *command.DockerCli) *cobra.Command {
-    cmd := &cobra.Command{
-        Use:   "checkpoint",
-        Short: "Manage checkpoints",
-        Args:  cli.NoArgs,
-        RunE:  dockerCli.ShowHelp,
-        Tags:  map[string]string{"experimental": "", "version": "1.25"},
-    }
-    cmd.AddCommand(
-        newCreateCommand(dockerCli),
-        newListCommand(dockerCli),
-        newRemoveCommand(dockerCli),
-    )
-    return cmd
-}
@@ -1,58 +0,0 @@
-package checkpoint
-
-import (
-    "fmt"
-
-    "golang.org/x/net/context"
-
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/cli/command"
-    "github.com/spf13/cobra"
-)
-
-type createOptions struct {
-    container     string
-    checkpoint    string
-    checkpointDir string
-    leaveRunning  bool
-}
-
-func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command {
-    var opts createOptions
-
-    cmd := &cobra.Command{
-        Use:   "create [OPTIONS] CONTAINER CHECKPOINT",
-        Short: "Create a checkpoint from a running container",
-        Args:  cli.ExactArgs(2),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            opts.container = args[0]
-            opts.checkpoint = args[1]
-            return runCreate(dockerCli, opts)
-        },
-    }
-
-    flags := cmd.Flags()
-    flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint")
-    flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory")
-
-    return cmd
-}
-
-func runCreate(dockerCli *command.DockerCli, opts createOptions) error {
-    client := dockerCli.Client()
-
-    checkpointOpts := types.CheckpointCreateOptions{
-        CheckpointID:  opts.checkpoint,
-        CheckpointDir: opts.checkpointDir,
-        Exit:          !opts.leaveRunning,
-    }
-
-    err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts)
-    if err != nil {
-        return err
-    }
-
-    fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint)
-    return nil
-}
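The removed command is a thin wrapper over the client API, so an API-level test of the same functionality would call CheckpointCreate directly. This is a sketch only, not part of the commit: the container name is a placeholder and setup/cleanup are omitted.

    package checkpoint

    import (
        "testing"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    // TestCheckpointCreateAPI hits the same daemon endpoint that
    // `docker checkpoint create` used, without going through the CLI.
    func TestCheckpointCreateAPI(t *testing.T) {
        apiClient, err := client.NewEnvClient()
        if err != nil {
            t.Fatal(err)
        }

        opts := types.CheckpointCreateOptions{
            CheckpointID: "checkpoint-1",
            Exit:         true,
        }
        // "some-container" is a placeholder; a real test would create and
        // start a container first.
        if err := apiClient.CheckpointCreate(context.Background(), "some-container", opts); err != nil {
            t.Fatal(err)
        }
    }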
@@ -1,54 +0,0 @@
-package checkpoint
-
-import (
-    "golang.org/x/net/context"
-
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/cli/command"
-    "github.com/docker/docker/cli/command/formatter"
-    "github.com/spf13/cobra"
-)
-
-type listOptions struct {
-    checkpointDir string
-}
-
-func newListCommand(dockerCli *command.DockerCli) *cobra.Command {
-    var opts listOptions
-
-    cmd := &cobra.Command{
-        Use:     "ls [OPTIONS] CONTAINER",
-        Aliases: []string{"list"},
-        Short:   "List checkpoints for a container",
-        Args:    cli.ExactArgs(1),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return runList(dockerCli, args[0], opts)
-        },
-    }
-
-    flags := cmd.Flags()
-    flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory")
-
-    return cmd
-
-}
-
-func runList(dockerCli *command.DockerCli, container string, opts listOptions) error {
-    client := dockerCli.Client()
-
-    listOpts := types.CheckpointListOptions{
-        CheckpointDir: opts.checkpointDir,
-    }
-
-    checkpoints, err := client.CheckpointList(context.Background(), container, listOpts)
-    if err != nil {
-        return err
-    }
-
-    cpCtx := formatter.Context{
-        Output: dockerCli.Out(),
-        Format: formatter.NewCheckpointFormat(formatter.TableFormatKey),
-    }
-    return formatter.CheckpointWrite(cpCtx, checkpoints)
-}
@@ -1,44 +0,0 @@
-package checkpoint
-
-import (
-    "golang.org/x/net/context"
-
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/cli/command"
-    "github.com/spf13/cobra"
-)
-
-type removeOptions struct {
-    checkpointDir string
-}
-
-func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command {
-    var opts removeOptions
-
-    cmd := &cobra.Command{
-        Use:     "rm [OPTIONS] CONTAINER CHECKPOINT",
-        Aliases: []string{"remove"},
-        Short:   "Remove a checkpoint",
-        Args:    cli.ExactArgs(2),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return runRemove(dockerCli, args[0], args[1], opts)
-        },
-    }
-
-    flags := cmd.Flags()
-    flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory")
-
-    return cmd
-}
-
-func runRemove(dockerCli *command.DockerCli, container string, checkpoint string, opts removeOptions) error {
-    client := dockerCli.Client()
-
-    removeOpts := types.CheckpointDeleteOptions{
-        CheckpointID:  checkpoint,
-        CheckpointDir: opts.checkpointDir,
-    }
-
-    return client.CheckpointDelete(context.Background(), container, removeOpts)
-}
@@ -1,310 +0,0 @@
-package command
-
-import (
-    "fmt"
-    "io"
-    "net/http"
-    "os"
-    "path/filepath"
-    "runtime"
-
-    "github.com/docker/docker/api"
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/api/types/versions"
-    cliconfig "github.com/docker/docker/cli/config"
-    "github.com/docker/docker/cli/config/configfile"
-    "github.com/docker/docker/cli/config/credentials"
-    cliflags "github.com/docker/docker/cli/flags"
-    "github.com/docker/docker/client"
-    "github.com/docker/docker/dockerversion"
-    dopts "github.com/docker/docker/opts"
-    "github.com/docker/go-connections/sockets"
-    "github.com/docker/go-connections/tlsconfig"
-    "github.com/docker/notary/passphrase"
-    "github.com/pkg/errors"
-    "github.com/spf13/cobra"
-    "golang.org/x/net/context"
-)
-
-// Streams is an interface which exposes the standard input and output streams
-type Streams interface {
-    In() *InStream
-    Out() *OutStream
-    Err() io.Writer
-}
-
-// Cli represents the docker command line client.
-type Cli interface {
-    Client() client.APIClient
-    Out() *OutStream
-    Err() io.Writer
-    In() *InStream
-    SetIn(in *InStream)
-    ConfigFile() *configfile.ConfigFile
-    CredentialsStore(serverAddress string) credentials.Store
-}
-
-// DockerCli is an instance the docker command line client.
-// Instances of the client can be returned from NewDockerCli.
-type DockerCli struct {
-    configFile     *configfile.ConfigFile
-    in             *InStream
-    out            *OutStream
-    err            io.Writer
-    keyFile        string
-    client         client.APIClient
-    defaultVersion string
-    server         ServerInfo
-}
-
-// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
-func (cli *DockerCli) DefaultVersion() string {
-    return cli.defaultVersion
-}
-
-// Client returns the APIClient
-func (cli *DockerCli) Client() client.APIClient {
-    return cli.client
-}
-
-// Out returns the writer used for stdout
-func (cli *DockerCli) Out() *OutStream {
-    return cli.out
-}
-
-// Err returns the writer used for stderr
-func (cli *DockerCli) Err() io.Writer {
-    return cli.err
-}
-
-// SetIn sets the reader used for stdin
-func (cli *DockerCli) SetIn(in *InStream) {
-    cli.in = in
-}
-
-// In returns the reader used for stdin
-func (cli *DockerCli) In() *InStream {
-    return cli.in
-}
-
-// ShowHelp shows the command help.
-func (cli *DockerCli) ShowHelp(cmd *cobra.Command, args []string) error {
-    cmd.SetOutput(cli.err)
-    cmd.HelpFunc()(cmd, args)
-    return nil
-}
-
-// ConfigFile returns the ConfigFile
-func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
-    return cli.configFile
-}
-
-// ServerInfo returns the server version details for the host this client is
-// connected to
-func (cli *DockerCli) ServerInfo() ServerInfo {
-    return cli.server
-}
-
-// GetAllCredentials returns all of the credentials stored in all of the
-// configured credential stores.
-func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) {
-    auths := make(map[string]types.AuthConfig)
-    for registry := range cli.configFile.CredentialHelpers {
-        helper := cli.CredentialsStore(registry)
-        newAuths, err := helper.GetAll()
-        if err != nil {
-            return nil, err
-        }
-        addAll(auths, newAuths)
-    }
-    defaultStore := cli.CredentialsStore("")
-    newAuths, err := defaultStore.GetAll()
-    if err != nil {
-        return nil, err
-    }
-    addAll(auths, newAuths)
-    return auths, nil
-}
-
-func addAll(to, from map[string]types.AuthConfig) {
-    for reg, ac := range from {
-        to[reg] = ac
-    }
-}
-
-// CredentialsStore returns a new credentials store based
-// on the settings provided in the configuration file. Empty string returns
-// the default credential store.
-func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store {
-    if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" {
-        return credentials.NewNativeStore(cli.configFile, helper)
-    }
-    return credentials.NewFileStore(cli.configFile)
-}
-
-// getConfiguredCredentialStore returns the credential helper configured for the
-// given registry, the default credsStore, or the empty string if neither are
-// configured.
-func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string {
-    if c.CredentialHelpers != nil && serverAddress != "" {
-        if helper, exists := c.CredentialHelpers[serverAddress]; exists {
-            return helper
-        }
-    }
-    return c.CredentialsStore
-}
-
-// Initialize the dockerCli runs initialization that must happen after command
-// line flags are parsed.
-func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
-    cli.configFile = LoadDefaultConfigFile(cli.err)
-
-    var err error
-    cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
-    if tlsconfig.IsErrEncryptedKey(err) {
-        var (
-            passwd string
-            giveup bool
-        )
-        passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
-
-        for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ {
-            // some code and comments borrowed from notary/trustmanager/keystore.go
-            passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts)
-            // Check if the passphrase retriever got an error or if it is telling us to give up
-            if giveup || err != nil {
-                return errors.Wrap(err, "private key is encrypted, but could not get passphrase")
-            }
-
-            opts.Common.TLSOptions.Passphrase = passwd
-            cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
-        }
-    }
-
-    if err != nil {
-        return err
-    }
-
-    cli.defaultVersion = cli.client.ClientVersion()
-
-    if opts.Common.TrustKey == "" {
-        cli.keyFile = filepath.Join(cliconfig.Dir(), cliflags.DefaultTrustKeyFile)
-    } else {
-        cli.keyFile = opts.Common.TrustKey
-    }
-
-    if ping, err := cli.client.Ping(context.Background()); err == nil {
-        cli.server = ServerInfo{
-            HasExperimental: ping.Experimental,
-            OSType:          ping.OSType,
-        }
-
-        // since the new header was added in 1.25, assume server is 1.24 if header is not present.
-        if ping.APIVersion == "" {
-            ping.APIVersion = "1.24"
-        }
-
-        // if server version is lower than the current cli, downgrade
-        if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) {
-            cli.client.UpdateClientVersion(ping.APIVersion)
-        }
-    }
-
-    return nil
-}
-
-// ServerInfo stores details about the supported features and platform of the
-// server
-type ServerInfo struct {
-    HasExperimental bool
-    OSType          string
-}
-
-// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
-func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli {
-    return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err}
-}
-
-// LoadDefaultConfigFile attempts to load the default config file and returns
-// an initialized ConfigFile struct if none is found.
-func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile {
-    configFile, e := cliconfig.Load(cliconfig.Dir())
-    if e != nil {
-        fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e)
-    }
-    if !configFile.ContainsAuth() {
-        credentials.DetectDefaultStore(configFile)
-    }
-    return configFile
-}
-
-// NewAPIClientFromFlags creates a new APIClient from command line flags
-func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
-    host, err := getServerHost(opts.Hosts, opts.TLSOptions)
-    if err != nil {
-        return &client.Client{}, err
-    }
-
-    customHeaders := configFile.HTTPHeaders
-    if customHeaders == nil {
-        customHeaders = map[string]string{}
-    }
-    customHeaders["User-Agent"] = UserAgent()
-
-    verStr := api.DefaultVersion
-    if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
-        verStr = tmpStr
-    }
-
-    httpClient, err := newHTTPClient(host, opts.TLSOptions)
-    if err != nil {
-        return &client.Client{}, err
-    }
-
-    return client.NewClient(host, verStr, httpClient, customHeaders)
-}
-
-func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) {
-    switch len(hosts) {
-    case 0:
-        host = os.Getenv("DOCKER_HOST")
-    case 1:
-        host = hosts[0]
-    default:
-        return "", errors.New("Please specify only one -H")
-    }
-
-    host, err = dopts.ParseHost(tlsOptions != nil, host)
-    return
-}
-
-func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) {
-    if tlsOptions == nil {
-        // let the api client configure the default transport.
-        return nil, nil
-    }
-    opts := *tlsOptions
-    opts.ExclusiveRootPools = true
-    config, err := tlsconfig.Client(opts)
-    if err != nil {
-        return nil, err
-    }
-    tr := &http.Transport{
-        TLSClientConfig: config,
-    }
-    proto, addr, _, err := client.ParseHost(host)
-    if err != nil {
-        return nil, err
-    }
-
-    sockets.ConfigureTransport(tr, proto, addr)
-
-    return &http.Client{
-        Transport: tr,
-    }, nil
-}
-
-// UserAgent returns the user agent string used for making API requests
-func UserAgent() string {
-    return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")"
-}
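With DockerCli gone from this repository, tests and tools reach the daemon through the plain API client, which already honours the DOCKER_API_VERSION override that CI pins to 1.30. A brief illustrative sketch (not part of the diff):

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        // NewEnvClient reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH
        // and DOCKER_TLS_VERIFY, so exporting DOCKER_API_VERSION=1.30 pins the
        // client version the same way the frozen CLI does in CI.
        apiClient, err := client.NewEnvClient()
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }

        ping, err := apiClient.Ping(context.Background())
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("client version:", apiClient.ClientVersion(), "server API version:", ping.APIVersion)
    }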
@@ -1,121 +0,0 @@
-package commands
-
-import (
-    "os"
-
-    "github.com/docker/docker/cli/command"
-    "github.com/docker/docker/cli/command/checkpoint"
-    "github.com/docker/docker/cli/command/container"
-    "github.com/docker/docker/cli/command/image"
-    "github.com/docker/docker/cli/command/network"
-    "github.com/docker/docker/cli/command/node"
-    "github.com/docker/docker/cli/command/plugin"
-    "github.com/docker/docker/cli/command/registry"
-    "github.com/docker/docker/cli/command/secret"
-    "github.com/docker/docker/cli/command/service"
-    "github.com/docker/docker/cli/command/stack"
-    "github.com/docker/docker/cli/command/swarm"
-    "github.com/docker/docker/cli/command/system"
-    "github.com/docker/docker/cli/command/volume"
-    "github.com/spf13/cobra"
-)
-
-// AddCommands adds all the commands from cli/command to the root command
-func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) {
-    cmd.AddCommand(
-        // checkpoint
-        checkpoint.NewCheckpointCommand(dockerCli),
-
-        // container
-        container.NewContainerCommand(dockerCli),
-        container.NewRunCommand(dockerCli),
-
-        // image
-        image.NewImageCommand(dockerCli),
-        image.NewBuildCommand(dockerCli),
-
-        // node
-        node.NewNodeCommand(dockerCli),
-
-        // network
-        network.NewNetworkCommand(dockerCli),
-
-        // plugin
-        plugin.NewPluginCommand(dockerCli),
-
-        // registry
-        registry.NewLoginCommand(dockerCli),
-        registry.NewLogoutCommand(dockerCli),
-        registry.NewSearchCommand(dockerCli),
-
-        // secret
-        secret.NewSecretCommand(dockerCli),
-
-        // service
-        service.NewServiceCommand(dockerCli),
-
-        // system
-        system.NewSystemCommand(dockerCli),
-        system.NewVersionCommand(dockerCli),
-
-        // stack
-        stack.NewStackCommand(dockerCli),
-        stack.NewTopLevelDeployCommand(dockerCli),
-
-        // swarm
-        swarm.NewSwarmCommand(dockerCli),
-
-        // volume
-        volume.NewVolumeCommand(dockerCli),
-
-        // legacy commands may be hidden
-        hide(system.NewEventsCommand(dockerCli)),
-        hide(system.NewInfoCommand(dockerCli)),
-        hide(system.NewInspectCommand(dockerCli)),
-        hide(container.NewAttachCommand(dockerCli)),
-        hide(container.NewCommitCommand(dockerCli)),
-        hide(container.NewCopyCommand(dockerCli)),
-        hide(container.NewCreateCommand(dockerCli)),
-        hide(container.NewDiffCommand(dockerCli)),
-        hide(container.NewExecCommand(dockerCli)),
-        hide(container.NewExportCommand(dockerCli)),
-        hide(container.NewKillCommand(dockerCli)),
-        hide(container.NewLogsCommand(dockerCli)),
-        hide(container.NewPauseCommand(dockerCli)),
-        hide(container.NewPortCommand(dockerCli)),
-        hide(container.NewPsCommand(dockerCli)),
-        hide(container.NewRenameCommand(dockerCli)),
-        hide(container.NewRestartCommand(dockerCli)),
-        hide(container.NewRmCommand(dockerCli)),
-        hide(container.NewStartCommand(dockerCli)),
-        hide(container.NewStatsCommand(dockerCli)),
-        hide(container.NewStopCommand(dockerCli)),
-        hide(container.NewTopCommand(dockerCli)),
-        hide(container.NewUnpauseCommand(dockerCli)),
-        hide(container.NewUpdateCommand(dockerCli)),
-        hide(container.NewWaitCommand(dockerCli)),
-        hide(image.NewHistoryCommand(dockerCli)),
-        hide(image.NewImagesCommand(dockerCli)),
-        hide(image.NewImportCommand(dockerCli)),
-        hide(image.NewLoadCommand(dockerCli)),
-        hide(image.NewPullCommand(dockerCli)),
-        hide(image.NewPushCommand(dockerCli)),
-        hide(image.NewRemoveCommand(dockerCli)),
-        hide(image.NewSaveCommand(dockerCli)),
-        hide(image.NewTagCommand(dockerCli)),
-    )
-
-}
-
-func hide(cmd *cobra.Command) *cobra.Command {
-    // If the environment variable with name "DOCKER_HIDE_LEGACY_COMMANDS" is not empty,
-    // these legacy commands (such as `docker ps`, `docker exec`, etc)
-    // will not be shown in output console.
-    if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" {
-        return cmd
-    }
-    cmdCopy := *cmd
-    cmdCopy.Hidden = true
-    cmdCopy.Aliases = []string{}
-    return &cmdCopy
-}
@@ -1,129 +0,0 @@
-package container
-
-import (
-    "io"
-    "net/http/httputil"
-
-    "github.com/Sirupsen/logrus"
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/cli/command"
-    "github.com/docker/docker/pkg/signal"
-    "github.com/pkg/errors"
-    "github.com/spf13/cobra"
-    "golang.org/x/net/context"
-)
-
-type attachOptions struct {
-    noStdin    bool
-    proxy      bool
-    detachKeys string
-
-    container string
-}
-
-// NewAttachCommand creates a new cobra.Command for `docker attach`
-func NewAttachCommand(dockerCli *command.DockerCli) *cobra.Command {
-    var opts attachOptions
-
-    cmd := &cobra.Command{
-        Use:   "attach [OPTIONS] CONTAINER",
-        Short: "Attach local standard input, output, and error streams to a running container",
-        Args:  cli.ExactArgs(1),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            opts.container = args[0]
-            return runAttach(dockerCli, &opts)
-        },
-    }
-
-    flags := cmd.Flags()
-    flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN")
-    flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process")
-    flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container")
-    return cmd
-}
-
-func runAttach(dockerCli *command.DockerCli, opts *attachOptions) error {
-    ctx := context.Background()
-    client := dockerCli.Client()
-
-    c, err := client.ContainerInspect(ctx, opts.container)
-    if err != nil {
-        return err
-    }
-
-    if !c.State.Running {
-        return errors.New("You cannot attach to a stopped container, start it first")
-    }
-
-    if c.State.Paused {
-        return errors.New("You cannot attach to a paused container, unpause it first")
-    }
-
-    if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil {
-        return err
-    }
-
-    if opts.detachKeys != "" {
-        dockerCli.ConfigFile().DetachKeys = opts.detachKeys
-    }
-
-    options := types.ContainerAttachOptions{
-        Stream:     true,
-        Stdin:      !opts.noStdin && c.Config.OpenStdin,
-        Stdout:     true,
-        Stderr:     true,
-        DetachKeys: dockerCli.ConfigFile().DetachKeys,
-    }
-
-    var in io.ReadCloser
-    if options.Stdin {
-        in = dockerCli.In()
-    }
-
-    if opts.proxy && !c.Config.Tty {
-        sigc := ForwardAllSignals(ctx, dockerCli, opts.container)
-        defer signal.StopCatch(sigc)
-    }
-
-    resp, errAttach := client.ContainerAttach(ctx, opts.container, options)
-    if errAttach != nil && errAttach != httputil.ErrPersistEOF {
-        // ContainerAttach returns an ErrPersistEOF (connection closed)
-        // means server met an error and put it in Hijacked connection
-        // keep the error and read detailed error message from hijacked connection later
-        return errAttach
-    }
-    defer resp.Close()
-
-    if c.Config.Tty && dockerCli.Out().IsTerminal() {
-        height, width := dockerCli.Out().GetTtySize()
-        // To handle the case where a user repeatedly attaches/detaches without resizing their
-        // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially
-        // resize it, then go back to normal. Without this, every attach after the first will
-        // require the user to manually resize or hit enter.
-        resizeTtyTo(ctx, client, opts.container, height+1, width+1, false)
-
-        // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back
-        // to the actual size.
-        if err := MonitorTtySize(ctx, dockerCli, opts.container, false); err != nil {
-            logrus.Debugf("Error monitoring TTY size: %s", err)
-        }
-    }
-    if err := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil {
-        return err
-    }
-
-    if errAttach != nil {
-        return errAttach
-    }
-
-    _, status, err := getExitCode(ctx, dockerCli, opts.container)
-    if err != nil {
-        return err
-    }
-    if status != 0 {
-        return cli.StatusError{StatusCode: status}
-    }
-
-    return nil
-}
@@ -1,45 +0,0 @@
-package container
-
-import (
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/cli/command"
-    "github.com/spf13/cobra"
-)
-
-// NewContainerCommand returns a cobra command for `container` subcommands
-func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command {
-    cmd := &cobra.Command{
-        Use:   "container",
-        Short: "Manage containers",
-        Args:  cli.NoArgs,
-        RunE:  dockerCli.ShowHelp,
-    }
-    cmd.AddCommand(
-        NewAttachCommand(dockerCli),
-        NewCommitCommand(dockerCli),
-        NewCopyCommand(dockerCli),
-        NewCreateCommand(dockerCli),
-        NewDiffCommand(dockerCli),
-        NewExecCommand(dockerCli),
-        NewExportCommand(dockerCli),
-        NewKillCommand(dockerCli),
-        NewLogsCommand(dockerCli),
-        NewPauseCommand(dockerCli),
-        NewPortCommand(dockerCli),
-        NewRenameCommand(dockerCli),
-        NewRestartCommand(dockerCli),
-        NewRmCommand(dockerCli),
-        NewRunCommand(dockerCli),
-        NewStartCommand(dockerCli),
-        NewStatsCommand(dockerCli),
-        NewStopCommand(dockerCli),
-        NewTopCommand(dockerCli),
-        NewUnpauseCommand(dockerCli),
-        NewUpdateCommand(dockerCli),
-        NewWaitCommand(dockerCli),
-        newListCommand(dockerCli),
-        newInspectCommand(dockerCli),
-        NewPruneCommand(dockerCli),
-    )
-    return cmd
-}
@@ -1,75 +0,0 @@
-package container
-
-import (
-    "fmt"
-
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/cli"
-    "github.com/docker/docker/cli/command"
-    dockeropts "github.com/docker/docker/opts"
-    "github.com/spf13/cobra"
-    "golang.org/x/net/context"
-)
-
-type commitOptions struct {
-    container string
-    reference string
-
-    pause   bool
-    comment string
-    author  string
-    changes dockeropts.ListOpts
-}
-
-// NewCommitCommand creates a new cobra.Command for `docker commit`
-func NewCommitCommand(dockerCli *command.DockerCli) *cobra.Command {
-    var opts commitOptions
-
-    cmd := &cobra.Command{
-        Use:   "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]",
-        Short: "Create a new image from a container's changes",
-        Args:  cli.RequiresRangeArgs(1, 2),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            opts.container = args[0]
-            if len(args) > 1 {
-                opts.reference = args[1]
-            }
-            return runCommit(dockerCli, &opts)
-        },
-    }
-
-    flags := cmd.Flags()
-    flags.SetInterspersed(false)
-
-    flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit")
-    flags.StringVarP(&opts.comment, "message", "m", "", "Commit message")
-    flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
-
-    opts.changes = dockeropts.NewListOpts(nil)
-    flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image")
-
-    return cmd
-}
-
-func runCommit(dockerCli *command.DockerCli, opts *commitOptions) error {
-    ctx := context.Background()
-
-    name := opts.container
-    reference := opts.reference
-
-    options := types.ContainerCommitOptions{
-        Reference: reference,
-        Comment:   opts.comment,
-        Author:    opts.author,
-        Changes:   opts.changes.GetAll(),
-        Pause:     opts.pause,
-    }
-
-    response, err := dockerCli.Client().ContainerCommit(ctx, name, options)
-    if err != nil {
-        return err
-    }
-
-    fmt.Fprintln(dockerCli.Out(), response.ID)
-    return nil
-}
@ -1,305 +0,0 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type copyOptions struct {
|
||||
source string
|
||||
destination string
|
||||
followLink bool
|
||||
copyUIDGID bool
|
||||
}
|
||||
|
||||
type copyDirection int
|
||||
|
||||
const (
|
||||
fromContainer copyDirection = (1 << iota)
|
||||
toContainer
|
||||
acrossContainers = fromContainer | toContainer
|
||||
)
|
||||
|
||||
type cpConfig struct {
|
||||
followLink bool
|
||||
}
|
||||
|
||||
// NewCopyCommand creates a new `docker cp` command
|
||||
func NewCopyCommand(dockerCli *command.DockerCli) *cobra.Command {
|
||||
var opts copyOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|-
|
||||
docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`,
|
||||
Short: "Copy files/folders between a container and the local filesystem",
|
||||
Long: strings.Join([]string{
|
||||
"Copy files/folders between a container and the local filesystem\n",
|
||||
"\nUse '-' as the source to read a tar archive from stdin\n",
|
||||
"and extract it to a directory destination in a container.\n",
|
||||
"Use '-' as the destination to stream a tar archive of a\n",
|
||||
"container source to stdout.",
|
||||
}, ""),
|
||||
Args: cli.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if args[0] == "" {
|
||||
return errors.New("source can not be empty")
|
||||
}
|
||||
if args[1] == "" {
|
||||
return errors.New("destination can not be empty")
|
||||
}
|
||||
opts.source = args[0]
|
||||
opts.destination = args[1]
|
||||
return runCopy(dockerCli, opts)
|
||||
},
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
|
||||
flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH")
|
||||
flags.BoolVarP(&opts.copyUIDGID, "archive", "a", false, "Archive mode (copy all uid/gid information)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runCopy(dockerCli *command.DockerCli, opts copyOptions) error {
|
||||
srcContainer, srcPath := splitCpArg(opts.source)
|
||||
dstContainer, dstPath := splitCpArg(opts.destination)
|
||||
|
||||
var direction copyDirection
|
||||
if srcContainer != "" {
|
||||
direction |= fromContainer
|
||||
}
|
||||
if dstContainer != "" {
|
||||
direction |= toContainer
|
||||
}
|
||||
|
||||
cpParam := &cpConfig{
|
||||
followLink: opts.followLink,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
switch direction {
|
||||
case fromContainer:
|
||||
return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam)
|
||||
case toContainer:
|
||||
return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam, opts.copyUIDGID)
|
||||
case acrossContainers:
|
||||
// Copying between containers isn't supported.
|
||||
return errors.New("copying between containers is not supported")
|
||||
default:
|
||||
// User didn't specify any container.
|
||||
return errors.New("must specify at least one container source")
|
||||
}
|
||||
}
|
||||
|
||||
func statContainerPath(ctx context.Context, dockerCli *command.DockerCli, containerName, path string) (types.ContainerPathStat, error) {
|
||||
return dockerCli.Client().ContainerStatPath(ctx, containerName, path)
|
||||
}
|
||||
|
||||
func resolveLocalPath(localPath string) (absPath string, err error) {
|
||||
if absPath, err = filepath.Abs(localPath); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
|
||||
}
|
||||
|
||||
func copyFromContainer(ctx context.Context, dockerCli *command.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
|
||||
if dstPath != "-" {
|
||||
// Get an absolute destination path.
|
||||
dstPath, err = resolveLocalPath(dstPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// if client requests to follow symbol link, then must decide target file to be copied
|
||||
var rebaseName string
|
||||
if cpParam.followLink {
|
||||
srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath)
|
||||
|
||||
// If the destination is a symbolic link, we should follow it.
|
||||
if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
|
||||
linkTarget := srcStat.LinkTarget
|
||||
if !system.IsAbs(linkTarget) {
|
||||
// Join with the parent directory.
|
||||
srcParent, _ := archive.SplitPathDirEntry(srcPath)
|
||||
linkTarget = filepath.Join(srcParent, linkTarget)
|
||||
}
|
||||
|
||||
linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget)
|
||||
srcPath = linkTarget
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
if dstPath == "-" {
|
||||
// Send the response to STDOUT.
|
||||
_, err = io.Copy(os.Stdout, content)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Prepare source copy info.
|
||||
srcInfo := archive.CopyInfo{
|
||||
Path: srcPath,
|
||||
Exists: true,
|
||||
IsDir: stat.Mode.IsDir(),
|
||||
RebaseName: rebaseName,
|
||||
}
|
||||
|
||||
preArchive := content
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
_, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
|
||||
preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
|
||||
}
|
||||
// See comments in the implementation of `archive.CopyTo` for exactly what
|
||||
// goes into deciding how and whether the source archive needs to be
|
||||
// altered for the correct copy behavior.
|
||||
return archive.CopyTo(preArchive, srcInfo, dstPath)
|
||||
}
|
||||
|
||||
func copyToContainer(ctx context.Context, dockerCli *command.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig, copyUIDGID bool) (err error) {
|
||||
if srcPath != "-" {
|
||||
// Get an absolute source path.
|
||||
srcPath, err = resolveLocalPath(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// In order to get the copy behavior right, we need to know information
|
||||
// about both the source and destination. The API is a simple tar
|
||||
// archive/extract API but we can use the stat info header about the
|
||||
// destination to be more informed about exactly what the destination is.
|
||||
|
||||
// Prepare destination copy info by stat-ing the container path.
|
||||
dstInfo := archive.CopyInfo{Path: dstPath}
|
||||
dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath)
|
||||
|
||||
// If the destination is a symbolic link, we should evaluate it.
|
||||
if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
|
||||
linkTarget := dstStat.LinkTarget
|
||||
if !system.IsAbs(linkTarget) {
|
||||
// Join with the parent directory.
|
||||
dstParent, _ := archive.SplitPathDirEntry(dstPath)
|
||||
linkTarget = filepath.Join(dstParent, linkTarget)
|
||||
}
|
||||
|
||||
dstInfo.Path = linkTarget
|
||||
dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget)
|
||||
}
|
||||
|
||||
// Ignore any error and assume that the parent directory of the destination
|
||||
// path exists, in which case the copy may still succeed. If there is any
|
||||
// type of conflict (e.g., non-directory overwriting an existing directory
|
||||
// or vice versa) the extraction will fail. If the destination simply did
|
||||
// not exist, but the parent directory does, the extraction will still
|
||||
// succeed.
|
||||
if err == nil {
|
||||
dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
|
||||
}
|
||||
|
||||
var (
|
||||
content io.Reader
|
||||
resolvedDstPath string
|
||||
)
|
||||
|
||||
if srcPath == "-" {
|
||||
// Use STDIN.
|
||||
content = os.Stdin
|
||||
resolvedDstPath = dstInfo.Path
|
||||
if !dstInfo.IsDir {
|
||||
return errors.Errorf("destination \"%s:%s\" must be a directory", dstContainer, dstPath)
|
||||
}
|
||||
} else {
|
||||
// Prepare source copy info.
|
||||
srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcArchive, err := archive.TarResource(srcInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer srcArchive.Close()
|
||||
|
||||
// With the stat info about the local source as well as the
|
||||
// destination, we have enough information to know whether we need to
|
||||
// alter the archive that we upload so that when the server extracts
|
||||
// it to the specified directory in the container we get the desired
|
||||
// copy behavior.
|
||||
|
||||
// See comments in the implementation of `archive.PrepareArchiveCopy`
|
||||
// for exactly what goes into deciding how and whether the source
|
||||
// archive needs to be altered for the correct copy behavior when it is
|
||||
// extracted. This function also infers from the source and destination
|
||||
// info which directory to extract to, which may be the parent of the
|
||||
// destination that the user specified.
|
||||
dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer preparedArchive.Close()
|
||||
|
||||
resolvedDstPath = dstDir
|
||||
content = preparedArchive
|
||||
}
|
||||
|
||||
options := types.CopyToContainerOptions{
|
||||
AllowOverwriteDirWithFile: false,
|
||||
CopyUIDGID: copyUIDGID,
|
||||
}
|
||||
|
||||
return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options)
|
||||
}
|
||||
|
||||
// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be
|
||||
// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by
|
||||
// requiring a LOCALPATH with a `:` to be made explicit with a relative or
|
||||
// absolute path:
|
||||
// `/path/to/file:name.txt` or `./file:name.txt`
|
||||
//
|
||||
// This is apparently how `scp` handles this as well:
|
||||
// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/
|
||||
//
|
||||
// We can't simply check for a filepath separator because container names may
|
||||
// have a separator, e.g., "host0/cname1" if container is in a Docker cluster,
|
||||
// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows
|
||||
// client, a `:` could be part of an absolute Windows path, in which case it
|
||||
// is immediately proceeded by a backslash.
|
||||
func splitCpArg(arg string) (container, path string) {
|
||||
if system.IsAbs(arg) {
|
||||
// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
|
||||
return "", arg
|
||||
}
|
||||
|
||||
parts := strings.SplitN(arg, ":", 2)
|
||||
|
||||
if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
|
||||
// Either there's no `:` in the arg
|
||||
// OR it's an explicit local relative path like `./file:name.txt`.
|
||||
return "", arg
|
||||
}
|
||||
|
||||
return parts[0], parts[1]
|
||||
}
|
|
@ -1,224 +0,0 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/cli/command/image"
|
||||
apiclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type createOptions struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// NewCreateCommand creates a new cobra.Command for `docker create`
|
||||
func NewCreateCommand(dockerCli *command.DockerCli) *cobra.Command {
|
||||
var opts createOptions
|
||||
var copts *containerOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]",
|
||||
Short: "Create a new container",
|
||||
Args: cli.RequiresMinArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
copts.Image = args[0]
|
||||
if len(args) > 1 {
|
||||
copts.Args = args[1:]
|
||||
}
|
||||
return runCreate(dockerCli, cmd.Flags(), &opts, copts)
|
||||
},
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.SetInterspersed(false)
|
||||
|
||||
flags.StringVar(&opts.name, "name", "", "Assign a name to the container")
|
||||
|
||||
// Add an explicit help that doesn't have a `-h` to prevent the conflict
|
||||
// with hostname
|
||||
flags.Bool("help", false, "Print usage")
|
||||
|
||||
command.AddTrustVerificationFlags(flags)
|
||||
copts = addFlags(flags)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *containerOptions) error {
|
||||
containerConfig, err := parse(flags, copts)
|
||||
if err != nil {
|
||||
reportError(dockerCli.Err(), "create", err.Error(), true)
|
||||
return cli.StatusError{StatusCode: 125}
|
||||
}
|
||||
response, err := createContainer(context.Background(), dockerCli, containerConfig, opts.name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), response.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func pullImage(ctx context.Context, dockerCli *command.DockerCli, image string, out io.Writer) error {
|
||||
ref, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the Repository name from fqn to RepositoryInfo
|
||||
repoInfo, err := registry.ParseRepositoryInfo(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
|
||||
encodedAuth, err := command.EncodeAuthToBase64(authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := types.ImageCreateOptions{
|
||||
RegistryAuth: encodedAuth,
|
||||
}
|
||||
|
||||
responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
return jsonmessage.DisplayJSONMessagesStream(
|
||||
responseBody,
|
||||
out,
|
||||
dockerCli.Out().FD(),
|
||||
dockerCli.Out().IsTerminal(),
|
||||
nil)
|
||||
}
|
||||
|
||||
type cidFile struct {
|
||||
path string
|
||||
file *os.File
|
||||
written bool
|
||||
}
|
||||
|
||||
func (cid *cidFile) Close() error {
|
||||
cid.file.Close()
|
||||
|
||||
if cid.written {
|
||||
return nil
|
||||
}
|
||||
if err := os.Remove(cid.path); err != nil {
|
||||
return errors.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cid *cidFile) Write(id string) error {
|
||||
if _, err := cid.file.Write([]byte(id)); err != nil {
|
||||
return errors.Errorf("Failed to write the container ID to the file: %s", err)
|
||||
}
|
||||
cid.written = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCIDFile(path string) (*cidFile, error) {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return nil, errors.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path)
|
||||
}
|
||||
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Failed to create the container ID file: %s", err)
|
||||
}
|
||||
|
||||
return &cidFile{path: path, file: f}, nil
|
||||
}
|
||||
|
||||
func createContainer(ctx context.Context, dockerCli *command.DockerCli, containerConfig *containerConfig, name string) (*container.ContainerCreateCreatedBody, error) {
|
||||
config := containerConfig.Config
|
||||
hostConfig := containerConfig.HostConfig
|
||||
networkingConfig := containerConfig.NetworkingConfig
|
||||
stderr := dockerCli.Err()
|
||||
|
||||
var (
|
||||
containerIDFile *cidFile
|
||||
trustedRef reference.Canonical
|
||||
namedRef reference.Named
|
||||
)
|
||||
|
||||
cidfile := hostConfig.ContainerIDFile
|
||||
if cidfile != "" {
|
||||
var err error
|
||||
if containerIDFile, err = newCIDFile(cidfile); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer containerIDFile.Close()
|
||||
}
|
||||
|
||||
ref, err := reference.ParseAnyReference(config.Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if named, ok := ref.(reference.Named); ok {
|
||||
namedRef = reference.TagNameOnly(named)
|
||||
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && command.IsTrusted() {
|
||||
var err error
|
||||
trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Image = reference.FamiliarString(trustedRef)
|
||||
}
|
||||
}
|
||||
|
||||
//create the container
|
||||
response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name)
|
||||
|
||||
//if image not found try to pull it
|
||||
if err != nil {
|
||||
if apiclient.IsErrImageNotFound(err) && namedRef != nil {
|
||||
fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
|
||||
|
||||
// we don't want to write to stdout anything apart from container.ID
|
||||
if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
|
||||
if err := image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Retry
|
||||
var retryErr error
|
||||
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name)
|
||||
if retryErr != nil {
|
||||
return nil, retryErr
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, warning := range response.Warnings {
|
||||
fmt.Fprintf(stderr, "WARNING: %s\n", warning)
|
||||
}
|
||||
if containerIDFile != nil {
|
||||
if err = containerIDFile.Write(response.ID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &response, nil
|
||||
}
|
|
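The removed create command is a thin wrapper around the ContainerCreate call on the API client. As a minimal sketch (not part of the diff above), the same container creation can be driven directly through the client package the deleted code imports; the image name "busybox" is a placeholder and is assumed to be present locally, since this sketch has no pull fallback.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	// NewEnvClient reads DOCKER_HOST / DOCKER_API_VERSION from the environment.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Same call the deleted createContainer helper issues; hostConfig and
	// networkingConfig are left nil for brevity.
	created, err := cli.ContainerCreate(context.Background(),
		&container.Config{Image: "busybox", Cmd: []string{"true"}}, nil, nil, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(created.ID)
}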
@@ -1,46 +0,0 @@
package container

import (
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/cli/command/formatter"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type diffOptions struct {
	container string
}

// NewDiffCommand creates a new cobra.Command for `docker diff`
func NewDiffCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts diffOptions

	return &cobra.Command{
		Use:   "diff CONTAINER",
		Short: "Inspect changes to files or directories on a container's filesystem",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.container = args[0]
			return runDiff(dockerCli, &opts)
		},
	}
}

func runDiff(dockerCli *command.DockerCli, opts *diffOptions) error {
	if opts.container == "" {
		return errors.New("Container name cannot be empty")
	}
	ctx := context.Background()

	changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container)
	if err != nil {
		return err
	}
	diffCtx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.NewDiffFormat("{{.Type}} {{.Path}}"),
	}
	return formatter.DiffWrite(diffCtx, changes)
}
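runDiff above only calls ContainerDiff and formats the result, so the equivalent API-level check is short. A minimal sketch (not part of the diff), with "mycontainer" as a placeholder container name:

package main

import (
	"fmt"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Same API call the deleted runDiff wrapped.
	changes, err := cli.ContainerDiff(context.Background(), "mycontainer")
	if err != nil {
		panic(err)
	}
	for _, change := range changes {
		fmt.Println(change.Kind, change.Path)
	}
}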
@@ -1,205 +0,0 @@
package container

import (
	"fmt"
	"io"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	apiclient "github.com/docker/docker/client"
	options "github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/promise"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type execOptions struct {
	detachKeys  string
	interactive bool
	tty         bool
	detach      bool
	user        string
	privileged  bool
	env         *options.ListOpts
}

func newExecOptions() *execOptions {
	var values []string
	return &execOptions{
		env: options.NewListOptsRef(&values, options.ValidateEnv),
	}
}

// NewExecCommand creates a new cobra.Command for `docker exec`
func NewExecCommand(dockerCli *command.DockerCli) *cobra.Command {
	opts := newExecOptions()

	cmd := &cobra.Command{
		Use:   "exec [OPTIONS] CONTAINER COMMAND [ARG...]",
		Short: "Run a command in a running container",
		Args:  cli.RequiresMinArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			container := args[0]
			execCmd := args[1:]
			return runExec(dockerCli, opts, container, execCmd)
		},
	}

	flags := cmd.Flags()
	flags.SetInterspersed(false)

	flags.StringVarP(&opts.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container")
	flags.BoolVarP(&opts.interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
	flags.BoolVarP(&opts.tty, "tty", "t", false, "Allocate a pseudo-TTY")
	flags.BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: run command in the background")
	flags.StringVarP(&opts.user, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
	flags.BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the command")
	flags.VarP(opts.env, "env", "e", "Set environment variables")
	flags.SetAnnotation("env", "version", []string{"1.25"})

	return cmd
}

func runExec(dockerCli *command.DockerCli, opts *execOptions, container string, execCmd []string) error {
	execConfig, err := parseExec(opts, execCmd)
	// just in case the ParseExec does not exit
	if container == "" || err != nil {
		return cli.StatusError{StatusCode: 1}
	}

	if opts.detachKeys != "" {
		dockerCli.ConfigFile().DetachKeys = opts.detachKeys
	}

	// Send client escape keys
	execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys

	ctx := context.Background()
	client := dockerCli.Client()

	response, err := client.ContainerExecCreate(ctx, container, *execConfig)
	if err != nil {
		return err
	}

	execID := response.ID
	if execID == "" {
		fmt.Fprintln(dockerCli.Out(), "exec ID empty")
		return nil
	}

	//Temp struct for execStart so that we don't need to transfer all the execConfig
	if !execConfig.Detach {
		if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
			return err
		}
	} else {
		execStartCheck := types.ExecStartCheck{
			Detach: execConfig.Detach,
			Tty:    execConfig.Tty,
		}

		if err := client.ContainerExecStart(ctx, execID, execStartCheck); err != nil {
			return err
		}
		// For now don't print this - wait for when we support exec wait()
		// fmt.Fprintf(dockerCli.Out(), "%s\n", execID)
		return nil
	}

	// Interactive exec requested.
	var (
		out, stderr io.Writer
		in          io.ReadCloser
		errCh       chan error
	)

	if execConfig.AttachStdin {
		in = dockerCli.In()
	}
	if execConfig.AttachStdout {
		out = dockerCli.Out()
	}
	if execConfig.AttachStderr {
		if execConfig.Tty {
			stderr = dockerCli.Out()
		} else {
			stderr = dockerCli.Err()
		}
	}

	resp, err := client.ContainerExecAttach(ctx, execID, *execConfig)
	if err != nil {
		return err
	}
	defer resp.Close()
	errCh = promise.Go(func() error {
		return holdHijackedConnection(ctx, dockerCli, execConfig.Tty, in, out, stderr, resp)
	})

	if execConfig.Tty && dockerCli.In().IsTerminal() {
		if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil {
			fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
		}
	}

	if err := <-errCh; err != nil {
		logrus.Debugf("Error hijack: %s", err)
		return err
	}

	var status int
	if _, status, err = getExecExitCode(ctx, client, execID); err != nil {
		return err
	}

	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}

	return nil
}

// getExecExitCode perform an inspect on the exec command. It returns
// the running state and the exit code.
func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, execID string) (bool, int, error) {
	resp, err := client.ContainerExecInspect(ctx, execID)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if !apiclient.IsErrConnectionFailed(err) {
			return false, -1, err
		}
		return false, -1, nil
	}

	return resp.Running, resp.ExitCode, nil
}

// parseExec parses the specified args for the specified command and generates
// an ExecConfig from it.
func parseExec(opts *execOptions, execCmd []string) (*types.ExecConfig, error) {
	execConfig := &types.ExecConfig{
		User:       opts.user,
		Privileged: opts.privileged,
		Tty:        opts.tty,
		Cmd:        execCmd,
		Detach:     opts.detach,
	}

	// If -d is not set, attach to everything by default
	if !opts.detach {
		execConfig.AttachStdout = true
		execConfig.AttachStderr = true
		if opts.interactive {
			execConfig.AttachStdin = true
		}
	}

	if opts.env != nil {
		execConfig.Env = opts.env.GetAll()
	}

	return execConfig, nil
}
@@ -1,116 +0,0 @@
package container

import (
	"testing"

	"github.com/docker/docker/api/types"
)

type arguments struct {
	options execOptions
	execCmd []string
}

func TestParseExec(t *testing.T) {
	valids := map[*arguments]*types.ExecConfig{
		{
			execCmd: []string{"command"},
		}: {
			Cmd:          []string{"command"},
			AttachStdout: true,
			AttachStderr: true,
		},
		{
			execCmd: []string{"command1", "command2"},
		}: {
			Cmd:          []string{"command1", "command2"},
			AttachStdout: true,
			AttachStderr: true,
		},
		{
			options: execOptions{
				interactive: true,
				tty:         true,
				user:        "uid",
			},
			execCmd: []string{"command"},
		}: {
			User:         "uid",
			AttachStdin:  true,
			AttachStdout: true,
			AttachStderr: true,
			Tty:          true,
			Cmd:          []string{"command"},
		},
		{
			options: execOptions{
				detach: true,
			},
			execCmd: []string{"command"},
		}: {
			AttachStdin:  false,
			AttachStdout: false,
			AttachStderr: false,
			Detach:       true,
			Cmd:          []string{"command"},
		},
		{
			options: execOptions{
				tty:         true,
				interactive: true,
				detach:      true,
			},
			execCmd: []string{"command"},
		}: {
			AttachStdin:  false,
			AttachStdout: false,
			AttachStderr: false,
			Detach:       true,
			Tty:          true,
			Cmd:          []string{"command"},
		},
	}

	for valid, expectedExecConfig := range valids {
		execConfig, err := parseExec(&valid.options, valid.execCmd)
		if err != nil {
			t.Fatal(err)
		}
		if !compareExecConfig(expectedExecConfig, execConfig) {
			t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig)
		}
	}
}

func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool {
	if config1.AttachStderr != config2.AttachStderr {
		return false
	}
	if config1.AttachStdin != config2.AttachStdin {
		return false
	}
	if config1.AttachStdout != config2.AttachStdout {
		return false
	}
	if config1.Detach != config2.Detach {
		return false
	}
	if config1.Privileged != config2.Privileged {
		return false
	}
	if config1.Tty != config2.Tty {
		return false
	}
	if config1.User != config2.User {
		return false
	}
	if len(config1.Cmd) != len(config2.Cmd) {
		return false
	}
	for index, value := range config1.Cmd {
		if value != config2.Cmd[index] {
			return false
		}
	}
	return true
}
@@ -1,58 +0,0 @@
package container

import (
	"io"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type exportOptions struct {
	container string
	output    string
}

// NewExportCommand creates a new `docker export` command
func NewExportCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts exportOptions

	cmd := &cobra.Command{
		Use:   "export [OPTIONS] CONTAINER",
		Short: "Export a container's filesystem as a tar archive",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.container = args[0]
			return runExport(dockerCli, opts)
		},
	}

	flags := cmd.Flags()

	flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT")

	return cmd
}

func runExport(dockerCli *command.DockerCli, opts exportOptions) error {
	if opts.output == "" && dockerCli.Out().IsTerminal() {
		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
	}

	clnt := dockerCli.Client()

	responseBody, err := clnt.ContainerExport(context.Background(), opts.container)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	if opts.output == "" {
		_, err := io.Copy(dockerCli.Out(), responseBody)
		return err
	}

	return command.CopyToFile(opts.output, responseBody)
}
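runExport above streams the tarball returned by ContainerExport either to stdout or to a file. A minimal sketch of the same export done straight against the API client (not part of the diff); "mycontainer" and the output path are placeholders:

package main

import (
	"io"
	"os"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Stream the container's root filesystem as a tar archive to a local file.
	body, err := cli.ContainerExport(context.Background(), "mycontainer")
	if err != nil {
		panic(err)
	}
	defer body.Close()
	f, err := os.Create("rootfs.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, body); err != nil {
		panic(err)
	}
}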
@@ -1,124 +0,0 @@
package container

import (
	"io"
	"runtime"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/pkg/stdcopy"
	"golang.org/x/net/context"
)

// holdHijackedConnection handles copying input to and output from streams to the
// connection
func holdHijackedConnection(ctx context.Context, streams command.Streams, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error {
	var (
		err         error
		restoreOnce sync.Once
	)
	if inputStream != nil && tty {
		if err := setRawTerminal(streams); err != nil {
			return err
		}
		defer func() {
			restoreOnce.Do(func() {
				restoreTerminal(streams, inputStream)
			})
		}()
	}

	receiveStdout := make(chan error, 1)
	if outputStream != nil || errorStream != nil {
		go func() {
			// When TTY is ON, use regular copy
			if tty && outputStream != nil {
				_, err = io.Copy(outputStream, resp.Reader)
				// we should restore the terminal as soon as possible once connection end
				// so any following print messages will be in normal type.
				if inputStream != nil {
					restoreOnce.Do(func() {
						restoreTerminal(streams, inputStream)
					})
				}
			} else {
				_, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
			}

			logrus.Debug("[hijack] End of stdout")
			receiveStdout <- err
		}()
	}

	stdinDone := make(chan struct{})
	go func() {
		if inputStream != nil {
			io.Copy(resp.Conn, inputStream)
			// we should restore the terminal as soon as possible once connection end
			// so any following print messages will be in normal type.
			if tty {
				restoreOnce.Do(func() {
					restoreTerminal(streams, inputStream)
				})
			}
			logrus.Debug("[hijack] End of stdin")
		}

		if err := resp.CloseWrite(); err != nil {
			logrus.Debugf("Couldn't send EOF: %s", err)
		}
		close(stdinDone)
	}()

	select {
	case err := <-receiveStdout:
		if err != nil {
			logrus.Debugf("Error receiveStdout: %s", err)
			return err
		}
	case <-stdinDone:
		if outputStream != nil || errorStream != nil {
			select {
			case err := <-receiveStdout:
				if err != nil {
					logrus.Debugf("Error receiveStdout: %s", err)
					return err
				}
			case <-ctx.Done():
			}
		}
	case <-ctx.Done():
	}

	return nil
}

func setRawTerminal(streams command.Streams) error {
	if err := streams.In().SetRawTerminal(); err != nil {
		return err
	}
	return streams.Out().SetRawTerminal()
}

func restoreTerminal(streams command.Streams, in io.Closer) error {
	streams.In().RestoreTerminal()
	streams.Out().RestoreTerminal()
	// WARNING: DO NOT REMOVE THE OS CHECKS !!!
	// For some reason this Close call blocks on darwin..
	// As the client exits right after, simply discard the close
	// until we find a better solution.
	//
	// This can also cause the client on Windows to get stuck in Win32 CloseHandle()
	// in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442
	// Tracked internally at Microsoft by VSO #11352156. In the
	// Windows case, you hit this if you are using the native/v2 console,
	// not the "legacy" console, and you start the client in a new window. eg
	// `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar`
	// will hang. Remove start, and it won't repro.
	if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" {
		return in.Close()
	}
	return nil
}
@@ -1,46 +0,0 @@
package container

import (
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/cli/command/inspect"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type inspectOptions struct {
	format string
	size   bool
	refs   []string
}

// newInspectCommand creates a new cobra.Command for `docker container inspect`
func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts inspectOptions

	cmd := &cobra.Command{
		Use:   "inspect [OPTIONS] CONTAINER [CONTAINER...]",
		Short: "Display detailed information on one or more containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.refs = args
			return runInspect(dockerCli, opts)
		},
	}

	flags := cmd.Flags()
	flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
	flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes")

	return cmd
}

func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error {
	client := dockerCli.Client()
	ctx := context.Background()

	getRefFunc := func(ref string) (interface{}, []byte, error) {
		return client.ContainerInspectWithRaw(ctx, ref, opts.size)
	}
	return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc)
}
@@ -1,56 +0,0 @@
package container

import (
	"fmt"
	"strings"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type killOptions struct {
	signal string

	containers []string
}

// NewKillCommand creates a new cobra.Command for `docker kill`
func NewKillCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts killOptions

	cmd := &cobra.Command{
		Use:   "kill [OPTIONS] CONTAINER [CONTAINER...]",
		Short: "Kill one or more running containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			return runKill(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container")
	return cmd
}

func runKill(dockerCli *command.DockerCli, opts *killOptions) error {
	var errs []string
	ctx := context.Background()
	errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error {
		return dockerCli.Client().ContainerKill(ctx, container, opts.signal)
	})
	for _, name := range opts.containers {
		if err := <-errChan; err != nil {
			errs = append(errs, err.Error())
		} else {
			fmt.Fprintln(dockerCli.Out(), name)
		}
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
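runKill above fans ContainerKill out over its arguments; per container the API interaction is a single call. A minimal sketch (not part of the diff), with "mycontainer" as a placeholder and "KILL" mirroring the flag default above:

package main

import (
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Send SIGKILL to a single running container.
	if err := cli.ContainerKill(context.Background(), "mycontainer", "KILL"); err != nil {
		panic(err)
	}
}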
@@ -1,140 +0,0 @@
package container

import (
	"io/ioutil"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/cli/command/formatter"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/templates"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type psOptions struct {
	quiet   bool
	size    bool
	all     bool
	noTrunc bool
	nLatest bool
	last    int
	format  string
	filter  opts.FilterOpt
}

// NewPsCommand creates a new cobra.Command for `docker ps`
func NewPsCommand(dockerCli *command.DockerCli) *cobra.Command {
	opts := psOptions{filter: opts.NewFilterOpt()}

	cmd := &cobra.Command{
		Use:   "ps [OPTIONS]",
		Short: "List containers",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			return runPs(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()

	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs")
	flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes")
	flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)")
	flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
	flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)")
	flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)")
	flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template")
	flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided")

	return cmd
}

func newListCommand(dockerCli *command.DockerCli) *cobra.Command {
	cmd := *NewPsCommand(dockerCli)
	cmd.Aliases = []string{"ps", "list"}
	cmd.Use = "ls [OPTIONS]"
	return &cmd
}

// listOptionsProcessor is used to set any container list options which may only
// be embedded in the format template.
// This is passed directly into tmpl.Execute in order to allow the preprocessor
// to set any list options that were not provided by flags (e.g. `.Size`).
// It is using a `map[string]bool` so that unknown fields passed into the
// template format do not cause errors. These errors will get picked up when
// running through the actual template processor.
type listOptionsProcessor map[string]bool

// Size sets the size of the map when called by a template execution.
func (o listOptionsProcessor) Size() bool {
	o["size"] = true
	return true
}

// Label is needed here as it allows the correct pre-processing
// because Label() is a method with arguments
func (o listOptionsProcessor) Label(name string) string {
	return ""
}

func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) {
	options := &types.ContainerListOptions{
		All:     opts.all,
		Limit:   opts.last,
		Size:    opts.size,
		Filters: opts.filter.Value(),
	}

	if opts.nLatest && opts.last == -1 {
		options.Limit = 1
	}

	tmpl, err := templates.Parse(opts.format)

	if err != nil {
		return nil, err
	}

	optionsProcessor := listOptionsProcessor{}
	// This shouldn't error out but swallowing the error makes it harder
	// to track down if preProcessor issues come up. Ref #24696
	if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil {
		return nil, err
	}
	// At the moment all we need is to capture .Size for preprocessor
	options.Size = opts.size || optionsProcessor["size"]

	return options, nil
}

func runPs(dockerCli *command.DockerCli, opts *psOptions) error {
	ctx := context.Background()

	listOptions, err := buildContainerListOptions(opts)
	if err != nil {
		return err
	}

	containers, err := dockerCli.Client().ContainerList(ctx, *listOptions)
	if err != nil {
		return err
	}

	format := opts.format
	if len(format) == 0 {
		if len(dockerCli.ConfigFile().PsFormat) > 0 && !opts.quiet {
			format = dockerCli.ConfigFile().PsFormat
		} else {
			format = formatter.TableFormatKey
		}
	}

	containerCtx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.NewContainerFormat(format, opts.quiet, listOptions.Size),
		Trunc:  !opts.noTrunc,
	}
	return formatter.ContainerWrite(containerCtx, containers)
}
@@ -1,76 +0,0 @@
package container

import (
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type logsOptions struct {
	follow     bool
	since      string
	timestamps bool
	details    bool
	tail       string

	container string
}

// NewLogsCommand creates a new cobra.Command for `docker logs`
func NewLogsCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts logsOptions

	cmd := &cobra.Command{
		Use:   "logs [OPTIONS] CONTAINER",
		Short: "Fetch the logs of a container",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.container = args[0]
			return runLogs(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output")
	flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)")
	flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps")
	flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs")
	flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs")
	return cmd
}

func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
	ctx := context.Background()

	options := types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Since:      opts.since,
		Timestamps: opts.timestamps,
		Follow:     opts.follow,
		Tail:       opts.tail,
		Details:    opts.details,
	}
	responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	c, err := dockerCli.Client().ContainerInspect(ctx, opts.container)
	if err != nil {
		return err
	}

	if c.Config.Tty {
		_, err = io.Copy(dockerCli.Out(), responseBody)
	} else {
		_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
	}
	return err
}
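runLogs above fetches the log stream with ContainerLogs and, for non-TTY containers, demultiplexes it with stdcopy. A minimal sketch of the same flow against the API client (not part of the diff); "mycontainer" and the Tail value are placeholders, and a non-TTY container is assumed:

package main

import (
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	body, err := cli.ContainerLogs(context.Background(), "mycontainer",
		types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Tail: "50"})
	if err != nil {
		panic(err)
	}
	defer body.Close()
	// The non-TTY stream is multiplexed, so split it the same way the
	// deleted runLogs did.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, body); err != nil {
		panic(err)
	}
}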
@@ -1,900 +0,0 @@
package container

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types/container"
	networktypes "github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/signal"
	runconfigopts "github.com/docker/docker/runconfig/opts"
	"github.com/docker/go-connections/nat"
	"github.com/pkg/errors"
	"github.com/spf13/pflag"
)

var (
	deviceCgroupRuleRegexp = regexp.MustCompile("^[acb] ([0-9]+|\\*):([0-9]+|\\*) [rwm]{1,3}$")
)

// containerOptions is a data object with all the options for creating a container
type containerOptions struct {
	attach opts.ListOpts
	volumes opts.ListOpts
	tmpfs opts.ListOpts
	mounts opts.MountOpt
	blkioWeightDevice opts.WeightdeviceOpt
	deviceReadBps opts.ThrottledeviceOpt
	deviceWriteBps opts.ThrottledeviceOpt
	links opts.ListOpts
	aliases opts.ListOpts
	linkLocalIPs opts.ListOpts
	deviceReadIOps opts.ThrottledeviceOpt
	deviceWriteIOps opts.ThrottledeviceOpt
	env opts.ListOpts
	labels opts.ListOpts
	deviceCgroupRules opts.ListOpts
	devices opts.ListOpts
	ulimits *opts.UlimitOpt
	sysctls *opts.MapOpts
	publish opts.ListOpts
	expose opts.ListOpts
	dns opts.ListOpts
	dnsSearch opts.ListOpts
	dnsOptions opts.ListOpts
	extraHosts opts.ListOpts
	volumesFrom opts.ListOpts
	envFile opts.ListOpts
	capAdd opts.ListOpts
	capDrop opts.ListOpts
	groupAdd opts.ListOpts
	securityOpt opts.ListOpts
	storageOpt opts.ListOpts
	labelsFile opts.ListOpts
	loggingOpts opts.ListOpts
	privileged bool
	pidMode string
	utsMode string
	usernsMode string
	publishAll bool
	stdin bool
	tty bool
	oomKillDisable bool
	oomScoreAdj int
	containerIDFile string
	entrypoint string
	hostname string
	memory opts.MemBytes
	memoryReservation opts.MemBytes
	memorySwap opts.MemSwapBytes
	kernelMemory opts.MemBytes
	user string
	workingDir string
	cpuCount int64
	cpuShares int64
	cpuPercent int64
	cpuPeriod int64
	cpuRealtimePeriod int64
	cpuRealtimeRuntime int64
	cpuQuota int64
	cpus opts.NanoCPUs
	cpusetCpus string
	cpusetMems string
	blkioWeight uint16
	ioMaxBandwidth opts.MemBytes
	ioMaxIOps uint64
	swappiness int64
	netMode string
	macAddress string
	ipv4Address string
	ipv6Address string
	ipcMode string
	pidsLimit int64
	restartPolicy string
	readonlyRootfs bool
	loggingDriver string
	cgroupParent string
	volumeDriver string
	stopSignal string
	stopTimeout int
	isolation string
	shmSize opts.MemBytes
	noHealthcheck bool
	healthCmd string
	healthInterval time.Duration
	healthTimeout time.Duration
	healthStartPeriod time.Duration
	healthRetries int
	runtime string
	autoRemove bool
	init bool

	Image string
	Args []string
}
// addFlags adds all command line flags that will be used by parse to the FlagSet
|
||||
func addFlags(flags *pflag.FlagSet) *containerOptions {
|
||||
copts := &containerOptions{
|
||||
aliases: opts.NewListOpts(nil),
|
||||
attach: opts.NewListOpts(validateAttach),
|
||||
blkioWeightDevice: opts.NewWeightdeviceOpt(opts.ValidateWeightDevice),
|
||||
capAdd: opts.NewListOpts(nil),
|
||||
capDrop: opts.NewListOpts(nil),
|
||||
dns: opts.NewListOpts(opts.ValidateIPAddress),
|
||||
dnsOptions: opts.NewListOpts(nil),
|
||||
dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch),
|
||||
deviceCgroupRules: opts.NewListOpts(validateDeviceCgroupRule),
|
||||
deviceReadBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice),
|
||||
deviceReadIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice),
|
||||
deviceWriteBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice),
|
||||
deviceWriteIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice),
|
||||
devices: opts.NewListOpts(validateDevice),
|
||||
env: opts.NewListOpts(opts.ValidateEnv),
|
||||
envFile: opts.NewListOpts(nil),
|
||||
expose: opts.NewListOpts(nil),
|
||||
extraHosts: opts.NewListOpts(opts.ValidateExtraHost),
|
||||
groupAdd: opts.NewListOpts(nil),
|
||||
labels: opts.NewListOpts(opts.ValidateEnv),
|
||||
labelsFile: opts.NewListOpts(nil),
|
||||
linkLocalIPs: opts.NewListOpts(nil),
|
||||
links: opts.NewListOpts(opts.ValidateLink),
|
||||
loggingOpts: opts.NewListOpts(nil),
|
||||
publish: opts.NewListOpts(nil),
|
||||
securityOpt: opts.NewListOpts(nil),
|
||||
storageOpt: opts.NewListOpts(nil),
|
||||
sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl),
|
||||
tmpfs: opts.NewListOpts(nil),
|
||||
ulimits: opts.NewUlimitOpt(nil),
|
||||
volumes: opts.NewListOpts(nil),
|
||||
volumesFrom: opts.NewListOpts(nil),
|
||||
}
|
||||
|
||||
// General purpose flags
|
||||
flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR")
|
||||
flags.Var(&copts.deviceCgroupRules, "device-cgroup-rule", "Add a rule to the cgroup allowed devices list")
|
||||
flags.Var(&copts.devices, "device", "Add a host device to the container")
|
||||
flags.VarP(&copts.env, "env", "e", "Set environment variables")
|
||||
flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables")
|
||||
flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image")
|
||||
flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join")
|
||||
flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name")
|
||||
flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached")
|
||||
flags.VarP(&copts.labels, "label", "l", "Set meta data on a container")
|
||||
flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels")
|
||||
flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only")
|
||||
flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits")
|
||||
flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, "Signal to stop a container")
|
||||
flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container")
|
||||
flags.SetAnnotation("stop-timeout", "version", []string{"1.25"})
|
||||
flags.Var(copts.sysctls, "sysctl", "Sysctl options")
|
||||
flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY")
|
||||
flags.Var(copts.ulimits, "ulimit", "Ulimit options")
|
||||
flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
|
||||
flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container")
|
||||
flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits")
|
||||
|
||||
// Security
|
||||
flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities")
|
||||
flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities")
|
||||
flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container")
|
||||
flags.Var(&copts.securityOpt, "security-opt", "Security Options")
|
||||
flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use")
|
||||
|
||||
// Network and port publishing flag
|
||||
flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
|
||||
flags.Var(&copts.dns, "dns", "Set custom DNS servers")
|
||||
// We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way.
|
||||
// This is to be consistent with service create/update
|
||||
flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options")
|
||||
flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options")
|
||||
flags.MarkHidden("dns-opt")
|
||||
flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains")
|
||||
flags.Var(&copts.expose, "expose", "Expose a port or a range of ports")
|
||||
flags.StringVar(&copts.ipv4Address, "ip", "", "IPv4 address (e.g., 172.30.100.104)")
|
||||
flags.StringVar(&copts.ipv6Address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)")
|
||||
flags.Var(&copts.links, "link", "Add link to another container")
|
||||
flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses")
|
||||
flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g., 92:d0:c6:0a:29:33)")
|
||||
flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host")
|
||||
flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports")
|
||||
// We allow for both "--net" and "--network", although the latter is the recommended way.
|
||||
flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network")
|
||||
flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network")
|
||||
flags.MarkHidden("net")
|
||||
// We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way.
|
||||
flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container")
|
||||
flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container")
|
||||
flags.MarkHidden("net-alias")
|
||||
|
||||
// Logging and storage
|
||||
flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container")
|
||||
flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container")
|
||||
flags.Var(&copts.loggingOpts, "log-opt", "Log driver options")
|
||||
flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container")
|
||||
flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory")
|
||||
flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
|
||||
flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume")
|
||||
flags.Var(&copts.mounts, "mount", "Attach a filesystem mount to the container")
|
||||
|
||||
// Health-checking
|
||||
flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health")
|
||||
flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ms|s|m|h) (default 0s)")
|
||||
flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy")
|
||||
flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ms|s|m|h) (default 0s)")
|
||||
flags.DurationVar(&copts.healthStartPeriod, "health-start-period", 0, "Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)")
|
||||
flags.SetAnnotation("health-start-period", "version", []string{"1.29"})
|
||||
flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK")
|
||||
|
||||
// Resource management
|
||||
flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)")
|
||||
flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)")
|
||||
flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file")
|
||||
flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
|
||||
flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
|
||||
flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)")
|
||||
flags.SetAnnotation("cpu-count", "ostype", []string{"windows"})
|
||||
flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)")
|
||||
flags.SetAnnotation("cpu-percent", "ostype", []string{"windows"})
|
||||
flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period")
|
||||
flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota")
|
||||
flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds")
|
||||
flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"})
|
||||
flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds")
|
||||
flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"})
|
||||
flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
||||
flags.Var(&copts.cpus, "cpus", "Number of CPUs")
|
||||
flags.SetAnnotation("cpus", "version", []string{"1.25"})
|
||||
flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device")
|
||||
flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device")
|
||||
flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device")
|
||||
flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device")
|
||||
flags.Var(&copts.ioMaxBandwidth, "io-maxbandwidth", "Maximum IO bandwidth limit for the system drive (Windows only)")
|
||||
flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"})
|
||||
flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)")
|
||||
flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"})
|
||||
flags.Var(&copts.kernelMemory, "kernel-memory", "Kernel memory limit")
|
||||
flags.VarP(&copts.memory, "memory", "m", "Memory limit")
|
||||
flags.Var(&copts.memoryReservation, "memory-reservation", "Memory soft limit")
|
||||
flags.Var(&copts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
|
||||
flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)")
|
||||
flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer")
|
||||
flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)")
|
||||
flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)")
|
||||
|
||||
// Low-level execution (cgroups, namespaces, ...)
|
||||
flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
|
||||
flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use")
|
||||
flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology")
|
||||
flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use")
|
||||
flags.Var(&copts.shmSize, "shm-size", "Size of /dev/shm")
|
||||
flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use")
|
||||
flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container")
|
||||
|
||||
flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes")
|
||||
flags.SetAnnotation("init", "version", []string{"1.25"})
|
||||
return copts
|
||||
}
|
||||
|
||||
type containerConfig struct {
|
||||
Config *container.Config
|
||||
HostConfig *container.HostConfig
|
||||
NetworkingConfig *networktypes.NetworkingConfig
|
||||
}
|
||||
|
||||
// parse parses the args for the specified command and generates a Config,
|
||||
// a HostConfig and returns them with the specified command.
|
||||
// If the specified args are not valid, it will return an error.
|
||||
func parse(flags *pflag.FlagSet, copts *containerOptions) (*containerConfig, error) {
|
||||
var (
|
||||
attachStdin = copts.attach.Get("stdin")
|
||||
attachStdout = copts.attach.Get("stdout")
|
||||
attachStderr = copts.attach.Get("stderr")
|
||||
)
|
||||
|
||||
// Validate the input mac address
|
||||
if copts.macAddress != "" {
|
||||
if _, err := opts.ValidateMACAddress(copts.macAddress); err != nil {
|
||||
return nil, errors.Errorf("%s is not a valid mac address", copts.macAddress)
|
||||
}
|
||||
}
|
||||
if copts.stdin {
|
||||
attachStdin = true
|
||||
}
|
||||
// If -a is not set, attach to stdout and stderr
|
||||
if copts.attach.Len() == 0 {
|
||||
attachStdout = true
|
||||
attachStderr = true
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
swappiness := copts.swappiness
|
||||
if swappiness != -1 && (swappiness < 0 || swappiness > 100) {
|
||||
return nil, errors.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness)
|
||||
}
|
||||
|
||||
mounts := copts.mounts.Value()
|
||||
if len(mounts) > 0 && copts.volumeDriver != "" {
|
||||
logrus.Warn("`--volume-driver` is ignored for volumes specified via `--mount`. Use `--mount type=volume,volume-driver=...` instead.")
|
||||
}
|
||||
var binds []string
|
||||
volumes := copts.volumes.GetMap()
|
||||
// add any bind targets to the list of container volumes
|
||||
for bind := range copts.volumes.GetMap() {
|
||||
if arr := volumeSplitN(bind, 2); len(arr) > 1 {
|
||||
// after creating the bind mount we want to delete it from the copts.volumes values because
|
||||
// we do not want bind mounts being committed to image configs
|
||||
binds = append(binds, bind)
|
||||
// We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if
|
||||
// there are duplicates entries.
|
||||
delete(volumes, bind)
|
||||
}
|
||||
}
|
||||
|
||||
// Can't evaluate options passed into --tmpfs until we actually mount
|
||||
tmpfs := make(map[string]string)
|
||||
for _, t := range copts.tmpfs.GetAll() {
|
||||
if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
|
||||
tmpfs[arr[0]] = arr[1]
|
||||
} else {
|
||||
tmpfs[arr[0]] = ""
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
runCmd strslice.StrSlice
|
||||
entrypoint strslice.StrSlice
|
||||
)
|
||||
|
||||
if len(copts.Args) > 0 {
|
||||
runCmd = strslice.StrSlice(copts.Args)
|
||||
}
|
||||
|
||||
if copts.entrypoint != "" {
|
||||
entrypoint = strslice.StrSlice{copts.entrypoint}
|
||||
} else if flags.Changed("entrypoint") {
|
||||
// if `--entrypoint=` is parsed then Entrypoint is reset
|
||||
entrypoint = []string{""}
|
||||
}
|
||||
|
||||
ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Merge in exposed ports to the map of published ports
|
||||
for _, e := range copts.expose.GetAll() {
|
||||
if strings.Contains(e, ":") {
|
||||
return nil, errors.Errorf("invalid port format for --expose: %s", e)
|
||||
}
|
||||
//support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
|
||||
proto, port := nat.SplitProtoPort(e)
|
||||
//parse the start and end port and create a sequence of ports to expose
|
||||
//if expose a port, the start and end port are the same
|
||||
start, end, err := nat.ParsePortRange(port)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("invalid range format for --expose: %s, error: %s", e, err)
|
||||
}
|
||||
for i := start; i <= end; i++ {
|
||||
p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, exists := ports[p]; !exists {
|
||||
ports[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parse device mappings
|
||||
deviceMappings := []container.DeviceMapping{}
|
||||
for _, device := range copts.devices.GetAll() {
|
||||
deviceMapping, err := parseDevice(device)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deviceMappings = append(deviceMappings, deviceMapping)
|
||||
}
|
||||
|
||||
// collect all the environment variables for the container
|
||||
envVariables, err := runconfigopts.ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// collect all the labels for the container
|
||||
labels, err := runconfigopts.ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ipcMode := container.IpcMode(copts.ipcMode)
|
||||
if !ipcMode.Valid() {
|
||||
return nil, errors.Errorf("--ipc: invalid IPC mode")
|
||||
}
|
||||
|
||||
pidMode := container.PidMode(copts.pidMode)
|
||||
if !pidMode.Valid() {
|
||||
return nil, errors.Errorf("--pid: invalid PID mode")
|
||||
}
|
||||
|
||||
utsMode := container.UTSMode(copts.utsMode)
|
||||
if !utsMode.Valid() {
|
||||
return nil, errors.Errorf("--uts: invalid UTS mode")
|
||||
}
|
||||
|
||||
usernsMode := container.UsernsMode(copts.usernsMode)
|
||||
if !usernsMode.Valid() {
|
||||
return nil, errors.Errorf("--userns: invalid USER mode")
|
||||
}
|
||||
|
||||
restartPolicy, err := runconfigopts.ParseRestartPolicy(copts.restartPolicy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Healthcheck
|
||||
var healthConfig *container.HealthConfig
|
||||
haveHealthSettings := copts.healthCmd != "" ||
|
||||
copts.healthInterval != 0 ||
|
||||
copts.healthTimeout != 0 ||
|
||||
copts.healthStartPeriod != 0 ||
|
||||
copts.healthRetries != 0
|
||||
if copts.noHealthcheck {
|
||||
if haveHealthSettings {
|
||||
return nil, errors.Errorf("--no-healthcheck conflicts with --health-* options")
|
||||
}
|
||||
test := strslice.StrSlice{"NONE"}
|
||||
healthConfig = &container.HealthConfig{Test: test}
|
||||
} else if haveHealthSettings {
|
||||
var probe strslice.StrSlice
|
||||
if copts.healthCmd != "" {
|
||||
args := []string{"CMD-SHELL", copts.healthCmd}
|
||||
probe = strslice.StrSlice(args)
|
||||
}
|
||||
if copts.healthInterval < 0 {
|
||||
return nil, errors.Errorf("--health-interval cannot be negative")
|
||||
}
|
||||
if copts.healthTimeout < 0 {
|
||||
return nil, errors.Errorf("--health-timeout cannot be negative")
|
||||
}
|
||||
if copts.healthRetries < 0 {
|
||||
return nil, errors.Errorf("--health-retries cannot be negative")
|
||||
}
|
||||
if copts.healthStartPeriod < 0 {
|
||||
return nil, fmt.Errorf("--health-start-period cannot be negative")
|
||||
}
|
||||
|
||||
healthConfig = &container.HealthConfig{
|
||||
Test: probe,
|
||||
Interval: copts.healthInterval,
|
||||
Timeout: copts.healthTimeout,
|
||||
StartPeriod: copts.healthStartPeriod,
|
||||
Retries: copts.healthRetries,
|
||||
}
|
||||
}
|
||||
|
||||
resources := container.Resources{
|
||||
CgroupParent: copts.cgroupParent,
|
||||
Memory: copts.memory.Value(),
|
||||
MemoryReservation: copts.memoryReservation.Value(),
|
||||
MemorySwap: copts.memorySwap.Value(),
|
||||
MemorySwappiness: &copts.swappiness,
|
||||
KernelMemory: copts.kernelMemory.Value(),
|
||||
OomKillDisable: &copts.oomKillDisable,
|
||||
NanoCPUs: copts.cpus.Value(),
|
||||
CPUCount: copts.cpuCount,
|
||||
CPUPercent: copts.cpuPercent,
|
||||
CPUShares: copts.cpuShares,
|
||||
CPUPeriod: copts.cpuPeriod,
|
||||
CpusetCpus: copts.cpusetCpus,
|
||||
CpusetMems: copts.cpusetMems,
|
||||
CPUQuota: copts.cpuQuota,
|
||||
CPURealtimePeriod: copts.cpuRealtimePeriod,
|
||||
CPURealtimeRuntime: copts.cpuRealtimeRuntime,
|
||||
PidsLimit: copts.pidsLimit,
|
||||
BlkioWeight: copts.blkioWeight,
|
||||
BlkioWeightDevice: copts.blkioWeightDevice.GetList(),
|
||||
BlkioDeviceReadBps: copts.deviceReadBps.GetList(),
|
||||
BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(),
|
||||
BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(),
|
||||
BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(),
|
||||
IOMaximumIOps: copts.ioMaxIOps,
|
||||
IOMaximumBandwidth: uint64(copts.ioMaxBandwidth),
|
||||
Ulimits: copts.ulimits.GetList(),
|
||||
DeviceCgroupRules: copts.deviceCgroupRules.GetAll(),
|
||||
Devices: deviceMappings,
|
||||
}
|
||||
|
||||
config := &container.Config{
|
||||
Hostname: copts.hostname,
|
||||
ExposedPorts: ports,
|
||||
User: copts.user,
|
||||
Tty: copts.tty,
|
||||
// TODO: deprecated, it comes from -n, --networking
|
||||
// it's still needed internally to set the network to disabled
|
||||
// if e.g. bridge is none in daemon opts, and in inspect
|
||||
NetworkDisabled: false,
|
||||
OpenStdin: copts.stdin,
|
||||
AttachStdin: attachStdin,
|
||||
AttachStdout: attachStdout,
|
||||
AttachStderr: attachStderr,
|
||||
Env: envVariables,
|
||||
Cmd: runCmd,
|
||||
Image: copts.Image,
|
||||
Volumes: volumes,
|
||||
MacAddress: copts.macAddress,
|
||||
Entrypoint: entrypoint,
|
||||
WorkingDir: copts.workingDir,
|
||||
Labels: runconfigopts.ConvertKVStringsToMap(labels),
|
||||
Healthcheck: healthConfig,
|
||||
}
|
||||
if flags.Changed("stop-signal") {
|
||||
config.StopSignal = copts.stopSignal
|
||||
}
|
||||
if flags.Changed("stop-timeout") {
|
||||
config.StopTimeout = &copts.stopTimeout
|
||||
}
|
||||
|
||||
hostConfig := &container.HostConfig{
|
||||
Binds: binds,
|
||||
ContainerIDFile: copts.containerIDFile,
|
||||
OomScoreAdj: copts.oomScoreAdj,
|
||||
AutoRemove: copts.autoRemove,
|
||||
Privileged: copts.privileged,
|
||||
PortBindings: portBindings,
|
||||
Links: copts.links.GetAll(),
|
||||
PublishAllPorts: copts.publishAll,
|
||||
// Make sure the dns fields are never nil.
|
||||
// New containers don't ever have those fields nil,
|
||||
// but pre created containers can still have those nil values.
|
||||
// See https://github.com/docker/docker/pull/17779
|
||||
// for a more detailed explanation on why we don't want that.
|
||||
DNS: copts.dns.GetAllOrEmpty(),
|
||||
DNSSearch: copts.dnsSearch.GetAllOrEmpty(),
|
||||
DNSOptions: copts.dnsOptions.GetAllOrEmpty(),
|
||||
ExtraHosts: copts.extraHosts.GetAll(),
|
||||
VolumesFrom: copts.volumesFrom.GetAll(),
|
||||
NetworkMode: container.NetworkMode(copts.netMode),
|
||||
IpcMode: ipcMode,
|
||||
PidMode: pidMode,
|
||||
UTSMode: utsMode,
|
||||
UsernsMode: usernsMode,
|
||||
CapAdd: strslice.StrSlice(copts.capAdd.GetAll()),
|
||||
CapDrop: strslice.StrSlice(copts.capDrop.GetAll()),
|
||||
GroupAdd: copts.groupAdd.GetAll(),
|
||||
RestartPolicy: restartPolicy,
|
||||
SecurityOpt: securityOpts,
|
||||
StorageOpt: storageOpts,
|
||||
ReadonlyRootfs: copts.readonlyRootfs,
|
||||
LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts},
|
||||
VolumeDriver: copts.volumeDriver,
|
||||
Isolation: container.Isolation(copts.isolation),
|
||||
ShmSize: copts.shmSize.Value(),
|
||||
Resources: resources,
|
||||
Tmpfs: tmpfs,
|
||||
Sysctls: copts.sysctls.GetAll(),
|
||||
Runtime: copts.runtime,
|
||||
Mounts: mounts,
|
||||
}
|
||||
|
||||
if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() {
|
||||
return nil, errors.Errorf("Conflicting options: --restart and --rm")
|
||||
}
|
||||
|
||||
// only set this value if the user provided the flag, else it should default to nil
|
||||
if flags.Changed("init") {
|
||||
hostConfig.Init = &copts.init
|
||||
}
|
||||
|
||||
// When allocating stdin in attached mode, close stdin at client disconnect
|
||||
if config.OpenStdin && config.AttachStdin {
|
||||
config.StdinOnce = true
|
||||
}
|
||||
|
||||
networkingConfig := &networktypes.NetworkingConfig{
|
||||
EndpointsConfig: make(map[string]*networktypes.EndpointSettings),
|
||||
}
|
||||
|
||||
if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 {
|
||||
epConfig := &networktypes.EndpointSettings{}
|
||||
networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig
|
||||
|
||||
epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{
|
||||
IPv4Address: copts.ipv4Address,
|
||||
IPv6Address: copts.ipv6Address,
|
||||
}
|
||||
|
||||
if copts.linkLocalIPs.Len() > 0 {
|
||||
epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len())
|
||||
copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll())
|
||||
}
|
||||
}
|
||||
|
||||
if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 {
|
||||
epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)]
|
||||
if epConfig == nil {
|
||||
epConfig = &networktypes.EndpointSettings{}
|
||||
}
|
||||
epConfig.Links = make([]string, len(hostConfig.Links))
|
||||
copy(epConfig.Links, hostConfig.Links)
|
||||
networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig
|
||||
}
|
||||
|
||||
if copts.aliases.Len() > 0 {
|
||||
epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)]
|
||||
if epConfig == nil {
|
||||
epConfig = &networktypes.EndpointSettings{}
|
||||
}
|
||||
epConfig.Aliases = make([]string, copts.aliases.Len())
|
||||
copy(epConfig.Aliases, copts.aliases.GetAll())
|
||||
networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig
|
||||
}
|
||||
|
||||
return &containerConfig{
|
||||
Config: config,
|
||||
HostConfig: hostConfig,
|
||||
NetworkingConfig: networkingConfig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) {
|
||||
loggingOptsMap := runconfigopts.ConvertKVStringsToMap(loggingOpts)
|
||||
if loggingDriver == "none" && len(loggingOpts) > 0 {
|
||||
return map[string]string{}, errors.Errorf("invalid logging opts for driver %s", loggingDriver)
|
||||
}
|
||||
return loggingOptsMap, nil
|
||||
}
|
||||
|
||||
// takes a local seccomp daemon, reads the file contents for sending to the daemon
|
||||
func parseSecurityOpts(securityOpts []string) ([]string, error) {
|
||||
for key, opt := range securityOpts {
|
||||
con := strings.SplitN(opt, "=", 2)
|
||||
if len(con) == 1 && con[0] != "no-new-privileges" {
|
||||
if strings.Contains(opt, ":") {
|
||||
con = strings.SplitN(opt, ":", 2)
|
||||
} else {
|
||||
return securityOpts, errors.Errorf("Invalid --security-opt: %q", opt)
|
||||
}
|
||||
}
|
||||
if con[0] == "seccomp" && con[1] != "unconfined" {
|
||||
f, err := ioutil.ReadFile(con[1])
|
||||
if err != nil {
|
||||
return securityOpts, errors.Errorf("opening seccomp profile (%s) failed: %v", con[1], err)
|
||||
}
|
||||
b := bytes.NewBuffer(nil)
|
||||
if err := json.Compact(b, f); err != nil {
|
||||
return securityOpts, errors.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err)
|
||||
}
|
||||
securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
return securityOpts, nil
|
||||
}
|
||||
|
||||
// parses storage options per container into a map
|
||||
func parseStorageOpts(storageOpts []string) (map[string]string, error) {
|
||||
m := make(map[string]string)
|
||||
for _, option := range storageOpts {
|
||||
if strings.Contains(option, "=") {
|
||||
opt := strings.SplitN(option, "=", 2)
|
||||
m[opt[0]] = opt[1]
|
||||
} else {
|
||||
return nil, errors.Errorf("invalid storage option")
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// parseDevice parses a device mapping string to a container.DeviceMapping struct
|
||||
func parseDevice(device string) (container.DeviceMapping, error) {
|
||||
src := ""
|
||||
dst := ""
|
||||
permissions := "rwm"
|
||||
arr := strings.Split(device, ":")
|
||||
switch len(arr) {
|
||||
case 3:
|
||||
permissions = arr[2]
|
||||
fallthrough
|
||||
case 2:
|
||||
if validDeviceMode(arr[1]) {
|
||||
permissions = arr[1]
|
||||
} else {
|
||||
dst = arr[1]
|
||||
}
|
||||
fallthrough
|
||||
case 1:
|
||||
src = arr[0]
|
||||
default:
|
||||
return container.DeviceMapping{}, errors.Errorf("invalid device specification: %s", device)
|
||||
}
|
||||
|
||||
if dst == "" {
|
||||
dst = src
|
||||
}
|
||||
|
||||
deviceMapping := container.DeviceMapping{
|
||||
PathOnHost: src,
|
||||
PathInContainer: dst,
|
||||
CgroupPermissions: permissions,
|
||||
}
|
||||
return deviceMapping, nil
|
||||
}
|
||||
|
||||
// validateDeviceCgroupRule validates a device cgroup rule string format
|
||||
// It will make sure 'val' is in the form:
|
||||
// 'type major:minor mode'
|
||||
func validateDeviceCgroupRule(val string) (string, error) {
|
||||
if deviceCgroupRuleRegexp.MatchString(val) {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
return val, errors.Errorf("invalid device cgroup format '%s'", val)
|
||||
}
|
||||
|
||||
// validDeviceMode checks if the mode for device is valid or not.
|
||||
// Valid mode is a composition of r (read), w (write), and m (mknod).
|
||||
func validDeviceMode(mode string) bool {
|
||||
var legalDeviceMode = map[rune]bool{
|
||||
'r': true,
|
||||
'w': true,
|
||||
'm': true,
|
||||
}
|
||||
if mode == "" {
|
||||
return false
|
||||
}
|
||||
for _, c := range mode {
|
||||
if !legalDeviceMode[c] {
|
||||
return false
|
||||
}
|
||||
legalDeviceMode[c] = false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// validateDevice validates a path for devices
|
||||
// It will make sure 'val' is in the form:
|
||||
// [host-dir:]container-path[:mode]
|
||||
// It also validates the device mode.
|
||||
func validateDevice(val string) (string, error) {
|
||||
return validatePath(val, validDeviceMode)
|
||||
}
|
||||
|
||||
func validatePath(val string, validator func(string) bool) (string, error) {
|
||||
var containerPath string
|
||||
var mode string
|
||||
|
||||
if strings.Count(val, ":") > 2 {
|
||||
return val, errors.Errorf("bad format for path: %s", val)
|
||||
}
|
||||
|
||||
split := strings.SplitN(val, ":", 3)
|
||||
if split[0] == "" {
|
||||
return val, errors.Errorf("bad format for path: %s", val)
|
||||
}
|
||||
switch len(split) {
|
||||
case 1:
|
||||
containerPath = split[0]
|
||||
val = path.Clean(containerPath)
|
||||
case 2:
|
||||
if isValid := validator(split[1]); isValid {
|
||||
containerPath = split[0]
|
||||
mode = split[1]
|
||||
val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
|
||||
} else {
|
||||
containerPath = split[1]
|
||||
val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath))
|
||||
}
|
||||
case 3:
|
||||
containerPath = split[1]
|
||||
mode = split[2]
|
||||
if isValid := validator(split[2]); !isValid {
|
||||
return val, errors.Errorf("bad mode specified: %s", mode)
|
||||
}
|
||||
val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode)
|
||||
}
|
||||
|
||||
if !path.IsAbs(containerPath) {
|
||||
return val, errors.Errorf("%s is not an absolute path", containerPath)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon.
|
||||
// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped).
|
||||
// In Windows driver letter appears in two situations:
|
||||
// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option)
|
||||
// b. A string in the format like `\\?\C:\Windows\...` (UNC).
|
||||
// Therefore, a driver letter can only follow either a `:` or `\\`
|
||||
// This allows to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`.
|
||||
func volumeSplitN(raw string, n int) []string {
|
||||
var array []string
|
||||
if len(raw) == 0 || raw[0] == ':' {
|
||||
// invalid
|
||||
return nil
|
||||
}
|
||||
// numberOfParts counts the number of parts separated by a separator colon
|
||||
numberOfParts := 0
|
||||
// left represents the left-most cursor in raw, updated at every `:` character considered as a separator.
|
||||
left := 0
|
||||
// right represents the right-most cursor in raw incremented with the loop. Note this
|
||||
// starts at index 1 as index 0 is already handle above as a special case.
|
||||
for right := 1; right < len(raw); right++ {
|
||||
// stop parsing if reached maximum number of parts
|
||||
if n >= 0 && numberOfParts >= n {
|
||||
break
|
||||
}
|
||||
if raw[right] != ':' {
|
||||
continue
|
||||
}
|
||||
potentialDriveLetter := raw[right-1]
|
||||
if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') {
|
||||
if right > 1 {
|
||||
beforePotentialDriveLetter := raw[right-2]
|
||||
// Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`)
|
||||
if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' {
|
||||
// e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`.
|
||||
array = append(array, raw[left:right])
|
||||
left = right + 1
|
||||
numberOfParts++
|
||||
}
|
||||
// else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing.
|
||||
}
|
||||
// if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing.
|
||||
} else {
|
||||
// if `:` is not preceded by a potential drive letter, then consider it as a delimiter.
|
||||
array = append(array, raw[left:right])
|
||||
left = right + 1
|
||||
numberOfParts++
|
||||
}
|
||||
}
|
||||
// need to take care of the last part
|
||||
if left < len(raw) {
|
||||
if n >= 0 && numberOfParts >= n {
|
||||
// if the maximum number of parts is reached, just append the rest to the last part
|
||||
// left-1 is at the last `:` that needs to be included since not considered a separator.
|
||||
array[n-1] += raw[left-1:]
|
||||
} else {
|
||||
array = append(array, raw[left:])
|
||||
}
|
||||
}
|
||||
return array
|
||||
}
|
||||
|
||||
// validateAttach validates that the specified string is a valid attach option.
|
||||
func validateAttach(val string) (string, error) {
|
||||
s := strings.ToLower(val)
|
||||
for _, str := range []string{"stdin", "stdout", "stderr"} {
|
||||
if s == str {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
return val, errors.Errorf("valid streams are STDIN, STDOUT and STDERR")
|
||||
}
|
|
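The helpers above are exercised by the unit tests removed further down in this diff. As a minimal illustrative sketch (not part of the diff itself; the test name is hypothetical and it assumes it sits in the same package container as the removed opts.go, e.g. in a _test.go file), this is the behaviour they encode:

package container

import "testing"

func TestDeviceAndVolumeHelpersSketch(t *testing.T) {
	// parseDevice fills in defaults: the in-container path defaults to the
	// host path and cgroup permissions default to "rwm".
	dm, err := parseDevice("/dev/snd")
	if err != nil || dm.PathInContainer != "/dev/snd" || dm.CgroupPermissions != "rwm" {
		t.Fatalf("unexpected mapping: %+v (err: %v)", dm, err)
	}

	// volumeSplitN only treats a colon as a separator when it does not follow
	// a Windows drive letter, so both of these specs split into exactly two parts.
	for _, spec := range []string{`C:\foo:d:`, `/tmp/q:/foo`} {
		if parts := volumeSplitN(spec, -1); len(parts) != 2 {
			t.Fatalf("%q: expected 2 parts, got %v", spec, parts)
		}
	}
}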
@@ -1,870 +0,0 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/pkg/testutil"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestValidateAttach(t *testing.T) {
|
||||
valid := []string{
|
||||
"stdin",
|
||||
"stdout",
|
||||
"stderr",
|
||||
"STDIN",
|
||||
"STDOUT",
|
||||
"STDERR",
|
||||
}
|
||||
if _, err := validateAttach("invalid"); err == nil {
|
||||
t.Fatal("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing")
|
||||
}
|
||||
|
||||
for _, attach := range valid {
|
||||
value, err := validateAttach(attach)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if value != strings.ToLower(attach) {
|
||||
t.Fatalf("Expected [%v], got [%v]", attach, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
|
||||
flags := pflag.NewFlagSet("run", pflag.ContinueOnError)
|
||||
flags.SetOutput(ioutil.Discard)
|
||||
flags.Usage = nil
|
||||
copts := addFlags(flags)
|
||||
if err := flags.Parse(args); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
// TODO: fix tests to accept ContainerConfig
|
||||
containerConfig, err := parse(flags, copts)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return containerConfig.Config, containerConfig.HostConfig, containerConfig.NetworkingConfig, err
|
||||
}
|
||||
|
||||
func parsetest(t *testing.T, args string) (*container.Config, *container.HostConfig, error) {
|
||||
config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " "))
|
||||
return config, hostConfig, err
|
||||
}
|
||||
|
||||
func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) {
|
||||
config, hostConfig, err := parsetest(t, args)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return config, hostConfig
|
||||
}
|
||||
|
||||
func TestParseRunLinks(t *testing.T) {
|
||||
if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
|
||||
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
|
||||
}
|
||||
if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
|
||||
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
|
||||
}
|
||||
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
|
||||
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRunAttach(t *testing.T) {
|
||||
if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
|
||||
if _, _, err := parsetest(t, "-a"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-a` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parsetest(t, "-a invalid"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-a invalid` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parsetest(t, "-a invalid -a stdout"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parsetest(t, "-a stdout -a stderr -d"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parsetest(t, "-a stdin -d"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-a stdin -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parsetest(t, "-a stdout -d"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-a stdout -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parsetest(t, "-a stderr -d"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-a stderr -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parsetest(t, "-d --rm"); err == nil {
|
||||
t.Fatal("Error parsing attach flags, `-d --rm` should be an error but is not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRunVolumes(t *testing.T) {
|
||||
|
||||
// A single volume
|
||||
arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`})
|
||||
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil {
|
||||
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes[arr[0]]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes)
|
||||
}
|
||||
|
||||
// Two volumes
|
||||
arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`})
|
||||
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil {
|
||||
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes[arr[0]]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes)
|
||||
} else if _, exists := config.Volumes[arr[1]]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes)
|
||||
}
|
||||
|
||||
// A single bind-mount
|
||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`})
|
||||
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] {
|
||||
t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes)
|
||||
}
|
||||
|
||||
// Two bind-mounts.
|
||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`})
|
||||
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||
}
|
||||
|
||||
// Two bind-mounts, first read-only, second read-write.
|
||||
// TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4
|
||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`})
|
||||
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||
}
|
||||
|
||||
// Similar to previous test but with alternate modes which are only supported by Linux
|
||||
if runtime.GOOS != "windows" {
|
||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{})
|
||||
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||
}
|
||||
|
||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{})
|
||||
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
|
||||
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
|
||||
}
|
||||
}
|
||||
|
||||
// One bind mount and one volume
|
||||
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`})
|
||||
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] {
|
||||
t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes[arr[1]]; !exists {
|
||||
t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes)
|
||||
}
|
||||
|
||||
// Root to non-c: drive letter (Windows specific)
|
||||
if runtime.GOOS == "windows" {
|
||||
arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`})
|
||||
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 {
|
||||
t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// setupPlatformVolume takes two arrays of volume specs - a Unix style
|
||||
// spec and a Windows style spec. Depending on the platform being unit tested,
|
||||
// it returns one of them, along with a volume string that would be passed
|
||||
// on the docker CLI (e.g. -v /bar -v /foo).
|
||||
func setupPlatformVolume(u []string, w []string) ([]string, string) {
|
||||
var a []string
|
||||
if runtime.GOOS == "windows" {
|
||||
a = w
|
||||
} else {
|
||||
a = u
|
||||
}
|
||||
s := ""
|
||||
for _, v := range a {
|
||||
s = s + "-v " + v + " "
|
||||
}
|
||||
return a, s
|
||||
}
|
||||
|
||||
// check if (a == c && b == d) || (a == d && b == c)
|
||||
// because maps are randomized
|
||||
func compareRandomizedStrings(a, b, c, d string) error {
|
||||
if a == c && b == d {
|
||||
return nil
|
||||
}
|
||||
if a == d && b == c {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("strings don't match")
|
||||
}
|
||||
|
||||
// Simple parse with MacAddress validation
|
||||
func TestParseWithMacAddress(t *testing.T) {
|
||||
invalidMacAddress := "--mac-address=invalidMacAddress"
|
||||
validMacAddress := "--mac-address=92:d0:c6:0a:29:33"
|
||||
if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" {
|
||||
t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err)
|
||||
}
|
||||
if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" {
|
||||
t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseWithMemory(t *testing.T) {
|
||||
invalidMemory := "--memory=invalid"
|
||||
_, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"})
|
||||
testutil.ErrorContains(t, err, invalidMemory)
|
||||
|
||||
_, hostconfig := mustParse(t, "--memory=1G")
|
||||
assert.Equal(t, int64(1073741824), hostconfig.Memory)
|
||||
}
|
||||
|
||||
func TestParseWithMemorySwap(t *testing.T) {
|
||||
invalidMemory := "--memory-swap=invalid"
|
||||
|
||||
_, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"})
|
||||
testutil.ErrorContains(t, err, invalidMemory)
|
||||
|
||||
_, hostconfig := mustParse(t, "--memory-swap=1G")
|
||||
assert.Equal(t, int64(1073741824), hostconfig.MemorySwap)
|
||||
|
||||
_, hostconfig = mustParse(t, "--memory-swap=-1")
|
||||
assert.Equal(t, int64(-1), hostconfig.MemorySwap)
|
||||
}
|
||||
|
||||
func TestParseHostname(t *testing.T) {
|
||||
validHostnames := map[string]string{
|
||||
"hostname": "hostname",
|
||||
"host-name": "host-name",
|
||||
"hostname123": "hostname123",
|
||||
"123hostname": "123hostname",
|
||||
"hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error",
|
||||
}
|
||||
hostnameWithDomain := "--hostname=hostname.domainname"
|
||||
hostnameWithDomainTld := "--hostname=hostname.domainname.tld"
|
||||
for hostname, expectedHostname := range validHostnames {
|
||||
if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname {
|
||||
t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname)
|
||||
}
|
||||
}
|
||||
if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" {
|
||||
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname)
|
||||
}
|
||||
if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" {
|
||||
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseWithExpose(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
":": "invalid port format for --expose: :",
|
||||
"8080:9090": "invalid port format for --expose: 8080:9090",
|
||||
"/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.",
|
||||
"/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.",
|
||||
"NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
||||
"NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
||||
"8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
|
||||
"1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`,
|
||||
}
|
||||
valids := map[string][]nat.Port{
|
||||
"8080/tcp": {"8080/tcp"},
|
||||
"8080/udp": {"8080/udp"},
|
||||
"8080/ncp": {"8080/ncp"},
|
||||
"8080-8080/udp": {"8080/udp"},
|
||||
"8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"},
|
||||
}
|
||||
for expose, expectedError := range invalids {
|
||||
if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError {
|
||||
t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err)
|
||||
}
|
||||
}
|
||||
for expose, exposedPorts := range valids {
|
||||
config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(config.ExposedPorts) != len(exposedPorts) {
|
||||
t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts))
|
||||
}
|
||||
for _, port := range exposedPorts {
|
||||
if _, ok := config.ExposedPorts[port]; !ok {
|
||||
t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Merge with actual published port
|
||||
config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(config.ExposedPorts) != 2 {
|
||||
t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts)
|
||||
}
|
||||
ports := []nat.Port{"80/tcp", "81/tcp"}
|
||||
for _, port := range ports {
|
||||
if _, ok := config.ExposedPorts[port]; !ok {
|
||||
t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseDevice(t *testing.T) {
|
||||
valids := map[string]container.DeviceMapping{
|
||||
"/dev/snd": {
|
||||
PathOnHost: "/dev/snd",
|
||||
PathInContainer: "/dev/snd",
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
"/dev/snd:rw": {
|
||||
PathOnHost: "/dev/snd",
|
||||
PathInContainer: "/dev/snd",
|
||||
CgroupPermissions: "rw",
|
||||
},
|
||||
"/dev/snd:/something": {
|
||||
PathOnHost: "/dev/snd",
|
||||
PathInContainer: "/something",
|
||||
CgroupPermissions: "rwm",
|
||||
},
|
||||
"/dev/snd:/something:rw": {
|
||||
PathOnHost: "/dev/snd",
|
||||
PathInContainer: "/something",
|
||||
CgroupPermissions: "rw",
|
||||
},
|
||||
}
|
||||
for device, deviceMapping := range valids {
|
||||
_, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(hostconfig.Devices) != 1 {
|
||||
t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices)
|
||||
}
|
||||
if hostconfig.Devices[0] != deviceMapping {
|
||||
t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestParseModes(t *testing.T) {
|
||||
// ipc ko
|
||||
if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" {
|
||||
t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err)
|
||||
}
|
||||
// ipc ok
|
||||
_, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !hostconfig.IpcMode.Valid() {
|
||||
t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode)
|
||||
}
|
||||
// pid ko
|
||||
if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" {
|
||||
t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err)
|
||||
}
|
||||
// pid ok
|
||||
_, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !hostconfig.PidMode.Valid() {
|
||||
t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode)
|
||||
}
|
||||
// uts ko
|
||||
if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" {
|
||||
t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err)
|
||||
}
|
||||
// uts ok
|
||||
_, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !hostconfig.UTSMode.Valid() {
|
||||
t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode)
|
||||
}
|
||||
// shm-size ko
|
||||
expectedErr := `invalid argument "a128m" for --shm-size=a128m: invalid size: 'a128m'`
|
||||
if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != expectedErr {
|
||||
t.Fatalf("Expected an error with message '%v', got %v", expectedErr, err)
|
||||
}
|
||||
// shm-size ok
|
||||
_, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hostconfig.ShmSize != 134217728 {
|
||||
t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRestartPolicy(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
"always:2:3": "invalid restart policy format",
|
||||
"on-failure:invalid": "maximum retry count must be an integer",
|
||||
}
|
||||
valids := map[string]container.RestartPolicy{
|
||||
"": {},
|
||||
"always": {
|
||||
Name: "always",
|
||||
MaximumRetryCount: 0,
|
||||
},
|
||||
"on-failure:1": {
|
||||
Name: "on-failure",
|
||||
MaximumRetryCount: 1,
|
||||
},
|
||||
}
|
||||
for restart, expectedError := range invalids {
|
||||
if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError {
|
||||
t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err)
|
||||
}
|
||||
}
|
||||
for restart, expected := range valids {
|
||||
_, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hostconfig.RestartPolicy != expected {
|
||||
t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRestartPolicyAutoRemove(t *testing.T) {
|
||||
expected := "Conflicting options: --restart and --rm"
|
||||
_, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"})
|
||||
if err == nil || err.Error() != expected {
|
||||
t.Fatalf("Expected error %v, but got none", expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseHealth(t *testing.T) {
|
||||
checkOk := func(args ...string) *container.HealthConfig {
|
||||
config, _, _, err := parseRun(args)
|
||||
if err != nil {
|
||||
t.Fatalf("%#v: %v", args, err)
|
||||
}
|
||||
return config.Healthcheck
|
||||
}
|
||||
checkError := func(expected string, args ...string) {
|
||||
config, _, _, err := parseRun(args)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error, but got %#v", config)
|
||||
}
|
||||
if err.Error() != expected {
|
||||
t.Fatalf("Expected %#v, got %#v", expected, err)
|
||||
}
|
||||
}
|
||||
health := checkOk("--no-healthcheck", "img", "cmd")
|
||||
if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" {
|
||||
t.Fatalf("--no-healthcheck failed: %#v", health)
|
||||
}
|
||||
|
||||
health = checkOk("--health-cmd=/check.sh -q", "img", "cmd")
|
||||
if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" {
|
||||
t.Fatalf("--health-cmd: got %#v", health.Test)
|
||||
}
|
||||
if health.Timeout != 0 {
|
||||
t.Fatalf("--health-cmd: timeout = %s", health.Timeout)
|
||||
}
|
||||
|
||||
checkError("--no-healthcheck conflicts with --health-* options",
|
||||
"--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd")
|
||||
|
||||
health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "--health-start-period=5s", "img", "cmd")
|
||||
if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond || health.StartPeriod != 5*time.Second {
|
||||
t.Fatalf("--health-*: got %#v", health)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseLoggingOpts(t *testing.T) {
|
||||
// logging opts ko
|
||||
if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" {
|
||||
t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err)
|
||||
}
|
||||
// logging opts ok
|
||||
_, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 {
|
||||
t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEnvfileVariables(t *testing.T) {
|
||||
e := "open nonexistent: no such file or directory"
|
||||
if runtime.GOOS == "windows" {
|
||||
e = "open nonexistent: The system cannot find the file specified."
|
||||
}
|
||||
// env ko
|
||||
if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e {
|
||||
t.Fatalf("Expected an error with message '%s', got %v", e, err)
|
||||
}
|
||||
// env ok
|
||||
config, _, _, err := parseRun([]string{"--env-file=testdata/valid.env", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" {
|
||||
t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env)
|
||||
}
|
||||
config, _, _, err = parseRun([]string{"--env-file=testdata/valid.env", "--env=ENV2=value2", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" {
|
||||
t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) {
|
||||
// UTF8 with BOM
|
||||
config, _, _, err := parseRun([]string{"--env-file=testdata/utf8.env", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"}
|
||||
if len(config.Env) != len(env) {
|
||||
t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env)
|
||||
}
|
||||
for i, v := range env {
|
||||
if config.Env[i] != v {
|
||||
t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i]))
|
||||
}
|
||||
}
|
||||
|
||||
// UTF16 with BOM
|
||||
e := "contains invalid utf8 bytes at line"
|
||||
if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) {
|
||||
t.Fatalf("Expected an error with message '%s', got %v", e, err)
|
||||
}
|
||||
// UTF16BE with BOM
|
||||
if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) {
|
||||
t.Fatalf("Expected an error with message '%s', got %v", e, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseLabelfileVariables(t *testing.T) {
|
||||
e := "open nonexistent: no such file or directory"
|
||||
if runtime.GOOS == "windows" {
|
||||
e = "open nonexistent: The system cannot find the file specified."
|
||||
}
|
||||
// label ko
|
||||
if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e {
|
||||
t.Fatalf("Expected an error with message '%s', got %v", e, err)
|
||||
}
|
||||
// label ok
|
||||
config, _, _, err := parseRun([]string{"--label-file=testdata/valid.label", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" {
|
||||
t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels)
|
||||
}
|
||||
config, _, _, err = parseRun([]string{"--label-file=testdata/valid.label", "--label=LABEL2=value2", "img", "cmd"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" {
|
||||
t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEntryPoint(t *testing.T) {
|
||||
config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" {
|
||||
t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint)
|
||||
}
|
||||
}
|
||||
|
||||
// This tests the cases for binds which are generated through
|
||||
// DecodeContainerConfig rather than Parse()
|
||||
func TestDecodeContainerConfigVolumes(t *testing.T) {
|
||||
|
||||
// Root to root
|
||||
bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`})
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// No destination path
|
||||
bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`})
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// // No destination path or mode
|
||||
bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`})
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// A whole lot of nothing
|
||||
bindsOrVols = []string{`:`}
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// A whole lot of nothing with no mode
|
||||
bindsOrVols = []string{`::`}
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// Too much including an invalid mode
|
||||
wTmp := os.Getenv("TEMP")
|
||||
bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp})
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// Windows specific error tests
|
||||
if runtime.GOOS == "windows" {
|
||||
// Volume which does not include a drive letter
|
||||
bindsOrVols = []string{`\tmp`}
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// Root to C-Drive
|
||||
bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`}
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// Container path that does not include a drive letter
|
||||
bindsOrVols = []string{`c:\windows:\somewhere`}
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
}
|
||||
|
||||
// Linux-specific error tests
|
||||
if runtime.GOOS != "windows" {
|
||||
// Just root
|
||||
bindsOrVols = []string{`/`}
|
||||
if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil {
|
||||
t.Fatalf("binds %v should have failed", bindsOrVols)
|
||||
}
|
||||
if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil {
|
||||
t.Fatalf("volume %v should have failed", bindsOrVols)
|
||||
}
|
||||
|
||||
// A single volume that looks like a bind mount passed in Volumes.
|
||||
// This should be handled as a bind mount, not a volume.
|
||||
vols := []string{`/foo:/bar`}
|
||||
if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil {
|
||||
t.Fatal("Volume /foo:/bar should have succeeded as a volume name")
|
||||
} else if hostConfig.Binds != nil {
|
||||
t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes[vols[0]]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes
|
||||
// to call DecodeContainerConfig. It effectively does what a client would
|
||||
// do when calling the daemon by constructing a JSON stream of a
|
||||
// ContainerConfigWrapper which is populated by the set of volume specs
|
||||
// passed into it. It returns a config and a hostconfig which can be
|
||||
// validated to ensure DecodeContainerConfig has manipulated the structures
|
||||
// correctly.
|
||||
func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) {
|
||||
var (
|
||||
b []byte
|
||||
err error
|
||||
c *container.Config
|
||||
h *container.HostConfig
|
||||
)
|
||||
w := runconfig.ContainerConfigWrapper{
|
||||
Config: &container.Config{
|
||||
Volumes: map[string]struct{}{},
|
||||
},
|
||||
HostConfig: &container.HostConfig{
|
||||
NetworkMode: "none",
|
||||
Binds: binds,
|
||||
},
|
||||
}
|
||||
for _, v := range volumes {
|
||||
w.Config.Volumes[v] = struct{}{}
|
||||
}
|
||||
if b, err = json.Marshal(w); err != nil {
|
||||
return nil, nil, errors.Errorf("Error on marshal %s", err.Error())
|
||||
}
|
||||
c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Errorf("Error parsing %s: %v", string(b), err)
|
||||
}
|
||||
if c == nil || h == nil {
|
||||
return nil, nil, errors.Errorf("Empty config or hostconfig")
|
||||
}
|
||||
|
||||
return c, h, err
|
||||
}
|
||||
|
||||
func TestVolumeSplitN(t *testing.T) {
|
||||
for _, x := range []struct {
|
||||
input string
|
||||
n int
|
||||
expected []string
|
||||
}{
|
||||
{`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}},
|
||||
{`:C:\foo:d:`, -1, nil},
|
||||
{`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}},
|
||||
{`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}},
|
||||
{`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}},
|
||||
|
||||
{`d:\`, -1, []string{`d:\`}},
|
||||
{`d:`, -1, []string{`d:`}},
|
||||
{`d:\path`, -1, []string{`d:\path`}},
|
||||
{`d:\path with space`, -1, []string{`d:\path with space`}},
|
||||
{`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}},
|
||||
{`c:\:d:\`, -1, []string{`c:\`, `d:\`}},
|
||||
{`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}},
|
||||
{`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}},
|
||||
{`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}},
|
||||
{`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}},
|
||||
{`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}},
|
||||
{`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}},
|
||||
{`name:D:`, -1, []string{`name`, `D:`}},
|
||||
{`name:D::rW`, -1, []string{`name`, `D:`, `rW`}},
|
||||
{`name:D::RW`, -1, []string{`name`, `D:`, `RW`}},
|
||||
{`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}},
|
||||
{`c:\Windows`, -1, []string{`c:\Windows`}},
|
||||
{`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}},
|
||||
|
||||
{``, -1, nil},
|
||||
{`.`, -1, []string{`.`}},
|
||||
{`..\`, -1, []string{`..\`}},
|
||||
{`c:\:..\`, -1, []string{`c:\`, `..\`}},
|
||||
{`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}},
|
||||
|
||||
// Cover directories with one-character name
|
||||
{`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}},
|
||||
} {
|
||||
res := volumeSplitN(x.input, x.n)
|
||||
if len(res) < len(x.expected) {
|
||||
t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res)
|
||||
}
|
||||
for i, e := range res {
|
||||
if e != x.expected[i] {
|
||||
t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateDevice(t *testing.T) {
|
||||
valid := []string{
|
||||
"/home",
|
||||
"/home:/home",
|
||||
"/home:/something/else",
|
||||
"/with space",
|
||||
"/home:/with space",
|
||||
"relative:/absolute-path",
|
||||
"hostPath:/containerPath:r",
|
||||
"/hostPath:/containerPath:rw",
|
||||
"/hostPath:/containerPath:mrw",
|
||||
}
|
||||
invalid := map[string]string{
|
||||
"": "bad format for path: ",
|
||||
"./": "./ is not an absolute path",
|
||||
"../": "../ is not an absolute path",
|
||||
"/:../": "../ is not an absolute path",
|
||||
"/:path": "path is not an absolute path",
|
||||
":": "bad format for path: :",
|
||||
"/tmp:": " is not an absolute path",
|
||||
":test": "bad format for path: :test",
|
||||
":/test": "bad format for path: :/test",
|
||||
"tmp:": " is not an absolute path",
|
||||
":test:": "bad format for path: :test:",
|
||||
"::": "bad format for path: ::",
|
||||
":::": "bad format for path: :::",
|
||||
"/tmp:::": "bad format for path: /tmp:::",
|
||||
":/tmp::": "bad format for path: :/tmp::",
|
||||
"path:ro": "ro is not an absolute path",
|
||||
"path:rr": "rr is not an absolute path",
|
||||
"a:/b:ro": "bad mode specified: ro",
|
||||
"a:/b:rr": "bad mode specified: rr",
|
||||
}
|
||||
|
||||
for _, path := range valid {
|
||||
if _, err := validateDevice(path); err != nil {
|
||||
t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
for path, expectedError := range invalid {
|
||||
if _, err := validateDevice(path); err == nil {
|
||||
t.Fatalf("ValidateDevice(`%q`) should have failed validation", path)
|
||||
} else {
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,49 +0,0 @@
package container

import (
	"fmt"
	"strings"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type pauseOptions struct {
	containers []string
}

// NewPauseCommand creates a new cobra.Command for `docker pause`
func NewPauseCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts pauseOptions

	return &cobra.Command{
		Use:   "pause CONTAINER [CONTAINER...]",
		Short: "Pause all processes within one or more containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			return runPause(dockerCli, &opts)
		},
	}
}

func runPause(dockerCli *command.DockerCli, opts *pauseOptions) error {
	ctx := context.Background()

	var errs []string
	errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause)
	for _, container := range opts.containers {
		if err := <-errChan; err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Fprintln(dockerCli.Out(), container)
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
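runPause fans its API calls out through parallelOperation (a helper defined elsewhere in this package, not shown in this hunk) and then drains exactly one result per requested container, collecting failures and echoing successes. A self-contained sketch of that drain-and-collect pattern, with a made-up stand-in channel in place of parallelOperation:

package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	containers := []string{"web", "db", "cache"}

	// Stand-in for parallelOperation: one error (possibly nil) per container.
	errChan := make(chan error, len(containers))
	errChan <- nil
	errChan <- errors.New("no such container: db")
	errChan <- nil

	var errs []string
	for _, c := range containers {
		if err := <-errChan; err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Println(c) // successes are echoed back, as in runPause
	}
	if len(errs) > 0 {
		fmt.Println("error:", strings.Join(errs, "\n"))
	}
}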
@@ -1,78 +0,0 @@
package container

import (
	"fmt"
	"strings"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/go-connections/nat"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type portOptions struct {
	container string

	port string
}

// NewPortCommand creates a new cobra.Command for `docker port`
func NewPortCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts portOptions

	cmd := &cobra.Command{
		Use:   "port CONTAINER [PRIVATE_PORT[/PROTO]]",
		Short: "List port mappings or a specific mapping for the container",
		Args:  cli.RequiresRangeArgs(1, 2),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.container = args[0]
			if len(args) > 1 {
				opts.port = args[1]
			}
			return runPort(dockerCli, &opts)
		},
	}
	return cmd
}

func runPort(dockerCli *command.DockerCli, opts *portOptions) error {
	ctx := context.Background()

	c, err := dockerCli.Client().ContainerInspect(ctx, opts.container)
	if err != nil {
		return err
	}

	if opts.port != "" {
		port := opts.port
		proto := "tcp"
		parts := strings.SplitN(port, "/", 2)

		if len(parts) == 2 && len(parts[1]) != 0 {
			port = parts[0]
			proto = parts[1]
		}
		natPort := port + "/" + proto
		newP, err := nat.NewPort(proto, port)
		if err != nil {
			return err
		}
		if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil {
			for _, frontend := range frontends {
				fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort)
			}
			return nil
		}
		return errors.Errorf("Error: No public port '%s' published for %s", natPort, opts.container)
	}

	for from, frontends := range c.NetworkSettings.Ports {
		for _, frontend := range frontends {
			fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort)
		}
	}

	return nil
}
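runPort accepts an optional PRIVATE_PORT[/PROTO] argument, defaults the protocol to tcp, and keys the container's published-port map by the resulting nat.Port. A small self-contained sketch of that lookup; the port map here is made up for illustration and stands in for c.NetworkSettings.Ports:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-connections/nat"
)

func main() {
	spec := "80" // what a user might pass as PRIVATE_PORT[/PROTO]
	port, proto := spec, "tcp"
	if parts := strings.SplitN(spec, "/", 2); len(parts) == 2 && parts[1] != "" {
		port, proto = parts[0], parts[1]
	}

	p, err := nat.NewPort(proto, port) // yields the key "80/tcp"
	if err != nil {
		panic(err)
	}

	// Hypothetical published-port map, shaped like the inspect response.
	ports := nat.PortMap{
		"80/tcp": []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "8080"}},
	}
	for _, frontend := range ports[p] {
		fmt.Printf("%s:%s\n", frontend.HostIP, frontend.HostPort)
	}
}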
@@ -1,78 +0,0 @@
package container

import (
	"fmt"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/opts"
	units "github.com/docker/go-units"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type pruneOptions struct {
	force  bool
	filter opts.FilterOpt
}

// NewPruneCommand returns a new cobra prune command for containers
func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command {
	opts := pruneOptions{filter: opts.NewFilterOpt()}

	cmd := &cobra.Command{
		Use:   "prune [OPTIONS]",
		Short: "Remove all stopped containers",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			spaceReclaimed, output, err := runPrune(dockerCli, opts)
			if err != nil {
				return err
			}
			if output != "" {
				fmt.Fprintln(dockerCli.Out(), output)
			}
			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
			return nil
		},
		Tags: map[string]string{"version": "1.25"},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation")
	flags.Var(&opts.filter, "filter", "Provide filter values (e.g. 'until=<timestamp>')")

	return cmd
}

const warning = `WARNING! This will remove all stopped containers.
Are you sure you want to continue?`

func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) {
	pruneFilters := command.PruneFilters(dockerCli, opts.filter.Value())

	if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
		return
	}

	report, err := dockerCli.Client().ContainersPrune(context.Background(), pruneFilters)
	if err != nil {
		return
	}

	if len(report.ContainersDeleted) > 0 {
		output = "Deleted Containers:\n"
		for _, id := range report.ContainersDeleted {
			output += id + "\n"
		}
		spaceReclaimed = report.SpaceReclaimed
	}

	return
}

// RunPrune calls the Container Prune API
// This returns the amount of space reclaimed and a detailed output string
func RunPrune(dockerCli *command.DockerCli, filter opts.FilterOpt) (uint64, string, error) {
	return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
}
@@ -1,118 +0,0 @@
package container

import (
	"testing"

	"github.com/docker/docker/opts"
	"github.com/stretchr/testify/assert"
)

func TestBuildContainerListOptions(t *testing.T) {
	filters := opts.NewFilterOpt()
	assert.NoError(t, filters.Set("foo=bar"))
	assert.NoError(t, filters.Set("baz=foo"))

	contexts := []struct {
		psOpts          *psOptions
		expectedAll     bool
		expectedSize    bool
		expectedLimit   int
		expectedFilters map[string]string
	}{
		{
			psOpts: &psOptions{
				all:    true,
				size:   true,
				last:   5,
				filter: filters,
			},
			expectedAll:   true,
			expectedSize:  true,
			expectedLimit: 5,
			expectedFilters: map[string]string{
				"foo": "bar",
				"baz": "foo",
			},
		},
		{
			psOpts: &psOptions{
				all:     true,
				size:    true,
				last:    -1,
				nLatest: true,
			},
			expectedAll:     true,
			expectedSize:    true,
			expectedLimit:   1,
			expectedFilters: make(map[string]string),
		},
		{
			psOpts: &psOptions{
				all:    true,
				size:   false,
				last:   5,
				filter: filters,
				// With .Size, size should be true
				format: "{{.Size}}",
			},
			expectedAll:   true,
			expectedSize:  true,
			expectedLimit: 5,
			expectedFilters: map[string]string{
				"foo": "bar",
				"baz": "foo",
			},
		},
		{
			psOpts: &psOptions{
				all:    true,
				size:   false,
				last:   5,
				filter: filters,
				// With .Size, size should be true
				format: "{{.Size}} {{.CreatedAt}} {{.Networks}}",
			},
			expectedAll:   true,
			expectedSize:  true,
			expectedLimit: 5,
			expectedFilters: map[string]string{
				"foo": "bar",
				"baz": "foo",
			},
		},
		{
			psOpts: &psOptions{
				all:    true,
				size:   false,
				last:   5,
				filter: filters,
				// Without .Size, size should be false
				format: "{{.CreatedAt}} {{.Networks}}",
			},
			expectedAll:   true,
			expectedSize:  false,
			expectedLimit: 5,
			expectedFilters: map[string]string{
				"foo": "bar",
				"baz": "foo",
			},
		},
	}

	for _, c := range contexts {
		options, err := buildContainerListOptions(c.psOpts)
		assert.NoError(t, err)

		assert.Equal(t, c.expectedAll, options.All)
		assert.Equal(t, c.expectedSize, options.Size)
		assert.Equal(t, c.expectedLimit, options.Limit)
		assert.Equal(t, len(c.expectedFilters), options.Filters.Len())

		for k, v := range c.expectedFilters {
			f := options.Filters
			if !f.ExactMatch(k, v) {
				t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k))
			}
		}
	}
}
@@ -1,51 +0,0 @@
package container

import (
	"fmt"
	"strings"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type renameOptions struct {
	oldName string
	newName string
}

// NewRenameCommand creates a new cobra.Command for `docker rename`
func NewRenameCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts renameOptions

	cmd := &cobra.Command{
		Use:   "rename CONTAINER NEW_NAME",
		Short: "Rename a container",
		Args:  cli.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.oldName = args[0]
			opts.newName = args[1]
			return runRename(dockerCli, &opts)
		},
	}
	return cmd
}

func runRename(dockerCli *command.DockerCli, opts *renameOptions) error {
	ctx := context.Background()

	oldName := strings.TrimSpace(opts.oldName)
	newName := strings.TrimSpace(opts.newName)

	if oldName == "" || newName == "" {
		return errors.New("Error: Neither old nor new names may be empty")
	}

	if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil {
		fmt.Fprintln(dockerCli.Err(), err)
		return errors.Errorf("Error: failed to rename container named %s", oldName)
	}
	return nil
}
@@ -1,62 +0,0 @@
package container

import (
	"fmt"
	"strings"
	"time"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type restartOptions struct {
	nSeconds        int
	nSecondsChanged bool

	containers []string
}

// NewRestartCommand creates a new cobra.Command for `docker restart`
func NewRestartCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts restartOptions

	cmd := &cobra.Command{
		Use:   "restart [OPTIONS] CONTAINER [CONTAINER...]",
		Short: "Restart one or more containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			opts.nSecondsChanged = cmd.Flags().Changed("time")
			return runRestart(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container")
	return cmd
}

func runRestart(dockerCli *command.DockerCli, opts *restartOptions) error {
	ctx := context.Background()
	var errs []string
	var timeout *time.Duration
	if opts.nSecondsChanged {
		timeoutValue := time.Duration(opts.nSeconds) * time.Second
		timeout = &timeoutValue
	}

	for _, name := range opts.containers {
		if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Fprintln(dockerCli.Out(), name)
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
@@ -1,73 +0,0 @@
package container

import (
	"fmt"
	"strings"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type rmOptions struct {
	rmVolumes bool
	rmLink    bool
	force     bool

	containers []string
}

// NewRmCommand creates a new cobra.Command for `docker rm`
func NewRmCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts rmOptions

	cmd := &cobra.Command{
		Use:   "rm [OPTIONS] CONTAINER [CONTAINER...]",
		Short: "Remove one or more containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			return runRm(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container")
	flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link")
	flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)")
	return cmd
}

func runRm(dockerCli *command.DockerCli, opts *rmOptions) error {
	ctx := context.Background()

	var errs []string
	options := types.ContainerRemoveOptions{
		RemoveVolumes: opts.rmVolumes,
		RemoveLinks:   opts.rmLink,
		Force:         opts.force,
	}

	errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error {
		container = strings.Trim(container, "/")
		if container == "" {
			return errors.New("Container name cannot be empty")
		}
		return dockerCli.Client().ContainerRemove(ctx, container, options)
	})

	for _, name := range opts.containers {
		if err := <-errChan; err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Fprintln(dockerCli.Out(), name)
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
@@ -1,296 +0,0 @@
package container

import (
	"fmt"
	"io"
	"net/http/httputil"
	"os"
	"runtime"
	"strings"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/libnetwork/resolvconf/dns"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"golang.org/x/net/context"
)

type runOptions struct {
	detach     bool
	sigProxy   bool
	name       string
	detachKeys string
}

// NewRunCommand creates a new `docker run` command
func NewRunCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts runOptions
	var copts *containerOptions

	cmd := &cobra.Command{
		Use:   "run [OPTIONS] IMAGE [COMMAND] [ARG...]",
		Short: "Run a command in a new container",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			copts.Image = args[0]
			if len(args) > 1 {
				copts.Args = args[1:]
			}
			return runRun(dockerCli, cmd.Flags(), &opts, copts)
		},
	}

	flags := cmd.Flags()
	flags.SetInterspersed(false)

	// These are flags not stored in Config/HostConfig
	flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID")
	flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process")
	flags.StringVar(&opts.name, "name", "", "Assign a name to the container")
	flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container")

	// Add an explicit help that doesn't have a `-h` to prevent the conflict
	// with hostname
	flags.Bool("help", false, "Print usage")

	command.AddTrustVerificationFlags(flags)
	copts = addFlags(flags)
	return cmd
}

func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) {
	if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 {
		fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.")
	}
}

// check the DNS settings passed via --dns against localhost regexp to warn if
// they are trying to set a DNS to a localhost address
func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) {
	for _, dnsIP := range hostConfig.DNS {
		if dns.IsLocalhost(dnsIP) {
			fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
			return
		}
	}
}

func runRun(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *containerOptions) error {
	containerConfig, err := parse(flags, copts)
	// just in case the parse does not exit
	if err != nil {
		reportError(dockerCli.Err(), "run", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	return runContainer(dockerCli, opts, copts, containerConfig)
}

func runContainer(dockerCli *command.DockerCli, opts *runOptions, copts *containerOptions, containerConfig *containerConfig) error {
	config := containerConfig.Config
	hostConfig := containerConfig.HostConfig
	stdout, stderr := dockerCli.Out(), dockerCli.Err()
	client := dockerCli.Client()

	// TODO: pass this as an argument
	cmdPath := "run"

	warnOnOomKillDisable(*hostConfig, stderr)
	warnOnLocalhostDNS(*hostConfig, stderr)

	config.ArgsEscaped = false

	if !opts.detach {
		if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
			return err
		}
	} else {
		if copts.attach.Len() != 0 {
			return errors.New("Conflicting options: -a and -d")
		}

		config.AttachStdin = false
		config.AttachStdout = false
		config.AttachStderr = false
		config.StdinOnce = false
	}

	// Disable sigProxy when in TTY mode
	if config.Tty {
		opts.sigProxy = false
	}

	// Telling the Windows daemon the initial size of the tty during start makes
	// a far better user experience rather than relying on subsequent resizes
	// to cause things to catch up.
	if runtime.GOOS == "windows" {
		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()
	}

	ctx, cancelFun := context.WithCancel(context.Background())

	createResponse, err := createContainer(ctx, dockerCli, containerConfig, opts.name)
	if err != nil {
		reportError(stderr, cmdPath, err.Error(), true)
		return runStartContainerErr(err)
	}
	if opts.sigProxy {
		sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID)
		defer signal.StopCatch(sigc)
	}
	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			fmt.Fprintln(stdout, createResponse.ID)
		}()
	}
	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
	if attach {
		if opts.detachKeys != "" {
			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
		}

		close, err := attachContainer(ctx, dockerCli, &errCh, config, createResponse.ID)
		defer close()
		if err != nil {
			return err
		}
	}

	statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, copts.autoRemove)

	// start the container
	if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
		// If we have holdHijackedConnection, we should notify
		// holdHijackedConnection that we are going to exit and wait
		// for it, so that the terminal state is restored.
		if attach {
			cancelFun()
			<-errCh
		}

		reportError(stderr, cmdPath, err.Error(), false)
		if copts.autoRemove {
			// wait for the container to be removed
			<-statusChan
		}
		return runStartContainerErr(err)
	}

	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
		if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil {
			fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
		}
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}

	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return nil
	}

	status := <-statusChan
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}

func attachContainer(
	ctx context.Context,
	dockerCli *command.DockerCli,
	errCh *chan error,
	config *container.Config,
	containerID string,
) (func(), error) {
	stdout, stderr := dockerCli.Out(), dockerCli.Err()
	var (
		out, cerr io.Writer
		in        io.ReadCloser
	)
	if config.AttachStdin {
		in = dockerCli.In()
	}
	if config.AttachStdout {
		out = stdout
	}
	if config.AttachStderr {
		if config.Tty {
			cerr = stdout
		} else {
			cerr = stderr
		}
	}

	options := types.ContainerAttachOptions{
		Stream:     true,
		Stdin:      config.AttachStdin,
		Stdout:     config.AttachStdout,
		Stderr:     config.AttachStderr,
		DetachKeys: dockerCli.ConfigFile().DetachKeys,
	}

	resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
	if errAttach != nil && errAttach != httputil.ErrPersistEOF {
		// ContainerAttach returning an ErrPersistEOF (connection closed)
		// means the server hit an error and put it in the hijacked connection;
		// keep the error and read the detailed error message from the hijacked connection later
		return nil, errAttach
	}

	*errCh = promise.Go(func() error {
		if errHijack := holdHijackedConnection(ctx, dockerCli, config.Tty, in, out, cerr, resp); errHijack != nil {
			return errHijack
		}
		return errAttach
	})
	return resp.Close, nil
}

// reportError is a utility method that prints a user-friendly message
// containing the error that occurred during parsing and a suggestion to get help
func reportError(stderr io.Writer, name string, str string, withHelp bool) {
	str = strings.TrimSuffix(str, ".") + "."
	if withHelp {
		str += "\nSee '" + os.Args[0] + " " + name + " --help'."
	}
	fmt.Fprintf(stderr, "%s: %s\n", os.Args[0], str)
}

// if container start fails with 'not found'/'no such' error, return 127
// if container start fails with 'permission denied' error, return 126
// return 125 for generic docker daemon failures
func runStartContainerErr(err error) error {
	trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
	statusError := cli.StatusError{StatusCode: 125}
	if strings.Contains(trimmedErr, "executable file not found") ||
		strings.Contains(trimmedErr, "no such file or directory") ||
		strings.Contains(trimmedErr, "system cannot find the file specified") {
		statusError = cli.StatusError{StatusCode: 127}
	} else if strings.Contains(trimmedErr, syscall.EACCES.Error()) {
		statusError = cli.StatusError{StatusCode: 126}
	}

	return statusError
}
@@ -1,179 +0,0 @@
package container

import (
	"fmt"
	"io"
	"net/http/httputil"
	"strings"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/signal"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type startOptions struct {
	attach        bool
	openStdin     bool
	detachKeys    string
	checkpoint    string
	checkpointDir string

	containers []string
}

// NewStartCommand creates a new cobra.Command for `docker start`
func NewStartCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts startOptions

	cmd := &cobra.Command{
		Use:   "start [OPTIONS] CONTAINER [CONTAINER...]",
		Short: "Start one or more stopped containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			return runStart(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals")
	flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN")
	flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container")

	flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint")
	flags.SetAnnotation("checkpoint", "experimental", nil)
	flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory")
	flags.SetAnnotation("checkpoint-dir", "experimental", nil)
	return cmd
}

func runStart(dockerCli *command.DockerCli, opts *startOptions) error {
	ctx, cancelFun := context.WithCancel(context.Background())

	if opts.attach || opts.openStdin {
		// We're going to attach to a container.
		// 1. Ensure we only have one container.
		if len(opts.containers) > 1 {
			return errors.New("You cannot start and attach multiple containers at once.")
		}

		// 2. Attach to the container.
		container := opts.containers[0]
		c, err := dockerCli.Client().ContainerInspect(ctx, container)
		if err != nil {
			return err
		}

		// We always use c.ID instead of container to maintain consistency during `docker start`
		if !c.Config.Tty {
			sigc := ForwardAllSignals(ctx, dockerCli, c.ID)
			defer signal.StopCatch(sigc)
		}

		if opts.detachKeys != "" {
			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
		}

		options := types.ContainerAttachOptions{
			Stream:     true,
			Stdin:      opts.openStdin && c.Config.OpenStdin,
			Stdout:     true,
			Stderr:     true,
			DetachKeys: dockerCli.ConfigFile().DetachKeys,
		}

		var in io.ReadCloser

		if options.Stdin {
			in = dockerCli.In()
		}

		resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options)
		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
			// ContainerAttach returning an ErrPersistEOF (connection closed)
			// means the server hit an error and already put it in the hijacked connection;
			// keep the error and read the detailed error message from the hijacked connection
			return errAttach
		}
		defer resp.Close()
		cErr := promise.Go(func() error {
			errHijack := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp)
			if errHijack == nil {
				return errAttach
			}
			return errHijack
		})

		// 3. We should open a channel for receiving the status code of the container,
		// no matter whether it is detached, removed on the daemon side (--rm) or exits normally.
		statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove)
		startOptions := types.ContainerStartOptions{
			CheckpointID:  opts.checkpoint,
			CheckpointDir: opts.checkpointDir,
		}

		// 4. Start the container.
		if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil {
			cancelFun()
			<-cErr
			if c.HostConfig.AutoRemove {
				// wait for the container to be removed
				<-statusChan
			}
			return err
		}

		// 5. Wait for attachment to break.
		if c.Config.Tty && dockerCli.Out().IsTerminal() {
			if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil {
				fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
			}
		}
		if attchErr := <-cErr; attchErr != nil {
			return attchErr
		}

		if status := <-statusChan; status != 0 {
			return cli.StatusError{StatusCode: status}
		}
	} else if opts.checkpoint != "" {
		if len(opts.containers) > 1 {
			return errors.New("You cannot restore multiple containers at once.")
		}
		container := opts.containers[0]
		startOptions := types.ContainerStartOptions{
			CheckpointID:  opts.checkpoint,
			CheckpointDir: opts.checkpointDir,
		}
		return dockerCli.Client().ContainerStart(ctx, container, startOptions)

	} else {
		// We're not going to attach to anything.
		// Start as many containers as we want.
		return startContainersWithoutAttachments(ctx, dockerCli, opts.containers)
	}

	return nil
}

func startContainersWithoutAttachments(ctx context.Context, dockerCli *command.DockerCli, containers []string) error {
	var failedContainers []string
	for _, container := range containers {
		if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil {
			fmt.Fprintln(dockerCli.Err(), err)
			failedContainers = append(failedContainers, container)
			continue
		}
		fmt.Fprintln(dockerCli.Out(), container)
	}

	if len(failedContainers) > 0 {
		return errors.Errorf("Error: failed to start containers: %s", strings.Join(failedContainers, ", "))
	}
	return nil
}
@@ -1,242 +0,0 @@
package container

import (
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/cli/command/formatter"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type statsOptions struct {
	all        bool
	noStream   bool
	format     string
	containers []string
}

// NewStatsCommand creates a new cobra.Command for `docker stats`
func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts statsOptions

	cmd := &cobra.Command{
		Use:   "stats [OPTIONS] [CONTAINER...]",
		Short: "Display a live stream of container(s) resource usage statistics",
		Args:  cli.RequiresMinArgs(0),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			return runStats(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)")
	flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result")
	flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template")
	return cmd
}

// runStats displays a live stream of resource usage statistics for one or more containers.
// This shows real-time information on CPU usage, memory usage, and network I/O.
func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
	showAll := len(opts.containers) == 0
	closeChan := make(chan error)

	ctx := context.Background()

	// monitorContainerEvents watches for container creation and removal (only
	// used when calling `docker stats` without arguments).
	monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) {
		f := filters.NewArgs()
		f.Add("type", "container")
		options := types.EventsOptions{
			Filters: f,
		}

		eventq, errq := dockerCli.Client().Events(ctx, options)

		// Whether we successfully subscribed to eventq or not, we can now
		// unblock the main goroutine.
		close(started)

		for {
			select {
			case event := <-eventq:
				c <- event
			case err := <-errq:
				closeChan <- err
				return
			}
		}
	}

	// Get the daemonOSType if not set already
	if daemonOSType == "" {
		svctx := context.Background()
		sv, err := dockerCli.Client().ServerVersion(svctx)
		if err != nil {
			return err
		}
		daemonOSType = sv.Os
	}

	// waitFirst is a WaitGroup used to wait for the first stat of each container to arrive
	waitFirst := &sync.WaitGroup{}

	cStats := stats{}
	// getContainerList simulates creation event for all previously existing
	// containers (only used when calling `docker stats` without arguments).
	getContainerList := func() {
		options := types.ContainerListOptions{
			All: opts.all,
		}
		cs, err := dockerCli.Client().ContainerList(ctx, options)
		if err != nil {
			closeChan <- err
		}
		for _, container := range cs {
			s := formatter.NewContainerStats(container.ID[:12], daemonOSType)
			if cStats.add(s) {
				waitFirst.Add(1)
				go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
			}
		}
	}

	if showAll {
		// If no names were specified, start a long running goroutine which
		// monitors container events. We make sure we're subscribed before
		// retrieving the list of running containers to avoid a race where we
		// would "miss" a creation.
		started := make(chan struct{})
		eh := command.InitEventHandler()
		eh.Handle("create", func(e events.Message) {
			if opts.all {
				s := formatter.NewContainerStats(e.ID[:12], daemonOSType)
				if cStats.add(s) {
					waitFirst.Add(1)
					go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
				}
			}
		})

		eh.Handle("start", func(e events.Message) {
			s := formatter.NewContainerStats(e.ID[:12], daemonOSType)
			if cStats.add(s) {
				waitFirst.Add(1)
				go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
			}
		})

		eh.Handle("die", func(e events.Message) {
			if !opts.all {
				cStats.remove(e.ID[:12])
			}
		})

		eventChan := make(chan events.Message)
		go eh.Watch(eventChan)
		go monitorContainerEvents(started, eventChan)
		defer close(eventChan)
		<-started

		// Start a short-lived goroutine to retrieve the initial list of
		// containers.
		getContainerList()
	} else {
		// Artificially send creation events for the containers we were asked to
		// monitor (same code path as the one used when monitoring all containers).
		for _, name := range opts.containers {
			s := formatter.NewContainerStats(name, daemonOSType)
			if cStats.add(s) {
				waitFirst.Add(1)
				go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
			}
		}

		// We don't expect any asynchronous errors: closeChan can be closed.
		close(closeChan)

		// Do a quick pause to detect any error with the provided list of
		// container names.
		time.Sleep(1500 * time.Millisecond)
		var errs []string
		cStats.mu.Lock()
		for _, c := range cStats.cs {
			if err := c.GetError(); err != nil {
				errs = append(errs, err.Error())
			}
		}
		cStats.mu.Unlock()
		if len(errs) > 0 {
			return errors.New(strings.Join(errs, "\n"))
		}
	}

	// before printing to screen, make sure each container has at least one valid stat
	waitFirst.Wait()
	format := opts.format
	if len(format) == 0 {
		if len(dockerCli.ConfigFile().StatsFormat) > 0 {
			format = dockerCli.ConfigFile().StatsFormat
		} else {
			format = formatter.TableFormatKey
		}
	}
	statsCtx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.NewStatsFormat(format, daemonOSType),
	}
	cleanScreen := func() {
		if !opts.noStream {
			fmt.Fprint(dockerCli.Out(), "\033[2J")
			fmt.Fprint(dockerCli.Out(), "\033[H")
		}
	}

	var err error
	for range time.Tick(500 * time.Millisecond) {
		cleanScreen()
		ccstats := []formatter.StatsEntry{}
		cStats.mu.Lock()
		for _, c := range cStats.cs {
			ccstats = append(ccstats, c.GetStatistics())
		}
		cStats.mu.Unlock()
		if err = formatter.ContainerStatsWrite(statsCtx, ccstats, daemonOSType); err != nil {
			break
		}
		if len(cStats.cs) == 0 && !showAll {
			break
		}
		if opts.noStream {
			break
		}
		select {
		case err, ok := <-closeChan:
			if ok {
				if err != nil {
					// this suppresses "unexpected EOF" in the cli when the
					// daemon restarts, so it shuts down cleanly
					if err == io.ErrUnexpectedEOF {
						return nil
					}
					return err
				}
			}
		default:
			// just skip
		}
	}
	return err
}
@@ -1,229 +0,0 @@
package container

import (
	"encoding/json"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli/command/formatter"
	"github.com/docker/docker/client"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

type stats struct {
	ostype string
	mu     sync.Mutex
	cs     []*formatter.ContainerStats
}

// daemonOSType is set once we have at least one stat for a container
// from the daemon. It is used to ensure we print the right header based
// on the daemon platform.
var daemonOSType string

func (s *stats) add(cs *formatter.ContainerStats) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, exists := s.isKnownContainer(cs.Container); !exists {
		s.cs = append(s.cs, cs)
		return true
	}
	return false
}

func (s *stats) remove(id string) {
	s.mu.Lock()
	if i, exists := s.isKnownContainer(id); exists {
		s.cs = append(s.cs[:i], s.cs[i+1:]...)
	}
	s.mu.Unlock()
}

func (s *stats) isKnownContainer(cid string) (int, bool) {
	for i, c := range s.cs {
		if c.Container == cid {
			return i, true
		}
	}
	return -1, false
}

func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) {
	logrus.Debugf("collecting stats for %s", s.Container)
	var (
		getFirst       bool
		previousCPU    uint64
		previousSystem uint64
		u              = make(chan error, 1)
	)

	defer func() {
		// if an error happens and we get no stats at all, release the wait group anyway
		if !getFirst {
			getFirst = true
			waitFirst.Done()
		}
	}()

	response, err := cli.ContainerStats(ctx, s.Container, streamStats)
	if err != nil {
		s.SetError(err)
		return
	}
	defer response.Body.Close()

	dec := json.NewDecoder(response.Body)
	go func() {
		for {
			var (
				v                      *types.StatsJSON
				memPercent, cpuPercent float64
				blkRead, blkWrite      uint64 // Only used on Linux
				mem, memLimit, memPerc float64
				pidsStatsCurrent       uint64
			)

			if err := dec.Decode(&v); err != nil {
				dec = json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body))
				u <- err
				if err == io.EOF {
					break
				}
				time.Sleep(100 * time.Millisecond)
				continue
			}

			daemonOSType = response.OSType

			if daemonOSType != "windows" {
				// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
				// got any data from cgroup
				if v.MemoryStats.Limit != 0 {
					memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
				}
				previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
				previousSystem = v.PreCPUStats.SystemUsage
				cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v)
				blkRead, blkWrite = calculateBlockIO(v.BlkioStats)
				mem = float64(v.MemoryStats.Usage)
				memLimit = float64(v.MemoryStats.Limit)
				memPerc = memPercent
				pidsStatsCurrent = v.PidsStats.Current
			} else {
				cpuPercent = calculateCPUPercentWindows(v)
				blkRead = v.StorageStats.ReadSizeBytes
				blkWrite = v.StorageStats.WriteSizeBytes
				mem = float64(v.MemoryStats.PrivateWorkingSet)
			}
			netRx, netTx := calculateNetwork(v.Networks)
			s.SetStatistics(formatter.StatsEntry{
				Name:             v.Name,
				ID:               v.ID,
				CPUPercentage:    cpuPercent,
				Memory:           mem,
				MemoryPercentage: memPerc,
				MemoryLimit:      memLimit,
				NetworkRx:        netRx,
				NetworkTx:        netTx,
				BlockRead:        float64(blkRead),
				BlockWrite:       float64(blkWrite),
				PidsCurrent:      pidsStatsCurrent,
			})
			u <- nil
			if !streamStats {
				return
			}
		}
	}()
	for {
		select {
		case <-time.After(2 * time.Second):
			// zero out the values if we have not received an update within
			// the specified duration.
			s.SetErrorAndReset(errors.New("timeout waiting for stats"))
			// if this is the first stat you get, release WaitGroup
			if !getFirst {
				getFirst = true
				waitFirst.Done()
			}
		case err := <-u:
			s.SetError(err)
			if err == io.EOF {
				break
			}
			if err != nil {
				continue
			}
			// if this is the first stat you get, release WaitGroup
			if !getFirst {
				getFirst = true
				waitFirst.Done()
			}
		}
		if !streamStats {
			return
		}
	}
}

func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	var (
		cpuPercent = 0.0
		// calculate the change for the cpu usage of the container in between readings
		cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
		// calculate the change for the entire system between readings
		systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
		onlineCPUs  = float64(v.CPUStats.OnlineCPUs)
	)

	if onlineCPUs == 0.0 {
		onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage))
	}
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0
	}
	return cpuPercent
}

func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
	// Max number of 100ns intervals between the previous time read and now
	possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
	possIntervals /= 100                                         // Convert to number of 100ns intervals
	possIntervals *= uint64(v.NumProcs)                          // Multiply by the number of processors

	// Intervals used
	intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage

	// Percentage avoiding divide-by-zero
	if possIntervals > 0 {
		return float64(intervalsUsed) / float64(possIntervals) * 100.0
	}
	return 0.00
}

func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) {
	for _, bioEntry := range blkio.IoServiceBytesRecursive {
		switch strings.ToLower(bioEntry.Op) {
		case "read":
			blkRead = blkRead + bioEntry.Value
		case "write":
			blkWrite = blkWrite + bioEntry.Value
		}
	}
	return
}

func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
	var rx, tx float64

	for _, v := range network {
		rx += float64(v.RxBytes)
		tx += float64(v.TxBytes)
	}
	return rx, tx
}
@@ -1,20 +0,0 @@
package container

import (
	"testing"

	"github.com/docker/docker/api/types"
)

func TestCalculateBlockIO(t *testing.T) {
	blkio := types.BlkioStats{
		IoServiceBytesRecursive: []types.BlkioStatEntry{{Major: 8, Minor: 0, Op: "read", Value: 1234}, {Major: 8, Minor: 1, Op: "read", Value: 4567}, {Major: 8, Minor: 0, Op: "write", Value: 123}, {Major: 8, Minor: 1, Op: "write", Value: 456}},
	}
	blkRead, blkWrite := calculateBlockIO(blkio)
	if blkRead != 5801 {
		t.Fatalf("blkRead = %d, want 5801", blkRead)
	}
	if blkWrite != 579 {
		t.Fatalf("blkWrite = %d, want 579", blkWrite)
	}
}
@@ -1,67 +0,0 @@
package container

import (
	"fmt"
	"strings"
	"time"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type stopOptions struct {
	time        int
	timeChanged bool

	containers []string
}

// NewStopCommand creates a new cobra.Command for `docker stop`
func NewStopCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts stopOptions

	cmd := &cobra.Command{
		Use:   "stop [OPTIONS] CONTAINER [CONTAINER...]",
		Short: "Stop one or more running containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			opts.timeChanged = cmd.Flags().Changed("time")
			return runStop(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it")
	return cmd
}

func runStop(dockerCli *command.DockerCli, opts *stopOptions) error {
	ctx := context.Background()

	var timeout *time.Duration
	if opts.timeChanged {
		timeoutValue := time.Duration(opts.time) * time.Second
		timeout = &timeoutValue
	}

	var errs []string

	errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error {
		return dockerCli.Client().ContainerStop(ctx, id, timeout)
	})
	for _, container := range opts.containers {
		if err := <-errChan; err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Fprintln(dockerCli.Out(), container)
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
BIN  cli/command/container/testdata/utf16.env (vendored)
Binary file not shown.

BIN  cli/command/container/testdata/utf16be.env (vendored)
Binary file not shown.

3  cli/command/container/testdata/utf8.env (vendored)
@@ -1,3 +0,0 @@
FOO=BAR
HELLO=您好
BAR=FOO

1  cli/command/container/testdata/valid.env (vendored)
@@ -1 +0,0 @@
ENV1=value1

1  cli/command/container/testdata/valid.label (vendored)
@@ -1 +0,0 @@
LABEL1=value1
@@ -1,57 +0,0 @@
package container

import (
	"fmt"
	"strings"
	"text/tabwriter"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type topOptions struct {
	container string

	args []string
}

// NewTopCommand creates a new cobra.Command for `docker top`
func NewTopCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts topOptions

	cmd := &cobra.Command{
		Use:   "top CONTAINER [ps OPTIONS]",
		Short: "Display the running processes of a container",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.container = args[0]
			opts.args = args[1:]
			return runTop(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.SetInterspersed(false)

	return cmd
}

func runTop(dockerCli *command.DockerCli, opts *topOptions) error {
	ctx := context.Background()

	procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args)
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
	fmt.Fprintln(w, strings.Join(procList.Titles, "\t"))

	for _, proc := range procList.Processes {
		fmt.Fprintln(w, strings.Join(proc, "\t"))
	}
	w.Flush()
	return nil
}
@@ -1,103 +0,0 @@
package container

import (
	"fmt"
	"os"
	gosignal "os/signal"
	"runtime"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/signal"
	"golang.org/x/net/context"
)

// resizeTtyTo resizes tty to specific height and width
func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) {
	if height == 0 && width == 0 {
		return
	}

	options := types.ResizeOptions{
		Height: height,
		Width:  width,
	}

	var err error
	if isExec {
		err = client.ContainerExecResize(ctx, id, options)
	} else {
		err = client.ContainerResize(ctx, id, options)
	}

	if err != nil {
		logrus.Debugf("Error resize: %s", err)
	}
}

// MonitorTtySize updates the container tty size when the terminal tty changes size
func MonitorTtySize(ctx context.Context, cli *command.DockerCli, id string, isExec bool) error {
	resizeTty := func() {
		height, width := cli.Out().GetTtySize()
		resizeTtyTo(ctx, cli.Client(), id, height, width, isExec)
	}

	resizeTty()

	if runtime.GOOS == "windows" {
		go func() {
			prevH, prevW := cli.Out().GetTtySize()
			for {
				time.Sleep(time.Millisecond * 250)
				h, w := cli.Out().GetTtySize()

				if prevW != w || prevH != h {
					resizeTty()
				}
				prevH = h
				prevW = w
			}
		}()
	} else {
		sigchan := make(chan os.Signal, 1)
		gosignal.Notify(sigchan, signal.SIGWINCH)
		go func() {
			for range sigchan {
				resizeTty()
			}
		}()
	}
	return nil
}

// ForwardAllSignals forwards signals to the container
func ForwardAllSignals(ctx context.Context, cli *command.DockerCli, cid string) chan os.Signal {
	sigc := make(chan os.Signal, 128)
	signal.CatchAll(sigc)
	go func() {
		for s := range sigc {
			if s == signal.SIGCHLD || s == signal.SIGPIPE {
				continue
			}
			var sig string
			for sigStr, sigN := range signal.SignalMap {
				if sigN == s {
					sig = sigStr
					break
				}
			}
			if sig == "" {
				fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s)
				continue
			}

			if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil {
				logrus.Debugf("Error sending signal: %s", err)
			}
		}
	}()
	return sigc
}
@@ -1,50 +0,0 @@
package container

import (
	"fmt"
	"strings"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type unpauseOptions struct {
	containers []string
}

// NewUnpauseCommand creates a new cobra.Command for `docker unpause`
func NewUnpauseCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts unpauseOptions

	cmd := &cobra.Command{
		Use:   "unpause CONTAINER [CONTAINER...]",
		Short: "Unpause all processes within one or more containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			return runUnpause(dockerCli, &opts)
		},
	}
	return cmd
}

func runUnpause(dockerCli *command.DockerCli, opts *unpauseOptions) error {
	ctx := context.Background()

	var errs []string
	errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerUnpause)
	for _, container := range opts.containers {
		if err := <-errChan; err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Fprintln(dockerCli.Out(), container)
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
@@ -1,134 +0,0 @@
package container

import (
	"fmt"
	"strings"

	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/docker/docker/opts"
	runconfigopts "github.com/docker/docker/runconfig/opts"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type updateOptions struct {
	blkioWeight        uint16
	cpuPeriod          int64
	cpuQuota           int64
	cpuRealtimePeriod  int64
	cpuRealtimeRuntime int64
	cpusetCpus         string
	cpusetMems         string
	cpuShares          int64
	memory             opts.MemBytes
	memoryReservation  opts.MemBytes
	memorySwap         opts.MemSwapBytes
	kernelMemory       opts.MemBytes
	restartPolicy      string
	cpus               opts.NanoCPUs

	nFlag int

	containers []string
}

// NewUpdateCommand creates a new cobra.Command for `docker update`
func NewUpdateCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts updateOptions

	cmd := &cobra.Command{
		Use:   "update [OPTIONS] CONTAINER [CONTAINER...]",
		Short: "Update configuration of one or more containers",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			opts.nFlag = cmd.Flags().NFlag()
			return runUpdate(dockerCli, &opts)
		},
	}

	flags := cmd.Flags()
	flags.Uint16Var(&opts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)")
	flags.Int64Var(&opts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period")
	flags.Int64Var(&opts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota")
	flags.Int64Var(&opts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds")
	flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"})
	flags.Int64Var(&opts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds")
	flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"})
	flags.StringVar(&opts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
	flags.StringVar(&opts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
	flags.Int64VarP(&opts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
	flags.VarP(&opts.memory, "memory", "m", "Memory limit")
	flags.Var(&opts.memoryReservation, "memory-reservation", "Memory soft limit")
	flags.Var(&opts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
	flags.Var(&opts.kernelMemory, "kernel-memory", "Kernel memory limit")
	flags.StringVar(&opts.restartPolicy, "restart", "", "Restart policy to apply when a container exits")

	flags.Var(&opts.cpus, "cpus", "Number of CPUs")
	flags.SetAnnotation("cpus", "version", []string{"1.29"})

	return cmd
}

func runUpdate(dockerCli *command.DockerCli, opts *updateOptions) error {
	var err error

	if opts.nFlag == 0 {
		return errors.New("You must provide one or more flags when using this command.")
	}

	var restartPolicy containertypes.RestartPolicy
	if opts.restartPolicy != "" {
		restartPolicy, err = runconfigopts.ParseRestartPolicy(opts.restartPolicy)
		if err != nil {
			return err
		}
	}

	resources := containertypes.Resources{
		BlkioWeight:        opts.blkioWeight,
		CpusetCpus:         opts.cpusetCpus,
		CpusetMems:         opts.cpusetMems,
		CPUShares:          opts.cpuShares,
		Memory:             opts.memory.Value(),
		MemoryReservation:  opts.memoryReservation.Value(),
		MemorySwap:         opts.memorySwap.Value(),
		KernelMemory:       opts.kernelMemory.Value(),
		CPUPeriod:          opts.cpuPeriod,
		CPUQuota:           opts.cpuQuota,
		CPURealtimePeriod:  opts.cpuRealtimePeriod,
		CPURealtimeRuntime: opts.cpuRealtimeRuntime,
		NanoCPUs:           opts.cpus.Value(),
	}

	updateConfig := containertypes.UpdateConfig{
		Resources:     resources,
		RestartPolicy: restartPolicy,
	}

	ctx := context.Background()

	var (
		warns []string
		errs  []string
	)
	for _, container := range opts.containers {
		r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig)
		if err != nil {
			errs = append(errs, err.Error())
		} else {
			fmt.Fprintln(dockerCli.Out(), container)
		}
		warns = append(warns, r.Warnings...)
	}
	if len(warns) > 0 {
		fmt.Fprintln(dockerCli.Out(), strings.Join(warns, "\n"))
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
@@ -1,142 +0,0 @@
package container

import (
	"strconv"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/cli/command"
	clientapi "github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func waitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) chan int {
	if len(containerID) == 0 {
		// containerID can never be empty
		panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
	}

	var removeErr error
	statusChan := make(chan int)
	exitCode := 125

	// Get events via Events API
	f := filters.NewArgs()
	f.Add("type", "container")
	f.Add("container", containerID)
	options := types.EventsOptions{
		Filters: f,
	}
	eventCtx, cancel := context.WithCancel(ctx)
	eventq, errq := dockerCli.Client().Events(eventCtx, options)

	eventProcessor := func(e events.Message) bool {
		stopProcessing := false
		switch e.Status {
		case "die":
			if v, ok := e.Actor.Attributes["exitCode"]; ok {
				code, cerr := strconv.Atoi(v)
				if cerr != nil {
					logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr)
				} else {
					exitCode = code
				}
			}
			if !waitRemove {
				stopProcessing = true
			} else {
				// If we are talking to an older daemon, `AutoRemove` is not supported.
				// We need to fall back to the old behavior, which is client-side removal
				if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") {
					go func() {
						removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true})
						if removeErr != nil {
							logrus.Errorf("error removing container: %v", removeErr)
							cancel() // cancel the event Q
						}
					}()
				}
			}
		case "detach":
			exitCode = 0
			stopProcessing = true
		case "destroy":
			stopProcessing = true
		}
		return stopProcessing
	}

	go func() {
		defer func() {
			statusChan <- exitCode // must always send an exit code or the caller will block
			cancel()
		}()

		for {
			select {
			case <-eventCtx.Done():
				if removeErr != nil {
					return
				}
			case evt := <-eventq:
				if eventProcessor(evt) {
					return
				}
			case err := <-errq:
				logrus.Errorf("error getting events from daemon: %v", err)
				return
			}
		}
	}()

	return statusChan
}

// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(ctx context.Context, dockerCli *command.DockerCli, containerID string) (bool, int, error) {
	c, err := dockerCli.Client().ContainerInspect(ctx, containerID)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if !clientapi.IsErrConnectionFailed(err) {
			return false, -1, err
		}
		return false, -1, nil
	}
	return c.State.Running, c.State.ExitCode, nil
}

func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error {
	if len(containers) == 0 {
		return nil
	}
	const defaultParallel int = 50
	sem := make(chan struct{}, defaultParallel)
	errChan := make(chan error)

	// make sure result is printed in correct order
|
||||
output := map[string]chan error{}
|
||||
for _, c := range containers {
|
||||
output[c] = make(chan error, 1)
|
||||
}
|
||||
go func() {
|
||||
for _, c := range containers {
|
||||
err := <-output[c]
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for _, c := range containers {
|
||||
sem <- struct{}{} // Wait for active queue sem to drain.
|
||||
go func(container string) {
|
||||
output[container] <- op(ctx, container)
|
||||
<-sem
|
||||
}(c)
|
||||
}
|
||||
}()
|
||||
return errChan
|
||||
}
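The parallelOperation helper above bounds fan-out with a buffered-channel semaphore while forwarding results in input order. A self-contained sketch of the same pattern, with a stubbed op instead of a Docker API call, so it runs on its own:

package main

import (
	"context"
	"fmt"
	"time"
)

func parallelOperation(ctx context.Context, items []string, op func(ctx context.Context, item string) error) chan error {
	if len(items) == 0 {
		return nil
	}
	const maxParallel = 50
	sem := make(chan struct{}, maxParallel) // semaphore bounding concurrency
	errChan := make(chan error)

	// One buffered channel per item so results are forwarded in input order.
	output := map[string]chan error{}
	for _, it := range items {
		output[it] = make(chan error, 1)
	}
	go func() {
		for _, it := range items {
			errChan <- <-output[it]
		}
	}()

	go func() {
		for _, it := range items {
			sem <- struct{}{} // wait for a free slot
			go func(item string) {
				output[item] <- op(ctx, item)
				<-sem
			}(it)
		}
	}()
	return errChan
}

func main() {
	items := []string{"a", "b", "c"}
	results := parallelOperation(context.Background(), items, func(ctx context.Context, item string) error {
		time.Sleep(10 * time.Millisecond) // stand-in for a remote call
		return nil
	})
	for range items {
		fmt.Println("result:", <-results)
	}
}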
@ -1,50 +0,0 @@
package container

import (
	"fmt"
	"strings"

	"github.com/docker/docker/cli"
	"github.com/docker/docker/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type waitOptions struct {
	containers []string
}

// NewWaitCommand creates a new cobra.Command for `docker wait`
func NewWaitCommand(dockerCli *command.DockerCli) *cobra.Command {
	var opts waitOptions

	cmd := &cobra.Command{
		Use:   "wait CONTAINER [CONTAINER...]",
		Short: "Block until one or more containers stop, then print their exit codes",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.containers = args
			return runWait(dockerCli, &opts)
		},
	}
	return cmd
}

func runWait(dockerCli *command.DockerCli, opts *waitOptions) error {
	ctx := context.Background()

	var errs []string
	for _, container := range opts.containers {
		status, err := dockerCli.Client().ContainerWait(ctx, container)
		if err != nil {
			errs = append(errs, err.Error())
			continue
		}
		fmt.Fprintf(dockerCli.Out(), "%d\n", status)
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
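Under the new scope the same check belongs in an API test rather than behind `docker wait`. A minimal sketch, assuming the client of this vintage where ContainerWait returns the exit status directly (the removed command above prints it with %d); the container name is a placeholder.

package main

import (
	"fmt"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Blocks until the container stops, then reports its exit code.
	status, err := cli.ContainerWait(context.Background(), "some-container")
	if err != nil {
		panic(err)
	}
	fmt.Printf("exit code: %d\n", status)
}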
@ -1,49 +0,0 @@
package command

import (
	"sync"

	"github.com/Sirupsen/logrus"
	eventtypes "github.com/docker/docker/api/types/events"
)

type eventProcessor func(eventtypes.Message, error) error

// EventHandler is abstract interface for user to customize
// own handle functions of each type of events
type EventHandler interface {
	Handle(action string, h func(eventtypes.Message))
	Watch(c <-chan eventtypes.Message)
}

// InitEventHandler initializes and returns an EventHandler
func InitEventHandler() EventHandler {
	return &eventHandler{handlers: make(map[string]func(eventtypes.Message))}
}

type eventHandler struct {
	handlers map[string]func(eventtypes.Message)
	mu       sync.Mutex
}

func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) {
	w.mu.Lock()
	w.handlers[action] = h
	w.mu.Unlock()
}

// Watch ranges over the passed in event chan and processes the events based on the
// handlers created for a given action.
// To stop watching, close the event chan.
func (w *eventHandler) Watch(c <-chan eventtypes.Message) {
	for e := range c {
		w.mu.Lock()
		h, exists := w.handlers[e.Action]
		w.mu.Unlock()
		if !exists {
			continue
		}
		logrus.Debugf("event handler: received event: %v", e)
		go h(e)
	}
}
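A tiny usage sketch of the Handle/Watch registry above, with a locally defined message type standing in for the engine's events.Message so the snippet runs on its own; the real implementation dispatches each handler in a goroutine, which is noted but kept synchronous here for determinism.

package main

import (
	"fmt"
	"sync"
)

// msg stands in for events.Message; only Action matters for dispatch.
type msg struct {
	Action string
	ID     string
}

type handler struct {
	mu       sync.Mutex
	handlers map[string]func(msg)
}

func (h *handler) Handle(action string, fn func(msg)) {
	h.mu.Lock()
	h.handlers[action] = fn
	h.mu.Unlock()
}

func (h *handler) Watch(c <-chan msg) {
	for e := range c {
		h.mu.Lock()
		fn, ok := h.handlers[e.Action]
		h.mu.Unlock()
		if ok {
			fn(e) // the removed code runs this as `go h(e)`
		}
	}
}

func main() {
	h := &handler{handlers: map[string]func(msg){}}
	h.Handle("die", func(e msg) { fmt.Println("container died:", e.ID) })

	events := make(chan msg, 2)
	events <- msg{Action: "die", ID: "abc123"}
	events <- msg{Action: "start", ID: "def456"} // no handler registered; ignored
	close(events)

	h.Watch(events) // returns once the channel is closed
}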
@ -1,52 +0,0 @@
package formatter

import "github.com/docker/docker/api/types"

const (
	defaultCheckpointFormat = "table {{.Name}}"

	checkpointNameHeader = "CHECKPOINT NAME"
)

// NewCheckpointFormat returns a format for use with a checkpoint Context
func NewCheckpointFormat(source string) Format {
	switch source {
	case TableFormatKey:
		return defaultCheckpointFormat
	}
	return Format(source)
}

// CheckpointWrite writes formatted checkpoints using the Context
func CheckpointWrite(ctx Context, checkpoints []types.Checkpoint) error {
	render := func(format func(subContext subContext) error) error {
		for _, checkpoint := range checkpoints {
			if err := format(&checkpointContext{c: checkpoint}); err != nil {
				return err
			}
		}
		return nil
	}
	return ctx.Write(newCheckpointContext(), render)
}

type checkpointContext struct {
	HeaderContext
	c types.Checkpoint
}

func newCheckpointContext() *checkpointContext {
	cpCtx := checkpointContext{}
	cpCtx.header = volumeHeaderContext{
		"Name": checkpointNameHeader,
	}
	return &cpCtx
}

func (c *checkpointContext) MarshalJSON() ([]byte, error) {
	return marshalJSON(c)
}

func (c *checkpointContext) Name() string {
	return c.c.Name
}
|
|
@ -1,55 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCheckpointContextFormatWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{Format: NewCheckpointFormat(defaultCheckpointFormat)},
|
||||
`CHECKPOINT NAME
|
||||
checkpoint-1
|
||||
checkpoint-2
|
||||
checkpoint-3
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewCheckpointFormat("{{.Name}}")},
|
||||
`checkpoint-1
|
||||
checkpoint-2
|
||||
checkpoint-3
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewCheckpointFormat("{{.Name}}:")},
|
||||
`checkpoint-1:
|
||||
checkpoint-2:
|
||||
checkpoint-3:
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
checkpoints := []types.Checkpoint{
|
||||
{"checkpoint-1"},
|
||||
{"checkpoint-2"},
|
||||
{"checkpoint-3"},
|
||||
}
|
||||
for _, testcase := range cases {
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := CheckpointWrite(testcase.context, checkpoints)
|
||||
if err != nil {
|
||||
assert.Error(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, out.String(), testcase.expected)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,259 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}"
|
||||
|
||||
containerIDHeader = "CONTAINER ID"
|
||||
namesHeader = "NAMES"
|
||||
commandHeader = "COMMAND"
|
||||
runningForHeader = "CREATED"
|
||||
statusHeader = "STATUS"
|
||||
portsHeader = "PORTS"
|
||||
mountsHeader = "MOUNTS"
|
||||
localVolumes = "LOCAL VOLUMES"
|
||||
networksHeader = "NETWORKS"
|
||||
)
|
||||
|
||||
// NewContainerFormat returns a Format for rendering using a Context
|
||||
func NewContainerFormat(source string, quiet bool, size bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
format := defaultContainerTableFormat
|
||||
if size {
|
||||
format += `\t{{.Size}}`
|
||||
}
|
||||
return Format(format)
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `container_id: {{.ID}}`
|
||||
}
|
||||
format := `container_id: {{.ID}}
|
||||
image: {{.Image}}
|
||||
command: {{.Command}}
|
||||
created_at: {{.CreatedAt}}
|
||||
status: {{- pad .Status 1 0}}
|
||||
names: {{.Names}}
|
||||
labels: {{- pad .Labels 1 0}}
|
||||
ports: {{- pad .Ports 1 0}}
|
||||
`
|
||||
if size {
|
||||
format += `size: {{.Size}}\n`
|
||||
}
|
||||
return Format(format)
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// ContainerWrite renders the context for a list of containers
|
||||
func ContainerWrite(ctx Context, containers []types.Container) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, container := range containers {
|
||||
err := format(&containerContext{trunc: ctx.Trunc, c: container})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ctx.Write(newContainerContext(), render)
|
||||
}
|
||||
|
||||
type containerHeaderContext map[string]string
|
||||
|
||||
func (c containerHeaderContext) Label(name string) string {
|
||||
n := strings.Split(name, ".")
|
||||
r := strings.NewReplacer("-", " ", "_", " ")
|
||||
h := r.Replace(n[len(n)-1])
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
type containerContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
c types.Container
|
||||
}
|
||||
|
||||
func newContainerContext() *containerContext {
|
||||
containerCtx := containerContext{}
|
||||
containerCtx.header = containerHeaderContext{
|
||||
"ID": containerIDHeader,
|
||||
"Names": namesHeader,
|
||||
"Image": imageHeader,
|
||||
"Command": commandHeader,
|
||||
"CreatedAt": createdAtHeader,
|
||||
"RunningFor": runningForHeader,
|
||||
"Ports": portsHeader,
|
||||
"Status": statusHeader,
|
||||
"Size": sizeHeader,
|
||||
"Labels": labelsHeader,
|
||||
"Mounts": mountsHeader,
|
||||
"LocalVolumes": localVolumes,
|
||||
"Networks": networksHeader,
|
||||
}
|
||||
return &containerCtx
|
||||
}
|
||||
|
||||
func (c *containerContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *containerContext) ID() string {
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.c.ID)
|
||||
}
|
||||
return c.c.ID
|
||||
}
|
||||
|
||||
func (c *containerContext) Names() string {
|
||||
names := stripNamePrefix(c.c.Names)
|
||||
if c.trunc {
|
||||
for _, name := range names {
|
||||
if len(strings.Split(name, "/")) == 1 {
|
||||
names = []string{name}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.Join(names, ",")
|
||||
}
|
||||
|
||||
func (c *containerContext) Image() string {
|
||||
if c.c.Image == "" {
|
||||
return "<no image>"
|
||||
}
|
||||
if c.trunc {
|
||||
if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) {
|
||||
return trunc
|
||||
}
|
||||
// truncate digest if no-trunc option was not selected
|
||||
ref, err := reference.ParseNormalizedNamed(c.c.Image)
|
||||
if err == nil {
|
||||
if nt, ok := ref.(reference.NamedTagged); ok {
|
||||
// case for when a tag is provided
|
||||
if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
|
||||
return reference.FamiliarString(namedTagged)
|
||||
}
|
||||
} else {
|
||||
// case for when a tag is not provided
|
||||
named := reference.TrimNamed(ref)
|
||||
return reference.FamiliarString(named)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return c.c.Image
|
||||
}
|
||||
|
||||
func (c *containerContext) Command() string {
|
||||
command := c.c.Command
|
||||
if c.trunc {
|
||||
command = stringutils.Ellipsis(command, 20)
|
||||
}
|
||||
return strconv.Quote(command)
|
||||
}
|
||||
|
||||
func (c *containerContext) CreatedAt() string {
|
||||
return time.Unix(int64(c.c.Created), 0).String()
|
||||
}
|
||||
|
||||
func (c *containerContext) RunningFor() string {
|
||||
createdAt := time.Unix(int64(c.c.Created), 0)
|
||||
return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago"
|
||||
}
|
||||
|
||||
func (c *containerContext) Ports() string {
|
||||
return api.DisplayablePorts(c.c.Ports)
|
||||
}
|
||||
|
||||
func (c *containerContext) Status() string {
|
||||
return c.c.Status
|
||||
}
|
||||
|
||||
func (c *containerContext) Size() string {
|
||||
srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3)
|
||||
sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3)
|
||||
|
||||
sf := srw
|
||||
if c.c.SizeRootFs > 0 {
|
||||
sf = fmt.Sprintf("%s (virtual %s)", srw, sv)
|
||||
}
|
||||
return sf
|
||||
}
|
||||
|
||||
func (c *containerContext) Labels() string {
|
||||
if c.c.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
var joinLabels []string
|
||||
for k, v := range c.c.Labels {
|
||||
joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return strings.Join(joinLabels, ",")
|
||||
}
|
||||
|
||||
func (c *containerContext) Label(name string) string {
|
||||
if c.c.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
return c.c.Labels[name]
|
||||
}
|
||||
|
||||
func (c *containerContext) Mounts() string {
|
||||
var name string
|
||||
var mounts []string
|
||||
for _, m := range c.c.Mounts {
|
||||
if m.Name == "" {
|
||||
name = m.Source
|
||||
} else {
|
||||
name = m.Name
|
||||
}
|
||||
if c.trunc {
|
||||
name = stringutils.Ellipsis(name, 15)
|
||||
}
|
||||
mounts = append(mounts, name)
|
||||
}
|
||||
return strings.Join(mounts, ",")
|
||||
}
|
||||
|
||||
func (c *containerContext) LocalVolumes() string {
|
||||
count := 0
|
||||
for _, m := range c.c.Mounts {
|
||||
if m.Driver == "local" {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", count)
|
||||
}
|
||||
|
||||
func (c *containerContext) Networks() string {
|
||||
if c.c.NetworkSettings == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
networks := []string{}
|
||||
for k := range c.c.NetworkSettings.Networks {
|
||||
networks = append(networks, k)
|
||||
}
|
||||
|
||||
return strings.Join(networks, ",")
|
||||
}
|
|
@ -1,385 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestContainerPsContext(t *testing.T) {
|
||||
containerID := stringid.GenerateRandomID()
|
||||
unix := time.Now().Add(-65 * time.Second).Unix()
|
||||
|
||||
var ctx containerContext
|
||||
cases := []struct {
|
||||
container types.Container
|
||||
trunc bool
|
||||
expValue string
|
||||
call func() string
|
||||
}{
|
||||
{types.Container{ID: containerID}, true, stringid.TruncateID(containerID), ctx.ID},
|
||||
{types.Container{ID: containerID}, false, containerID, ctx.ID},
|
||||
{types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", ctx.Names},
|
||||
{types.Container{Image: "ubuntu"}, true, "ubuntu", ctx.Image},
|
||||
{types.Container{Image: "verylongimagename"}, true, "verylongimagename", ctx.Image},
|
||||
{types.Container{Image: "verylongimagename"}, false, "verylongimagename", ctx.Image},
|
||||
{types.Container{
|
||||
Image: "a5a665ff33eced1e0803148700880edab4",
|
||||
ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5",
|
||||
},
|
||||
true,
|
||||
"a5a665ff33ec",
|
||||
ctx.Image,
|
||||
},
|
||||
{types.Container{
|
||||
Image: "a5a665ff33eced1e0803148700880edab4",
|
||||
ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5",
|
||||
},
|
||||
false,
|
||||
"a5a665ff33eced1e0803148700880edab4",
|
||||
ctx.Image,
|
||||
},
|
||||
{types.Container{Image: ""}, true, "<no image>", ctx.Image},
|
||||
{types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, ctx.Command},
|
||||
{types.Container{Created: unix}, true, time.Unix(unix, 0).String(), ctx.CreatedAt},
|
||||
{types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", ctx.Ports},
|
||||
{types.Container{Status: "RUNNING"}, true, "RUNNING", ctx.Status},
|
||||
{types.Container{SizeRw: 10}, true, "10B", ctx.Size},
|
||||
{types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10B (virtual 20B)", ctx.Size},
|
||||
{types.Container{}, true, "", ctx.Labels},
|
||||
{types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", ctx.Labels},
|
||||
{types.Container{Created: unix}, true, "About a minute ago", ctx.RunningFor},
|
||||
{types.Container{
|
||||
Mounts: []types.MountPoint{
|
||||
{
|
||||
Name: "this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set",
|
||||
Driver: "local",
|
||||
Source: "/a/path",
|
||||
},
|
||||
},
|
||||
}, true, "this-is-a-lo...", ctx.Mounts},
|
||||
{types.Container{
|
||||
Mounts: []types.MountPoint{
|
||||
{
|
||||
Driver: "local",
|
||||
Source: "/a/path",
|
||||
},
|
||||
},
|
||||
}, false, "/a/path", ctx.Mounts},
|
||||
{types.Container{
|
||||
Mounts: []types.MountPoint{
|
||||
{
|
||||
Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203",
|
||||
Driver: "local",
|
||||
Source: "/a/path",
|
||||
},
|
||||
},
|
||||
}, false, "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", ctx.Mounts},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = containerContext{c: c.container, trunc: c.trunc}
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
|
||||
c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}}
|
||||
ctx = containerContext{c: c1, trunc: true}
|
||||
|
||||
sid := ctx.Label("com.docker.swarm.swarm-id")
|
||||
node := ctx.Label("com.docker.swarm.node_name")
|
||||
if sid != "33" {
|
||||
t.Fatalf("Expected 33, was %s\n", sid)
|
||||
}
|
||||
|
||||
if node != "ubuntu" {
|
||||
t.Fatalf("Expected ubuntu, was %s\n", node)
|
||||
}
|
||||
|
||||
c2 := types.Container{}
|
||||
ctx = containerContext{c: c2, trunc: true}
|
||||
|
||||
label := ctx.Label("anything.really")
|
||||
if label != "" {
|
||||
t.Fatalf("Expected an empty string, was %s", label)
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerContextWrite(t *testing.T) {
|
||||
unixTime := time.Now().AddDate(0, 0, -1).Unix()
|
||||
expectedTime := time.Unix(unixTime, 0).String()
|
||||
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table Format
|
||||
{
|
||||
Context{Format: NewContainerFormat("table", false, true)},
|
||||
`CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE
|
||||
containerID1 ubuntu "" 24 hours ago foobar_baz 0B
|
||||
containerID2 ubuntu "" 24 hours ago foobar_bar 0B
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("table", false, false)},
|
||||
`CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
containerID1 ubuntu "" 24 hours ago foobar_baz
|
||||
containerID2 ubuntu "" 24 hours ago foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("table {{.Image}}", false, false)},
|
||||
"IMAGE\nubuntu\nubuntu\n",
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("table {{.Image}}", false, true)},
|
||||
"IMAGE\nubuntu\nubuntu\n",
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("table {{.Image}}", true, false)},
|
||||
"IMAGE\nubuntu\nubuntu\n",
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("table", true, false)},
|
||||
"containerID1\ncontainerID2\n",
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
Context{Format: NewContainerFormat("raw", false, false)},
|
||||
fmt.Sprintf(`container_id: containerID1
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: %s
|
||||
status:
|
||||
names: foobar_baz
|
||||
labels:
|
||||
ports:
|
||||
|
||||
container_id: containerID2
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: %s
|
||||
status:
|
||||
names: foobar_bar
|
||||
labels:
|
||||
ports:
|
||||
|
||||
`, expectedTime, expectedTime),
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("raw", false, true)},
|
||||
fmt.Sprintf(`container_id: containerID1
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: %s
|
||||
status:
|
||||
names: foobar_baz
|
||||
labels:
|
||||
ports:
|
||||
size: 0B
|
||||
|
||||
container_id: containerID2
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: %s
|
||||
status:
|
||||
names: foobar_bar
|
||||
labels:
|
||||
ports:
|
||||
size: 0B
|
||||
|
||||
`, expectedTime, expectedTime),
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("raw", true, false)},
|
||||
"container_id: containerID1\ncontainer_id: containerID2\n",
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{Format: "{{.Image}}"},
|
||||
"ubuntu\nubuntu\n",
|
||||
},
|
||||
{
|
||||
Context{Format: NewContainerFormat("{{.Image}}", false, true)},
|
||||
"ubuntu\nubuntu\n",
|
||||
},
|
||||
// Special headers for customerized table format
|
||||
{
|
||||
Context{Format: NewContainerFormat(`table {{truncate .ID 5}}\t{{json .Image}} {{.RunningFor}}/{{title .Status}}/{{pad .Ports 2 2}}.{{upper .Names}} {{lower .Status}}`, false, true)},
|
||||
`CONTAINER ID IMAGE CREATED/STATUS/ PORTS .NAMES STATUS
|
||||
conta "ubuntu" 24 hours ago//.FOOBAR_BAZ
|
||||
conta "ubuntu" 24 hours ago//.FOOBAR_BAR
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
containers := []types.Container{
|
||||
{ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime},
|
||||
{ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := ContainerWrite(testcase.context, containers)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerContextWriteWithNoContainers(t *testing.T) {
|
||||
out := bytes.NewBufferString("")
|
||||
containers := []types.Container{}
|
||||
|
||||
contexts := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{
|
||||
Format: "{{.Image}}",
|
||||
Output: out,
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Image}}",
|
||||
Output: out,
|
||||
},
|
||||
"IMAGE\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: NewContainerFormat("{{.Image}}", false, true),
|
||||
Output: out,
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: NewContainerFormat("table {{.Image}}", false, true),
|
||||
Output: out,
|
||||
},
|
||||
"IMAGE\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Image}}\t{{.Size}}",
|
||||
Output: out,
|
||||
},
|
||||
"IMAGE SIZE\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true),
|
||||
Output: out,
|
||||
},
|
||||
"IMAGE SIZE\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
ContainerWrite(context.context, containers)
|
||||
assert.Equal(t, context.expected, out.String())
|
||||
// Clean buffer
|
||||
out.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerContextWriteJSON(t *testing.T) {
|
||||
unix := time.Now().Add(-65 * time.Second).Unix()
|
||||
containers := []types.Container{
|
||||
{ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix},
|
||||
{ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix},
|
||||
}
|
||||
expectedCreated := time.Unix(unix, 0).String()
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
{"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID1", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_baz", "Networks": "", "Ports": "", "RunningFor": "About a minute ago", "Size": "0B", "Status": ""},
|
||||
{"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID2", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_bar", "Networks": "", "Ports": "", "RunningFor": "About a minute ago", "Size": "0B", "Status": ""},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(line), &m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, expectedJSONs[i], m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerContextWriteJSONField(t *testing.T) {
|
||||
containers := []types.Container{
|
||||
{ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"},
|
||||
{ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var s string
|
||||
if err := json.Unmarshal([]byte(line), &s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, containers[i].ID, s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerBackCompat(t *testing.T) {
|
||||
containers := []types.Container{{ID: "brewhaha"}}
|
||||
cases := []string{
|
||||
"ID",
|
||||
"Names",
|
||||
"Image",
|
||||
"Command",
|
||||
"CreatedAt",
|
||||
"RunningFor",
|
||||
"Ports",
|
||||
"Status",
|
||||
"Size",
|
||||
"Labels",
|
||||
"Mounts",
|
||||
}
|
||||
buf := bytes.NewBuffer(nil)
|
||||
for _, c := range cases {
|
||||
ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf}
|
||||
if err := ContainerWrite(ctx, containers); err != nil {
|
||||
t.Logf("could not render template for field '%s': %v", c, err)
|
||||
t.Fail()
|
||||
}
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
@ -1,35 +0,0 @@
package formatter

const (
	imageHeader        = "IMAGE"
	createdSinceHeader = "CREATED"
	createdAtHeader    = "CREATED AT"
	sizeHeader         = "SIZE"
	labelsHeader       = "LABELS"
	nameHeader         = "NAME"
	driverHeader       = "DRIVER"
	scopeHeader        = "SCOPE"
)

type subContext interface {
	FullHeader() interface{}
}

// HeaderContext provides the subContext interface for managing headers
type HeaderContext struct {
	header interface{}
}

// FullHeader returns the header as an interface
func (c *HeaderContext) FullHeader() interface{} {
	return c.header
}

func stripNamePrefix(ss []string) []string {
	sss := make([]string, len(ss))
	for i, s := range ss {
		sss[i] = s[1:]
	}

	return sss
}
@ -1,28 +0,0 @@
package formatter

import (
	"reflect"
	"strings"
	"testing"
)

func compareMultipleValues(t *testing.T, value, expected string) {
	// comma-separated values means probably a map input, which won't
	// be guaranteed to have the same order as our expected value
	// We'll create maps and use reflect.DeepEquals to check instead:
	entriesMap := make(map[string]string)
	expMap := make(map[string]string)
	entries := strings.Split(value, ",")
	expectedEntries := strings.Split(expected, ",")
	for _, entry := range entries {
		keyval := strings.Split(entry, "=")
		entriesMap[keyval[0]] = keyval[1]
	}
	for _, expected := range expectedEntries {
		keyval := strings.Split(expected, "=")
		expMap[keyval[0]] = keyval[1]
	}
	if !reflect.DeepEqual(expMap, entriesMap) {
		t.Fatalf("Expected entries: %v, got: %v", expected, value)
	}
}
@ -1,72 +0,0 @@
package formatter

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/pkg/archive"
)

const (
	defaultDiffTableFormat = "table {{.Type}}\t{{.Path}}"

	changeTypeHeader = "CHANGE TYPE"
	pathHeader       = "PATH"
)

// NewDiffFormat returns a format for use with a diff Context
func NewDiffFormat(source string) Format {
	switch source {
	case TableFormatKey:
		return defaultDiffTableFormat
	}
	return Format(source)
}

// DiffWrite writes formatted diff using the Context
func DiffWrite(ctx Context, changes []container.ContainerChangeResponseItem) error {

	render := func(format func(subContext subContext) error) error {
		for _, change := range changes {
			if err := format(&diffContext{c: change}); err != nil {
				return err
			}
		}
		return nil
	}
	return ctx.Write(newDiffContext(), render)
}

type diffContext struct {
	HeaderContext
	c container.ContainerChangeResponseItem
}

func newDiffContext() *diffContext {
	diffCtx := diffContext{}
	diffCtx.header = map[string]string{
		"Type": changeTypeHeader,
		"Path": pathHeader,
	}
	return &diffCtx
}

func (d *diffContext) MarshalJSON() ([]byte, error) {
	return marshalJSON(d)
}

func (d *diffContext) Type() string {
	var kind string
	switch d.c.Kind {
	case archive.ChangeModify:
		kind = "C"
	case archive.ChangeAdd:
		kind = "A"
	case archive.ChangeDelete:
		kind = "D"
	}
	return kind

}

func (d *diffContext) Path() string {
	return d.c.Path
}
|
|
@ -1,59 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDiffContextFormatWrite(t *testing.T) {
|
||||
// Check default output format (verbose and non-verbose mode) for table headers
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{Format: NewDiffFormat("table")},
|
||||
`CHANGE TYPE PATH
|
||||
C /var/log/app.log
|
||||
A /usr/app/app.js
|
||||
D /usr/app/old_app.js
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewDiffFormat("table {{.Path}}")},
|
||||
`PATH
|
||||
/var/log/app.log
|
||||
/usr/app/app.js
|
||||
/usr/app/old_app.js
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewDiffFormat("{{.Type}}: {{.Path}}")},
|
||||
`C: /var/log/app.log
|
||||
A: /usr/app/app.js
|
||||
D: /usr/app/old_app.js
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
diffs := []container.ContainerChangeResponseItem{
|
||||
{archive.ChangeModify, "/var/log/app.log"},
|
||||
{archive.ChangeAdd, "/usr/app/app.js"},
|
||||
{archive.ChangeDelete, "/usr/app/old_app.js"},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := DiffWrite(testcase.context, diffs)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,358 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
|
||||
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
|
||||
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
|
||||
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
|
||||
|
||||
typeHeader = "TYPE"
|
||||
totalHeader = "TOTAL"
|
||||
activeHeader = "ACTIVE"
|
||||
reclaimableHeader = "RECLAIMABLE"
|
||||
containersHeader = "CONTAINERS"
|
||||
sharedSizeHeader = "SHARED SIZE"
|
||||
uniqueSizeHeader = "UNIQUE SiZE"
|
||||
)
|
||||
|
||||
// DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct.
|
||||
type DiskUsageContext struct {
|
||||
Context
|
||||
Verbose bool
|
||||
LayersSize int64
|
||||
Images []*types.ImageSummary
|
||||
Containers []*types.Container
|
||||
Volumes []*types.Volume
|
||||
}
|
||||
|
||||
func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) {
|
||||
ctx.buffer = bytes.NewBufferString("")
|
||||
ctx.header = ""
|
||||
ctx.Format = Format(format)
|
||||
ctx.preFormat()
|
||||
|
||||
return ctx.parseFormat()
|
||||
}
|
||||
|
||||
//
|
||||
// NewDiskUsageFormat returns a format for rendering an DiskUsageContext
|
||||
func NewDiskUsageFormat(source string) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
format := defaultDiskUsageTableFormat
|
||||
return Format(format)
|
||||
case RawFormatKey:
|
||||
format := `type: {{.Type}}
|
||||
total: {{.TotalCount}}
|
||||
active: {{.Active}}
|
||||
size: {{.Size}}
|
||||
reclaimable: {{.Reclaimable}}
|
||||
`
|
||||
return Format(format)
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
func (ctx *DiskUsageContext) Write() (err error) {
|
||||
if ctx.Verbose == false {
|
||||
ctx.buffer = bytes.NewBufferString("")
|
||||
ctx.preFormat()
|
||||
|
||||
tmpl, err := ctx.parseFormat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ctx.contextFormat(tmpl, &diskUsageImagesContext{
|
||||
totalSize: ctx.LayersSize,
|
||||
images: ctx.Images,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ctx.contextFormat(tmpl, &diskUsageContainersContext{
|
||||
containers: ctx.Containers,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{
|
||||
volumes: ctx.Volumes,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
diskUsageContainersCtx := diskUsageContainersContext{containers: []*types.Container{}}
|
||||
diskUsageContainersCtx.header = map[string]string{
|
||||
"Type": typeHeader,
|
||||
"TotalCount": totalHeader,
|
||||
"Active": activeHeader,
|
||||
"Size": sizeHeader,
|
||||
"Reclaimable": reclaimableHeader,
|
||||
}
|
||||
ctx.postFormat(tmpl, &diskUsageContainersCtx)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// First images
|
||||
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Output.Write([]byte("Images space usage:\n\n"))
|
||||
for _, i := range ctx.Images {
|
||||
repo := "<none>"
|
||||
tag := "<none>"
|
||||
if len(i.RepoTags) > 0 && !isDangling(*i) {
|
||||
// Only show the first tag
|
||||
ref, err := reference.ParseNormalizedNamed(i.RepoTags[0])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if nt, ok := ref.(reference.NamedTagged); ok {
|
||||
repo = reference.FamiliarName(ref)
|
||||
tag = nt.Tag()
|
||||
}
|
||||
}
|
||||
|
||||
err = ctx.contextFormat(tmpl, &imageContext{
|
||||
repo: repo,
|
||||
tag: tag,
|
||||
trunc: true,
|
||||
i: *i,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
ctx.postFormat(tmpl, newImageContext())
|
||||
|
||||
// Now containers
|
||||
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
|
||||
tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, c := range ctx.Containers {
|
||||
// Don't display the virtual size
|
||||
c.SizeRootFs = 0
|
||||
err = ctx.contextFormat(tmpl, &containerContext{
|
||||
trunc: true,
|
||||
c: *c,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
ctx.postFormat(tmpl, newContainerContext())
|
||||
|
||||
// And volumes
|
||||
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
|
||||
tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, v := range ctx.Volumes {
|
||||
err = ctx.contextFormat(tmpl, &volumeContext{
|
||||
v: *v,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
ctx.postFormat(tmpl, newVolumeContext())
|
||||
return
|
||||
}
|
||||
|
||||
type diskUsageImagesContext struct {
|
||||
HeaderContext
|
||||
totalSize int64
|
||||
images []*types.ImageSummary
|
||||
}
|
||||
|
||||
func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *diskUsageImagesContext) Type() string {
|
||||
return "Images"
|
||||
}
|
||||
|
||||
func (c *diskUsageImagesContext) TotalCount() string {
|
||||
return fmt.Sprintf("%d", len(c.images))
|
||||
}
|
||||
|
||||
func (c *diskUsageImagesContext) Active() string {
|
||||
used := 0
|
||||
for _, i := range c.images {
|
||||
if i.Containers > 0 {
|
||||
used++
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", used)
|
||||
}
|
||||
|
||||
func (c *diskUsageImagesContext) Size() string {
|
||||
return units.HumanSize(float64(c.totalSize))
|
||||
|
||||
}
|
||||
|
||||
func (c *diskUsageImagesContext) Reclaimable() string {
|
||||
var used int64
|
||||
|
||||
for _, i := range c.images {
|
||||
if i.Containers != 0 {
|
||||
if i.VirtualSize == -1 || i.SharedSize == -1 {
|
||||
continue
|
||||
}
|
||||
used += i.VirtualSize - i.SharedSize
|
||||
}
|
||||
}
|
||||
|
||||
reclaimable := c.totalSize - used
|
||||
if c.totalSize > 0 {
|
||||
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize)
|
||||
}
|
||||
return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable)))
|
||||
}
|
||||
|
||||
type diskUsageContainersContext struct {
|
||||
HeaderContext
|
||||
verbose bool
|
||||
containers []*types.Container
|
||||
}
|
||||
|
||||
func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *diskUsageContainersContext) Type() string {
|
||||
return "Containers"
|
||||
}
|
||||
|
||||
func (c *diskUsageContainersContext) TotalCount() string {
|
||||
return fmt.Sprintf("%d", len(c.containers))
|
||||
}
|
||||
|
||||
func (c *diskUsageContainersContext) isActive(container types.Container) bool {
|
||||
return strings.Contains(container.State, "running") ||
|
||||
strings.Contains(container.State, "paused") ||
|
||||
strings.Contains(container.State, "restarting")
|
||||
}
|
||||
|
||||
func (c *diskUsageContainersContext) Active() string {
|
||||
used := 0
|
||||
for _, container := range c.containers {
|
||||
if c.isActive(*container) {
|
||||
used++
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", used)
|
||||
}
|
||||
|
||||
func (c *diskUsageContainersContext) Size() string {
|
||||
var size int64
|
||||
|
||||
for _, container := range c.containers {
|
||||
size += container.SizeRw
|
||||
}
|
||||
|
||||
return units.HumanSize(float64(size))
|
||||
}
|
||||
|
||||
func (c *diskUsageContainersContext) Reclaimable() string {
|
||||
var reclaimable int64
|
||||
var totalSize int64
|
||||
|
||||
for _, container := range c.containers {
|
||||
if !c.isActive(*container) {
|
||||
reclaimable += container.SizeRw
|
||||
}
|
||||
totalSize += container.SizeRw
|
||||
}
|
||||
|
||||
if totalSize > 0 {
|
||||
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable)))
|
||||
}
|
||||
|
||||
type diskUsageVolumesContext struct {
|
||||
HeaderContext
|
||||
verbose bool
|
||||
volumes []*types.Volume
|
||||
}
|
||||
|
||||
func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *diskUsageVolumesContext) Type() string {
|
||||
return "Local Volumes"
|
||||
}
|
||||
|
||||
func (c *diskUsageVolumesContext) TotalCount() string {
|
||||
return fmt.Sprintf("%d", len(c.volumes))
|
||||
}
|
||||
|
||||
func (c *diskUsageVolumesContext) Active() string {
|
||||
|
||||
used := 0
|
||||
for _, v := range c.volumes {
|
||||
if v.UsageData.RefCount > 0 {
|
||||
used++
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", used)
|
||||
}
|
||||
|
||||
func (c *diskUsageVolumesContext) Size() string {
|
||||
var size int64
|
||||
|
||||
for _, v := range c.volumes {
|
||||
if v.UsageData.Size != -1 {
|
||||
size += v.UsageData.Size
|
||||
}
|
||||
}
|
||||
|
||||
return units.HumanSize(float64(size))
|
||||
}
|
||||
|
||||
func (c *diskUsageVolumesContext) Reclaimable() string {
|
||||
var reclaimable int64
|
||||
var totalSize int64
|
||||
|
||||
for _, v := range c.volumes {
|
||||
if v.UsageData.Size != -1 {
|
||||
if v.UsageData.RefCount == 0 {
|
||||
reclaimable += v.UsageData.Size
|
||||
}
|
||||
totalSize += v.UsageData.Size
|
||||
}
|
||||
}
|
||||
|
||||
if totalSize > 0 {
|
||||
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable)))
|
||||
}
|
|
@ -1,125 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDiskUsageContextFormatWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context DiskUsageContext
|
||||
expected string
|
||||
}{
|
||||
// Check default output format (verbose and non-verbose mode) for table headers
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("table"),
|
||||
},
|
||||
Verbose: false},
|
||||
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
|
||||
Images 0 0 0B 0B
|
||||
Containers 0 0 0B 0B
|
||||
Local Volumes 0 0 0B 0B
|
||||
`,
|
||||
},
|
||||
{
|
||||
DiskUsageContext{Verbose: true},
|
||||
`Images space usage:
|
||||
|
||||
REPOSITORY TAG IMAGE ID CREATED ago SIZE SHARED SIZE UNIQUE SiZE CONTAINERS
|
||||
|
||||
Containers space usage:
|
||||
|
||||
CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED ago STATUS NAMES
|
||||
|
||||
Local Volumes space usage:
|
||||
|
||||
VOLUME NAME LINKS SIZE
|
||||
`,
|
||||
},
|
||||
// Errors
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: "{{InvalidFunction}}",
|
||||
},
|
||||
},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: "{{nil}}",
|
||||
},
|
||||
},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table Format
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("table"),
|
||||
},
|
||||
},
|
||||
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
|
||||
Images 0 0 0B 0B
|
||||
Containers 0 0 0B 0B
|
||||
Local Volumes 0 0 0B 0B
|
||||
`,
|
||||
},
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}"),
|
||||
},
|
||||
},
|
||||
`TYPE ACTIVE
|
||||
Images 0
|
||||
Containers 0
|
||||
Local Volumes 0
|
||||
`,
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("raw"),
|
||||
},
|
||||
},
|
||||
`type: Images
|
||||
total: 0
|
||||
active: 0
|
||||
size: 0B
|
||||
reclaimable: 0B
|
||||
|
||||
type: Containers
|
||||
total: 0
|
||||
active: 0
|
||||
size: 0B
|
||||
reclaimable: 0B
|
||||
|
||||
type: Local Volumes
|
||||
total: 0
|
||||
active: 0
|
||||
size: 0B
|
||||
reclaimable: 0B
|
||||
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
if err := testcase.context.Write(); err != nil {
|
||||
assert.Equal(t, testcase.expected, err.Error())
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
@ -1,119 +0,0 @@
package formatter

import (
	"bytes"
	"io"
	"strings"
	"text/tabwriter"
	"text/template"

	"github.com/docker/docker/pkg/templates"
	"github.com/pkg/errors"
)

// Format keys used to specify certain kinds of output formats
const (
	TableFormatKey  = "table"
	RawFormatKey    = "raw"
	PrettyFormatKey = "pretty"

	defaultQuietFormat = "{{.ID}}"
)

// Format is the format string rendered using the Context
type Format string

// IsTable returns true if the format is a table-type format
func (f Format) IsTable() bool {
	return strings.HasPrefix(string(f), TableFormatKey)
}

// Contains returns true if the format contains the substring
func (f Format) Contains(sub string) bool {
	return strings.Contains(string(f), sub)
}

// Context contains information required by the formatter to print the output as desired.
type Context struct {
	// Output is the output stream to which the formatted string is written.
	Output io.Writer
	// Format is used to choose raw, table or custom format for the output.
	Format Format
	// Trunc when set to true will truncate the output of certain fields such as Container ID.
	Trunc bool

	// internal element
	finalFormat string
	header      interface{}
	buffer      *bytes.Buffer
}

func (c *Context) preFormat() {
	c.finalFormat = string(c.Format)

	// TODO: handle this in the Format type
	if c.Format.IsTable() {
		c.finalFormat = c.finalFormat[len(TableFormatKey):]
	}

	c.finalFormat = strings.Trim(c.finalFormat, " ")
	r := strings.NewReplacer(`\t`, "\t", `\n`, "\n")
	c.finalFormat = r.Replace(c.finalFormat)
}

func (c *Context) parseFormat() (*template.Template, error) {
	tmpl, err := templates.Parse(c.finalFormat)
	if err != nil {
		return tmpl, errors.Errorf("Template parsing error: %v\n", err)
	}
	return tmpl, err
}

func (c *Context) postFormat(tmpl *template.Template, subContext subContext) {
	if c.Format.IsTable() {
		t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0)
		buffer := bytes.NewBufferString("")
		tmpl.Funcs(templates.HeaderFunctions).Execute(buffer, subContext.FullHeader())
		buffer.WriteTo(t)
		t.Write([]byte("\n"))
		c.buffer.WriteTo(t)
		t.Flush()
	} else {
		c.buffer.WriteTo(c.Output)
	}
}

func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error {
	if err := tmpl.Execute(c.buffer, subContext); err != nil {
		return errors.Errorf("Template parsing error: %v\n", err)
	}
	if c.Format.IsTable() && c.header != nil {
		c.header = subContext.FullHeader()
	}
	c.buffer.WriteString("\n")
	return nil
}

// SubFormat is a function type accepted by Write()
type SubFormat func(func(subContext) error) error

// Write the template to the buffer using this Context
func (c *Context) Write(sub subContext, f SubFormat) error {
	c.buffer = bytes.NewBufferString("")
	c.preFormat()

	tmpl, err := c.parseFormat()
	if err != nil {
		return err
	}

	subFormat := func(subContext subContext) error {
		return c.contextFormat(tmpl, subContext)
	}
	if err := f(subFormat); err != nil {
		return err
	}

	c.postFormat(tmpl, sub)
	return nil
}
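The Context above is a thin wrapper over Go's text/template plus tabwriter: a "table ..." format has its prefix stripped, each row is rendered through the template, and header plus rows are flushed through a tabwriter. A stand-alone sketch of that flow using only the standard library; the item type and format string are made up for illustration and are not part of the removed package.

package main

import (
	"os"
	"strings"
	"text/tabwriter"
	"text/template"
)

type item struct {
	ID   string
	Name string
}

func main() {
	const format = `table {{.ID}}\t{{.Name}}`

	// Strip the "table" prefix and unescape \t, roughly as Context.preFormat does.
	row := strings.TrimSpace(strings.TrimPrefix(format, "table"))
	row = strings.NewReplacer(`\t`, "\t", `\n`, "\n").Replace(row)

	tmpl := template.Must(template.New("row").Parse(row))

	// Same tabwriter settings as Context.postFormat.
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	defer w.Flush()

	w.Write([]byte("ID\tNAME\n")) // header row
	for _, it := range []item{{"c1", "foo"}, {"c2", "bar"}} {
		tmpl.Execute(w, it)
		w.Write([]byte("\n"))
	}
}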
|
|
@ -1,113 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHistoryTableFormat = "table {{.ID}}\t{{.CreatedSince}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}"
|
||||
nonHumanHistoryTableFormat = "table {{.ID}}\t{{.CreatedAt}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}"
|
||||
|
||||
historyIDHeader = "IMAGE"
|
||||
createdByHeader = "CREATED BY"
|
||||
commentHeader = "COMMENT"
|
||||
)
|
||||
|
||||
// NewHistoryFormat returns a format for rendering an HistoryContext
|
||||
func NewHistoryFormat(source string, quiet bool, human bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
switch {
|
||||
case quiet:
|
||||
return defaultQuietFormat
|
||||
case !human:
|
||||
return nonHumanHistoryTableFormat
|
||||
default:
|
||||
return defaultHistoryTableFormat
|
||||
}
|
||||
}
|
||||
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// HistoryWrite writes the context
|
||||
func HistoryWrite(ctx Context, human bool, histories []image.HistoryResponseItem) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, history := range histories {
|
||||
historyCtx := &historyContext{trunc: ctx.Trunc, h: history, human: human}
|
||||
if err := format(historyCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
historyCtx := &historyContext{}
|
||||
historyCtx.header = map[string]string{
|
||||
"ID": historyIDHeader,
|
||||
"CreatedSince": createdSinceHeader,
|
||||
"CreatedAt": createdAtHeader,
|
||||
"CreatedBy": createdByHeader,
|
||||
"Size": sizeHeader,
|
||||
"Comment": commentHeader,
|
||||
}
|
||||
return ctx.Write(historyCtx, render)
|
||||
}
|
||||
|
||||
type historyContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
human bool
|
||||
h image.HistoryResponseItem
|
||||
}
|
||||
|
||||
func (c *historyContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *historyContext) ID() string {
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.h.ID)
|
||||
}
|
||||
return c.h.ID
|
||||
}
|
||||
|
||||
func (c *historyContext) CreatedAt() string {
|
||||
var created string
|
||||
created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(c.h.Created), 0)))
|
||||
return created
|
||||
}
|
||||
|
||||
func (c *historyContext) CreatedSince() string {
|
||||
var created string
|
||||
created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(c.h.Created), 0)))
|
||||
return created + " ago"
|
||||
}
|
||||
|
||||
func (c *historyContext) CreatedBy() string {
|
||||
createdBy := strings.Replace(c.h.CreatedBy, "\t", " ", -1)
|
||||
if c.trunc {
|
||||
createdBy = stringutils.Ellipsis(createdBy, 45)
|
||||
}
|
||||
return createdBy
|
||||
}
|
||||
|
||||
func (c *historyContext) Size() string {
|
||||
size := ""
|
||||
if c.human {
|
||||
size = units.HumanSizeWithPrecision(float64(c.h.Size), 3)
|
||||
} else {
|
||||
size = strconv.FormatInt(c.h.Size, 10)
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (c *historyContext) Comment() string {
|
||||
return c.h.Comment
|
||||
}
|
|
@ -1,213 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"bytes"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type historyCase struct {
|
||||
historyCtx historyContext
|
||||
expValue string
|
||||
call func() string
|
||||
}
|
||||
|
||||
func TestHistoryContext_ID(t *testing.T) {
|
||||
id := stringid.GenerateRandomID()
|
||||
|
||||
var ctx historyContext
|
||||
cases := []historyCase{
|
||||
{
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{ID: id},
|
||||
trunc: false,
|
||||
}, id, ctx.ID,
|
||||
},
|
||||
{
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{ID: id},
|
||||
trunc: true,
|
||||
}, stringid.TruncateID(id), ctx.ID,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.historyCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistoryContext_CreatedSince(t *testing.T) {
|
||||
unixTime := time.Now().AddDate(0, 0, -7).Unix()
|
||||
expected := "7 days ago"
|
||||
|
||||
var ctx historyContext
|
||||
cases := []historyCase{
|
||||
{
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{Created: unixTime},
|
||||
trunc: false,
|
||||
human: true,
|
||||
}, expected, ctx.CreatedSince,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.historyCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistoryContext_CreatedBy(t *testing.T) {
|
||||
withTabs := `/bin/sh -c apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 && echo "deb http://nginx.org/packages/mainline/debian/ jessie nginx" >> /etc/apt/sources.list && apt-get update && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates nginx=${NGINX_VERSION} nginx-module-xslt nginx-module-geoip nginx-module-image-filter nginx-module-perl nginx-module-njs gettext-base && rm -rf /var/lib/apt/lists/*`
|
||||
expected := `/bin/sh -c apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 && echo "deb http://nginx.org/packages/mainline/debian/ jessie nginx" >> /etc/apt/sources.list && apt-get update && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates nginx=${NGINX_VERSION} nginx-module-xslt nginx-module-geoip nginx-module-image-filter nginx-module-perl nginx-module-njs gettext-base && rm -rf /var/lib/apt/lists/*`
|
||||
|
||||
var ctx historyContext
|
||||
cases := []historyCase{
|
||||
{
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{CreatedBy: withTabs},
|
||||
trunc: false,
|
||||
}, expected, ctx.CreatedBy,
|
||||
},
|
||||
{
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{CreatedBy: withTabs},
|
||||
trunc: true,
|
||||
}, stringutils.Ellipsis(expected, 45), ctx.CreatedBy,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.historyCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistoryContext_Size(t *testing.T) {
|
||||
size := int64(182964289)
|
||||
expected := "183MB"
|
||||
|
||||
var ctx historyContext
|
||||
cases := []historyCase{
|
||||
{
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{Size: size},
|
||||
trunc: false,
|
||||
human: true,
|
||||
}, expected, ctx.Size,
|
||||
}, {
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{Size: size},
|
||||
trunc: false,
|
||||
human: false,
|
||||
}, strconv.Itoa(182964289), ctx.Size,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.historyCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistoryContext_Comment(t *testing.T) {
|
||||
comment := "Some comment"
|
||||
|
||||
var ctx historyContext
|
||||
cases := []historyCase{
|
||||
{
|
||||
historyContext{
|
||||
h: image.HistoryResponseItem{Comment: comment},
|
||||
trunc: false,
|
||||
}, comment, ctx.Comment,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.historyCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistoryContext_Table(t *testing.T) {
|
||||
out := bytes.NewBufferString("")
|
||||
unixTime := time.Now().AddDate(0, 0, -1).Unix()
|
||||
histories := []image.HistoryResponseItem{
|
||||
{ID: "imageID1", Created: unixTime, CreatedBy: "/bin/bash ls && npm i && npm run test && karma -c karma.conf.js start && npm start && more commands here && the list goes on", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}},
|
||||
{ID: "imageID2", Created: unixTime, CreatedBy: "/bin/bash echo", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}},
|
||||
{ID: "imageID3", Created: unixTime, CreatedBy: "/bin/bash ls", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}},
|
||||
{ID: "imageID4", Created: unixTime, CreatedBy: "/bin/bash grep", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}},
|
||||
}
|
||||
expectedNoTrunc := `IMAGE CREATED CREATED BY SIZE COMMENT
|
||||
imageID1 24 hours ago /bin/bash ls && npm i && npm run test && karma -c karma.conf.js start && npm start && more commands here && the list goes on 183MB Hi
|
||||
imageID2 24 hours ago /bin/bash echo 183MB Hi
|
||||
imageID3 24 hours ago /bin/bash ls 183MB Hi
|
||||
imageID4 24 hours ago /bin/bash grep 183MB Hi
|
||||
`
|
||||
expectedTrunc := `IMAGE CREATED CREATED BY SIZE COMMENT
|
||||
imageID1 24 hours ago /bin/bash ls && npm i && npm run test && k... 183MB Hi
|
||||
imageID2 24 hours ago /bin/bash echo 183MB Hi
|
||||
imageID3 24 hours ago /bin/bash ls 183MB Hi
|
||||
imageID4 24 hours ago /bin/bash grep 183MB Hi
|
||||
`
|
||||
|
||||
contexts := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{Context{
|
||||
Format: NewHistoryFormat("table", false, true),
|
||||
Trunc: true,
|
||||
Output: out,
|
||||
},
|
||||
expectedTrunc,
|
||||
},
|
||||
{Context{
|
||||
Format: NewHistoryFormat("table", false, true),
|
||||
Trunc: false,
|
||||
Output: out,
|
||||
},
|
||||
expectedNoTrunc,
|
||||
},
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
HistoryWrite(context.context, true, histories)
|
||||
assert.Equal(t, context.expected, out.String())
|
||||
// Clean buffer
|
||||
out.Reset()
|
||||
}
|
||||
}
@ -1,272 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}"
|
||||
defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}"
|
||||
|
||||
imageIDHeader = "IMAGE ID"
|
||||
repositoryHeader = "REPOSITORY"
|
||||
tagHeader = "TAG"
|
||||
digestHeader = "DIGEST"
|
||||
)
|
||||
|
||||
// ImageContext contains image-specific information required by the formatter, encapsulating a Context struct.
|
||||
type ImageContext struct {
|
||||
Context
|
||||
Digest bool
|
||||
}
|
||||
|
||||
func isDangling(image types.ImageSummary) bool {
|
||||
return len(image.RepoTags) == 1 && image.RepoTags[0] == "<none>:<none>" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "<none>@<none>"
|
||||
}
|
||||
|
||||
// NewImageFormat returns a format for rendering an ImageContext
|
||||
func NewImageFormat(source string, quiet bool, digest bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
switch {
|
||||
case quiet:
|
||||
return defaultQuietFormat
|
||||
case digest:
|
||||
return defaultImageTableFormatWithDigest
|
||||
default:
|
||||
return defaultImageTableFormat
|
||||
}
|
||||
case RawFormatKey:
|
||||
switch {
|
||||
case quiet:
|
||||
return `image_id: {{.ID}}`
|
||||
case digest:
|
||||
return `repository: {{ .Repository }}
|
||||
tag: {{.Tag}}
|
||||
digest: {{.Digest}}
|
||||
image_id: {{.ID}}
|
||||
created_at: {{.CreatedAt}}
|
||||
virtual_size: {{.Size}}
|
||||
`
|
||||
default:
|
||||
return `repository: {{ .Repository }}
|
||||
tag: {{.Tag}}
|
||||
image_id: {{.ID}}
|
||||
created_at: {{.CreatedAt}}
|
||||
virtual_size: {{.Size}}
|
||||
`
|
||||
}
|
||||
}
|
||||
|
||||
format := Format(source)
|
||||
if format.IsTable() && digest && !format.Contains("{{.Digest}}") {
|
||||
format += "\t{{.Digest}}"
|
||||
}
|
||||
return format
|
||||
}
|
||||
|
||||
// ImageWrite writes the formatted images using the ImageContext
|
||||
func ImageWrite(ctx ImageContext, images []types.ImageSummary) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
return imageFormat(ctx, images, format)
|
||||
}
|
||||
return ctx.Write(newImageContext(), render)
|
||||
}
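A hedged sketch of how ImageWrite and NewImageFormat fit together, modelled on the tests later in this diff; the import path and the sample ImageSummary values are assumptions for illustration.

package main

import (
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli/command/formatter" // assumed pre-removal import path
)

func main() {
	images := []types.ImageSummary{
		{ID: "sha256:cafebabe", RepoTags: []string{"busybox:latest"}, Created: 1490000000},
	}
	imageCtx := formatter.ImageContext{
		Context: formatter.Context{
			// with digest=true the default table format gains a DIGEST column
			Format: formatter.NewImageFormat("table", false, true),
			Output: os.Stdout,
		},
		Digest: true,
	}
	if err := formatter.ImageWrite(imageCtx, images); err != nil {
		panic(err)
	}
}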
|
||||
|
||||
func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error {
|
||||
for _, image := range images {
|
||||
images := []*imageContext{}
|
||||
if isDangling(image) {
|
||||
images = append(images, &imageContext{
|
||||
trunc: ctx.Trunc,
|
||||
i: image,
|
||||
repo: "<none>",
|
||||
tag: "<none>",
|
||||
digest: "<none>",
|
||||
})
|
||||
} else {
|
||||
repoTags := map[string][]string{}
|
||||
repoDigests := map[string][]string{}
|
||||
|
||||
for _, refString := range image.RepoTags {
|
||||
ref, err := reference.ParseNormalizedNamed(refString)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if nt, ok := ref.(reference.NamedTagged); ok {
|
||||
familiarRef := reference.FamiliarName(ref)
|
||||
repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag())
|
||||
}
|
||||
}
|
||||
for _, refString := range image.RepoDigests {
|
||||
ref, err := reference.ParseNormalizedNamed(refString)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if c, ok := ref.(reference.Canonical); ok {
|
||||
familiarRef := reference.FamiliarName(ref)
|
||||
repoDigests[familiarRef] = append(repoDigests[familiarRef], c.Digest().String())
|
||||
}
|
||||
}
|
||||
|
||||
for repo, tags := range repoTags {
|
||||
digests := repoDigests[repo]
|
||||
|
||||
// Do not display digests as their own row
|
||||
delete(repoDigests, repo)
|
||||
|
||||
if !ctx.Digest {
|
||||
// Ignore digest references, just show tag once
|
||||
digests = nil
|
||||
}
|
||||
|
||||
for _, tag := range tags {
|
||||
if len(digests) == 0 {
|
||||
images = append(images, &imageContext{
|
||||
trunc: ctx.Trunc,
|
||||
i: image,
|
||||
repo: repo,
|
||||
tag: tag,
|
||||
digest: "<none>",
|
||||
})
|
||||
continue
|
||||
}
|
||||
// Display the digests for each tag
|
||||
for _, dgst := range digests {
|
||||
images = append(images, &imageContext{
|
||||
trunc: ctx.Trunc,
|
||||
i: image,
|
||||
repo: repo,
|
||||
tag: tag,
|
||||
digest: dgst,
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Show rows for remaining digest only references
|
||||
for repo, digests := range repoDigests {
|
||||
// If digests are displayed, show row per digest
|
||||
if ctx.Digest {
|
||||
for _, dgst := range digests {
|
||||
images = append(images, &imageContext{
|
||||
trunc: ctx.Trunc,
|
||||
i: image,
|
||||
repo: repo,
|
||||
tag: "<none>",
|
||||
digest: dgst,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
images = append(images, &imageContext{
|
||||
trunc: ctx.Trunc,
|
||||
i: image,
|
||||
repo: repo,
|
||||
tag: "<none>",
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, imageCtx := range images {
|
||||
if err := format(imageCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type imageContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
i types.ImageSummary
|
||||
repo string
|
||||
tag string
|
||||
digest string
|
||||
}
|
||||
|
||||
func newImageContext() *imageContext {
|
||||
imageCtx := imageContext{}
|
||||
imageCtx.header = map[string]string{
|
||||
"ID": imageIDHeader,
|
||||
"Repository": repositoryHeader,
|
||||
"Tag": tagHeader,
|
||||
"Digest": digestHeader,
|
||||
"CreatedSince": createdSinceHeader,
|
||||
"CreatedAt": createdAtHeader,
|
||||
"Size": sizeHeader,
|
||||
"Containers": containersHeader,
|
||||
"VirtualSize": sizeHeader,
|
||||
"SharedSize": sharedSizeHeader,
|
||||
"UniqueSize": uniqueSizeHeader,
|
||||
}
|
||||
return &imageCtx
|
||||
}
|
||||
|
||||
func (c *imageContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *imageContext) ID() string {
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.i.ID)
|
||||
}
|
||||
return c.i.ID
|
||||
}
|
||||
|
||||
func (c *imageContext) Repository() string {
|
||||
return c.repo
|
||||
}
|
||||
|
||||
func (c *imageContext) Tag() string {
|
||||
return c.tag
|
||||
}
|
||||
|
||||
func (c *imageContext) Digest() string {
|
||||
return c.digest
|
||||
}
|
||||
|
||||
func (c *imageContext) CreatedSince() string {
|
||||
createdAt := time.Unix(int64(c.i.Created), 0)
|
||||
return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago"
|
||||
}
|
||||
|
||||
func (c *imageContext) CreatedAt() string {
|
||||
return time.Unix(int64(c.i.Created), 0).String()
|
||||
}
|
||||
|
||||
func (c *imageContext) Size() string {
|
||||
return units.HumanSizeWithPrecision(float64(c.i.Size), 3)
|
||||
}
|
||||
|
||||
func (c *imageContext) Containers() string {
|
||||
if c.i.Containers == -1 {
|
||||
return "N/A"
|
||||
}
|
||||
return fmt.Sprintf("%d", c.i.Containers)
|
||||
}
|
||||
|
||||
func (c *imageContext) VirtualSize() string {
|
||||
return units.HumanSize(float64(c.i.VirtualSize))
|
||||
}
|
||||
|
||||
func (c *imageContext) SharedSize() string {
|
||||
if c.i.SharedSize == -1 {
|
||||
return "N/A"
|
||||
}
|
||||
return units.HumanSize(float64(c.i.SharedSize))
|
||||
}
|
||||
|
||||
func (c *imageContext) UniqueSize() string {
|
||||
if c.i.VirtualSize == -1 || c.i.SharedSize == -1 {
|
||||
return "N/A"
|
||||
}
|
||||
return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize))
|
||||
}
@ -1,327 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestImageContext(t *testing.T) {
|
||||
imageID := stringid.GenerateRandomID()
|
||||
unix := time.Now().Unix()
|
||||
|
||||
var ctx imageContext
|
||||
cases := []struct {
|
||||
imageCtx imageContext
|
||||
expValue string
|
||||
call func() string
|
||||
}{
|
||||
{imageContext{
|
||||
i: types.ImageSummary{ID: imageID},
|
||||
trunc: true,
|
||||
}, stringid.TruncateID(imageID), ctx.ID},
|
||||
{imageContext{
|
||||
i: types.ImageSummary{ID: imageID},
|
||||
trunc: false,
|
||||
}, imageID, ctx.ID},
|
||||
{imageContext{
|
||||
i: types.ImageSummary{Size: 10, VirtualSize: 10},
|
||||
trunc: true,
|
||||
}, "10B", ctx.Size},
|
||||
{imageContext{
|
||||
i: types.ImageSummary{Created: unix},
|
||||
trunc: true,
|
||||
}, time.Unix(unix, 0).String(), ctx.CreatedAt},
|
||||
// FIXME
|
||||
// {imageContext{
|
||||
// i: types.ImageSummary{Created: unix},
|
||||
// trunc: true,
|
||||
// }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince},
|
||||
{imageContext{
|
||||
i: types.ImageSummary{},
|
||||
repo: "busybox",
|
||||
}, "busybox", ctx.Repository},
|
||||
{imageContext{
|
||||
i: types.ImageSummary{},
|
||||
tag: "latest",
|
||||
}, "latest", ctx.Tag},
|
||||
{imageContext{
|
||||
i: types.ImageSummary{},
|
||||
digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a",
|
||||
}, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", ctx.Digest},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.imageCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageContextWrite(t *testing.T) {
|
||||
unixTime := time.Now().AddDate(0, 0, -1).Unix()
|
||||
expectedTime := time.Unix(unixTime, 0).String()
|
||||
|
||||
cases := []struct {
|
||||
context ImageContext
|
||||
expected string
|
||||
}{
|
||||
// Errors
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: "{{InvalidFunction}}",
|
||||
},
|
||||
},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: "{{nil}}",
|
||||
},
|
||||
},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table Format
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table", false, false),
|
||||
},
|
||||
},
|
||||
`REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
image tag1 imageID1 24 hours ago 0B
|
||||
image tag2 imageID2 24 hours ago 0B
|
||||
<none> <none> imageID3 24 hours ago 0B
|
||||
`,
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table {{.Repository}}", false, false),
|
||||
},
|
||||
},
|
||||
"REPOSITORY\nimage\nimage\n<none>\n",
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table {{.Repository}}", false, true),
|
||||
},
|
||||
Digest: true,
|
||||
},
|
||||
`REPOSITORY DIGEST
|
||||
image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
|
||||
image <none>
|
||||
<none> <none>
|
||||
`,
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table {{.Repository}}", true, false),
|
||||
},
|
||||
},
|
||||
"REPOSITORY\nimage\nimage\n<none>\n",
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table", true, false),
|
||||
},
|
||||
},
|
||||
"imageID1\nimageID2\nimageID3\n",
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table", false, true),
|
||||
},
|
||||
Digest: true,
|
||||
},
|
||||
`REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE
|
||||
image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0B
|
||||
image tag2 <none> imageID2 24 hours ago 0B
|
||||
<none> <none> <none> imageID3 24 hours ago 0B
|
||||
`,
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table", true, true),
|
||||
},
|
||||
Digest: true,
|
||||
},
|
||||
"imageID1\nimageID2\nimageID3\n",
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("raw", false, false),
|
||||
},
|
||||
},
|
||||
fmt.Sprintf(`repository: image
|
||||
tag: tag1
|
||||
image_id: imageID1
|
||||
created_at: %s
|
||||
virtual_size: 0B
|
||||
|
||||
repository: image
|
||||
tag: tag2
|
||||
image_id: imageID2
|
||||
created_at: %s
|
||||
virtual_size: 0B
|
||||
|
||||
repository: <none>
|
||||
tag: <none>
|
||||
image_id: imageID3
|
||||
created_at: %s
|
||||
virtual_size: 0B
|
||||
|
||||
`, expectedTime, expectedTime, expectedTime),
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("raw", false, true),
|
||||
},
|
||||
Digest: true,
|
||||
},
|
||||
fmt.Sprintf(`repository: image
|
||||
tag: tag1
|
||||
digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
|
||||
image_id: imageID1
|
||||
created_at: %s
|
||||
virtual_size: 0B
|
||||
|
||||
repository: image
|
||||
tag: tag2
|
||||
digest: <none>
|
||||
image_id: imageID2
|
||||
created_at: %s
|
||||
virtual_size: 0B
|
||||
|
||||
repository: <none>
|
||||
tag: <none>
|
||||
digest: <none>
|
||||
image_id: imageID3
|
||||
created_at: %s
|
||||
virtual_size: 0B
|
||||
|
||||
`, expectedTime, expectedTime, expectedTime),
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("raw", true, false),
|
||||
},
|
||||
},
|
||||
`image_id: imageID1
|
||||
image_id: imageID2
|
||||
image_id: imageID3
|
||||
`,
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("{{.Repository}}", false, false),
|
||||
},
|
||||
},
|
||||
"image\nimage\n<none>\n",
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("{{.Repository}}", false, true),
|
||||
},
|
||||
Digest: true,
|
||||
},
|
||||
"image\nimage\n<none>\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
images := []types.ImageSummary{
|
||||
{ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime},
|
||||
{ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime},
|
||||
{ID: "imageID3", RepoTags: []string{"<none>:<none>"}, RepoDigests: []string{"<none>@<none>"}, Created: unixTime},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := ImageWrite(testcase.context, images)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageContextWriteWithNoImage(t *testing.T) {
|
||||
out := bytes.NewBufferString("")
|
||||
images := []types.ImageSummary{}
|
||||
|
||||
contexts := []struct {
|
||||
context ImageContext
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("{{.Repository}}", false, false),
|
||||
Output: out,
|
||||
},
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table {{.Repository}}", false, false),
|
||||
Output: out,
|
||||
},
|
||||
},
|
||||
"REPOSITORY\n",
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("{{.Repository}}", false, true),
|
||||
Output: out,
|
||||
},
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
Context: Context{
|
||||
Format: NewImageFormat("table {{.Repository}}", false, true),
|
||||
Output: out,
|
||||
},
|
||||
},
|
||||
"REPOSITORY DIGEST\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
ImageWrite(context.context, images)
|
||||
assert.Equal(t, context.expected, out.String())
|
||||
// Clean buffer
|
||||
out.Reset()
|
||||
}
|
||||
}
@ -1,129 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}"
|
||||
|
||||
networkIDHeader = "NETWORK ID"
|
||||
ipv6Header = "IPV6"
|
||||
internalHeader = "INTERNAL"
|
||||
)
|
||||
|
||||
// NewNetworkFormat returns a Format for rendering using a network Context
|
||||
func NewNetworkFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
return defaultNetworkTableFormat
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `network_id: {{.ID}}`
|
||||
}
|
||||
return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n`
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// NetworkWrite writes the context
|
||||
func NetworkWrite(ctx Context, networks []types.NetworkResource) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, network := range networks {
|
||||
networkCtx := &networkContext{trunc: ctx.Trunc, n: network}
|
||||
if err := format(networkCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
networkCtx := networkContext{}
|
||||
networkCtx.header = networkHeaderContext{
|
||||
"ID": networkIDHeader,
|
||||
"Name": nameHeader,
|
||||
"Driver": driverHeader,
|
||||
"Scope": scopeHeader,
|
||||
"IPv6": ipv6Header,
|
||||
"Internal": internalHeader,
|
||||
"Labels": labelsHeader,
|
||||
"CreatedAt": createdAtHeader,
|
||||
}
|
||||
return ctx.Write(&networkCtx, render)
|
||||
}
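A hedged sketch of NetworkWrite with a custom Go-template format, in the spirit of the network tests below; the import path and the sample network data are assumptions for illustration.

package main

import (
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli/command/formatter" // assumed pre-removal import path
)

func main() {
	networks := []types.NetworkResource{
		{ID: "0123456789ab", Name: "bridge", Driver: "bridge", Scope: "local"},
	}
	ctx := formatter.Context{
		// any Go template over the network sub-context fields is accepted as a format
		Format: formatter.NewNetworkFormat("{{.ID}}: {{.Name}} ({{.Driver}}, {{.Scope}})", false),
		Trunc:  true,
		Output: os.Stdout,
	}
	if err := formatter.NetworkWrite(ctx, networks); err != nil {
		panic(err)
	}
}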
|
||||
|
||||
type networkHeaderContext map[string]string
|
||||
|
||||
func (c networkHeaderContext) Label(name string) string {
|
||||
n := strings.Split(name, ".")
|
||||
r := strings.NewReplacer("-", " ", "_", " ")
|
||||
h := r.Replace(n[len(n)-1])
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
type networkContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
n types.NetworkResource
|
||||
}
|
||||
|
||||
func (c *networkContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *networkContext) ID() string {
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.n.ID)
|
||||
}
|
||||
return c.n.ID
|
||||
}
|
||||
|
||||
func (c *networkContext) Name() string {
|
||||
return c.n.Name
|
||||
}
|
||||
|
||||
func (c *networkContext) Driver() string {
|
||||
return c.n.Driver
|
||||
}
|
||||
|
||||
func (c *networkContext) Scope() string {
|
||||
return c.n.Scope
|
||||
}
|
||||
|
||||
func (c *networkContext) IPv6() string {
|
||||
return fmt.Sprintf("%v", c.n.EnableIPv6)
|
||||
}
|
||||
|
||||
func (c *networkContext) Internal() string {
|
||||
return fmt.Sprintf("%v", c.n.Internal)
|
||||
}
|
||||
|
||||
func (c *networkContext) Labels() string {
|
||||
if c.n.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
var joinLabels []string
|
||||
for k, v := range c.n.Labels {
|
||||
joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return strings.Join(joinLabels, ",")
|
||||
}
|
||||
|
||||
func (c *networkContext) Label(name string) string {
|
||||
if c.n.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
return c.n.Labels[name]
|
||||
}
|
||||
|
||||
func (c *networkContext) CreatedAt() string {
|
||||
return c.n.Created.String()
|
||||
}
@ -1,213 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNetworkContext(t *testing.T) {
|
||||
networkID := stringid.GenerateRandomID()
|
||||
|
||||
var ctx networkContext
|
||||
cases := []struct {
|
||||
networkCtx networkContext
|
||||
expValue string
|
||||
call func() string
|
||||
}{
|
||||
{networkContext{
|
||||
n: types.NetworkResource{ID: networkID},
|
||||
trunc: false,
|
||||
}, networkID, ctx.ID},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{ID: networkID},
|
||||
trunc: true,
|
||||
}, stringid.TruncateID(networkID), ctx.ID},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{Name: "network_name"},
|
||||
}, "network_name", ctx.Name},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{Driver: "driver_name"},
|
||||
}, "driver_name", ctx.Driver},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{EnableIPv6: true},
|
||||
}, "true", ctx.IPv6},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{EnableIPv6: false},
|
||||
}, "false", ctx.IPv6},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{Internal: true},
|
||||
}, "true", ctx.Internal},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{Internal: false},
|
||||
}, "false", ctx.Internal},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{},
|
||||
}, "", ctx.Labels},
|
||||
{networkContext{
|
||||
n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}},
|
||||
}, "label1=value1,label2=value2", ctx.Labels},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.networkCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetworkContextWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table format
|
||||
{
|
||||
Context{Format: NewNetworkFormat("table", false)},
|
||||
`NETWORK ID NAME DRIVER SCOPE
|
||||
networkID1 foobar_baz foo local
|
||||
networkID2 foobar_bar bar local
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNetworkFormat("table", true)},
|
||||
`networkID1
|
||||
networkID2
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNetworkFormat("table {{.Name}}", false)},
|
||||
`NAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNetworkFormat("table {{.Name}}", true)},
|
||||
`NAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
Context{Format: NewNetworkFormat("raw", false)},
|
||||
`network_id: networkID1
|
||||
name: foobar_baz
|
||||
driver: foo
|
||||
scope: local
|
||||
|
||||
network_id: networkID2
|
||||
name: foobar_bar
|
||||
driver: bar
|
||||
scope: local
|
||||
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNetworkFormat("raw", true)},
|
||||
`network_id: networkID1
|
||||
network_id: networkID2
|
||||
`,
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{Format: NewNetworkFormat("{{.Name}}", false)},
|
||||
`foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
// Custom Format with CreatedAt
|
||||
{
|
||||
Context{Format: NewNetworkFormat("{{.Name}} {{.CreatedAt}}", false)},
|
||||
`foobar_baz 2016-01-01 00:00:00 +0000 UTC
|
||||
foobar_bar 2017-01-01 00:00:00 +0000 UTC
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
timestamp1, _ := time.Parse("2006-01-02", "2016-01-01")
|
||||
timestamp2, _ := time.Parse("2006-01-02", "2017-01-01")
|
||||
|
||||
for _, testcase := range cases {
|
||||
networks := []types.NetworkResource{
|
||||
{ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local", Created: timestamp1},
|
||||
{ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local", Created: timestamp2},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := NetworkWrite(testcase.context, networks)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetworkContextWriteJSON(t *testing.T) {
|
||||
networks := []types.NetworkResource{
|
||||
{ID: "networkID1", Name: "foobar_baz"},
|
||||
{ID: "networkID2", Name: "foobar_bar"},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
{"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"},
|
||||
{"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"},
|
||||
}
|
||||
|
||||
out := bytes.NewBufferString("")
|
||||
err := NetworkWrite(Context{Format: "{{json .}}", Output: out}, networks)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(line), &m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, expectedJSONs[i], m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetworkContextWriteJSONField(t *testing.T) {
|
||||
networks := []types.NetworkResource{
|
||||
{ID: "networkID1", Name: "foobar_baz"},
|
||||
{ID: "networkID2", Name: "foobar_bar"},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := NetworkWrite(Context{Format: "{{json .ID}}", Output: out}, networks)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var s string
|
||||
if err := json.Unmarshal([]byte(line), &s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, networks[i].ID, s)
|
||||
}
|
||||
}
@ -1,292 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/cli/command/inspect"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultNodeTableFormat = "table {{.ID}} {{if .Self}}*{{else}} {{ end }}\t{{.Hostname}}\t{{.Status}}\t{{.Availability}}\t{{.ManagerStatus}}"
|
||||
nodeInspectPrettyTemplate Format = `ID: {{.ID}}
|
||||
{{- if .Name }}
|
||||
Name: {{.Name}}
|
||||
{{- end }}
|
||||
{{- if .Labels }}
|
||||
Labels:
|
||||
{{- range $k, $v := .Labels }}
|
||||
- {{ $k }}{{if $v }}={{ $v }}{{ end }}
|
||||
{{- end }}{{ end }}
|
||||
Hostname: {{.Hostname}}
|
||||
Joined at: {{.CreatedAt}}
|
||||
Status:
|
||||
State: {{.StatusState}}
|
||||
{{- if .HasStatusMessage}}
|
||||
Message: {{.StatusMessage}}
|
||||
{{- end}}
|
||||
Availability: {{.SpecAvailability}}
|
||||
{{- if .Status.Addr}}
|
||||
Address: {{.StatusAddr}}
|
||||
{{- end}}
|
||||
{{- if .HasManagerStatus}}
|
||||
Manager Status:
|
||||
Address: {{.ManagerStatusAddr}}
|
||||
Raft Status: {{.ManagerStatusReachability}}
|
||||
{{- if .IsManagerStatusLeader}}
|
||||
Leader: Yes
|
||||
{{- else}}
|
||||
Leader: No
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
Platform:
|
||||
Operating System: {{.PlatformOS}}
|
||||
Architecture: {{.PlatformArchitecture}}
|
||||
Resources:
|
||||
CPUs: {{.ResourceNanoCPUs}}
|
||||
Memory: {{.ResourceMemory}}
|
||||
{{- if .HasEnginePlugins}}
|
||||
Plugins:
|
||||
{{- range $k, $v := .EnginePlugins }}
|
||||
{{ $k }}:{{if $v }} {{ $v }}{{ end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
Engine Version: {{.EngineVersion}}
|
||||
{{- if .EngineLabels}}
|
||||
Engine Labels:
|
||||
{{- range $k, $v := .EngineLabels }}
|
||||
- {{ $k }}{{if $v }}={{ $v }}{{ end }}
|
||||
{{- end }}{{- end }}
|
||||
`
|
||||
nodeIDHeader = "ID"
|
||||
selfHeader = ""
|
||||
hostnameHeader = "HOSTNAME"
|
||||
availabilityHeader = "AVAILABILITY"
|
||||
managerStatusHeader = "MANAGER STATUS"
|
||||
)
|
||||
|
||||
// NewNodeFormat returns a Format for rendering using a node Context
|
||||
func NewNodeFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case PrettyFormatKey:
|
||||
return nodeInspectPrettyTemplate
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
return defaultNodeTableFormat
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `node_id: {{.ID}}`
|
||||
}
|
||||
return `node_id: {{.ID}}\nhostname: {{.Hostname}}\nstatus: {{.Status}}\navailability: {{.Availability}}\nmanager_status: {{.ManagerStatus}}\n`
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// NodeWrite writes the context
|
||||
func NodeWrite(ctx Context, nodes []swarm.Node, info types.Info) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, node := range nodes {
|
||||
nodeCtx := &nodeContext{n: node, info: info}
|
||||
if err := format(nodeCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
nodeCtx := nodeContext{}
|
||||
nodeCtx.header = nodeHeaderContext{
|
||||
"ID": nodeIDHeader,
|
||||
"Self": selfHeader,
|
||||
"Hostname": hostnameHeader,
|
||||
"Status": statusHeader,
|
||||
"Availability": availabilityHeader,
|
||||
"ManagerStatus": managerStatusHeader,
|
||||
}
|
||||
return ctx.Write(&nodeCtx, render)
|
||||
}
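A hedged sketch of NodeWrite; the info argument is only used to mark the current node via the Self column, as the implementation above and the node tests below show. The import path and sample values are assumptions for illustration.

package main

import (
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/cli/command/formatter" // assumed pre-removal import path
)

func main() {
	nodes := []swarm.Node{
		{ID: "node-1", Description: swarm.NodeDescription{Hostname: "manager-01"}},
	}
	// info.Swarm.NodeID is compared against each node ID to render the "*" self marker
	info := types.Info{}
	ctx := formatter.Context{
		Format: formatter.NewNodeFormat("table", false),
		Output: os.Stdout,
	}
	if err := formatter.NodeWrite(ctx, nodes, info); err != nil {
		panic(err)
	}
}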
|
||||
|
||||
type nodeHeaderContext map[string]string
|
||||
|
||||
type nodeContext struct {
|
||||
HeaderContext
|
||||
n swarm.Node
|
||||
info types.Info
|
||||
}
|
||||
|
||||
func (c *nodeContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *nodeContext) ID() string {
|
||||
return c.n.ID
|
||||
}
|
||||
|
||||
func (c *nodeContext) Self() bool {
|
||||
return c.n.ID == c.info.Swarm.NodeID
|
||||
}
|
||||
|
||||
func (c *nodeContext) Hostname() string {
|
||||
return c.n.Description.Hostname
|
||||
}
|
||||
|
||||
func (c *nodeContext) Status() string {
|
||||
return command.PrettyPrint(string(c.n.Status.State))
|
||||
}
|
||||
|
||||
func (c *nodeContext) Availability() string {
|
||||
return command.PrettyPrint(string(c.n.Spec.Availability))
|
||||
}
|
||||
|
||||
func (c *nodeContext) ManagerStatus() string {
|
||||
reachability := ""
|
||||
if c.n.ManagerStatus != nil {
|
||||
if c.n.ManagerStatus.Leader {
|
||||
reachability = "Leader"
|
||||
} else {
|
||||
reachability = string(c.n.ManagerStatus.Reachability)
|
||||
}
|
||||
}
|
||||
return command.PrettyPrint(reachability)
|
||||
}
|
||||
|
||||
// NodeInspectWrite renders the context for a list of nodes
|
||||
func NodeInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error {
|
||||
if ctx.Format != nodeInspectPrettyTemplate {
|
||||
return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef)
|
||||
}
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, ref := range refs {
|
||||
nodeI, _, err := getRef(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node, ok := nodeI.(swarm.Node)
|
||||
if !ok {
|
||||
return fmt.Errorf("got wrong object to inspect :%v", ok)
|
||||
}
|
||||
if err := format(&nodeInspectContext{Node: node}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ctx.Write(&nodeInspectContext{}, render)
|
||||
}
|
||||
|
||||
type nodeInspectContext struct {
|
||||
swarm.Node
|
||||
subContext
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) ID() string {
|
||||
return ctx.Node.ID
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) Name() string {
|
||||
return ctx.Node.Spec.Name
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) Labels() map[string]string {
|
||||
return ctx.Node.Spec.Labels
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) Hostname() string {
|
||||
return ctx.Node.Description.Hostname
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) CreatedAt() string {
|
||||
return command.PrettyPrint(ctx.Node.CreatedAt)
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) StatusState() string {
|
||||
return command.PrettyPrint(ctx.Node.Status.State)
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) HasStatusMessage() bool {
|
||||
return ctx.Node.Status.Message != ""
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) StatusMessage() string {
|
||||
return command.PrettyPrint(ctx.Node.Status.Message)
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) SpecAvailability() string {
|
||||
return command.PrettyPrint(ctx.Node.Spec.Availability)
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) HasStatusAddr() bool {
|
||||
return ctx.Node.Status.Addr != ""
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) StatusAddr() string {
|
||||
return ctx.Node.Status.Addr
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) HasManagerStatus() bool {
|
||||
return ctx.Node.ManagerStatus != nil
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) ManagerStatusAddr() string {
|
||||
return ctx.Node.ManagerStatus.Addr
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) ManagerStatusReachability() string {
|
||||
return command.PrettyPrint(ctx.Node.ManagerStatus.Reachability)
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) IsManagerStatusLeader() bool {
|
||||
return ctx.Node.ManagerStatus.Leader
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) PlatformOS() string {
|
||||
return ctx.Node.Description.Platform.OS
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) PlatformArchitecture() string {
|
||||
return ctx.Node.Description.Platform.Architecture
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) ResourceNanoCPUs() int {
|
||||
if ctx.Node.Description.Resources.NanoCPUs == 0 {
|
||||
return int(0)
|
||||
}
|
||||
return int(ctx.Node.Description.Resources.NanoCPUs) / 1e9
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) ResourceMemory() string {
|
||||
if ctx.Node.Description.Resources.MemoryBytes == 0 {
|
||||
return ""
|
||||
}
|
||||
return units.BytesSize(float64(ctx.Node.Description.Resources.MemoryBytes))
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) HasEnginePlugins() bool {
|
||||
return len(ctx.Node.Description.Engine.Plugins) > 0
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) EnginePlugins() map[string]string {
|
||||
pluginMap := map[string][]string{}
|
||||
for _, p := range ctx.Node.Description.Engine.Plugins {
|
||||
pluginMap[p.Type] = append(pluginMap[p.Type], p.Name)
|
||||
}
|
||||
|
||||
pluginNamesByType := map[string]string{}
|
||||
for k, v := range pluginMap {
|
||||
pluginNamesByType[k] = strings.Join(v, ", ")
|
||||
}
|
||||
return pluginNamesByType
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) EngineLabels() map[string]string {
|
||||
return ctx.Node.Description.Engine.Labels
|
||||
}
|
||||
|
||||
func (ctx *nodeInspectContext) EngineVersion() string {
|
||||
return ctx.Node.Description.Engine.EngineVersion
|
||||
}
@ -1,188 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNodeContext(t *testing.T) {
|
||||
nodeID := stringid.GenerateRandomID()
|
||||
|
||||
var ctx nodeContext
|
||||
cases := []struct {
|
||||
nodeCtx nodeContext
|
||||
expValue string
|
||||
call func() string
|
||||
}{
|
||||
{nodeContext{
|
||||
n: swarm.Node{ID: nodeID},
|
||||
}, nodeID, ctx.ID},
|
||||
{nodeContext{
|
||||
n: swarm.Node{Description: swarm.NodeDescription{Hostname: "node_hostname"}},
|
||||
}, "node_hostname", ctx.Hostname},
|
||||
{nodeContext{
|
||||
n: swarm.Node{Status: swarm.NodeStatus{State: swarm.NodeState("foo")}},
|
||||
}, "Foo", ctx.Status},
|
||||
{nodeContext{
|
||||
n: swarm.Node{Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}},
|
||||
}, "Drain", ctx.Availability},
|
||||
{nodeContext{
|
||||
n: swarm.Node{ManagerStatus: &swarm.ManagerStatus{Leader: true}},
|
||||
}, "Leader", ctx.ManagerStatus},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.nodeCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeContextWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table format
|
||||
{
|
||||
Context{Format: NewNodeFormat("table", false)},
|
||||
`ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
|
||||
nodeID1 foobar_baz Foo Drain Leader
|
||||
nodeID2 foobar_bar Bar Active Reachable
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNodeFormat("table", true)},
|
||||
`nodeID1
|
||||
nodeID2
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNodeFormat("table {{.Hostname}}", false)},
|
||||
`HOSTNAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNodeFormat("table {{.Hostname}}", true)},
|
||||
`HOSTNAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
Context{Format: NewNodeFormat("raw", false)},
|
||||
`node_id: nodeID1
|
||||
hostname: foobar_baz
|
||||
status: Foo
|
||||
availability: Drain
|
||||
manager_status: Leader
|
||||
|
||||
node_id: nodeID2
|
||||
hostname: foobar_bar
|
||||
status: Bar
|
||||
availability: Active
|
||||
manager_status: Reachable
|
||||
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewNodeFormat("raw", true)},
|
||||
`node_id: nodeID1
|
||||
node_id: nodeID2
|
||||
`,
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{Format: NewNodeFormat("{{.Hostname}}", false)},
|
||||
`foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
nodes := []swarm.Node{
|
||||
{ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}, Status: swarm.NodeStatus{State: swarm.NodeState("foo")}, Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("drain")}, ManagerStatus: &swarm.ManagerStatus{Leader: true}},
|
||||
{ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}, Status: swarm.NodeStatus{State: swarm.NodeState("bar")}, Spec: swarm.NodeSpec{Availability: swarm.NodeAvailability("active")}, ManagerStatus: &swarm.ManagerStatus{Leader: false, Reachability: swarm.Reachability("Reachable")}},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := NodeWrite(testcase.context, nodes, types.Info{})
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeContextWriteJSON(t *testing.T) {
|
||||
nodes := []swarm.Node{
|
||||
{ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}},
|
||||
{ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
{"Availability": "", "Hostname": "foobar_baz", "ID": "nodeID1", "ManagerStatus": "", "Status": "", "Self": false},
|
||||
{"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false},
|
||||
}
|
||||
|
||||
out := bytes.NewBufferString("")
|
||||
err := NodeWrite(Context{Format: "{{json .}}", Output: out}, nodes, types.Info{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(line), &m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, expectedJSONs[i], m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeContextWriteJSONField(t *testing.T) {
|
||||
nodes := []swarm.Node{
|
||||
{ID: "nodeID1", Description: swarm.NodeDescription{Hostname: "foobar_baz"}},
|
||||
{ID: "nodeID2", Description: swarm.NodeDescription{Hostname: "foobar_bar"}},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := NodeWrite(Context{Format: "{{json .ID}}", Output: out}, nodes, types.Info{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var s string
|
||||
if err := json.Unmarshal([]byte(line), &s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, nodes[i].ID, s)
|
||||
}
|
||||
}
@ -1,95 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultPluginTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Description}}\t{{.Enabled}}"
|
||||
|
||||
pluginIDHeader = "ID"
|
||||
descriptionHeader = "DESCRIPTION"
|
||||
enabledHeader = "ENABLED"
|
||||
)
|
||||
|
||||
// NewPluginFormat returns a Format for rendering using a plugin Context
|
||||
func NewPluginFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
return defaultPluginTableFormat
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `plugin_id: {{.ID}}`
|
||||
}
|
||||
return `plugin_id: {{.ID}}\nname: {{.Name}}\ndescription: {{.Description}}\nenabled: {{.Enabled}}\n`
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// PluginWrite writes the context
|
||||
func PluginWrite(ctx Context, plugins []*types.Plugin) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, plugin := range plugins {
|
||||
pluginCtx := &pluginContext{trunc: ctx.Trunc, p: *plugin}
|
||||
if err := format(pluginCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
pluginCtx := pluginContext{}
|
||||
pluginCtx.header = map[string]string{
|
||||
"ID": pluginIDHeader,
|
||||
"Name": nameHeader,
|
||||
"Description": descriptionHeader,
|
||||
"Enabled": enabledHeader,
|
||||
"PluginReference": imageHeader,
|
||||
}
|
||||
return ctx.Write(&pluginCtx, render)
|
||||
}
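A hedged sketch of PluginWrite using the raw format; the import path and plugin values are assumptions for illustration.

package main

import (
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/cli/command/formatter" // assumed pre-removal import path
)

func main() {
	plugins := []*types.Plugin{
		{ID: "plugin-1", Name: "example/plugin:latest", Enabled: true,
			Config: types.PluginConfig{Description: "an example plugin"}},
	}
	ctx := formatter.Context{
		// "raw" prints plugin_id/name/description/enabled key-value lines per plugin
		Format: formatter.NewPluginFormat("raw", false),
		Output: os.Stdout,
	}
	if err := formatter.PluginWrite(ctx, plugins); err != nil {
		panic(err)
	}
}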
|
||||
|
||||
type pluginContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
p types.Plugin
|
||||
}
|
||||
|
||||
func (c *pluginContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *pluginContext) ID() string {
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.p.ID)
|
||||
}
|
||||
return c.p.ID
|
||||
}
|
||||
|
||||
func (c *pluginContext) Name() string {
|
||||
return c.p.Name
|
||||
}
|
||||
|
||||
func (c *pluginContext) Description() string {
|
||||
desc := strings.Replace(c.p.Config.Description, "\n", "", -1)
|
||||
desc = strings.Replace(desc, "\r", "", -1)
|
||||
if c.trunc {
|
||||
desc = stringutils.Ellipsis(desc, 45)
|
||||
}
|
||||
|
||||
return desc
|
||||
}
|
||||
|
||||
func (c *pluginContext) Enabled() bool {
|
||||
return c.p.Enabled
|
||||
}
|
||||
|
||||
func (c *pluginContext) PluginReference() string {
|
||||
return c.p.PluginReference
|
||||
}
@ -1,182 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPluginContext(t *testing.T) {
|
||||
pluginID := stringid.GenerateRandomID()
|
||||
|
||||
var ctx pluginContext
|
||||
cases := []struct {
|
||||
pluginCtx pluginContext
|
||||
expValue string
|
||||
call func() string
|
||||
}{
|
||||
{pluginContext{
|
||||
p: types.Plugin{ID: pluginID},
|
||||
trunc: false,
|
||||
}, pluginID, ctx.ID},
|
||||
{pluginContext{
|
||||
p: types.Plugin{ID: pluginID},
|
||||
trunc: true,
|
||||
}, stringid.TruncateID(pluginID), ctx.ID},
|
||||
{pluginContext{
|
||||
p: types.Plugin{Name: "plugin_name"},
|
||||
}, "plugin_name", ctx.Name},
|
||||
{pluginContext{
|
||||
p: types.Plugin{Config: types.PluginConfig{Description: "plugin_description"}},
|
||||
}, "plugin_description", ctx.Description},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.pluginCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginContextWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table format
|
||||
{
|
||||
Context{Format: NewPluginFormat("table", false)},
|
||||
`ID NAME DESCRIPTION ENABLED
|
||||
pluginID1 foobar_baz description 1 true
|
||||
pluginID2 foobar_bar description 2 false
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewPluginFormat("table", true)},
|
||||
`pluginID1
|
||||
pluginID2
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewPluginFormat("table {{.Name}}", false)},
|
||||
`NAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewPluginFormat("table {{.Name}}", true)},
|
||||
`NAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
Context{Format: NewPluginFormat("raw", false)},
|
||||
`plugin_id: pluginID1
|
||||
name: foobar_baz
|
||||
description: description 1
|
||||
enabled: true
|
||||
|
||||
plugin_id: pluginID2
|
||||
name: foobar_bar
|
||||
description: description 2
|
||||
enabled: false
|
||||
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewPluginFormat("raw", true)},
|
||||
`plugin_id: pluginID1
|
||||
plugin_id: pluginID2
|
||||
`,
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{Format: NewPluginFormat("{{.Name}}", false)},
|
||||
`foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
plugins := []*types.Plugin{
|
||||
{ID: "pluginID1", Name: "foobar_baz", Config: types.PluginConfig{Description: "description 1"}, Enabled: true},
|
||||
{ID: "pluginID2", Name: "foobar_bar", Config: types.PluginConfig{Description: "description 2"}, Enabled: false},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := PluginWrite(testcase.context, plugins)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginContextWriteJSON(t *testing.T) {
|
||||
plugins := []*types.Plugin{
|
||||
{ID: "pluginID1", Name: "foobar_baz"},
|
||||
{ID: "pluginID2", Name: "foobar_bar"},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
{"Description": "", "Enabled": false, "ID": "pluginID1", "Name": "foobar_baz", "PluginReference": ""},
|
||||
{"Description": "", "Enabled": false, "ID": "pluginID2", "Name": "foobar_bar", "PluginReference": ""},
|
||||
}
|
||||
|
||||
out := bytes.NewBufferString("")
|
||||
err := PluginWrite(Context{Format: "{{json .}}", Output: out}, plugins)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(line), &m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, expectedJSONs[i], m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPluginContextWriteJSONField(t *testing.T) {
|
||||
plugins := []*types.Plugin{
|
||||
{ID: "pluginID1", Name: "foobar_baz"},
|
||||
{ID: "pluginID2", Name: "foobar_bar"},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := PluginWrite(Context{Format: "{{json .ID}}", Output: out}, plugins)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
var s string
|
||||
if err := json.Unmarshal([]byte(line), &s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, plugins[i].ID, s)
|
||||
}
|
||||
}
@ -1,66 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"unicode"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func marshalJSON(x interface{}) ([]byte, error) {
|
||||
m, err := marshalMap(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
// marshalMap marshals x to map[string]interface{}
|
||||
func marshalMap(x interface{}) (map[string]interface{}, error) {
|
||||
val := reflect.ValueOf(x)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind())
|
||||
}
|
||||
if val.IsNil() {
|
||||
return nil, errors.Errorf("expected a pointer to a struct, got nil pointer")
|
||||
}
|
||||
valElem := val.Elem()
|
||||
if valElem.Kind() != reflect.Struct {
|
||||
return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind())
|
||||
}
|
||||
typ := val.Type()
|
||||
m := make(map[string]interface{})
|
||||
for i := 0; i < val.NumMethod(); i++ {
|
||||
k, v, err := marshalForMethod(typ.Method(i), val.Method(i))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if k != "" {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var unmarshallableNames = map[string]struct{}{"FullHeader": {}}
|
||||
|
||||
// marshalForMethod returns the map key and the map value for marshalling the method.
|
||||
// It returns ("", nil, nil) for a valid but non-marshallable method (e.g. an unexported "unexportedFunc()").
|
||||
func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) {
|
||||
if val.Kind() != reflect.Func {
|
||||
return "", nil, errors.Errorf("expected func, got %v", val.Kind())
|
||||
}
|
||||
name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut()
|
||||
_, blackListed := unmarshallableNames[name]
|
||||
// FIXME: In text/template, (numOut == 2) is marshallable,
|
||||
// if the type of the second param is error.
|
||||
marshallable := unicode.IsUpper(rune(name[0])) && !blackListed &&
|
||||
numIn == 0 && numOut == 1
|
||||
if !marshallable {
|
||||
return "", nil, nil
|
||||
}
|
||||
result := val.Call(make([]reflect.Value, numIn))
|
||||
intf := result[0].Interface()
|
||||
return name, intf, nil
|
||||
}
|
|
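This helper is what makes the "{{json .}}" templates in the tests work: each formatter sub-context exposes its columns as exported zero-argument methods and delegates JSON marshalling to marshalJSON, so the emitted JSON mirrors the template fields rather than raw struct fields. A minimal sketch of that wiring, in the same package (the exampleContext type is hypothetical and not part of the original files):

// Hypothetical sub-context sketch: exported zero-argument methods become JSON
// keys; FullHeader (promoted from the embedded HeaderContext) is excluded via
// the unmarshallableNames blacklist above, and MarshalJSON itself is skipped
// because it has two return values.
type exampleContext struct {
	HeaderContext
}

func (c *exampleContext) Name() string { return "example" }

func (c *exampleContext) MarshalJSON() ([]byte, error) {
	return marshalJSON(c) // yields {"Name":"example"}
}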
@@ -1,66 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type dummy struct {
|
||||
}
|
||||
|
||||
func (d *dummy) Func1() string {
|
||||
return "Func1"
|
||||
}
|
||||
|
||||
func (d *dummy) func2() string {
|
||||
return "func2(should not be marshalled)"
|
||||
}
|
||||
|
||||
func (d *dummy) Func3() (string, int) {
|
||||
return "Func3(should not be marshalled)", -42
|
||||
}
|
||||
|
||||
func (d *dummy) Func4() int {
|
||||
return 4
|
||||
}
|
||||
|
||||
type dummyType string
|
||||
|
||||
func (d *dummy) Func5() dummyType {
|
||||
return dummyType("Func5")
|
||||
}
|
||||
|
||||
func (d *dummy) FullHeader() string {
|
||||
return "FullHeader(should not be marshalled)"
|
||||
}
|
||||
|
||||
var dummyExpected = map[string]interface{}{
|
||||
"Func1": "Func1",
|
||||
"Func4": 4,
|
||||
"Func5": dummyType("Func5"),
|
||||
}
|
||||
|
||||
func TestMarshalMap(t *testing.T) {
|
||||
d := dummy{}
|
||||
m, err := marshalMap(&d)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(dummyExpected, m) {
|
||||
t.Fatalf("expected %+v, got %+v",
|
||||
dummyExpected, m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalMapBad(t *testing.T) {
|
||||
if _, err := marshalMap(nil); err == nil {
|
||||
t.Fatal("expected an error (argument is nil)")
|
||||
}
|
||||
if _, err := marshalMap(dummy{}); err == nil {
|
||||
t.Fatal("expected an error (argument is non-pointer)")
|
||||
}
|
||||
x := 42
|
||||
if _, err := marshalMap(&x); err == nil {
|
||||
t.Fatal("expected an error (argument is a pointer to non-struct)")
|
||||
}
|
||||
}
|
|
@@ -1,101 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSecretTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}"
|
||||
secretIDHeader = "ID"
|
||||
secretNameHeader = "NAME"
|
||||
secretCreatedHeader = "CREATED"
|
||||
secretUpdatedHeader = "UPDATED"
|
||||
)
|
||||
|
||||
// NewSecretFormat returns a Format for rendering using a secret Context
|
||||
func NewSecretFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
return defaultSecretTableFormat
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// SecretWrite writes the context
|
||||
func SecretWrite(ctx Context, secrets []swarm.Secret) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, secret := range secrets {
|
||||
secretCtx := &secretContext{s: secret}
|
||||
if err := format(secretCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ctx.Write(newSecretContext(), render)
|
||||
}
|
||||
|
||||
func newSecretContext() *secretContext {
|
||||
sCtx := &secretContext{}
|
||||
|
||||
sCtx.header = map[string]string{
|
||||
"ID": secretIDHeader,
|
||||
"Name": nameHeader,
|
||||
"CreatedAt": secretCreatedHeader,
|
||||
"UpdatedAt": secretUpdatedHeader,
|
||||
"Labels": labelsHeader,
|
||||
}
|
||||
return sCtx
|
||||
}
|
||||
|
||||
type secretContext struct {
|
||||
HeaderContext
|
||||
s swarm.Secret
|
||||
}
|
||||
|
||||
func (c *secretContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *secretContext) ID() string {
|
||||
return c.s.ID
|
||||
}
|
||||
|
||||
func (c *secretContext) Name() string {
|
||||
return c.s.Spec.Annotations.Name
|
||||
}
|
||||
|
||||
func (c *secretContext) CreatedAt() string {
|
||||
return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.CreatedAt)) + " ago"
|
||||
}
|
||||
|
||||
func (c *secretContext) UpdatedAt() string {
|
||||
return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.UpdatedAt)) + " ago"
|
||||
}
|
||||
|
||||
func (c *secretContext) Labels() string {
|
||||
mapLabels := c.s.Spec.Annotations.Labels
|
||||
if mapLabels == nil {
|
||||
return ""
|
||||
}
|
||||
var joinLabels []string
|
||||
for k, v := range mapLabels {
|
||||
joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return strings.Join(joinLabels, ",")
|
||||
}
|
||||
|
||||
func (c *secretContext) Label(name string) string {
|
||||
if c.s.Spec.Annotations.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
return c.s.Spec.Annotations.Labels[name]
|
||||
}
|
|
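The Labels and Label accessors above never appear in the default table, but a custom format can pull a single label into its own column, since text/template can call the context's Label method directly. A minimal sketch (the wrapper function and label key are hypothetical):

// Hypothetical custom column: print each secret's name next to one label value.
func exampleSecretLabelColumn(ctx Context, secrets []swarm.Secret) error {
	ctx.Format = NewSecretFormat(`{{.Name}}: {{.Label "com.example.env"}}`, false)
	return SecretWrite(ctx, secrets)
}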
@@ -1,63 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSecretContextFormatWrite(t *testing.T) {
|
||||
// Check default output format (verbose and non-verbose mode) for table headers
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table format
|
||||
{Context{Format: NewSecretFormat("table", false)},
|
||||
`ID NAME CREATED UPDATED
|
||||
1 passwords Less than a second ago Less than a second ago
|
||||
2 id_rsa Less than a second ago Less than a second ago
|
||||
`},
|
||||
{Context{Format: NewSecretFormat("table {{.Name}}", true)},
|
||||
`NAME
|
||||
passwords
|
||||
id_rsa
|
||||
`},
|
||||
{Context{Format: NewSecretFormat("{{.ID}}-{{.Name}}", false)},
|
||||
`1-passwords
|
||||
2-id_rsa
|
||||
`},
|
||||
}
|
||||
|
||||
secrets := []swarm.Secret{
|
||||
{ID: "1",
|
||||
Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()},
|
||||
Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "passwords"}}},
|
||||
{ID: "2",
|
||||
Meta: swarm.Meta{CreatedAt: time.Now(), UpdatedAt: time.Now()},
|
||||
Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "id_rsa"}}},
|
||||
}
|
||||
for _, testcase := range cases {
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
if err := SecretWrite(testcase.context, secrets); err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,535 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
mounttypes "github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/cli/command/inspect"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const serviceInspectPrettyTemplate Format = `
|
||||
ID: {{.ID}}
|
||||
Name: {{.Name}}
|
||||
{{- if .Labels }}
|
||||
Labels:
|
||||
{{- range $k, $v := .Labels }}
|
||||
{{ $k }}{{if $v }}={{ $v }}{{ end }}
|
||||
{{- end }}{{ end }}
|
||||
Service Mode:
|
||||
{{- if .IsModeGlobal }} Global
|
||||
{{- else if .IsModeReplicated }} Replicated
|
||||
{{- if .ModeReplicatedReplicas }}
|
||||
Replicas: {{ .ModeReplicatedReplicas }}
|
||||
{{- end }}{{ end }}
|
||||
{{- if .HasUpdateStatus }}
|
||||
UpdateStatus:
|
||||
State: {{ .UpdateStatusState }}
|
||||
{{- if .HasUpdateStatusStarted }}
|
||||
Started: {{ .UpdateStatusStarted }}
|
||||
{{- end }}
|
||||
{{- if .UpdateIsCompleted }}
|
||||
Completed: {{ .UpdateStatusCompleted }}
|
||||
{{- end }}
|
||||
Message: {{ .UpdateStatusMessage }}
|
||||
{{- end }}
|
||||
Placement:
|
||||
{{- if .TaskPlacementConstraints }}
|
||||
Constraints: {{ .TaskPlacementConstraints }}
|
||||
{{- end }}
|
||||
{{- if .TaskPlacementPreferences }}
|
||||
Preferences: {{ .TaskPlacementPreferences }}
|
||||
{{- end }}
|
||||
{{- if .HasUpdateConfig }}
|
||||
UpdateConfig:
|
||||
Parallelism: {{ .UpdateParallelism }}
|
||||
{{- if .HasUpdateDelay}}
|
||||
Delay: {{ .UpdateDelay }}
|
||||
{{- end }}
|
||||
On failure: {{ .UpdateOnFailure }}
|
||||
{{- if .HasUpdateMonitor}}
|
||||
Monitoring Period: {{ .UpdateMonitor }}
|
||||
{{- end }}
|
||||
Max failure ratio: {{ .UpdateMaxFailureRatio }}
|
||||
Update order: {{ .UpdateOrder }}
|
||||
{{- end }}
|
||||
{{- if .HasRollbackConfig }}
|
||||
RollbackConfig:
|
||||
Parallelism: {{ .RollbackParallelism }}
|
||||
{{- if .HasRollbackDelay}}
|
||||
Delay: {{ .RollbackDelay }}
|
||||
{{- end }}
|
||||
On failure: {{ .RollbackOnFailure }}
|
||||
{{- if .HasRollbackMonitor}}
|
||||
Monitoring Period: {{ .RollbackMonitor }}
|
||||
{{- end }}
|
||||
Max failure ratio: {{ .RollbackMaxFailureRatio }}
|
||||
Rollback order: {{ .RollbackOrder }}
|
||||
{{- end }}
|
||||
ContainerSpec:
|
||||
Image: {{ .ContainerImage }}
|
||||
{{- if .ContainerArgs }}
|
||||
Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }}
|
||||
{{- end -}}
|
||||
{{- if .ContainerEnv }}
|
||||
Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }}
|
||||
{{- end -}}
|
||||
{{- if .ContainerWorkDir }}
|
||||
Dir: {{ .ContainerWorkDir }}
|
||||
{{- end -}}
|
||||
{{- if .ContainerUser }}
|
||||
User: {{ .ContainerUser }}
|
||||
{{- end }}
|
||||
{{- if .ContainerMounts }}
|
||||
Mounts:
|
||||
{{- end }}
|
||||
{{- range $mount := .ContainerMounts }}
|
||||
Target = {{ $mount.Target }}
|
||||
Source = {{ $mount.Source }}
|
||||
ReadOnly = {{ $mount.ReadOnly }}
|
||||
Type = {{ $mount.Type }}
|
||||
{{- end -}}
|
||||
{{- if .HasResources }}
|
||||
Resources:
|
||||
{{- if .HasResourceReservations }}
|
||||
Reservations:
|
||||
{{- if gt .ResourceReservationNanoCPUs 0.0 }}
|
||||
CPU: {{ .ResourceReservationNanoCPUs }}
|
||||
{{- end }}
|
||||
{{- if .ResourceReservationMemory }}
|
||||
Memory: {{ .ResourceReservationMemory }}
|
||||
{{- end }}{{ end }}
|
||||
{{- if .HasResourceLimits }}
|
||||
Limits:
|
||||
{{- if gt .ResourceLimitsNanoCPUs 0.0 }}
|
||||
CPU: {{ .ResourceLimitsNanoCPUs }}
|
||||
{{- end }}
|
||||
{{- if .ResourceLimitMemory }}
|
||||
Memory: {{ .ResourceLimitMemory }}
|
||||
{{- end }}{{ end }}{{ end }}
|
||||
{{- if .Networks }}
|
||||
Networks:
|
||||
{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }}
|
||||
Endpoint Mode: {{ .EndpointMode }}
|
||||
{{- if .Ports }}
|
||||
Ports:
|
||||
{{- range $port := .Ports }}
|
||||
PublishedPort = {{ $port.PublishedPort }}
|
||||
Protocol = {{ $port.Protocol }}
|
||||
TargetPort = {{ $port.TargetPort }}
|
||||
PublishMode = {{ $port.PublishMode }}
|
||||
{{- end }} {{ end -}}
|
||||
`
|
||||
|
||||
// NewServiceFormat returns a Format for rendering using a Context
|
||||
func NewServiceFormat(source string) Format {
|
||||
switch source {
|
||||
case PrettyFormatKey:
|
||||
return serviceInspectPrettyTemplate
|
||||
default:
|
||||
return Format(strings.TrimPrefix(source, RawFormatKey))
|
||||
}
|
||||
}
|
||||
|
||||
func resolveNetworks(service swarm.Service, getNetwork inspect.GetRefFunc) map[string]string {
|
||||
networkNames := make(map[string]string)
|
||||
for _, network := range service.Spec.TaskTemplate.Networks {
|
||||
if resolved, _, err := getNetwork(network.Target); err == nil {
|
||||
if resolvedNetwork, ok := resolved.(types.NetworkResource); ok {
|
||||
networkNames[resolvedNetwork.ID] = resolvedNetwork.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
return networkNames
|
||||
}
|
||||
|
||||
// ServiceInspectWrite renders the context for a list of services
|
||||
func ServiceInspectWrite(ctx Context, refs []string, getRef, getNetwork inspect.GetRefFunc) error {
|
||||
if ctx.Format != serviceInspectPrettyTemplate {
|
||||
return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef)
|
||||
}
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, ref := range refs {
|
||||
serviceI, _, err := getRef(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
service, ok := serviceI.(swarm.Service)
|
||||
if !ok {
|
||||
return errors.Errorf("got wrong object to inspect")
|
||||
}
|
||||
if err := format(&serviceInspectContext{Service: service, networkNames: resolveNetworks(service, getNetwork)}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ctx.Write(&serviceInspectContext{}, render)
|
||||
}
|
||||
|
||||
type serviceInspectContext struct {
|
||||
swarm.Service
|
||||
subContext
|
||||
|
||||
// networkNames is a map from network IDs (as found in
|
||||
// Networks[x].Target) to network names.
|
||||
networkNames map[string]string
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(ctx)
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ID() string {
|
||||
return ctx.Service.ID
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) Name() string {
|
||||
return ctx.Service.Spec.Name
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) Labels() map[string]string {
|
||||
return ctx.Service.Spec.Labels
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) IsModeGlobal() bool {
|
||||
return ctx.Service.Spec.Mode.Global != nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) IsModeReplicated() bool {
|
||||
return ctx.Service.Spec.Mode.Replicated != nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 {
|
||||
return ctx.Service.Spec.Mode.Replicated.Replicas
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasUpdateStatus() bool {
|
||||
return ctx.Service.UpdateStatus != nil && ctx.Service.UpdateStatus.State != ""
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState {
|
||||
return ctx.Service.UpdateStatus.State
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasUpdateStatusStarted() bool {
|
||||
return ctx.Service.UpdateStatus.StartedAt != nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateStatusStarted() string {
|
||||
return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.StartedAt)) + " ago"
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateIsCompleted() bool {
|
||||
return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted && ctx.Service.UpdateStatus.CompletedAt != nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateStatusCompleted() string {
|
||||
return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.CompletedAt)) + " ago"
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateStatusMessage() string {
|
||||
return ctx.Service.UpdateStatus.Message
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) TaskPlacementConstraints() []string {
|
||||
if ctx.Service.Spec.TaskTemplate.Placement != nil {
|
||||
return ctx.Service.Spec.TaskTemplate.Placement.Constraints
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) TaskPlacementPreferences() []string {
|
||||
if ctx.Service.Spec.TaskTemplate.Placement == nil {
|
||||
return nil
|
||||
}
|
||||
var strings []string
|
||||
for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences {
|
||||
if pref.Spread != nil {
|
||||
strings = append(strings, "spread="+pref.Spread.SpreadDescriptor)
|
||||
}
|
||||
}
|
||||
return strings
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasUpdateConfig() bool {
|
||||
return ctx.Service.Spec.UpdateConfig != nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateParallelism() uint64 {
|
||||
return ctx.Service.Spec.UpdateConfig.Parallelism
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasUpdateDelay() bool {
|
||||
return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateDelay() time.Duration {
|
||||
return ctx.Service.Spec.UpdateConfig.Delay
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateOnFailure() string {
|
||||
return ctx.Service.Spec.UpdateConfig.FailureAction
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateOrder() string {
|
||||
return ctx.Service.Spec.UpdateConfig.Order
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasUpdateMonitor() bool {
|
||||
return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateMonitor() time.Duration {
|
||||
return ctx.Service.Spec.UpdateConfig.Monitor
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 {
|
||||
return ctx.Service.Spec.UpdateConfig.MaxFailureRatio
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasRollbackConfig() bool {
|
||||
return ctx.Service.Spec.RollbackConfig != nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) RollbackParallelism() uint64 {
|
||||
return ctx.Service.Spec.RollbackConfig.Parallelism
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasRollbackDelay() bool {
|
||||
return ctx.Service.Spec.RollbackConfig.Delay.Nanoseconds() > 0
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) RollbackDelay() time.Duration {
|
||||
return ctx.Service.Spec.RollbackConfig.Delay
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) RollbackOnFailure() string {
|
||||
return ctx.Service.Spec.RollbackConfig.FailureAction
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasRollbackMonitor() bool {
|
||||
return ctx.Service.Spec.RollbackConfig.Monitor.Nanoseconds() > 0
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) RollbackMonitor() time.Duration {
|
||||
return ctx.Service.Spec.RollbackConfig.Monitor
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 {
|
||||
return ctx.Service.Spec.RollbackConfig.MaxFailureRatio
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) RollbackOrder() string {
|
||||
return ctx.Service.Spec.RollbackConfig.Order
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ContainerImage() string {
|
||||
return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ContainerArgs() []string {
|
||||
return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ContainerEnv() []string {
|
||||
return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ContainerWorkDir() string {
|
||||
return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ContainerUser() string {
|
||||
return ctx.Service.Spec.TaskTemplate.ContainerSpec.User
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount {
|
||||
return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasResources() bool {
|
||||
return ctx.Service.Spec.TaskTemplate.Resources != nil
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasResourceReservations() bool {
|
||||
if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil {
|
||||
return false
|
||||
}
|
||||
return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 {
|
||||
if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 {
|
||||
return float64(0)
|
||||
}
|
||||
return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ResourceReservationMemory() string {
|
||||
if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 {
|
||||
return ""
|
||||
}
|
||||
return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes))
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) HasResourceLimits() bool {
|
||||
if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil {
|
||||
return false
|
||||
}
|
||||
return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 {
|
||||
return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) ResourceLimitMemory() string {
|
||||
if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 {
|
||||
return ""
|
||||
}
|
||||
return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes))
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) Networks() []string {
|
||||
var out []string
|
||||
for _, n := range ctx.Service.Spec.TaskTemplate.Networks {
|
||||
if name, ok := ctx.networkNames[n.Target]; ok {
|
||||
out = append(out, name)
|
||||
} else {
|
||||
out = append(out, n.Target)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) EndpointMode() string {
|
||||
if ctx.Service.Spec.EndpointSpec == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return string(ctx.Service.Spec.EndpointSpec.Mode)
|
||||
}
|
||||
|
||||
func (ctx *serviceInspectContext) Ports() []swarm.PortConfig {
|
||||
return ctx.Service.Endpoint.Ports
|
||||
}
|
||||
|
||||
const (
|
||||
defaultServiceTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Mode}}\t{{.Replicas}}\t{{.Image}}\t{{.Ports}}"
|
||||
|
||||
serviceIDHeader = "ID"
|
||||
modeHeader = "MODE"
|
||||
replicasHeader = "REPLICAS"
|
||||
)
|
||||
|
||||
// NewServiceListFormat returns a Format for rendering using a service Context
|
||||
func NewServiceListFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
return defaultServiceTableFormat
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `id: {{.ID}}`
|
||||
}
|
||||
return `id: {{.ID}}\nname: {{.Name}}\nmode: {{.Mode}}\nreplicas: {{.Replicas}}\nimage: {{.Image}}\nports: {{.Ports}}\n`
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// ServiceListInfo stores the information about mode and replicas to be used by template
|
||||
type ServiceListInfo struct {
|
||||
Mode string
|
||||
Replicas string
|
||||
}
|
||||
|
||||
// ServiceListWrite writes the context
|
||||
func ServiceListWrite(ctx Context, services []swarm.Service, info map[string]ServiceListInfo) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, service := range services {
|
||||
serviceCtx := &serviceContext{service: service, mode: info[service.ID].Mode, replicas: info[service.ID].Replicas}
|
||||
if err := format(serviceCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
serviceCtx := serviceContext{}
|
||||
serviceCtx.header = map[string]string{
|
||||
"ID": serviceIDHeader,
|
||||
"Name": nameHeader,
|
||||
"Mode": modeHeader,
|
||||
"Replicas": replicasHeader,
|
||||
"Image": imageHeader,
|
||||
"Ports": portsHeader,
|
||||
}
|
||||
return ctx.Write(&serviceCtx, render)
|
||||
}
|
||||
|
||||
type serviceContext struct {
|
||||
HeaderContext
|
||||
service swarm.Service
|
||||
mode string
|
||||
replicas string
|
||||
}
|
||||
|
||||
func (c *serviceContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *serviceContext) ID() string {
|
||||
return stringid.TruncateID(c.service.ID)
|
||||
}
|
||||
|
||||
func (c *serviceContext) Name() string {
|
||||
return c.service.Spec.Name
|
||||
}
|
||||
|
||||
func (c *serviceContext) Mode() string {
|
||||
return c.mode
|
||||
}
|
||||
|
||||
func (c *serviceContext) Replicas() string {
|
||||
return c.replicas
|
||||
}
|
||||
|
||||
func (c *serviceContext) Image() string {
|
||||
image := c.service.Spec.TaskTemplate.ContainerSpec.Image
|
||||
if ref, err := reference.ParseNormalizedNamed(image); err == nil {
|
||||
// update image string for display (strips any digest)
|
||||
if nt, ok := ref.(reference.NamedTagged); ok {
|
||||
if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
|
||||
image = reference.FamiliarString(namedTagged)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return image
|
||||
}
|
||||
|
||||
func (c *serviceContext) Ports() string {
|
||||
if c.service.Spec.EndpointSpec == nil || c.service.Spec.EndpointSpec.Ports == nil {
|
||||
return ""
|
||||
}
|
||||
ports := []string{}
|
||||
for _, pConfig := range c.service.Spec.EndpointSpec.Ports {
|
||||
if pConfig.PublishMode == swarm.PortConfigPublishModeIngress {
|
||||
ports = append(ports, fmt.Sprintf("*:%d->%d/%s",
|
||||
pConfig.PublishedPort,
|
||||
pConfig.TargetPort,
|
||||
pConfig.Protocol,
|
||||
))
|
||||
}
|
||||
}
|
||||
return strings.Join(ports, ",")
|
||||
}
|
|
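The tests that follow only exercise ServiceListWrite; the pretty path above goes through ServiceInspectWrite with two resolver callbacks. A minimal sketch of driving it for a service that has already been fetched (the wrapper function is hypothetical, and it assumes inspect.GetRefFunc has the func(string) (interface{}, []byte, error) shape implied by the calls above):

// Hypothetical helper: render one already-fetched service with the "pretty"
// template. getRef hands back the service itself; getNetwork reports a lookup
// failure, which resolveNetworks tolerates by falling back to raw network IDs.
func exampleServiceInspectPretty(ctx Context, service swarm.Service) error {
	ctx.Format = NewServiceFormat(PrettyFormatKey)
	getRef := func(ref string) (interface{}, []byte, error) {
		return service, nil, nil
	}
	getNetwork := func(ref string) (interface{}, []byte, error) {
		return nil, nil, errors.New("network lookup not wired in this sketch")
	}
	return ServiceInspectWrite(ctx, []string{service.ID}, getRef, getNetwork)
}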
@@ -1,239 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestServiceContextWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table format
|
||||
{
|
||||
Context{Format: NewServiceListFormat("table", false)},
|
||||
`ID NAME MODE REPLICAS IMAGE PORTS
|
||||
id_baz baz global 2/4 *:80->8080/tcp
|
||||
id_bar bar replicated 2/4 *:80->8080/tcp
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewServiceListFormat("table", true)},
|
||||
`id_baz
|
||||
id_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewServiceListFormat("table {{.Name}}", false)},
|
||||
`NAME
|
||||
baz
|
||||
bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewServiceListFormat("table {{.Name}}", true)},
|
||||
`NAME
|
||||
baz
|
||||
bar
|
||||
`,
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
Context{Format: NewServiceListFormat("raw", false)},
|
||||
`id: id_baz
|
||||
name: baz
|
||||
mode: global
|
||||
replicas: 2/4
|
||||
image:
|
||||
ports: *:80->8080/tcp
|
||||
|
||||
id: id_bar
|
||||
name: bar
|
||||
mode: replicated
|
||||
replicas: 2/4
|
||||
image:
|
||||
ports: *:80->8080/tcp
|
||||
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewServiceListFormat("raw", true)},
|
||||
`id: id_baz
|
||||
id: id_bar
|
||||
`,
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{Format: NewServiceListFormat("{{.Name}}", false)},
|
||||
`baz
|
||||
bar
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
services := []swarm.Service{
|
||||
{
|
||||
ID: "id_baz",
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{Name: "baz"},
|
||||
EndpointSpec: &swarm.EndpointSpec{
|
||||
Ports: []swarm.PortConfig{
|
||||
{
|
||||
PublishMode: "ingress",
|
||||
PublishedPort: 80,
|
||||
TargetPort: 8080,
|
||||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "id_bar",
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{Name: "bar"},
|
||||
EndpointSpec: &swarm.EndpointSpec{
|
||||
Ports: []swarm.PortConfig{
|
||||
{
|
||||
PublishMode: "ingress",
|
||||
PublishedPort: 80,
|
||||
TargetPort: 8080,
|
||||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
info := map[string]ServiceListInfo{
|
||||
"id_baz": {
|
||||
Mode: "global",
|
||||
Replicas: "2/4",
|
||||
},
|
||||
"id_bar": {
|
||||
Mode: "replicated",
|
||||
Replicas: "2/4",
|
||||
},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := ServiceListWrite(testcase.context, services, info)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceContextWriteJSON(t *testing.T) {
|
||||
services := []swarm.Service{
|
||||
{
|
||||
ID: "id_baz",
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{Name: "baz"},
|
||||
EndpointSpec: &swarm.EndpointSpec{
|
||||
Ports: []swarm.PortConfig{
|
||||
{
|
||||
PublishMode: "ingress",
|
||||
PublishedPort: 80,
|
||||
TargetPort: 8080,
|
||||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "id_bar",
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{Name: "bar"},
|
||||
EndpointSpec: &swarm.EndpointSpec{
|
||||
Ports: []swarm.PortConfig{
|
||||
{
|
||||
PublishMode: "ingress",
|
||||
PublishedPort: 80,
|
||||
TargetPort: 8080,
|
||||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
info := map[string]ServiceListInfo{
|
||||
"id_baz": {
|
||||
Mode: "global",
|
||||
Replicas: "2/4",
|
||||
},
|
||||
"id_bar": {
|
||||
Mode: "replicated",
|
||||
Replicas: "2/4",
|
||||
},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
{"ID": "id_baz", "Name": "baz", "Mode": "global", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"},
|
||||
{"ID": "id_bar", "Name": "bar", "Mode": "replicated", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"},
|
||||
}
|
||||
|
||||
out := bytes.NewBufferString("")
|
||||
err := ServiceListWrite(Context{Format: "{{json .}}", Output: out}, services, info)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(line), &m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, expectedJSONs[i], m)
|
||||
}
|
||||
}
|
||||
func TestServiceContextWriteJSONField(t *testing.T) {
|
||||
services := []swarm.Service{
|
||||
{ID: "id_baz", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "baz"}}},
|
||||
{ID: "id_bar", Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "bar"}}},
|
||||
}
|
||||
info := map[string]ServiceListInfo{
|
||||
"id_baz": {
|
||||
Mode: "global",
|
||||
Replicas: "2/4",
|
||||
},
|
||||
"id_bar": {
|
||||
Mode: "replicated",
|
||||
Replicas: "2/4",
|
||||
},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := ServiceListWrite(Context{Format: "{{json .Name}}", Output: out}, services, info)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var s string
|
||||
if err := json.Unmarshal([]byte(line), &s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, services[i].Spec.Name, s)
|
||||
}
|
||||
}
|
|
@@ -1,67 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultStackTableFormat = "table {{.Name}}\t{{.Services}}"
|
||||
|
||||
stackServicesHeader = "SERVICES"
|
||||
)
|
||||
|
||||
// Stack contains deployed stack information.
|
||||
type Stack struct {
|
||||
// Name is the name of the stack
|
||||
Name string
|
||||
// Services is the number of services in the stack
|
||||
Services int
|
||||
}
|
||||
|
||||
// NewStackFormat returns a format for use with a stack Context
|
||||
func NewStackFormat(source string) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
return defaultStackTableFormat
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// StackWrite writes formatted stacks using the Context
|
||||
func StackWrite(ctx Context, stacks []*Stack) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, stack := range stacks {
|
||||
if err := format(&stackContext{s: stack}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ctx.Write(newStackContext(), render)
|
||||
}
|
||||
|
||||
type stackContext struct {
|
||||
HeaderContext
|
||||
s *Stack
|
||||
}
|
||||
|
||||
func newStackContext() *stackContext {
|
||||
stackCtx := stackContext{}
|
||||
stackCtx.header = map[string]string{
|
||||
"Name": nameHeader,
|
||||
"Services": stackServicesHeader,
|
||||
}
|
||||
return &stackCtx
|
||||
}
|
||||
|
||||
func (s *stackContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(s)
|
||||
}
|
||||
|
||||
func (s *stackContext) Name() string {
|
||||
return s.s.Name
|
||||
}
|
||||
|
||||
func (s *stackContext) Services() string {
|
||||
return strconv.Itoa(s.s.Services)
|
||||
}
|
|
@@ -1,64 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestStackContextWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table format
|
||||
{
|
||||
Context{Format: NewStackFormat("table")},
|
||||
`NAME SERVICES
|
||||
baz 2
|
||||
bar 1
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewStackFormat("table {{.Name}}")},
|
||||
`NAME
|
||||
baz
|
||||
bar
|
||||
`,
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{Format: NewStackFormat("{{.Name}}")},
|
||||
`baz
|
||||
bar
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
stacks := []*Stack{
|
||||
{Name: "baz", Services: 2},
|
||||
{Name: "bar", Services: 1},
|
||||
}
|
||||
for _, testcase := range cases {
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := StackWrite(testcase.context, stacks)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,220 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
winOSType = "windows"
|
||||
defaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}"
|
||||
winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}"
|
||||
|
||||
containerHeader = "CONTAINER"
|
||||
cpuPercHeader = "CPU %"
|
||||
netIOHeader = "NET I/O"
|
||||
blockIOHeader = "BLOCK I/O"
|
||||
memPercHeader = "MEM %" // Used only on Linux
|
||||
winMemUseHeader = "PRIV WORKING SET" // Used only on Windows
|
||||
memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux
|
||||
pidsHeader = "PIDS" // Used only on Linux
|
||||
)
|
||||
|
||||
// StatsEntry represents the statistics data collected from a container
|
||||
type StatsEntry struct {
|
||||
Container string
|
||||
Name string
|
||||
ID string
|
||||
CPUPercentage float64
|
||||
Memory float64 // On Windows this is the private working set
|
||||
MemoryLimit float64 // Not used on Windows
|
||||
MemoryPercentage float64 // Not used on Windows
|
||||
NetworkRx float64
|
||||
NetworkTx float64
|
||||
BlockRead float64
|
||||
BlockWrite float64
|
||||
PidsCurrent uint64 // Not used on Windows
|
||||
IsInvalid bool
|
||||
}
|
||||
|
||||
// ContainerStats represents an entity to store containers statistics synchronously
|
||||
type ContainerStats struct {
|
||||
mutex sync.Mutex
|
||||
StatsEntry
|
||||
err error
|
||||
}
|
||||
|
||||
// GetError returns the container statistics error.
|
||||
// This is used to determine whether the statistics are valid or not
|
||||
func (cs *ContainerStats) GetError() error {
|
||||
cs.mutex.Lock()
|
||||
defer cs.mutex.Unlock()
|
||||
return cs.err
|
||||
}
|
||||
|
||||
// SetErrorAndReset zeroes all the container statistics and stores the error.
|
||||
// It is used when a timeout error is received during statistics collection, to reduce lock overhead
|
||||
func (cs *ContainerStats) SetErrorAndReset(err error) {
|
||||
cs.mutex.Lock()
|
||||
defer cs.mutex.Unlock()
|
||||
cs.CPUPercentage = 0
|
||||
cs.Memory = 0
|
||||
cs.MemoryPercentage = 0
|
||||
cs.MemoryLimit = 0
|
||||
cs.NetworkRx = 0
|
||||
cs.NetworkTx = 0
|
||||
cs.BlockRead = 0
|
||||
cs.BlockWrite = 0
|
||||
cs.PidsCurrent = 0
|
||||
cs.err = err
|
||||
cs.IsInvalid = true
|
||||
}
|
||||
|
||||
// SetError sets container statistics error
|
||||
func (cs *ContainerStats) SetError(err error) {
|
||||
cs.mutex.Lock()
|
||||
defer cs.mutex.Unlock()
|
||||
cs.err = err
|
||||
if err != nil {
|
||||
cs.IsInvalid = true
|
||||
}
|
||||
}
|
||||
|
||||
// SetStatistics sets the container statistics
|
||||
func (cs *ContainerStats) SetStatistics(s StatsEntry) {
|
||||
cs.mutex.Lock()
|
||||
defer cs.mutex.Unlock()
|
||||
s.Container = cs.Container
|
||||
cs.StatsEntry = s
|
||||
}
|
||||
|
||||
// GetStatistics returns container statistics with other meta data such as the container name
|
||||
func (cs *ContainerStats) GetStatistics() StatsEntry {
|
||||
cs.mutex.Lock()
|
||||
defer cs.mutex.Unlock()
|
||||
return cs.StatsEntry
|
||||
}
|
||||
|
||||
// NewStatsFormat returns a Format for rendering a container stats context
|
||||
func NewStatsFormat(source, osType string) Format {
|
||||
if source == TableFormatKey {
|
||||
if osType == winOSType {
|
||||
return Format(winDefaultStatsTableFormat)
|
||||
}
|
||||
return Format(defaultStatsTableFormat)
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// NewContainerStats returns a new ContainerStats entity and sets the given container name on it
|
||||
func NewContainerStats(container, osType string) *ContainerStats {
|
||||
return &ContainerStats{
|
||||
StatsEntry: StatsEntry{Container: container},
|
||||
}
|
||||
}
|
||||
|
||||
// ContainerStatsWrite renders the context for a list of container statistics
|
||||
func ContainerStatsWrite(ctx Context, containerStats []StatsEntry, osType string) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, cstats := range containerStats {
|
||||
containerStatsCtx := &containerStatsContext{
|
||||
s: cstats,
|
||||
os: osType,
|
||||
}
|
||||
if err := format(containerStatsCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
memUsage := memUseHeader
|
||||
if osType == winOSType {
|
||||
memUsage = winMemUseHeader
|
||||
}
|
||||
containerStatsCtx := containerStatsContext{}
|
||||
containerStatsCtx.header = map[string]string{
|
||||
"Container": containerHeader,
|
||||
"Name": nameHeader,
|
||||
"ID": containerIDHeader,
|
||||
"CPUPerc": cpuPercHeader,
|
||||
"MemUsage": memUsage,
|
||||
"MemPerc": memPercHeader,
|
||||
"NetIO": netIOHeader,
|
||||
"BlockIO": blockIOHeader,
|
||||
"PIDs": pidsHeader,
|
||||
}
|
||||
containerStatsCtx.os = osType
|
||||
return ctx.Write(&containerStatsCtx, render)
|
||||
}
|
||||
|
||||
type containerStatsContext struct {
|
||||
HeaderContext
|
||||
s StatsEntry
|
||||
os string
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) Container() string {
|
||||
return c.s.Container
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) Name() string {
|
||||
if len(c.s.Name) > 1 {
|
||||
return c.s.Name[1:]
|
||||
}
|
||||
return "--"
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) ID() string {
|
||||
return c.s.ID
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) CPUPerc() string {
|
||||
if c.s.IsInvalid {
|
||||
return fmt.Sprintf("--")
|
||||
}
|
||||
return fmt.Sprintf("%.2f%%", c.s.CPUPercentage)
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) MemUsage() string {
|
||||
if c.s.IsInvalid {
|
||||
return "-- / --"
|
||||
}
|
||||
if c.os == winOSType {
|
||||
return units.BytesSize(c.s.Memory)
|
||||
}
|
||||
return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit))
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) MemPerc() string {
|
||||
if c.s.IsInvalid || c.os == winOSType {
|
||||
return "--"
|
||||
}
|
||||
return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage)
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) NetIO() string {
|
||||
if c.s.IsInvalid {
|
||||
return "--"
|
||||
}
|
||||
return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3))
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) BlockIO() string {
|
||||
if c.s.IsInvalid {
|
||||
return "--"
|
||||
}
|
||||
return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3))
|
||||
}
|
||||
|
||||
func (c *containerStatsContext) PIDs() string {
|
||||
if c.s.IsInvalid || c.os == winOSType {
|
||||
return "--"
|
||||
}
|
||||
return fmt.Sprintf("%d", c.s.PidsCurrent)
|
||||
}
|
|
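ContainerStats above is the one mutex-guarded type in the package, and the tests that follow only cover the stateless StatsEntry rendering. A minimal sketch of the intended split between a collecting goroutine and a rendering reader (the channel names and wrapper functions are hypothetical):

// Hypothetical collector loop: one goroutine keeps the entry fresh, and a
// timeout is stored via SetErrorAndReset so stale numbers are zeroed and
// marked invalid.
func exampleCollect(cs *ContainerStats, samples <-chan StatsEntry, timeouts <-chan error) {
	for {
		select {
		case s, ok := <-samples:
			if !ok {
				return
			}
			cs.SetStatistics(s) // keeps cs.Container, replaces the rest
		case err := <-timeouts:
			cs.SetErrorAndReset(err)
		}
	}
}

// Hypothetical reader: take a consistent snapshot before handing it to
// ContainerStatsWrite; GetError reports whether the snapshot is trustworthy.
func exampleSnapshot(cs *ContainerStats) (StatsEntry, error) {
	return cs.GetStatistics(), cs.GetError()
}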
@@ -1,266 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestContainerStatsContext(t *testing.T) {
|
||||
containerID := stringid.GenerateRandomID()
|
||||
|
||||
var ctx containerStatsContext
|
||||
tt := []struct {
|
||||
stats StatsEntry
|
||||
osType string
|
||||
expValue string
|
||||
expHeader string
|
||||
call func() string
|
||||
}{
|
||||
{StatsEntry{Container: containerID}, "", containerID, containerHeader, ctx.Container},
|
||||
{StatsEntry{CPUPercentage: 5.5}, "", "5.50%", cpuPercHeader, ctx.CPUPerc},
|
||||
{StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "", "--", cpuPercHeader, ctx.CPUPerc},
|
||||
{StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "", "0.31B / 12.3B", netIOHeader, ctx.NetIO},
|
||||
{StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "", "--", netIOHeader, ctx.NetIO},
|
||||
{StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "", "0.1B / 2.3B", blockIOHeader, ctx.BlockIO},
|
||||
{StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "", "--", blockIOHeader, ctx.BlockIO},
|
||||
{StatsEntry{MemoryPercentage: 10.2}, "", "10.20%", memPercHeader, ctx.MemPerc},
|
||||
{StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "", "--", memPercHeader, ctx.MemPerc},
|
||||
{StatsEntry{MemoryPercentage: 10.2}, "windows", "--", memPercHeader, ctx.MemPerc},
|
||||
{StatsEntry{Memory: 24, MemoryLimit: 30}, "", "24B / 30B", memUseHeader, ctx.MemUsage},
|
||||
{StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "", "-- / --", memUseHeader, ctx.MemUsage},
|
||||
{StatsEntry{Memory: 24, MemoryLimit: 30}, "windows", "24B", winMemUseHeader, ctx.MemUsage},
|
||||
{StatsEntry{PidsCurrent: 10}, "", "10", pidsHeader, ctx.PIDs},
|
||||
{StatsEntry{PidsCurrent: 10, IsInvalid: true}, "", "--", pidsHeader, ctx.PIDs},
|
||||
{StatsEntry{PidsCurrent: 10}, "windows", "--", pidsHeader, ctx.PIDs},
|
||||
}
|
||||
|
||||
for _, te := range tt {
|
||||
ctx = containerStatsContext{s: te.stats, os: te.osType}
|
||||
if v := te.call(); v != te.expValue {
|
||||
t.Fatalf("Expected %q, got %q", te.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerStatsContextWrite(t *testing.T) {
|
||||
tt := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "table {{.MemUsage}}"},
|
||||
`MEM USAGE / LIMIT
|
||||
20B / 20B
|
||||
-- / --
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{.Container}} {{.ID}} {{.Name}}"},
|
||||
`container1 abcdef foo
|
||||
container2 --
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{.Container}} {{.CPUPerc}}"},
|
||||
`container1 20.00%
|
||||
container2 --
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, te := range tt {
|
||||
stats := []StatsEntry{
|
||||
{
|
||||
Container: "container1",
|
||||
ID: "abcdef",
|
||||
Name: "/foo",
|
||||
CPUPercentage: 20,
|
||||
Memory: 20,
|
||||
MemoryLimit: 20,
|
||||
MemoryPercentage: 20,
|
||||
NetworkRx: 20,
|
||||
NetworkTx: 20,
|
||||
BlockRead: 20,
|
||||
BlockWrite: 20,
|
||||
PidsCurrent: 2,
|
||||
IsInvalid: false,
|
||||
},
|
||||
{
|
||||
Container: "container2",
|
||||
CPUPercentage: 30,
|
||||
Memory: 30,
|
||||
MemoryLimit: 30,
|
||||
MemoryPercentage: 30,
|
||||
NetworkRx: 30,
|
||||
NetworkTx: 30,
|
||||
BlockRead: 30,
|
||||
BlockWrite: 30,
|
||||
PidsCurrent: 3,
|
||||
IsInvalid: true,
|
||||
},
|
||||
}
|
||||
var out bytes.Buffer
|
||||
te.context.Output = &out
|
||||
err := ContainerStatsWrite(te.context, stats, "linux")
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, te.expected)
|
||||
} else {
|
||||
assert.Equal(t, te.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerStatsContextWriteWindows(t *testing.T) {
|
||||
tt := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{Format: "table {{.MemUsage}}"},
|
||||
`PRIV WORKING SET
|
||||
20B
|
||||
-- / --
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{.Container}} {{.CPUPerc}}"},
|
||||
`container1 20.00%
|
||||
container2 --
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"},
|
||||
`container1 -- --
|
||||
container2 -- --
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, te := range tt {
|
||||
stats := []StatsEntry{
|
||||
{
|
||||
Container: "container1",
|
||||
CPUPercentage: 20,
|
||||
Memory: 20,
|
||||
MemoryLimit: 20,
|
||||
MemoryPercentage: 20,
|
||||
NetworkRx: 20,
|
||||
NetworkTx: 20,
|
||||
BlockRead: 20,
|
||||
BlockWrite: 20,
|
||||
PidsCurrent: 2,
|
||||
IsInvalid: false,
|
||||
},
|
||||
{
|
||||
Container: "container2",
|
||||
CPUPercentage: 30,
|
||||
Memory: 30,
|
||||
MemoryLimit: 30,
|
||||
MemoryPercentage: 30,
|
||||
NetworkRx: 30,
|
||||
NetworkTx: 30,
|
||||
BlockRead: 30,
|
||||
BlockWrite: 30,
|
||||
PidsCurrent: 3,
|
||||
IsInvalid: true,
|
||||
},
|
||||
}
|
||||
var out bytes.Buffer
|
||||
te.context.Output = &out
|
||||
err := ContainerStatsWrite(te.context, stats, "windows")
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, te.expected)
|
||||
} else {
|
||||
assert.Equal(t, te.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerStatsContextWriteWithNoStats(t *testing.T) {
|
||||
var out bytes.Buffer
|
||||
|
||||
contexts := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{
|
||||
Format: "{{.Container}}",
|
||||
Output: &out,
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Container}}",
|
||||
Output: &out,
|
||||
},
|
||||
"CONTAINER\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Container}}\t{{.CPUPerc}}",
|
||||
Output: &out,
|
||||
},
|
||||
"CONTAINER CPU %\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
ContainerStatsWrite(context.context, []StatsEntry{}, "linux")
|
||||
assert.Equal(t, context.expected, out.String())
|
||||
// Clean buffer
|
||||
out.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerStatsContextWriteWithNoStatsWindows(t *testing.T) {
|
||||
var out bytes.Buffer
|
||||
|
||||
contexts := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{
|
||||
Format: "{{.Container}}",
|
||||
Output: &out,
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Container}}\t{{.MemUsage}}",
|
||||
Output: &out,
|
||||
},
|
||||
"CONTAINER PRIV WORKING SET\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}",
|
||||
Output: &out,
|
||||
},
|
||||
"CONTAINER CPU % PRIV WORKING SET\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
ContainerStatsWrite(context.context, []StatsEntry{}, "windows")
|
||||
assert.Equal(t, context.expected, out.String())
|
||||
// Clean buffer
|
||||
out.Reset()
|
||||
}
|
||||
}
|
|
@@ -1,150 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultTaskTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Image}}\t{{.Node}}\t{{.DesiredState}}\t{{.CurrentState}}\t{{.Error}}\t{{.Ports}}"
|
||||
|
||||
nodeHeader = "NODE"
|
||||
taskIDHeader = "ID"
|
||||
desiredStateHeader = "DESIRED STATE"
|
||||
currentStateHeader = "CURRENT STATE"
|
||||
errorHeader = "ERROR"
|
||||
|
||||
maxErrLength = 30
|
||||
)
|
||||
|
||||
// NewTaskFormat returns a Format for rendering using a task Context
|
||||
func NewTaskFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
return defaultTaskTableFormat
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `id: {{.ID}}`
|
||||
}
|
||||
return `id: {{.ID}}\nname: {{.Name}}\nimage: {{.Image}}\nnode: {{.Node}}\ndesired_state: {{.DesiredState}}\ncurrent_state: {{.CurrentState}}\nerror: {{.Error}}\nports: {{.Ports}}\n`
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// TaskWrite writes the context
|
||||
func TaskWrite(ctx Context, tasks []swarm.Task, names map[string]string, nodes map[string]string) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, task := range tasks {
|
||||
taskCtx := &taskContext{trunc: ctx.Trunc, task: task, name: names[task.ID], node: nodes[task.ID]}
|
||||
if err := format(taskCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
taskCtx := taskContext{}
|
||||
taskCtx.header = taskHeaderContext{
|
||||
"ID": taskIDHeader,
|
||||
"Name": nameHeader,
|
||||
"Image": imageHeader,
|
||||
"Node": nodeHeader,
|
||||
"DesiredState": desiredStateHeader,
|
||||
"CurrentState": currentStateHeader,
|
||||
"Error": errorHeader,
|
||||
"Ports": portsHeader,
|
||||
}
|
||||
return ctx.Write(&taskCtx, render)
|
||||
}
|
||||
|
||||
type taskHeaderContext map[string]string
|
||||
|
||||
type taskContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
task swarm.Task
|
||||
name string
|
||||
node string
|
||||
}
|
||||
|
||||
func (c *taskContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *taskContext) ID() string {
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.task.ID)
|
||||
}
|
||||
return c.task.ID
|
||||
}
|
||||
|
||||
func (c *taskContext) Name() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
func (c *taskContext) Image() string {
|
||||
image := c.task.Spec.ContainerSpec.Image
|
||||
if c.trunc {
|
||||
ref, err := reference.ParseNormalizedNamed(image)
|
||||
if err == nil {
|
||||
// update image string for display (strips any digest)
|
||||
if nt, ok := ref.(reference.NamedTagged); ok {
|
||||
if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
|
||||
image = reference.FamiliarString(namedTagged)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return image
|
||||
}
|
||||
|
||||
func (c *taskContext) Node() string {
|
||||
return c.node
|
||||
}
|
||||
|
||||
func (c *taskContext) DesiredState() string {
|
||||
return command.PrettyPrint(c.task.DesiredState)
|
||||
}
|
||||
|
||||
func (c *taskContext) CurrentState() string {
|
||||
return fmt.Sprintf("%s %s ago",
|
||||
command.PrettyPrint(c.task.Status.State),
|
||||
strings.ToLower(units.HumanDuration(time.Since(c.task.Status.Timestamp))),
|
||||
)
|
||||
}
|
||||
|
||||
func (c *taskContext) Error() string {
|
||||
// Trim and quote the error message.
|
||||
taskErr := c.task.Status.Err
|
||||
if c.trunc && len(taskErr) > maxErrLength {
|
||||
taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1])
|
||||
}
|
||||
if len(taskErr) > 0 {
|
||||
taskErr = fmt.Sprintf("\"%s\"", taskErr)
|
||||
}
|
||||
return taskErr
|
||||
}
|
||||
|
||||
func (c *taskContext) Ports() string {
|
||||
if len(c.task.Status.PortStatus.Ports) == 0 {
|
||||
return ""
|
||||
}
|
||||
ports := []string{}
|
||||
for _, pConfig := range c.task.Status.PortStatus.Ports {
|
||||
ports = append(ports, fmt.Sprintf("*:%d->%d/%s",
|
||||
pConfig.PublishedPort,
|
||||
pConfig.TargetPort,
|
||||
pConfig.Protocol,
|
||||
))
|
||||
}
|
||||
return strings.Join(ports, ",")
|
||||
}
|
|
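The trunc flag threaded through taskContext is not exercised by the tests that follow; when set, task IDs, image references and long error messages are shortened before rendering. A minimal sketch (the wrapper function is hypothetical):

// Hypothetical wrapper: same call as in the tests, but with truncation on, so
// ID() goes through stringid.TruncateID, Image() drops digests and Error() is
// capped at maxErrLength characters.
func exampleTruncatedTasks(ctx Context, tasks []swarm.Task, names, nodes map[string]string) error {
	ctx.Trunc = true
	ctx.Format = NewTaskFormat("table", false)
	return TaskWrite(ctx, tasks, names, nodes)
}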
@@ -1,107 +0,0 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTaskContextWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewTaskFormat("table", true)},
|
||||
`taskID1
|
||||
taskID2
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewTaskFormat("table {{.Name}}\t{{.Node}}\t{{.Ports}}", false)},
|
||||
`NAME NODE PORTS
|
||||
foobar_baz foo1
|
||||
foobar_bar foo2
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewTaskFormat("table {{.Name}}", true)},
|
||||
`NAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewTaskFormat("raw", true)},
|
||||
`id: taskID1
|
||||
id: taskID2
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewTaskFormat("{{.Name}} {{.Node}}", false)},
|
||||
`foobar_baz foo1
|
||||
foobar_bar foo2
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
tasks := []swarm.Task{
|
||||
{ID: "taskID1"},
|
||||
{ID: "taskID2"},
|
||||
}
|
||||
names := map[string]string{
|
||||
"taskID1": "foobar_baz",
|
||||
"taskID2": "foobar_bar",
|
||||
}
|
||||
nodes := map[string]string{
|
||||
"taskID1": "foo1",
|
||||
"taskID2": "foo2",
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := TaskWrite(testcase.context, tasks, names, nodes)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTaskContextWriteJSONField(t *testing.T) {
|
||||
tasks := []swarm.Task{
|
||||
{ID: "taskID1"},
|
||||
{ID: "taskID2"},
|
||||
}
|
||||
names := map[string]string{
|
||||
"taskID1": "foobar_baz",
|
||||
"taskID2": "foobar_bar",
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := TaskWrite(Context{Format: "{{json .ID}}", Output: out}, tasks, names, map[string]string{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
var s string
|
||||
if err := json.Unmarshal([]byte(line), &s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, tasks[i].ID, s)
|
||||
}
|
||||
}
@@ -1,131 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultVolumeQuietFormat = "{{.Name}}"
|
||||
defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}"
|
||||
|
||||
volumeNameHeader = "VOLUME NAME"
|
||||
mountpointHeader = "MOUNTPOINT"
|
||||
linksHeader = "LINKS"
|
||||
// Status header ?
|
||||
)
|
||||
|
||||
// NewVolumeFormat returns a format for use with a volume Context
|
||||
func NewVolumeFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultVolumeQuietFormat
|
||||
}
|
||||
return defaultVolumeTableFormat
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `name: {{.Name}}`
|
||||
}
|
||||
return `name: {{.Name}}\ndriver: {{.Driver}}\n`
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
// VolumeWrite writes formatted volumes using the Context
|
||||
func VolumeWrite(ctx Context, volumes []*types.Volume) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, volume := range volumes {
|
||||
if err := format(&volumeContext{v: *volume}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ctx.Write(newVolumeContext(), render)
|
||||
}
|
||||
|
||||
type volumeHeaderContext map[string]string
|
||||
|
||||
func (c volumeHeaderContext) Label(name string) string {
|
||||
n := strings.Split(name, ".")
|
||||
r := strings.NewReplacer("-", " ", "_", " ")
|
||||
h := r.Replace(n[len(n)-1])
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
type volumeContext struct {
|
||||
HeaderContext
|
||||
v types.Volume
|
||||
}
|
||||
|
||||
func newVolumeContext() *volumeContext {
|
||||
volumeCtx := volumeContext{}
|
||||
volumeCtx.header = volumeHeaderContext{
|
||||
"Name": volumeNameHeader,
|
||||
"Driver": driverHeader,
|
||||
"Scope": scopeHeader,
|
||||
"Mountpoint": mountpointHeader,
|
||||
"Labels": labelsHeader,
|
||||
"Links": linksHeader,
|
||||
"Size": sizeHeader,
|
||||
}
|
||||
return &volumeCtx
|
||||
}
|
||||
|
||||
func (c *volumeContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *volumeContext) Name() string {
|
||||
return c.v.Name
|
||||
}
|
||||
|
||||
func (c *volumeContext) Driver() string {
|
||||
return c.v.Driver
|
||||
}
|
||||
|
||||
func (c *volumeContext) Scope() string {
|
||||
return c.v.Scope
|
||||
}
|
||||
|
||||
func (c *volumeContext) Mountpoint() string {
|
||||
return c.v.Mountpoint
|
||||
}
|
||||
|
||||
func (c *volumeContext) Labels() string {
|
||||
if c.v.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
var joinLabels []string
|
||||
for k, v := range c.v.Labels {
|
||||
joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return strings.Join(joinLabels, ",")
|
||||
}
|
||||
|
||||
func (c *volumeContext) Label(name string) string {
|
||||
if c.v.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
return c.v.Labels[name]
|
||||
}
|
||||
|
||||
func (c *volumeContext) Links() string {
|
||||
if c.v.UsageData == nil {
|
||||
return "N/A"
|
||||
}
|
||||
return fmt.Sprintf("%d", c.v.UsageData.RefCount)
|
||||
}
|
||||
|
||||
func (c *volumeContext) Size() string {
|
||||
if c.v.UsageData == nil {
|
||||
return "N/A"
|
||||
}
|
||||
return units.HumanSize(float64(c.v.UsageData.Size))
|
||||
}
@@ -1,183 +0,0 @@
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestVolumeContext(t *testing.T) {
|
||||
volumeName := stringid.GenerateRandomID()
|
||||
|
||||
var ctx volumeContext
|
||||
cases := []struct {
|
||||
volumeCtx volumeContext
|
||||
expValue string
|
||||
call func() string
|
||||
}{
|
||||
{volumeContext{
|
||||
v: types.Volume{Name: volumeName},
|
||||
}, volumeName, ctx.Name},
|
||||
{volumeContext{
|
||||
v: types.Volume{Driver: "driver_name"},
|
||||
}, "driver_name", ctx.Driver},
|
||||
{volumeContext{
|
||||
v: types.Volume{Scope: "local"},
|
||||
}, "local", ctx.Scope},
|
||||
{volumeContext{
|
||||
v: types.Volume{Mountpoint: "mountpoint"},
|
||||
}, "mountpoint", ctx.Mountpoint},
|
||||
{volumeContext{
|
||||
v: types.Volume{},
|
||||
}, "", ctx.Labels},
|
||||
{volumeContext{
|
||||
v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}},
|
||||
}, "label1=value1,label2=value2", ctx.Labels},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = c.volumeCtx
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
compareMultipleValues(t, v, c.expValue)
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestVolumeContextWrite(t *testing.T) {
|
||||
cases := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
|
||||
// Errors
|
||||
{
|
||||
Context{Format: "{{InvalidFunction}}"},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: "{{nil}}"},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table format
|
||||
{
|
||||
Context{Format: NewVolumeFormat("table", false)},
|
||||
`DRIVER VOLUME NAME
|
||||
foo foobar_baz
|
||||
bar foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewVolumeFormat("table", true)},
|
||||
`foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewVolumeFormat("table {{.Name}}", false)},
|
||||
`VOLUME NAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewVolumeFormat("table {{.Name}}", true)},
|
||||
`VOLUME NAME
|
||||
foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
Context{Format: NewVolumeFormat("raw", false)},
|
||||
`name: foobar_baz
|
||||
driver: foo
|
||||
|
||||
name: foobar_bar
|
||||
driver: bar
|
||||
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{Format: NewVolumeFormat("raw", true)},
|
||||
`name: foobar_baz
|
||||
name: foobar_bar
|
||||
`,
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{Format: NewVolumeFormat("{{.Name}}", false)},
|
||||
`foobar_baz
|
||||
foobar_bar
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
volumes := []*types.Volume{
|
||||
{Name: "foobar_baz", Driver: "foo"},
|
||||
{Name: "foobar_bar", Driver: "bar"},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
testcase.context.Output = out
|
||||
err := VolumeWrite(testcase.context, volumes)
|
||||
if err != nil {
|
||||
assert.EqualError(t, err, testcase.expected)
|
||||
} else {
|
||||
assert.Equal(t, testcase.expected, out.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestVolumeContextWriteJSON(t *testing.T) {
|
||||
volumes := []*types.Volume{
|
||||
{Driver: "foo", Name: "foobar_baz"},
|
||||
{Driver: "bar", Name: "foobar_bar"},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
{"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"},
|
||||
{"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(line), &m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, expectedJSONs[i], m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVolumeContextWriteJSONField(t *testing.T) {
|
||||
volumes := []*types.Volume{
|
||||
{Driver: "foo", Name: "foobar_baz"},
|
||||
{Driver: "bar", Name: "foobar_bar"},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
t.Logf("Output: line %d: %s", i, line)
|
||||
var s string
|
||||
if err := json.Unmarshal([]byte(line), &s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, volumes[i].Name, s)
|
||||
}
|
||||
}
@@ -1,28 +0,0 @@
package idresolver

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

type fakeClient struct {
	client.Client
	nodeInspectFunc    func(string) (swarm.Node, []byte, error)
	serviceInspectFunc func(string) (swarm.Service, []byte, error)
}

func (cli *fakeClient) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
	if cli.nodeInspectFunc != nil {
		return cli.nodeInspectFunc(nodeID)
	}
	return swarm.Node{}, []byte{}, nil
}

func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) {
	if cli.serviceInspectFunc != nil {
		return cli.serviceInspectFunc(serviceID)
	}
	return swarm.Service{}, []byte{}, nil
}
@@ -1,70 +0,0 @@
package idresolver

import (
	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/pkg/errors"
)

// IDResolver provides ID to Name resolution.
type IDResolver struct {
	client    client.APIClient
	noResolve bool
	cache     map[string]string
}

// New creates a new IDResolver.
func New(client client.APIClient, noResolve bool) *IDResolver {
	return &IDResolver{
		client:    client,
		noResolve: noResolve,
		cache:     make(map[string]string),
	}
}

func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) {
	switch t.(type) {
	case swarm.Node:
		node, _, err := r.client.NodeInspectWithRaw(ctx, id)
		if err != nil {
			return id, nil
		}
		if node.Spec.Annotations.Name != "" {
			return node.Spec.Annotations.Name, nil
		}
		if node.Description.Hostname != "" {
			return node.Description.Hostname, nil
		}
		return id, nil
	case swarm.Service:
		service, _, err := r.client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{})
		if err != nil {
			return id, nil
		}
		return service.Spec.Annotations.Name, nil
	default:
		return "", errors.Errorf("unsupported type")
	}

}

// Resolve will attempt to resolve an ID to a Name by querying the manager.
// Results are stored into a cache.
// If the `-n` flag is used in the command-line, resolution is disabled.
func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) {
	if r.noResolve {
		return id, nil
	}
	if name, ok := r.cache[id]; ok {
		return name, nil
	}
	name, err := r.get(ctx, t, id)
	if err != nil {
		return "", err
	}
	r.cache[id] = name
	return name, nil
}
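The Resolve doc comment above describes a name lookup that is cached and keyed by the type of its second argument. A minimal sketch of typical use against a swarm task, assuming the pre-removal cli/command/idresolver import path; apiClient stands for any client.APIClient:

// Sketch only: the idresolver import path is an assumption.
package example

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/cli/command/idresolver"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

// printTaskOwner resolves a task's node and service IDs to display names,
// falling back to the raw IDs when the manager cannot be queried.
func printTaskOwner(apiClient client.APIClient, task swarm.Task) error {
	resolver := idresolver.New(apiClient, false)
	ctx := context.Background()

	// The zero value of the second argument selects the lookup; results are
	// cached, so repeated calls for the same ID hit the manager only once.
	nodeName, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID)
	if err != nil {
		return err
	}
	serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID)
	if err != nil {
		return err
	}
	fmt.Printf("%s on %s\n", serviceName, nodeName)
	return nil
}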
@@ -1,144 +0,0 @@
package idresolver
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
// Import builders to get the builder function as package function
|
||||
. "github.com/docker/docker/cli/internal/test/builders"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestResolveError(t *testing.T) {
|
||||
cli := &fakeClient{
|
||||
nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) {
|
||||
return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node")
|
||||
},
|
||||
}
|
||||
|
||||
idResolver := New(cli, false)
|
||||
_, err := idResolver.Resolve(context.Background(), struct{}{}, "nodeID")
|
||||
|
||||
assert.EqualError(t, err, "unsupported type")
|
||||
}
|
||||
|
||||
func TestResolveWithNoResolveOption(t *testing.T) {
|
||||
resolved := false
|
||||
cli := &fakeClient{
|
||||
nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) {
|
||||
resolved = true
|
||||
return swarm.Node{}, []byte{}, nil
|
||||
},
|
||||
serviceInspectFunc: func(serviceID string) (swarm.Service, []byte, error) {
|
||||
resolved = true
|
||||
return swarm.Service{}, []byte{}, nil
|
||||
},
|
||||
}
|
||||
|
||||
idResolver := New(cli, true)
|
||||
id, err := idResolver.Resolve(context.Background(), swarm.Node{}, "nodeID")
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "nodeID", id)
|
||||
assert.False(t, resolved)
|
||||
}
|
||||
|
||||
func TestResolveWithCache(t *testing.T) {
|
||||
inspectCounter := 0
|
||||
cli := &fakeClient{
|
||||
nodeInspectFunc: func(nodeID string) (swarm.Node, []byte, error) {
|
||||
inspectCounter++
|
||||
return *Node(NodeName("node-foo")), []byte{}, nil
|
||||
},
|
||||
}
|
||||
|
||||
idResolver := New(cli, false)
|
||||
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 2; i++ {
|
||||
id, err := idResolver.Resolve(ctx, swarm.Node{}, "nodeID")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "node-foo", id)
|
||||
}
|
||||
|
||||
assert.Equal(t, 1, inspectCounter)
|
||||
}
|
||||
|
||||
func TestResolveNode(t *testing.T) {
|
||||
testCases := []struct {
|
||||
nodeID string
|
||||
nodeInspectFunc func(string) (swarm.Node, []byte, error)
|
||||
expectedID string
|
||||
}{
|
||||
{
|
||||
nodeID: "nodeID",
|
||||
nodeInspectFunc: func(string) (swarm.Node, []byte, error) {
|
||||
return swarm.Node{}, []byte{}, errors.Errorf("error inspecting node")
|
||||
},
|
||||
expectedID: "nodeID",
|
||||
},
|
||||
{
|
||||
nodeID: "nodeID",
|
||||
nodeInspectFunc: func(string) (swarm.Node, []byte, error) {
|
||||
return *Node(NodeName("node-foo")), []byte{}, nil
|
||||
},
|
||||
expectedID: "node-foo",
|
||||
},
|
||||
{
|
||||
nodeID: "nodeID",
|
||||
nodeInspectFunc: func(string) (swarm.Node, []byte, error) {
|
||||
return *Node(NodeName(""), Hostname("node-hostname")), []byte{}, nil
|
||||
},
|
||||
expectedID: "node-hostname",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
for _, tc := range testCases {
|
||||
cli := &fakeClient{
|
||||
nodeInspectFunc: tc.nodeInspectFunc,
|
||||
}
|
||||
idResolver := New(cli, false)
|
||||
id, err := idResolver.Resolve(ctx, swarm.Node{}, tc.nodeID)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedID, id)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveService(t *testing.T) {
|
||||
testCases := []struct {
|
||||
serviceID string
|
||||
serviceInspectFunc func(string) (swarm.Service, []byte, error)
|
||||
expectedID string
|
||||
}{
|
||||
{
|
||||
serviceID: "serviceID",
|
||||
serviceInspectFunc: func(string) (swarm.Service, []byte, error) {
|
||||
return swarm.Service{}, []byte{}, errors.Errorf("error inspecting service")
|
||||
},
|
||||
expectedID: "serviceID",
|
||||
},
|
||||
{
|
||||
serviceID: "serviceID",
|
||||
serviceInspectFunc: func(string) (swarm.Service, []byte, error) {
|
||||
return *Service(ServiceName("service-foo")), []byte{}, nil
|
||||
},
|
||||
expectedID: "service-foo",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
for _, tc := range testCases {
|
||||
cli := &fakeClient{
|
||||
serviceInspectFunc: tc.serviceInspectFunc,
|
||||
}
|
||||
idResolver := New(cli, false)
|
||||
id, err := idResolver.Resolve(ctx, swarm.Service{}, tc.serviceID)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedID, id)
|
||||
}
|
||||
}
@@ -1,558 +0,0 @@
package image
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder/dockerignore"
|
||||
"github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/cli/command/image/build"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
runconfigopts "github.com/docker/docker/runconfig/opts"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type buildOptions struct {
|
||||
context string
|
||||
dockerfileName string
|
||||
tags opts.ListOpts
|
||||
labels opts.ListOpts
|
||||
buildArgs opts.ListOpts
|
||||
extraHosts opts.ListOpts
|
||||
ulimits *opts.UlimitOpt
|
||||
memory opts.MemBytes
|
||||
memorySwap opts.MemSwapBytes
|
||||
shmSize opts.MemBytes
|
||||
cpuShares int64
|
||||
cpuPeriod int64
|
||||
cpuQuota int64
|
||||
cpuSetCpus string
|
||||
cpuSetMems string
|
||||
cgroupParent string
|
||||
isolation string
|
||||
quiet bool
|
||||
noCache bool
|
||||
rm bool
|
||||
forceRm bool
|
||||
pull bool
|
||||
cacheFrom []string
|
||||
compress bool
|
||||
securityOpt []string
|
||||
networkMode string
|
||||
squash bool
|
||||
target string
|
||||
imageIDFile string
|
||||
}
|
||||
|
||||
// NewBuildCommand creates a new `docker build` command
|
||||
func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
|
||||
ulimits := make(map[string]*units.Ulimit)
|
||||
options := buildOptions{
|
||||
tags: opts.NewListOpts(validateTag),
|
||||
buildArgs: opts.NewListOpts(opts.ValidateEnv),
|
||||
ulimits: opts.NewUlimitOpt(&ulimits),
|
||||
labels: opts.NewListOpts(opts.ValidateEnv),
|
||||
extraHosts: opts.NewListOpts(opts.ValidateExtraHost),
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "build [OPTIONS] PATH | URL | -",
|
||||
Short: "Build an image from a Dockerfile",
|
||||
Args: cli.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options.context = args[0]
|
||||
return runBuild(dockerCli, options)
|
||||
},
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
|
||||
flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format")
|
||||
flags.Var(&options.buildArgs, "build-arg", "Set build-time variables")
|
||||
flags.Var(options.ulimits, "ulimit", "Ulimit options")
|
||||
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
|
||||
flags.VarP(&options.memory, "memory", "m", "Memory limit")
|
||||
flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
|
||||
flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm")
|
||||
flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
||||
flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
|
||||
flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
||||
flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
|
||||
flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
|
||||
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
|
||||
flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology")
|
||||
flags.Var(&options.labels, "label", "Set metadata for an image")
|
||||
flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
|
||||
flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build")
|
||||
flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
|
||||
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
|
||||
flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
|
||||
flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
|
||||
flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
|
||||
flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
|
||||
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
|
||||
flags.SetAnnotation("network", "version", []string{"1.25"})
|
||||
flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
|
||||
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
|
||||
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
|
||||
|
||||
command.AddTrustVerificationFlags(flags)
|
||||
|
||||
flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
|
||||
flags.SetAnnotation("squash", "experimental", nil)
|
||||
flags.SetAnnotation("squash", "version", []string{"1.25"})
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// lastProgressOutput is the same as progress.Output except
|
||||
// that it only output with the last update. It is used in
|
||||
// non terminal scenarios to suppress verbose messages
|
||||
type lastProgressOutput struct {
|
||||
output progress.Output
|
||||
}
|
||||
|
||||
// WriteProgress formats progress information from a ProgressReader.
|
||||
func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
|
||||
if !prog.LastUpdate {
|
||||
return nil
|
||||
}
|
||||
|
||||
return out.output.WriteProgress(prog)
|
||||
}
|
||||
|
||||
func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
|
||||
var (
|
||||
buildCtx io.ReadCloser
|
||||
dockerfileCtx io.ReadCloser
|
||||
err error
|
||||
contextDir string
|
||||
tempDir string
|
||||
relDockerfile string
|
||||
progBuff io.Writer
|
||||
buildBuff io.Writer
|
||||
)
|
||||
|
||||
specifiedContext := options.context
|
||||
progBuff = dockerCli.Out()
|
||||
buildBuff = dockerCli.Out()
|
||||
if options.quiet {
|
||||
progBuff = bytes.NewBuffer(nil)
|
||||
buildBuff = bytes.NewBuffer(nil)
|
||||
}
|
||||
if options.imageIDFile != "" {
|
||||
// Avoid leaving a stale file if we eventually fail
|
||||
if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrap(err, "Removing image ID file")
|
||||
}
|
||||
}
|
||||
|
||||
if options.dockerfileName == "-" {
|
||||
if specifiedContext == "-" {
|
||||
return errors.New("invalid argument: can't use stdin for both build context and dockerfile")
|
||||
}
|
||||
dockerfileCtx = dockerCli.In()
|
||||
}
|
||||
|
||||
switch {
|
||||
case specifiedContext == "-":
|
||||
buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName)
|
||||
case isLocalDir(specifiedContext):
|
||||
contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
|
||||
case urlutil.IsGitURL(specifiedContext):
|
||||
tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName)
|
||||
case urlutil.IsURL(specifiedContext):
|
||||
buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
|
||||
default:
|
||||
return errors.Errorf("unable to prepare context: path %q not found", specifiedContext)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if options.quiet && urlutil.IsURL(specifiedContext) {
|
||||
fmt.Fprintln(dockerCli.Err(), progBuff)
|
||||
}
|
||||
return errors.Errorf("unable to prepare context: %s", err)
|
||||
}
|
||||
|
||||
if tempDir != "" {
|
||||
defer os.RemoveAll(tempDir)
|
||||
contextDir = tempDir
|
||||
}
|
||||
|
||||
if buildCtx == nil {
|
||||
// And canonicalize dockerfile name to a platform-independent one
|
||||
relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
|
||||
if err != nil {
|
||||
return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
|
||||
}
|
||||
|
||||
f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var excludes []string
|
||||
if err == nil {
|
||||
excludes, err = dockerignore.ReadAll(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
|
||||
return errors.Errorf("Error checking context: '%s'.", err)
|
||||
}
|
||||
|
||||
// If .dockerignore mentions .dockerignore or the Dockerfile then make
|
||||
// sure we send both files over to the daemon because Dockerfile is,
|
||||
// obviously, needed no matter what, and .dockerignore is needed to know
|
||||
// if either one needs to be removed. The daemon will remove them
|
||||
// if necessary, after it parses the Dockerfile. Ignore errors here, as
|
||||
// they will have been caught by validateContextDirectory above.
|
||||
// Excludes are used instead of includes to maintain the order of files
|
||||
// in the archive.
|
||||
if keep, _ := fileutils.Matches(".dockerignore", excludes); keep {
|
||||
excludes = append(excludes, "!.dockerignore")
|
||||
}
|
||||
if keep, _ := fileutils.Matches(relDockerfile, excludes); keep && dockerfileCtx == nil {
|
||||
excludes = append(excludes, "!"+relDockerfile)
|
||||
}
|
||||
|
||||
compression := archive.Uncompressed
|
||||
if options.compress {
|
||||
compression = archive.Gzip
|
||||
}
|
||||
buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
|
||||
Compression: compression,
|
||||
ExcludePatterns: excludes,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// replace Dockerfile if added dynamically
|
||||
if dockerfileCtx != nil {
|
||||
buildCtx, relDockerfile, err = addDockerfileToBuildContext(dockerfileCtx, buildCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
var resolvedTags []*resolvedTag
|
||||
if command.IsTrusted() {
|
||||
translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
|
||||
return TrustedReference(ctx, dockerCli, ref, nil)
|
||||
}
|
||||
// Wrap the tar archive to replace the Dockerfile entry with the rewritten
|
||||
// Dockerfile which uses trusted pulls.
|
||||
buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
|
||||
}
|
||||
|
||||
// Setup an upload progress bar
|
||||
progressOutput := streamformatter.NewProgressOutput(progBuff)
|
||||
if !dockerCli.Out().IsTerminal() {
|
||||
progressOutput = &lastProgressOutput{output: progressOutput}
|
||||
}
|
||||
|
||||
var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
|
||||
|
||||
authConfigs, _ := dockerCli.GetAllCredentials()
|
||||
buildOptions := types.ImageBuildOptions{
|
||||
Memory: options.memory.Value(),
|
||||
MemorySwap: options.memorySwap.Value(),
|
||||
Tags: options.tags.GetAll(),
|
||||
SuppressOutput: options.quiet,
|
||||
NoCache: options.noCache,
|
||||
Remove: options.rm,
|
||||
ForceRemove: options.forceRm,
|
||||
PullParent: options.pull,
|
||||
Isolation: container.Isolation(options.isolation),
|
||||
CPUSetCPUs: options.cpuSetCpus,
|
||||
CPUSetMems: options.cpuSetMems,
|
||||
CPUShares: options.cpuShares,
|
||||
CPUQuota: options.cpuQuota,
|
||||
CPUPeriod: options.cpuPeriod,
|
||||
CgroupParent: options.cgroupParent,
|
||||
Dockerfile: relDockerfile,
|
||||
ShmSize: options.shmSize.Value(),
|
||||
Ulimits: options.ulimits.GetList(),
|
||||
BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()),
|
||||
AuthConfigs: authConfigs,
|
||||
Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
|
||||
CacheFrom: options.cacheFrom,
|
||||
SecurityOpt: options.securityOpt,
|
||||
NetworkMode: options.networkMode,
|
||||
Squash: options.squash,
|
||||
ExtraHosts: options.extraHosts.GetAll(),
|
||||
Target: options.target,
|
||||
}
|
||||
|
||||
response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
|
||||
if err != nil {
|
||||
if options.quiet {
|
||||
fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
imageID := ""
|
||||
aux := func(auxJSON *json.RawMessage) {
|
||||
var result types.BuildResult
|
||||
if err := json.Unmarshal(*auxJSON, &result); err != nil {
|
||||
fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
|
||||
} else {
|
||||
imageID = result.ID
|
||||
}
|
||||
}
|
||||
|
||||
err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux)
|
||||
if err != nil {
|
||||
if jerr, ok := err.(*jsonmessage.JSONError); ok {
|
||||
// If no error code is set, default to 1
|
||||
if jerr.Code == 0 {
|
||||
jerr.Code = 1
|
||||
}
|
||||
if options.quiet {
|
||||
fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
|
||||
}
|
||||
return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Windows: show error message about modified file permissions if the
|
||||
// daemon isn't running Windows.
|
||||
if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
|
||||
fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+
|
||||
"image from Windows against a non-Windows Docker host. All files and "+
|
||||
"directories added to build context will have '-rwxr-xr-x' permissions. "+
|
||||
"It is recommended to double check and reset permissions for sensitive "+
|
||||
"files and directories.")
|
||||
}
|
||||
|
||||
// Everything worked so if -q was provided the output from the daemon
|
||||
// should be just the image ID and we'll print that to stdout.
|
||||
if options.quiet {
|
||||
imageID = fmt.Sprintf("%s", buildBuff)
|
||||
fmt.Fprintf(dockerCli.Out(), imageID)
|
||||
}
|
||||
|
||||
if options.imageIDFile != "" {
|
||||
if imageID == "" {
|
||||
return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile)
|
||||
}
|
||||
if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if command.IsTrusted() {
|
||||
// Since the build was successful, now we must tag any of the resolved
|
||||
// images from the above Dockerfile rewrite.
|
||||
for _, resolved := range resolvedTags {
|
||||
if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCloser) (io.ReadCloser, string, error) {
|
||||
file, err := ioutil.ReadAll(dockerfileCtx)
|
||||
dockerfileCtx.Close()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
now := time.Now()
|
||||
hdrTmpl := &tar.Header{
|
||||
Mode: 0600,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
ModTime: now,
|
||||
Typeflag: tar.TypeReg,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
}
|
||||
randomName := ".dockerfile." + stringid.GenerateRandomID()[:20]
|
||||
|
||||
buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{
|
||||
// Add the dockerfile with a random filename
|
||||
randomName: func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
|
||||
return hdrTmpl, file, nil
|
||||
},
|
||||
// Update .dockerignore to include the random filename
|
||||
".dockerignore": func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
|
||||
if h == nil {
|
||||
h = hdrTmpl
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
if content != nil {
|
||||
if _, err := b.ReadFrom(content); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
} else {
|
||||
b.WriteString(".dockerignore")
|
||||
}
|
||||
b.WriteString("\n" + randomName + "\n")
|
||||
return h, b.Bytes(), nil
|
||||
},
|
||||
})
|
||||
return buildCtx, randomName, nil
|
||||
}
|
||||
|
||||
func isLocalDir(c string) bool {
|
||||
_, err := os.Stat(c)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error)
|
||||
|
||||
// validateTag checks if the given image name can be resolved.
|
||||
func validateTag(rawRepo string) (string, error) {
|
||||
_, err := reference.ParseNormalizedNamed(rawRepo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return rawRepo, nil
|
||||
}
|
||||
|
||||
var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
|
||||
|
||||
// resolvedTag records the repository, tag, and resolved digest reference
|
||||
// from a Dockerfile rewrite.
|
||||
type resolvedTag struct {
|
||||
digestRef reference.Canonical
|
||||
tagRef reference.NamedTagged
|
||||
}
|
||||
|
||||
// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
|
||||
// "FROM <image>" instructions to a digest reference. `translator` is a
|
||||
// function that takes a repository name and tag reference and returns a
|
||||
// trusted digest reference.
|
||||
func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
|
||||
scanner := bufio.NewScanner(dockerfile)
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
// Scan the lines of the Dockerfile, looking for a "FROM" line.
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
matches := dockerfileFromLinePattern.FindStringSubmatch(line)
|
||||
if matches != nil && matches[1] != api.NoBaseImageSpecifier {
|
||||
// Replace the line with a resolved "FROM repo@digest"
|
||||
var ref reference.Named
|
||||
ref, err = reference.ParseNormalizedNamed(matches[1])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ref = reference.TagNameOnly(ref)
|
||||
if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() {
|
||||
trustedRef, err := translator(ctx, ref)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef)))
|
||||
resolvedTags = append(resolvedTags, &resolvedTag{
|
||||
digestRef: trustedRef,
|
||||
tagRef: ref,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
_, err := fmt.Fprintln(buf, line)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes(), resolvedTags, scanner.Err()
|
||||
}
|
||||
|
||||
// replaceDockerfileTarWrapper wraps the given input tar archive stream and
|
||||
// replaces the entry with the given Dockerfile name with the contents of the
|
||||
// new Dockerfile. Returns a new tar archive stream with the replaced
|
||||
// Dockerfile.
|
||||
func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
go func() {
|
||||
tarReader := tar.NewReader(inputTarStream)
|
||||
tarWriter := tar.NewWriter(pipeWriter)
|
||||
|
||||
defer inputTarStream.Close()
|
||||
|
||||
for {
|
||||
hdr, err := tarReader.Next()
|
||||
if err == io.EOF {
|
||||
// Signals end of archive.
|
||||
tarWriter.Close()
|
||||
pipeWriter.Close()
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
content := io.Reader(tarReader)
|
||||
if hdr.Name == dockerfileName {
|
||||
// This entry is the Dockerfile. Since the tar archive was
|
||||
// generated from a directory on the local filesystem, the
|
||||
// Dockerfile will only appear once in the archive.
|
||||
var newDockerfile []byte
|
||||
newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator)
|
||||
if err != nil {
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
hdr.Size = int64(len(newDockerfile))
|
||||
content = bytes.NewBuffer(newDockerfile)
|
||||
}
|
||||
|
||||
if err := tarWriter.WriteHeader(hdr); err != nil {
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := io.Copy(tarWriter, content); err != nil {
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return pipeReader
|
||||
}
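rewriteDockerfileFrom above hinges on dockerfileFromLinePattern matching the image operand of every FROM instruction so that content trust can substitute a pinned digest reference. A standalone sketch of that matching and rewriting step; the regexp is copied from the deleted file, while the sample line and digest are placeholders:

package example

import (
	"fmt"
	"regexp"
)

// Copied from the deleted build command: matches "FROM <image>" and captures
// the image reference so it can be replaced with a digest reference.
var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)

func pinExample() {
	line := "FROM golang:1.8"
	// In the real code the digest comes from the trust translator; the value
	// below is a placeholder, not a real digest.
	pinned := "golang@sha256:0000000000000000000000000000000000000000000000000000000000000000"

	if m := dockerfileFromLinePattern.FindStringSubmatch(line); m != nil {
		rewritten := dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", pinned))
		fmt.Println(m[1])      // golang:1.8
		fmt.Println(rewritten) // FROM golang@sha256:00...
	}
}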
@@ -1,275 +0,0 @@
package build
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/gitutils"
|
||||
"github.com/docker/docker/pkg/httputils"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
|
||||
DefaultDockerfileName string = "Dockerfile"
|
||||
)
|
||||
|
||||
// ValidateContextDirectory checks if all the contents of the directory
|
||||
// can be read and returns an error if some files can't be read
|
||||
// symlinks which point to non-existing files don't trigger an error
|
||||
func ValidateContextDirectory(srcPath string, excludes []string) error {
|
||||
contextRoot, err := getContextRoot(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
return errors.Errorf("can't stat '%s'", filePath)
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// skip this directory/file if it's not in the path, it won't get added to the context
|
||||
if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil {
|
||||
return err
|
||||
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
|
||||
return err
|
||||
} else if skip {
|
||||
if f.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// skip checking if symlinks point to non-existing files, such symlinks can be useful
|
||||
// also skip named pipes, because they hanging on open
|
||||
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !f.IsDir() {
|
||||
currentFile, err := os.Open(filePath)
|
||||
if err != nil && os.IsPermission(err) {
|
||||
return errors.Errorf("no permission to read from '%s'", filePath)
|
||||
}
|
||||
currentFile.Close()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetContextFromReader will read the contents of the given reader as either a
|
||||
// Dockerfile or tar archive. Returns a tar archive used as a context and a
|
||||
// path to the Dockerfile inside the tar.
|
||||
func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
|
||||
buf := bufio.NewReader(r)
|
||||
|
||||
magic, err := buf.Peek(archive.HeaderSize)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, "", errors.Errorf("failed to peek context header from STDIN: %v", err)
|
||||
}
|
||||
|
||||
if archive.IsArchive(magic) {
|
||||
return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
|
||||
}
|
||||
|
||||
if dockerfileName == "-" {
|
||||
return nil, "", errors.New("build context is not an archive")
|
||||
}
|
||||
|
||||
// Input should be read as a Dockerfile.
|
||||
tmpDir, err := ioutil.TempDir("", "docker-build-context-")
|
||||
if err != nil {
|
||||
return nil, "", errors.Errorf("unable to create temporary context directory: %v", err)
|
||||
}
|
||||
|
||||
f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
_, err = io.Copy(f, buf)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
if err := r.Close(); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
tar, err := archive.Tar(tmpDir, archive.Uncompressed)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return ioutils.NewReadCloserWrapper(tar, func() error {
|
||||
err := tar.Close()
|
||||
os.RemoveAll(tmpDir)
|
||||
return err
|
||||
}), DefaultDockerfileName, nil
|
||||
|
||||
}
|
||||
|
||||
// GetContextFromGitURL uses a Git URL as context for a `docker build`. The
|
||||
// git repo is cloned into a temporary directory used as the context directory.
|
||||
// Returns the absolute path to the temporary context directory, the relative
|
||||
// path of the dockerfile in that context directory, and a non-nil error on
|
||||
// success.
|
||||
func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
||||
if _, err := exec.LookPath("git"); err != nil {
|
||||
return "", "", errors.Errorf("unable to find 'git': %v", err)
|
||||
}
|
||||
if absContextDir, err = gitutils.Clone(gitURL); err != nil {
|
||||
return "", "", errors.Errorf("unable to 'git clone' to temporary context directory: %v", err)
|
||||
}
|
||||
|
||||
return getDockerfileRelPath(absContextDir, dockerfileName)
|
||||
}
|
||||
|
||||
// GetContextFromURL uses a remote URL as context for a `docker build`. The
|
||||
// remote resource is downloaded as either a Dockerfile or a tar archive.
|
||||
// Returns the tar archive used for the context and a path of the
|
||||
// dockerfile inside the tar.
|
||||
func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
|
||||
response, err := httputils.Download(remoteURL)
|
||||
if err != nil {
|
||||
return nil, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err)
|
||||
}
|
||||
progressOutput := streamformatter.NewProgressOutput(out)
|
||||
|
||||
// Pass the response body through a progress reader.
|
||||
progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))
|
||||
|
||||
return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
|
||||
}
|
||||
|
||||
// GetContextFromLocalDir uses the given local directory as context for a
|
||||
// `docker build`. Returns the absolute path to the local context directory,
|
||||
// the relative path of the dockerfile in that context directory, and a non-nil
|
||||
// error on success.
|
||||
func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
||||
// When using a local context directory, when the Dockerfile is specified
|
||||
// with the `-f/--file` option then it is considered relative to the
|
||||
// current directory and not the context directory.
|
||||
if dockerfileName != "" && dockerfileName != "-" {
|
||||
if dockerfileName, err = filepath.Abs(dockerfileName); err != nil {
|
||||
return "", "", errors.Errorf("unable to get absolute path to Dockerfile: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return getDockerfileRelPath(localDir, dockerfileName)
|
||||
}
|
||||
|
||||
// getDockerfileRelPath uses the given context directory for a `docker build`
|
||||
// and returns the absolute path to the context directory, the relative path of
|
||||
// the dockerfile in that context directory, and a non-nil error on success.
|
||||
func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) {
|
||||
if absContextDir, err = filepath.Abs(givenContextDir); err != nil {
|
||||
return "", "", errors.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err)
|
||||
}
|
||||
|
||||
// The context dir might be a symbolic link, so follow it to the actual
|
||||
// target directory.
|
||||
//
|
||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
||||
// paths (those starting with \\). This hack means that when using links
|
||||
// on UNC paths, they will not be followed.
|
||||
if !isUNC(absContextDir) {
|
||||
absContextDir, err = filepath.EvalSymlinks(absContextDir)
|
||||
if err != nil {
|
||||
return "", "", errors.Errorf("unable to evaluate symlinks in context path: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(absContextDir)
|
||||
if err != nil {
|
||||
return "", "", errors.Errorf("unable to stat context directory %q: %v", absContextDir, err)
|
||||
}
|
||||
|
||||
if !stat.IsDir() {
|
||||
return "", "", errors.Errorf("context must be a directory: %s", absContextDir)
|
||||
}
|
||||
|
||||
absDockerfile := givenDockerfile
|
||||
if absDockerfile == "" {
|
||||
// No -f/--file was specified so use the default relative to the
|
||||
// context directory.
|
||||
absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName)
|
||||
|
||||
// Just to be nice ;-) look for 'dockerfile' too but only
|
||||
// use it if we found it, otherwise ignore this check
|
||||
if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
|
||||
altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName))
|
||||
if _, err = os.Lstat(altPath); err == nil {
|
||||
absDockerfile = altPath
|
||||
}
|
||||
}
|
||||
} else if absDockerfile == "-" {
|
||||
absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName)
|
||||
}
|
||||
|
||||
// If not already an absolute path, the Dockerfile path should be joined to
|
||||
// the base directory.
|
||||
if !filepath.IsAbs(absDockerfile) {
|
||||
absDockerfile = filepath.Join(absContextDir, absDockerfile)
|
||||
}
|
||||
|
||||
// Evaluate symlinks in the path to the Dockerfile too.
|
||||
//
|
||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
||||
// paths (those starting with \\). This hack means that when using links
|
||||
// on UNC paths, they will not be followed.
|
||||
if givenDockerfile != "-" {
|
||||
if !isUNC(absDockerfile) {
|
||||
absDockerfile, err = filepath.EvalSymlinks(absDockerfile)
|
||||
if err != nil {
|
||||
return "", "", errors.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Lstat(absDockerfile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return "", "", errors.Errorf("Cannot locate Dockerfile: %q", absDockerfile)
|
||||
}
|
||||
return "", "", errors.Errorf("unable to stat Dockerfile: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil {
|
||||
return "", "", errors.Errorf("unable to get relative Dockerfile path: %v", err)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
|
||||
return "", "", errors.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir)
|
||||
}
|
||||
|
||||
return absContextDir, relDockerfile, nil
|
||||
}
|
||||
|
||||
// isUNC returns true if the path is UNC (one starting \\). It always returns
|
||||
// false on Linux.
|
||||
func isUNC(path string) bool {
|
||||
return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`)
|
||||
}
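Taken together, these helpers turn the build command's PATH | URL | - argument into the tar stream that is posted to the daemon. A simplified sketch of the local-directory path, assuming the pre-removal cli/command/image/build import path and reducing .dockerignore handling to the happy path:

package example

import (
	"io"
	"os"
	"path/filepath"

	"github.com/docker/docker/builder/dockerignore"
	"github.com/docker/docker/cli/command/image/build"
	"github.com/docker/docker/pkg/archive"
)

// tarLocalContext mirrors, in reduced form, what the build command does for a
// local directory: resolve the context and Dockerfile, honour .dockerignore,
// and produce the tar stream sent to the daemon.
func tarLocalContext(path, dockerfile string) (io.ReadCloser, string, error) {
	contextDir, relDockerfile, err := build.GetContextFromLocalDir(path, dockerfile)
	if err != nil {
		return nil, "", err
	}

	var excludes []string
	if f, err := os.Open(filepath.Join(contextDir, ".dockerignore")); err == nil {
		excludes, err = dockerignore.ReadAll(f)
		f.Close()
		if err != nil {
			return nil, "", err
		}
	}

	if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
		return nil, "", err
	}

	buildCtx, err := archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
	})
	return buildCtx, relDockerfile, err
}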
@@ -1,383 +0,0 @@
package build
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
const dockerfileContents = "FROM busybox"
|
||||
|
||||
var prepareEmpty = func(t *testing.T) (string, func()) {
|
||||
return "", func() {}
|
||||
}
|
||||
|
||||
var prepareNoFiles = func(t *testing.T) (string, func()) {
|
||||
return createTestTempDir(t, "", "builder-context-test")
|
||||
}
|
||||
|
||||
var prepareOneFile = func(t *testing.T) (string, func()) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)
|
||||
return contextDir, cleanup
|
||||
}
|
||||
|
||||
func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) {
|
||||
contextDir, cleanup := prepare(t)
|
||||
defer cleanup()
|
||||
|
||||
err := ValidateContextDirectory(contextDir, excludes)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error should be nil, got: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContextFromLocalDirNoDockerfile(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
defer cleanup()
|
||||
|
||||
absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "")
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should not be nil")
|
||||
}
|
||||
|
||||
if absContextDir != "" {
|
||||
t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir)
|
||||
}
|
||||
|
||||
if relDockerfile != "" {
|
||||
t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContextFromLocalDirNotExistingDir(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
defer cleanup()
|
||||
|
||||
fakePath := filepath.Join(contextDir, "fake")
|
||||
|
||||
absContextDir, relDockerfile, err := GetContextFromLocalDir(fakePath, "")
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should not be nil")
|
||||
}
|
||||
|
||||
if absContextDir != "" {
|
||||
t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir)
|
||||
}
|
||||
|
||||
if relDockerfile != "" {
|
||||
t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
defer cleanup()
|
||||
|
||||
fakePath := filepath.Join(contextDir, "fake")
|
||||
|
||||
absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, fakePath)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should not be nil")
|
||||
}
|
||||
|
||||
if absContextDir != "" {
|
||||
t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir)
|
||||
}
|
||||
|
||||
if relDockerfile != "" {
|
||||
t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) {
|
||||
contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
defer dirCleanup()
|
||||
|
||||
createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)
|
||||
|
||||
chdirCleanup := chdir(t, contextDir)
|
||||
defer chdirCleanup()
|
||||
|
||||
absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "")
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when getting context from local dir: %s", err)
|
||||
}
|
||||
|
||||
if absContextDir != contextDir {
|
||||
t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir)
|
||||
}
|
||||
|
||||
if relDockerfile != DefaultDockerfileName {
|
||||
t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContextFromLocalDirWithDockerfile(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
defer cleanup()
|
||||
|
||||
createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)
|
||||
|
||||
absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "")
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when getting context from local dir: %s", err)
|
||||
}
|
||||
|
||||
if absContextDir != contextDir {
|
||||
t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir)
|
||||
}
|
||||
|
||||
if relDockerfile != DefaultDockerfileName {
|
||||
t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContextFromLocalDirLocalFile(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
defer cleanup()
|
||||
|
||||
createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)
|
||||
testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777)
|
||||
|
||||
absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "")
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should not be nil")
|
||||
}
|
||||
|
||||
if absContextDir != "" {
|
||||
t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir)
|
||||
}
|
||||
|
||||
if relDockerfile != "" {
|
||||
t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
|
||||
defer cleanup()
|
||||
|
||||
chdirCleanup := chdir(t, contextDir)
|
||||
defer chdirCleanup()
|
||||
|
||||
createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)
|
||||
|
||||
absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when getting context from local dir: %s", err)
|
||||
}
|
||||
|
||||
if absContextDir != contextDir {
|
||||
t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir)
|
||||
}
|
||||
|
||||
if relDockerfile != DefaultDockerfileName {
|
||||
t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetContextFromReaderString(t *testing.T) {
    tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "")

    if err != nil {
        t.Fatalf("Error when executing GetContextFromReader: %s", err)
    }

    tarReader := tar.NewReader(tarArchive)

    _, err = tarReader.Next()

    if err != nil {
        t.Fatalf("Error when reading tar archive: %s", err)
    }

    buff := new(bytes.Buffer)
    buff.ReadFrom(tarReader)
    contents := buff.String()

    _, err = tarReader.Next()

    if err != io.EOF {
        t.Fatalf("Tar stream too long: %s", err)
    }

    if err = tarArchive.Close(); err != nil {
        t.Fatalf("Error when closing tar stream: %s", err)
    }

    if dockerfileContents != contents {
        t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents)
    }

    if relDockerfile != DefaultDockerfileName {
        t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile)
    }
}

func TestGetContextFromReaderTar(t *testing.T) {
    contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
    defer cleanup()

    createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)

    tarStream, err := archive.Tar(contextDir, archive.Uncompressed)

    if err != nil {
        t.Fatalf("Error when creating tar: %s", err)
    }

    tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName)

    if err != nil {
        t.Fatalf("Error when executing GetContextFromReader: %s", err)
    }

    tarReader := tar.NewReader(tarArchive)

    header, err := tarReader.Next()

    if err != nil {
        t.Fatalf("Error when reading tar archive: %s", err)
    }

    if header.Name != DefaultDockerfileName {
        t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name)
    }

    buff := new(bytes.Buffer)
    buff.ReadFrom(tarReader)
    contents := buff.String()

    _, err = tarReader.Next()

    if err != io.EOF {
        t.Fatalf("Tar stream too long: %s", err)
    }

    if err = tarArchive.Close(); err != nil {
        t.Fatalf("Error when closing tar stream: %s", err)
    }

    if dockerfileContents != contents {
        t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents)
    }

    if relDockerfile != DefaultDockerfileName {
        t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile)
    }
}

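Taken together, the two reader-based tests above pin down the behaviour of GetContextFromReader: a plain Dockerfile stream is wrapped into a single-entry tar archive and relDockerfile is reported as DefaultDockerfileName, while an existing tar archive is passed through with its Dockerfile entry intact. The sketch below is illustrative only and not part of the diff; the function name, the "FROM busybox" content, and the error handling are placeholders, and the same imports as the tests above are assumed.

func ExampleGetContextFromReader() { // hypothetical name, mirrors TestGetContextFromReaderString
    rc := ioutil.NopCloser(strings.NewReader("FROM busybox")) // placeholder Dockerfile content
    tarArchive, relDockerfile, err := GetContextFromReader(rc, "")
    if err != nil {
        return
    }
    defer tarArchive.Close()

    // tarArchive now yields a single tar entry containing the Dockerfile bytes;
    // relDockerfile equals DefaultDockerfileName, as the test above asserts.
    _ = relDockerfile
}
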
func TestValidateContextDirectoryEmptyContext(t *testing.T) {
    // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81.
    // The test will ultimately end up calling filepath.Abs(""). On Windows,
    // golang will error. On Linux, golang will return /. Due to there being
    // drive letters on Windows, this is probably the correct behaviour for
    // Windows.
    if runtime.GOOS == "windows" {
        t.Skip("Invalid test on Windows")
    }
    testValidateContextDirectory(t, prepareEmpty, []string{})
}

func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) {
    testValidateContextDirectory(t, prepareNoFiles, []string{})
}

func TestValidateContextDirectoryWithOneFile(t *testing.T) {
    testValidateContextDirectory(t, prepareOneFile, []string{})
}

func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) {
    testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName})
}

// createTestTempDir creates a temporary directory for testing.
// It returns the created path and a cleanup function which is meant to be used as deferred call.
// When an error occurs, it terminates the test.
func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) {
    path, err := ioutil.TempDir(dir, prefix)

    if err != nil {
        t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err)
    }

    return path, func() {
        err = os.RemoveAll(path)

        if err != nil {
            t.Fatalf("Error when removing directory %s: %s", path, err)
        }
    }
}

// createTestTempSubdir creates a temporary directory for testing.
// It returns the created path but doesn't provide a cleanup function,
// so createTestTempSubdir should be used only for creating temporary subdirectories
// whose parent directories are properly cleaned up.
// When an error occurs, it terminates the test.
func createTestTempSubdir(t *testing.T, dir, prefix string) string {
    path, err := ioutil.TempDir(dir, prefix)

    if err != nil {
        t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err)
    }

    return path
}

// createTestTempFile creates a temporary file within dir with specific contents and permissions.
// When an error occurs, it terminates the test
func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string {
    filePath := filepath.Join(dir, filename)
    err := ioutil.WriteFile(filePath, []byte(contents), perm)

    if err != nil {
        t.Fatalf("Error when creating %s file: %s", filename, err)
    }

    return filePath
}

// chdir changes current working directory to dir.
// It returns a function which changes working directory back to the previous one.
// This function is meant to be executed as a deferred call.
// When an error occurs, it terminates the test.
func chdir(t *testing.T, dir string) func() {
    workingDirectory, err := os.Getwd()

    if err != nil {
        t.Fatalf("Error when retrieving working directory: %s", err)
    }

    err = os.Chdir(dir)

    if err != nil {
        t.Fatalf("Error when changing directory to %s: %s", dir, err)
    }

    return func() {
        err = os.Chdir(workingDirectory)

        if err != nil {
            t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err)
        }
    }
}
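These helpers all follow the same create-then-cleanup shape, returning the cleanup step as a function intended for defer. The following sketch is a hypothetical composition, not a test from this file; it uses only names defined above (createTestTempDir, createTestTempFile, chdir, DefaultDockerfileName, dockerfileContents), and the test name is invented.

func TestHelperComposition(t *testing.T) { // hypothetical test name
    // Isolated context directory, removed when the test finishes.
    contextDir, cleanup := createTestTempDir(t, "", "builder-context-test")
    defer cleanup()

    // A default Dockerfile placed inside the context.
    createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)

    // Run the rest of the test from inside the context directory and
    // restore the previous working directory on the way out.
    defer chdir(t, contextDir)()
}
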
@ -1,11 +0,0 @@
// +build !windows

package build

import (
    "path/filepath"
)

func getContextRoot(srcPath string) (string, error) {
    return filepath.Join(srcPath, "."), nil
}
@ -1,17 +0,0 @@
// +build windows

package build

import (
    "path/filepath"

    "github.com/docker/docker/pkg/longpath"
)

func getContextRoot(srcPath string) (string, error) {
    cr, err := filepath.Abs(srcPath)
    if err != nil {
        return "", err
    }
    return longpath.AddPrefix(cr), nil
}
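The two build-tagged files above give getContextRoot its per-platform behaviour: the Unix variant only cleans the source path with filepath.Join, while the Windows variant makes it absolute and hands it to pkg/longpath. A rough illustration follows; the concrete paths, the C:\work working directory, and the `\\?\` prefix added by longpath.AddPrefix are assumptions for the example, not taken from the diff.

func describeGetContextRoot() { // hypothetical helper, illustration only
    root, err := getContextRoot("./ctx") // "./ctx" is a placeholder path
    // Unix build:    root == "ctx", err == nil — filepath.Join(srcPath, ".") just cleans the path.
    // Windows build: root == `\\?\C:\work\ctx`, assuming the working directory is C:\work
    //                and that longpath.AddPrefix prepends the extended-length `\\?\` prefix.
    _, _ = root, err
}
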
Some files were not shown because too many files have changed in this diff.