Merge pull request #24467 from tiborvass/cherry-picks-1.12.0-rc4
[WIP] Cherry picks 1.12.0 rc4
This commit is contained in:
commit
e4c35bd74c
329 changed files with 6078 additions and 2462 deletions
|
@ -1018,7 +1018,7 @@ by another client (#15489)
|
||||||
#### Security
|
#### Security
|
||||||
- Fix tar breakout vulnerability
|
- Fix tar breakout vulnerability
|
||||||
* Extractions are now sandboxed chroot
|
* Extractions are now sandboxed chroot
|
||||||
- Security options are no longer committed to images
|
- Security options are no longer comitted to images
|
||||||
|
|
||||||
#### Runtime
|
#### Runtime
|
||||||
- Fix deadlock in `docker ps -f exited=1`
|
- Fix deadlock in `docker ps -f exited=1`
|
||||||
|
@ -1444,7 +1444,7 @@ by another client (#15489)
|
||||||
* Update issue filing instructions
|
* Update issue filing instructions
|
||||||
* Warn against the use of symlinks for Docker's storage folder
|
* Warn against the use of symlinks for Docker's storage folder
|
||||||
* Replace the Firefox example with an IceWeasel example
|
* Replace the Firefox example with an IceWeasel example
|
||||||
* Rewrite the PostgresSQL example using a Dockerfile and add more details to it
|
* Rewrite the PostgreSQL example using a Dockerfile and add more details to it
|
||||||
* Improve the OS X documentation
|
* Improve the OS X documentation
|
||||||
|
|
||||||
#### Remote API
|
#### Remote API
|
||||||
|
|
|
@ -36,6 +36,7 @@ RUN apt-get update && apt-get install -y \
|
||||||
libapparmor-dev \
|
libapparmor-dev \
|
||||||
libc6-dev \
|
libc6-dev \
|
||||||
libcap-dev \
|
libcap-dev \
|
||||||
|
libltdl-dev \
|
||||||
libsqlite3-dev \
|
libsqlite3-dev \
|
||||||
libsystemd-dev \
|
libsystemd-dev \
|
||||||
mercurial \
|
mercurial \
|
||||||
|
|
|
@ -44,6 +44,7 @@ func NewCopyCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`,
|
docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`,
|
||||||
Short: "Copy files/folders between a container and the local filesystem",
|
Short: "Copy files/folders between a container and the local filesystem",
|
||||||
Long: strings.Join([]string{
|
Long: strings.Join([]string{
|
||||||
|
"Copy files/folders between a container and the local filesystem\n",
|
||||||
"\nUse '-' as the source to read a tar archive from stdin\n",
|
"\nUse '-' as the source to read a tar archive from stdin\n",
|
||||||
"and extract it to a directory destination in a container.\n",
|
"and extract it to a directory destination in a container.\n",
|
||||||
"Use '-' as the destination to stream a tar archive of a\n",
|
"Use '-' as the destination to stream a tar archive of a\n",
|
||||||
|
|
|
@ -55,7 +55,7 @@ func NewPsCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)")
|
flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)")
|
||||||
flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
|
flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
|
||||||
flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)")
|
flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)")
|
||||||
flags.IntVarP(&opts.last, "", "n", -1, "Show n last created containers (includes all states)")
|
flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)")
|
||||||
flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template")
|
flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template")
|
||||||
flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided")
|
flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided")
|
||||||
|
|
||||||
|
|
|
@ -18,7 +18,7 @@ func NewTagCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
var opts tagOptions
|
var opts tagOptions
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "tag IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]",
|
Use: "tag IMAGE[:TAG] IMAGE[:TAG]",
|
||||||
Short: "Tag an image into a repository",
|
Short: "Tag an image into a repository",
|
||||||
Args: cli.ExactArgs(2),
|
Args: cli.ExactArgs(2),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
|
|
@ -51,7 +51,6 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
|
||||||
fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1])
|
fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver)
|
|
||||||
ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver)
|
ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver)
|
||||||
ioutils.FprintfIfNotEmpty(cli.out, "Cgroup Driver: %s\n", info.CgroupDriver)
|
ioutils.FprintfIfNotEmpty(cli.out, "Cgroup Driver: %s\n", info.CgroupDriver)
|
||||||
|
|
||||||
|
|
|
@ -33,7 +33,7 @@ func runRemove(dockerCli *client.DockerCli, networks []string) error {
|
||||||
status = 1
|
status = 1
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Fprintf(dockerCli.Err(), "%s\n", name)
|
fmt.Fprintf(dockerCli.Out(), "%s\n", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
if status != 0 {
|
if status != 0 {
|
||||||
|
|
|
@ -35,9 +35,10 @@ func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func nodeReference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) {
|
// Reference returns the reference of a node. The special value "self" for a node
|
||||||
// The special value "self" for a node reference is mapped to the current
|
// reference is mapped to the current node, hence the node ID is retrieved using
|
||||||
// node, hence the node ID is retrieved using the `/info` endpoint.
|
// the `/info` endpoint.
|
||||||
|
func Reference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) {
|
||||||
if ref == "self" {
|
if ref == "self" {
|
||||||
info, err := client.Info(ctx)
|
info, err := client.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -45,7 +45,7 @@ func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
|
||||||
client := dockerCli.Client()
|
client := dockerCli.Client()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
getRef := func(ref string) (interface{}, []byte, error) {
|
getRef := func(ref string) (interface{}, []byte, error) {
|
||||||
nodeRef, err := nodeReference(client, ctx, ref)
|
nodeRef, err := Reference(client, ctx, ref)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -44,7 +44,7 @@ func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
|
||||||
client := dockerCli.Client()
|
client := dockerCli.Client()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
nodeRef, err := nodeReference(client, ctx, opts.nodeID)
|
nodeRef, err := Reference(client, ctx, opts.nodeID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -55,10 +55,9 @@ func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
|
||||||
|
|
||||||
filter := opts.filter.Value()
|
filter := opts.filter.Value()
|
||||||
filter.Add("node", node.ID)
|
filter.Add("node", node.ID)
|
||||||
if !opts.all && !filter.Include("desired_state") {
|
if !opts.all && !filter.Include("desired-state") {
|
||||||
filter.Add("desired_state", string(swarm.TaskStateRunning))
|
filter.Add("desired-state", string(swarm.TaskStateRunning))
|
||||||
filter.Add("desired_state", string(swarm.TaskStateAccepted))
|
filter.Add("desired-state", string(swarm.TaskStateAccepted))
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
tasks, err := client.TaskList(
|
tasks, err := client.TaskList(
|
||||||
|
|
|
@ -45,7 +45,7 @@ func runLogout(dockerCli *client.DockerCli, serverAddress string) error {
|
||||||
|
|
||||||
fmt.Fprintf(dockerCli.Out(), "Remove login credentials for %s\n", serverAddress)
|
fmt.Fprintf(dockerCli.Out(), "Remove login credentials for %s\n", serverAddress)
|
||||||
if err := client.EraseCredentials(dockerCli.ConfigFile(), serverAddress); err != nil {
|
if err := client.EraseCredentials(dockerCli.ConfigFile(), serverAddress); err != nil {
|
||||||
fmt.Fprintf(dockerCli.Out(), "WARNING: could not erase credentials: %v\n", err)
|
fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
|
|
||||||
"github.com/docker/docker/api/client"
|
"github.com/docker/docker/api/client"
|
||||||
"github.com/docker/docker/cli"
|
"github.com/docker/docker/cli"
|
||||||
|
"github.com/docker/engine-api/types"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
@ -33,7 +34,7 @@ func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
|
|
||||||
func runCreate(dockerCli *client.DockerCli, opts *serviceOptions) error {
|
func runCreate(dockerCli *client.DockerCli, opts *serviceOptions) error {
|
||||||
apiClient := dockerCli.Client()
|
apiClient := dockerCli.Client()
|
||||||
headers := map[string][]string{}
|
createOpts := types.ServiceCreateOptions{}
|
||||||
|
|
||||||
service, err := opts.ToService()
|
service, err := opts.ToService()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -49,10 +50,10 @@ func runCreate(dockerCli *client.DockerCli, opts *serviceOptions) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
headers["X-Registry-Auth"] = []string{encodedAuth}
|
createOpts.EncodedRegistryAuth = encodedAuth
|
||||||
}
|
}
|
||||||
|
|
||||||
response, err := apiClient.ServiceCreate(ctx, service, headers)
|
response, err := apiClient.ServiceCreate(ctx, service, createOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -169,7 +169,7 @@ func printContainerSpec(out io.Writer, containerSpec swarm.ContainerSpec) {
|
||||||
for _, v := range containerSpec.Mounts {
|
for _, v := range containerSpec.Mounts {
|
||||||
fmt.Fprintf(out, " Target = %s\n", v.Target)
|
fmt.Fprintf(out, " Target = %s\n", v.Target)
|
||||||
fmt.Fprintf(out, " Source = %s\n", v.Source)
|
fmt.Fprintf(out, " Source = %s\n", v.Source)
|
||||||
fmt.Fprintf(out, " Writable = %v\n", v.Writable)
|
fmt.Fprintf(out, " ReadOnly = %v\n", v.ReadOnly)
|
||||||
fmt.Fprintf(out, " Type = %v\n", v.Type)
|
fmt.Fprintf(out, " Type = %v\n", v.Type)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -176,10 +176,16 @@ func (m *MountOpt) Set(value string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set writable as the default
|
||||||
for _, field := range fields {
|
for _, field := range fields {
|
||||||
parts := strings.SplitN(field, "=", 2)
|
parts := strings.SplitN(field, "=", 2)
|
||||||
if len(parts) == 1 && strings.ToLower(parts[0]) == "writable" {
|
if len(parts) == 1 && strings.ToLower(parts[0]) == "readonly" {
|
||||||
mount.Writable = true
|
mount.ReadOnly = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(parts) == 1 && strings.ToLower(parts[0]) == "volume-nocopy" {
|
||||||
|
volumeOptions().NoCopy = true
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -195,15 +201,16 @@ func (m *MountOpt) Set(value string) error {
|
||||||
mount.Source = value
|
mount.Source = value
|
||||||
case "target":
|
case "target":
|
||||||
mount.Target = value
|
mount.Target = value
|
||||||
case "writable":
|
case "readonly":
|
||||||
mount.Writable, err = strconv.ParseBool(value)
|
ro, err := strconv.ParseBool(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid value for writable: %s", value)
|
return fmt.Errorf("invalid value for readonly: %s", value)
|
||||||
}
|
}
|
||||||
|
mount.ReadOnly = ro
|
||||||
case "bind-propagation":
|
case "bind-propagation":
|
||||||
bindOptions().Propagation = swarm.MountPropagation(strings.ToUpper(value))
|
bindOptions().Propagation = swarm.MountPropagation(strings.ToUpper(value))
|
||||||
case "volume-populate":
|
case "volume-nocopy":
|
||||||
volumeOptions().Populate, err = strconv.ParseBool(value)
|
volumeOptions().NoCopy, err = strconv.ParseBool(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid value for populate: %s", value)
|
return fmt.Errorf("invalid value for populate: %s", value)
|
||||||
}
|
}
|
||||||
|
@ -229,6 +236,17 @@ func (m *MountOpt) Set(value string) error {
|
||||||
return fmt.Errorf("target is required")
|
return fmt.Errorf("target is required")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if mount.VolumeOptions != nil && mount.Source == "" {
|
||||||
|
return fmt.Errorf("source is required when specifying volume-* options")
|
||||||
|
}
|
||||||
|
|
||||||
|
if mount.Type == swarm.MountType("BIND") && mount.VolumeOptions != nil {
|
||||||
|
return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", swarm.MountTypeBind)
|
||||||
|
}
|
||||||
|
if mount.Type == swarm.MountType("VOLUME") && mount.BindOptions != nil {
|
||||||
|
return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", swarm.MountTypeVolume)
|
||||||
|
}
|
||||||
|
|
||||||
m.values = append(m.values, mount)
|
m.values = append(m.values, mount)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -458,7 +476,7 @@ func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
|
||||||
|
|
||||||
flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
|
flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
|
||||||
|
|
||||||
flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on_failure, or any)")
|
flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)")
|
||||||
flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts")
|
flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts")
|
||||||
flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up")
|
flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up")
|
||||||
flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy")
|
flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy")
|
||||||
|
@ -469,7 +487,7 @@ func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
|
||||||
flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates")
|
flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates")
|
||||||
|
|
||||||
flags.StringSliceVar(&opts.networks, flagNetwork, []string{}, "Network attachments")
|
flags.StringSliceVar(&opts.networks, flagNetwork, []string{}, "Network attachments")
|
||||||
flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode(Valid values: vip, dnsrr)")
|
flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)")
|
||||||
flags.VarP(&opts.endpoint.ports, flagPublish, "p", "Publish a port as a node port")
|
flags.VarP(&opts.endpoint.ports, flagPublish, "p", "Publish a port as a node port")
|
||||||
|
|
||||||
flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to Swarm agents")
|
flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to Swarm agents")
|
||||||
|
|
|
@ -111,5 +111,53 @@ func TestMountOptSetErrorInvalidField(t *testing.T) {
|
||||||
|
|
||||||
func TestMountOptSetErrorInvalidWritable(t *testing.T) {
|
func TestMountOptSetErrorInvalidWritable(t *testing.T) {
|
||||||
var mount MountOpt
|
var mount MountOpt
|
||||||
assert.Error(t, mount.Set("type=VOLUME,writable=yes"), "invalid value for writable: yes")
|
assert.Error(t, mount.Set("type=VOLUME,readonly=no"), "invalid value for readonly: no")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMountOptDefaultEnableWritable(t *testing.T) {
|
||||||
|
var m MountOpt
|
||||||
|
assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo"))
|
||||||
|
assert.Equal(t, m.values[0].ReadOnly, false)
|
||||||
|
|
||||||
|
m = MountOpt{}
|
||||||
|
assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly"))
|
||||||
|
assert.Equal(t, m.values[0].ReadOnly, true)
|
||||||
|
|
||||||
|
m = MountOpt{}
|
||||||
|
assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1"))
|
||||||
|
assert.Equal(t, m.values[0].ReadOnly, true)
|
||||||
|
|
||||||
|
m = MountOpt{}
|
||||||
|
assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0"))
|
||||||
|
assert.Equal(t, m.values[0].ReadOnly, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMountOptVolumeNoCopy(t *testing.T) {
|
||||||
|
var m MountOpt
|
||||||
|
assert.Error(t, m.Set("type=volume,target=/foo,volume-nocopy"), "source is required")
|
||||||
|
|
||||||
|
m = MountOpt{}
|
||||||
|
assert.NilError(t, m.Set("type=volume,target=/foo,source=foo"))
|
||||||
|
assert.Equal(t, m.values[0].VolumeOptions == nil, true)
|
||||||
|
|
||||||
|
m = MountOpt{}
|
||||||
|
assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true"))
|
||||||
|
assert.Equal(t, m.values[0].VolumeOptions != nil, true)
|
||||||
|
assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true)
|
||||||
|
|
||||||
|
m = MountOpt{}
|
||||||
|
assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy"))
|
||||||
|
assert.Equal(t, m.values[0].VolumeOptions != nil, true)
|
||||||
|
assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true)
|
||||||
|
|
||||||
|
m = MountOpt{}
|
||||||
|
assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1"))
|
||||||
|
assert.Equal(t, m.values[0].VolumeOptions != nil, true)
|
||||||
|
assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMountOptTypeConflict(t *testing.T) {
|
||||||
|
var m MountOpt
|
||||||
|
assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix")
|
||||||
|
assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix")
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,6 +9,7 @@ import (
|
||||||
|
|
||||||
"github.com/docker/docker/api/client"
|
"github.com/docker/docker/api/client"
|
||||||
"github.com/docker/docker/cli"
|
"github.com/docker/docker/cli"
|
||||||
|
"github.com/docker/engine-api/types"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -77,7 +78,7 @@ func runServiceScale(dockerCli *client.DockerCli, serviceID string, scale string
|
||||||
}
|
}
|
||||||
serviceMode.Replicated.Replicas = &uintScale
|
serviceMode.Replicated.Replicas = &uintScale
|
||||||
|
|
||||||
err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, nil)
|
err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
|
|
||||||
"github.com/docker/docker/api/client"
|
"github.com/docker/docker/api/client"
|
||||||
"github.com/docker/docker/api/client/idresolver"
|
"github.com/docker/docker/api/client/idresolver"
|
||||||
|
"github.com/docker/docker/api/client/node"
|
||||||
"github.com/docker/docker/api/client/task"
|
"github.com/docker/docker/api/client/task"
|
||||||
"github.com/docker/docker/cli"
|
"github.com/docker/docker/cli"
|
||||||
"github.com/docker/docker/opts"
|
"github.com/docker/docker/opts"
|
||||||
|
@ -51,9 +52,21 @@ func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
|
||||||
|
|
||||||
filter := opts.filter.Value()
|
filter := opts.filter.Value()
|
||||||
filter.Add("service", service.ID)
|
filter.Add("service", service.ID)
|
||||||
if !opts.all && !filter.Include("desired_state") {
|
if !opts.all && !filter.Include("desired-state") {
|
||||||
filter.Add("desired_state", string(swarm.TaskStateRunning))
|
filter.Add("desired-state", string(swarm.TaskStateRunning))
|
||||||
filter.Add("desired_state", string(swarm.TaskStateAccepted))
|
filter.Add("desired-state", string(swarm.TaskStateAccepted))
|
||||||
|
}
|
||||||
|
|
||||||
|
if filter.Include("node") {
|
||||||
|
nodeFilters := filter.Get("node")
|
||||||
|
for _, nodeFilter := range nodeFilters {
|
||||||
|
nodeReference, err := node.Reference(client, ctx, nodeFilter)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
filter.Del("node", nodeFilter)
|
||||||
|
filter.Add("node", nodeReference)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter})
|
tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter})
|
||||||
|
|
|
@ -10,6 +10,7 @@ import (
|
||||||
"github.com/docker/docker/cli"
|
"github.com/docker/docker/cli"
|
||||||
"github.com/docker/docker/opts"
|
"github.com/docker/docker/opts"
|
||||||
runconfigopts "github.com/docker/docker/runconfig/opts"
|
runconfigopts "github.com/docker/docker/runconfig/opts"
|
||||||
|
"github.com/docker/engine-api/types"
|
||||||
"github.com/docker/engine-api/types/swarm"
|
"github.com/docker/engine-api/types/swarm"
|
||||||
"github.com/docker/go-connections/nat"
|
"github.com/docker/go-connections/nat"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
@ -39,7 +40,7 @@ func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID string) error {
|
func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID string) error {
|
||||||
apiClient := dockerCli.Client()
|
apiClient := dockerCli.Client()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
headers := map[string][]string{}
|
updateOpts := types.ServiceUpdateOptions{}
|
||||||
|
|
||||||
service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID)
|
service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -64,10 +65,10 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID stri
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
headers["X-Registry-Auth"] = []string{encodedAuth}
|
updateOpts.EncodedRegistryAuth = encodedAuth
|
||||||
}
|
}
|
||||||
|
|
||||||
err = apiClient.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, headers)
|
err = apiClient.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, updateOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,7 +29,7 @@ func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTopLevelDeployCommand return a command for `docker deploy`
|
// NewTopLevelDeployCommand returns a command for `docker deploy`
|
||||||
func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command {
|
func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
cmd := newDeployCommand(dockerCli)
|
cmd := newDeployCommand(dockerCli)
|
||||||
// Remove the aliases at the top level
|
// Remove the aliases at the top level
|
||||||
|
|
|
@ -184,13 +184,13 @@ func deployServices(
|
||||||
if service, exists := existingServiceMap[name]; exists {
|
if service, exists := existingServiceMap[name]; exists {
|
||||||
fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID)
|
fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID)
|
||||||
|
|
||||||
// TODO(nishanttotla): Pass headers with X-Registry-Auth
|
// TODO(nishanttotla): Pass auth token
|
||||||
if err := apiClient.ServiceUpdate(
|
if err := apiClient.ServiceUpdate(
|
||||||
ctx,
|
ctx,
|
||||||
service.ID,
|
service.ID,
|
||||||
service.Version,
|
service.Version,
|
||||||
serviceSpec,
|
serviceSpec,
|
||||||
nil,
|
types.ServiceUpdateOptions{},
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -198,7 +198,7 @@ func deployServices(
|
||||||
fmt.Fprintf(out, "Creating service %s\n", name)
|
fmt.Fprintf(out, "Creating service %s\n", name)
|
||||||
|
|
||||||
// TODO(nishanttotla): Pass headers with X-Registry-Auth
|
// TODO(nishanttotla): Pass headers with X-Registry-Auth
|
||||||
if _, err := apiClient.ServiceCreate(ctx, serviceSpec, nil); err != nil {
|
if _, err := apiClient.ServiceCreate(ctx, serviceSpec, types.ServiceCreateOptions{}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -35,6 +35,7 @@ func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefil
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
defer reader.Close()
|
||||||
|
|
||||||
bundle, err := bundlefile.LoadFile(reader)
|
bundle, err := bundlefile.LoadFile(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -63,6 +63,11 @@ func runRemove(dockerCli *client.DockerCli, opts removeOptions) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(services) == 0 && len(networks) == 0 {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
if hasError {
|
if hasError {
|
||||||
return fmt.Errorf("Failed to remove some resources")
|
return fmt.Errorf("Failed to remove some resources")
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,6 +3,8 @@
|
||||||
package stack
|
package stack
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
"github.com/docker/docker/api/client"
|
"github.com/docker/docker/api/client"
|
||||||
|
@ -43,14 +45,15 @@ func newTasksCommand(dockerCli *client.DockerCli) *cobra.Command {
|
||||||
}
|
}
|
||||||
|
|
||||||
func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
|
func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
|
||||||
|
namespace := opts.namespace
|
||||||
client := dockerCli.Client()
|
client := dockerCli.Client()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
filter := opts.filter.Value()
|
filter := opts.filter.Value()
|
||||||
filter.Add("label", labelNamespace+"="+opts.namespace)
|
filter.Add("label", labelNamespace+"="+opts.namespace)
|
||||||
if !opts.all && !filter.Include("desired_state") {
|
if !opts.all && !filter.Include("desired-state") {
|
||||||
filter.Add("desired_state", string(swarm.TaskStateRunning))
|
filter.Add("desired-state", string(swarm.TaskStateRunning))
|
||||||
filter.Add("desired_state", string(swarm.TaskStateAccepted))
|
filter.Add("desired-state", string(swarm.TaskStateAccepted))
|
||||||
}
|
}
|
||||||
|
|
||||||
tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter})
|
tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter})
|
||||||
|
@ -58,5 +61,10 @@ func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(tasks) == 0 {
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve))
|
return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve))
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,6 +12,13 @@ import (
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
generatedSecretEntropyBytes = 16
|
||||||
|
generatedSecretBase = 36
|
||||||
|
// floor(log(2^128-1, 36)) + 1
|
||||||
|
maxGeneratedSecretLength = 25
|
||||||
|
)
|
||||||
|
|
||||||
type initOptions struct {
|
type initOptions struct {
|
||||||
swarmOptions
|
swarmOptions
|
||||||
listenAddr NodeAddrOption
|
listenAddr NodeAddrOption
|
||||||
|
@ -46,6 +53,12 @@ func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions
|
||||||
client := dockerCli.Client()
|
client := dockerCli.Client()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// If no secret was specified, we create a random one
|
||||||
|
if !flags.Changed("secret") {
|
||||||
|
opts.secret = generateRandomSecret()
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "No --secret provided. Generated random secret:\n\t%s\n\n", opts.secret)
|
||||||
|
}
|
||||||
|
|
||||||
req := swarm.InitRequest{
|
req := swarm.InitRequest{
|
||||||
ListenAddr: opts.listenAddr.String(),
|
ListenAddr: opts.listenAddr.String(),
|
||||||
ForceNewCluster: opts.forceNewCluster,
|
ForceNewCluster: opts.forceNewCluster,
|
||||||
|
@ -56,6 +69,27 @@ func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fmt.Printf("Swarm initialized: current node (%s) is now a manager.\n", nodeID)
|
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID)
|
||||||
|
|
||||||
|
// Fetch CAHash and Address from the API
|
||||||
|
info, err := client.Info(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if node.ManagerStatus != nil && info.Swarm.CACertHash != "" {
|
||||||
|
var secretArgs string
|
||||||
|
if opts.secret != "" {
|
||||||
|
secretArgs = "--secret " + opts.secret
|
||||||
|
}
|
||||||
|
fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\tdocker swarm join %s \\\n\t--ca-hash %s \\\n\t%s\n", secretArgs, info.Swarm.CACertHash, node.ManagerStatus.Addr)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,10 +14,10 @@ import (
|
||||||
|
|
||||||
const (
|
const (
|
||||||
defaultListenAddr = "0.0.0.0:2377"
|
defaultListenAddr = "0.0.0.0:2377"
|
||||||
// WORKER constant for worker name
|
|
||||||
WORKER = "WORKER"
|
worker = "WORKER"
|
||||||
// MANAGER constant for manager name
|
manager = "MANAGER"
|
||||||
MANAGER = "MANAGER"
|
none = "NONE"
|
||||||
|
|
||||||
flagAutoAccept = "auto-accept"
|
flagAutoAccept = "auto-accept"
|
||||||
flagCertExpiry = "cert-expiry"
|
flagCertExpiry = "cert-expiry"
|
||||||
|
@ -30,8 +30,8 @@ const (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
defaultPolicies = []swarm.Policy{
|
defaultPolicies = []swarm.Policy{
|
||||||
{Role: WORKER, Autoaccept: true},
|
{Role: worker, Autoaccept: true},
|
||||||
{Role: MANAGER, Autoaccept: false},
|
{Role: manager, Autoaccept: false},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -86,40 +86,33 @@ func NewListenAddrOption() NodeAddrOption {
|
||||||
|
|
||||||
// AutoAcceptOption is a value type for auto-accept policy
|
// AutoAcceptOption is a value type for auto-accept policy
|
||||||
type AutoAcceptOption struct {
|
type AutoAcceptOption struct {
|
||||||
values map[string]bool
|
values map[string]struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// String prints a string representation of this option
|
// String prints a string representation of this option
|
||||||
func (o *AutoAcceptOption) String() string {
|
func (o *AutoAcceptOption) String() string {
|
||||||
keys := []string{}
|
keys := []string{}
|
||||||
for key, value := range o.values {
|
for key := range o.values {
|
||||||
keys = append(keys, fmt.Sprintf("%s=%v", strings.ToLower(key), value))
|
keys = append(keys, fmt.Sprintf("%s=true", strings.ToLower(key)))
|
||||||
}
|
}
|
||||||
return strings.Join(keys, ", ")
|
return strings.Join(keys, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set sets a new value on this option
|
// Set sets a new value on this option
|
||||||
func (o *AutoAcceptOption) Set(value string) error {
|
func (o *AutoAcceptOption) Set(acceptValues string) error {
|
||||||
value = strings.ToUpper(value)
|
for _, value := range strings.Split(acceptValues, ",") {
|
||||||
switch value {
|
value = strings.ToUpper(value)
|
||||||
case "", "NONE":
|
switch value {
|
||||||
if accept, ok := o.values[WORKER]; ok && accept {
|
case none, worker, manager:
|
||||||
return fmt.Errorf("value NONE is incompatible with %s", WORKER)
|
o.values[value] = struct{}{}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("must be one / combination of %s, %s; or NONE", worker, manager)
|
||||||
}
|
}
|
||||||
if accept, ok := o.values[MANAGER]; ok && accept {
|
|
||||||
return fmt.Errorf("value NONE is incompatible with %s", MANAGER)
|
|
||||||
}
|
|
||||||
o.values[WORKER] = false
|
|
||||||
o.values[MANAGER] = false
|
|
||||||
case WORKER, MANAGER:
|
|
||||||
if accept, ok := o.values[value]; ok && !accept {
|
|
||||||
return fmt.Errorf("value NONE is incompatible with %s", value)
|
|
||||||
}
|
|
||||||
o.values[value] = true
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("must be one of %s, %s, NONE", WORKER, MANAGER)
|
|
||||||
}
|
}
|
||||||
|
// NONE must stand alone, so if any non-NONE setting exist with it, error with conflict
|
||||||
|
if o.isPresent(none) && len(o.values) > 1 {
|
||||||
|
return fmt.Errorf("value NONE cannot be specified alongside other node types")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -133,7 +126,11 @@ func (o *AutoAcceptOption) Policies(secret *string) []swarm.Policy {
|
||||||
policies := []swarm.Policy{}
|
policies := []swarm.Policy{}
|
||||||
for _, p := range defaultPolicies {
|
for _, p := range defaultPolicies {
|
||||||
if len(o.values) != 0 {
|
if len(o.values) != 0 {
|
||||||
p.Autoaccept = o.values[string(p.Role)]
|
if _, ok := o.values[string(p.Role)]; ok {
|
||||||
|
p.Autoaccept = true
|
||||||
|
} else {
|
||||||
|
p.Autoaccept = false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
p.Secret = secret
|
p.Secret = secret
|
||||||
policies = append(policies, p)
|
policies = append(policies, p)
|
||||||
|
@ -141,9 +138,15 @@ func (o *AutoAcceptOption) Policies(secret *string) []swarm.Policy {
|
||||||
return policies
|
return policies
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isPresent returns whether the key exists in the set or not
|
||||||
|
func (o *AutoAcceptOption) isPresent(key string) bool {
|
||||||
|
_, c := o.values[key]
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
// NewAutoAcceptOption returns a new auto-accept option
|
// NewAutoAcceptOption returns a new auto-accept option
|
||||||
func NewAutoAcceptOption() AutoAcceptOption {
|
func NewAutoAcceptOption() AutoAcceptOption {
|
||||||
return AutoAcceptOption{values: make(map[string]bool)}
|
return AutoAcceptOption{values: make(map[string]struct{})}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExternalCAOption is a Value type for parsing external CA specifications.
|
// ExternalCAOption is a Value type for parsing external CA specifications.
|
||||||
|
@ -237,7 +240,7 @@ func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) {
|
||||||
|
|
||||||
func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
|
func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
|
||||||
flags.Var(&opts.autoAccept, flagAutoAccept, "Auto acceptance policy (worker, manager or none)")
|
flags.Var(&opts.autoAccept, flagAutoAccept, "Auto acceptance policy (worker, manager or none)")
|
||||||
flags.StringVar(&opts.secret, flagSecret, "", "Set secret value needed to accept nodes into cluster")
|
flags.StringVar(&opts.secret, flagSecret, "", "Set secret value needed to join a cluster")
|
||||||
flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 10, "Task history retention limit")
|
flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 10, "Task history retention limit")
|
||||||
flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period")
|
flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period")
|
||||||
flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates")
|
flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates")
|
||||||
|
|
|
@ -40,35 +40,51 @@ func TestNodeAddrOptionSetInvalidFormat(t *testing.T) {
|
||||||
func TestAutoAcceptOptionSetWorker(t *testing.T) {
|
func TestAutoAcceptOptionSetWorker(t *testing.T) {
|
||||||
opt := NewAutoAcceptOption()
|
opt := NewAutoAcceptOption()
|
||||||
assert.NilError(t, opt.Set("worker"))
|
assert.NilError(t, opt.Set("worker"))
|
||||||
assert.Equal(t, opt.values[WORKER], true)
|
assert.Equal(t, opt.isPresent(worker), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAutoAcceptOptionSetManager(t *testing.T) {
|
func TestAutoAcceptOptionSetManager(t *testing.T) {
|
||||||
opt := NewAutoAcceptOption()
|
opt := NewAutoAcceptOption()
|
||||||
assert.NilError(t, opt.Set("manager"))
|
assert.NilError(t, opt.Set("manager"))
|
||||||
assert.Equal(t, opt.values[MANAGER], true)
|
assert.Equal(t, opt.isPresent(manager), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAutoAcceptOptionSetInvalid(t *testing.T) {
|
func TestAutoAcceptOptionSetInvalid(t *testing.T) {
|
||||||
opt := NewAutoAcceptOption()
|
opt := NewAutoAcceptOption()
|
||||||
assert.Error(t, opt.Set("bogus"), "must be one of")
|
assert.Error(t, opt.Set("bogus"), "must be one / combination")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutoAcceptOptionSetEmpty(t *testing.T) {
|
||||||
|
opt := NewAutoAcceptOption()
|
||||||
|
assert.Error(t, opt.Set(""), "must be one / combination")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAutoAcceptOptionSetNone(t *testing.T) {
|
func TestAutoAcceptOptionSetNone(t *testing.T) {
|
||||||
opt := NewAutoAcceptOption()
|
opt := NewAutoAcceptOption()
|
||||||
assert.NilError(t, opt.Set("none"))
|
assert.NilError(t, opt.Set("none"))
|
||||||
assert.Equal(t, opt.values[MANAGER], false)
|
assert.Equal(t, opt.isPresent(manager), false)
|
||||||
assert.Equal(t, opt.values[WORKER], false)
|
assert.Equal(t, opt.isPresent(worker), false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutoAcceptOptionSetTwo(t *testing.T) {
|
||||||
|
opt := NewAutoAcceptOption()
|
||||||
|
assert.NilError(t, opt.Set("worker,manager"))
|
||||||
|
assert.Equal(t, opt.isPresent(manager), true)
|
||||||
|
assert.Equal(t, opt.isPresent(worker), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAutoAcceptOptionSetConflict(t *testing.T) {
|
func TestAutoAcceptOptionSetConflict(t *testing.T) {
|
||||||
opt := NewAutoAcceptOption()
|
opt := NewAutoAcceptOption()
|
||||||
assert.NilError(t, opt.Set("manager"))
|
assert.Error(t, opt.Set("none,manager"), "value NONE cannot be specified alongside other node types")
|
||||||
assert.Error(t, opt.Set("none"), "value NONE is incompatible with MANAGER")
|
|
||||||
|
|
||||||
opt = NewAutoAcceptOption()
|
opt = NewAutoAcceptOption()
|
||||||
assert.NilError(t, opt.Set("none"))
|
assert.Error(t, opt.Set("none,worker"), "value NONE cannot be specified alongside other node types")
|
||||||
assert.Error(t, opt.Set("worker"), "value NONE is incompatible with WORKER")
|
|
||||||
|
opt = NewAutoAcceptOption()
|
||||||
|
assert.Error(t, opt.Set("worker,none,manager"), "value NONE cannot be specified alongside other node types")
|
||||||
|
|
||||||
|
opt = NewAutoAcceptOption()
|
||||||
|
assert.Error(t, opt.Set("worker,manager,none"), "value NONE cannot be specified alongside other node types")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAutoAcceptOptionPoliciesDefault(t *testing.T) {
|
func TestAutoAcceptOptionPoliciesDefault(t *testing.T) {
|
||||||
|
@ -78,12 +94,12 @@ func TestAutoAcceptOptionPoliciesDefault(t *testing.T) {
|
||||||
policies := opt.Policies(&secret)
|
policies := opt.Policies(&secret)
|
||||||
assert.Equal(t, len(policies), 2)
|
assert.Equal(t, len(policies), 2)
|
||||||
assert.Equal(t, policies[0], swarm.Policy{
|
assert.Equal(t, policies[0], swarm.Policy{
|
||||||
Role: WORKER,
|
Role: worker,
|
||||||
Autoaccept: true,
|
Autoaccept: true,
|
||||||
Secret: &secret,
|
Secret: &secret,
|
||||||
})
|
})
|
||||||
assert.Equal(t, policies[1], swarm.Policy{
|
assert.Equal(t, policies[1], swarm.Policy{
|
||||||
Role: MANAGER,
|
Role: manager,
|
||||||
Autoaccept: false,
|
Autoaccept: false,
|
||||||
Secret: &secret,
|
Secret: &secret,
|
||||||
})
|
})
|
||||||
|
@ -98,12 +114,12 @@ func TestAutoAcceptOptionPoliciesWithManager(t *testing.T) {
|
||||||
policies := opt.Policies(&secret)
|
policies := opt.Policies(&secret)
|
||||||
assert.Equal(t, len(policies), 2)
|
assert.Equal(t, len(policies), 2)
|
||||||
assert.Equal(t, policies[0], swarm.Policy{
|
assert.Equal(t, policies[0], swarm.Policy{
|
||||||
Role: WORKER,
|
Role: worker,
|
||||||
Autoaccept: false,
|
Autoaccept: false,
|
||||||
Secret: &secret,
|
Secret: &secret,
|
||||||
})
|
})
|
||||||
assert.Equal(t, policies[1], swarm.Policy{
|
assert.Equal(t, policies[1], swarm.Policy{
|
||||||
Role: MANAGER,
|
Role: manager,
|
||||||
Autoaccept: true,
|
Autoaccept: true,
|
||||||
Secret: &secret,
|
Secret: &secret,
|
||||||
})
|
})
|
||||||
|
|
19
api/client/swarm/secret.go
Normal file
19
api/client/swarm/secret.go
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
package swarm
|
||||||
|
|
||||||
|
import (
|
||||||
|
cryptorand "crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
func generateRandomSecret() string {
|
||||||
|
var secretBytes [generatedSecretEntropyBytes]byte
|
||||||
|
|
||||||
|
if _, err := cryptorand.Read(secretBytes[:]); err != nil {
|
||||||
|
panic(fmt.Errorf("failed to read random bytes: %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
var nn big.Int
|
||||||
|
nn.SetBytes(secretBytes[:])
|
||||||
|
return fmt.Sprintf("%0[1]*s", maxGeneratedSecretLength, nn.Text(generatedSecretBase))
|
||||||
|
}
|
|
@ -47,7 +47,8 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOpti
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("Swarm updated.")
|
fmt.Fprintln(dockerCli.Out(), "Swarm updated.")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -16,7 +16,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
psTaskItemFmt = "%s\t%s\t%s\t%s\t%s %s\t%s\t%s\n"
|
psTaskItemFmt = "%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n"
|
||||||
)
|
)
|
||||||
|
|
||||||
type tasksBySlot []swarm.Task
|
type tasksBySlot []swarm.Task
|
||||||
|
@ -69,7 +69,7 @@ func Print(dockerCli *client.DockerCli, ctx context.Context, tasks []swarm.Task,
|
||||||
serviceValue,
|
serviceValue,
|
||||||
task.Spec.ContainerSpec.Image,
|
task.Spec.ContainerSpec.Image,
|
||||||
client.PrettyPrint(task.Status.State),
|
client.PrettyPrint(task.Status.State),
|
||||||
units.HumanDuration(time.Since(task.Status.Timestamp)),
|
strings.ToLower(units.HumanDuration(time.Since(task.Status.Timestamp))),
|
||||||
client.PrettyPrint(task.DesiredState),
|
client.PrettyPrint(task.DesiredState),
|
||||||
nodeValue,
|
nodeValue,
|
||||||
)
|
)
|
||||||
|
|
|
@ -33,7 +33,7 @@ func runRemove(dockerCli *client.DockerCli, volumes []string) error {
|
||||||
status = 1
|
status = 1
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Fprintf(dockerCli.Err(), "%s\n", name)
|
fmt.Fprintf(dockerCli.Out(), "%s\n", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
if status != 0 {
|
if status != 0 {
|
||||||
|
|
|
@ -1,14 +1,18 @@
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"encoding/pem"
|
||||||
"fmt"
|
"fmt"
|
||||||
"mime"
|
"mime"
|
||||||
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/pkg/system"
|
"github.com/docker/docker/pkg/system"
|
||||||
"github.com/docker/engine-api/types"
|
"github.com/docker/engine-api/types"
|
||||||
"github.com/docker/libtrust"
|
"github.com/docker/libtrust"
|
||||||
|
@ -135,7 +139,11 @@ func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Error generating key: %s", err)
|
return nil, fmt.Errorf("Error generating key: %s", err)
|
||||||
}
|
}
|
||||||
if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil {
|
encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error serializing key: %s", err)
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
|
||||||
return nil, fmt.Errorf("Error saving key file: %s", err)
|
return nil, fmt.Errorf("Error saving key file: %s", err)
|
||||||
}
|
}
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
|
@ -143,3 +151,19 @@ func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
|
||||||
}
|
}
|
||||||
return trustKey, nil
|
return trustKey, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
|
||||||
|
if ext == ".json" || ext == ".jwk" {
|
||||||
|
encoded, err = json.Marshal(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pemBlock, err := key.PEMBlock()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
|
||||||
|
}
|
||||||
|
encoded = pem.EncodeToMemory(pemBlock)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
|
@ -40,9 +41,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
|
||||||
|
|
||||||
var postForm map[string]interface{}
|
var postForm map[string]interface{}
|
||||||
if err := json.Unmarshal(b, &postForm); err == nil {
|
if err := json.Unmarshal(b, &postForm); err == nil {
|
||||||
if _, exists := postForm["password"]; exists {
|
maskSecretKeys(postForm)
|
||||||
postForm["password"] = "*****"
|
|
||||||
}
|
|
||||||
formStr, errMarshal := json.Marshal(postForm)
|
formStr, errMarshal := json.Marshal(postForm)
|
||||||
if errMarshal == nil {
|
if errMarshal == nil {
|
||||||
logrus.Debugf("form data: %s", string(formStr))
|
logrus.Debugf("form data: %s", string(formStr))
|
||||||
|
@ -54,3 +53,24 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
|
||||||
return handler(ctx, w, r, vars)
|
return handler(ctx, w, r, vars)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func maskSecretKeys(inp interface{}) {
|
||||||
|
if arr, ok := inp.([]interface{}); ok {
|
||||||
|
for _, f := range arr {
|
||||||
|
maskSecretKeys(f)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if form, ok := inp.(map[string]interface{}); ok {
|
||||||
|
loop0:
|
||||||
|
for k, v := range form {
|
||||||
|
for _, m := range []string{"password", "secret"} {
|
||||||
|
if strings.EqualFold(m, k) {
|
||||||
|
form[k] = "*****"
|
||||||
|
continue loop0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
maskSecretKeys(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -32,17 +32,17 @@ type copyBackend interface {
|
||||||
|
|
||||||
// stateBackend includes functions to implement to provide container state lifecycle functionality.
|
// stateBackend includes functions to implement to provide container state lifecycle functionality.
|
||||||
type stateBackend interface {
|
type stateBackend interface {
|
||||||
ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
|
ContainerCreate(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error)
|
||||||
ContainerKill(name string, sig uint64) error
|
ContainerKill(name string, sig uint64) error
|
||||||
ContainerPause(name string) error
|
ContainerPause(name string) error
|
||||||
ContainerRename(oldName, newName string) error
|
ContainerRename(oldName, newName string) error
|
||||||
ContainerResize(name string, height, width int) error
|
ContainerResize(name string, height, width int) error
|
||||||
ContainerRestart(name string, seconds int) error
|
ContainerRestart(name string, seconds int) error
|
||||||
ContainerRm(name string, config *types.ContainerRmConfig) error
|
ContainerRm(name string, config *types.ContainerRmConfig) error
|
||||||
ContainerStart(name string, hostConfig *container.HostConfig) error
|
ContainerStart(name string, hostConfig *container.HostConfig, validateHostname bool) error
|
||||||
ContainerStop(name string, seconds int) error
|
ContainerStop(name string, seconds int) error
|
||||||
ContainerUnpause(name string) error
|
ContainerUnpause(name string) error
|
||||||
ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error)
|
ContainerUpdate(name string, hostConfig *container.HostConfig, validateHostname bool) ([]string, error)
|
||||||
ContainerWait(name string, timeout time.Duration) (int, error)
|
ContainerWait(name string, timeout time.Duration) (int, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -132,10 +132,10 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon
|
||||||
// including r.TransferEncoding
|
// including r.TransferEncoding
|
||||||
// allow a nil body for backwards compatibility
|
// allow a nil body for backwards compatibility
|
||||||
|
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
var hostConfig *container.HostConfig
|
var hostConfig *container.HostConfig
|
||||||
// A non-nil json object is at least 7 characters.
|
// A non-nil json object is at least 7 characters.
|
||||||
if r.ContentLength > 7 || r.ContentLength == -1 {
|
if r.ContentLength > 7 || r.ContentLength == -1 {
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.24") {
|
if versions.GreaterThanOrEqualTo(version, "1.24") {
|
||||||
return validationError{fmt.Errorf("starting container with HostConfig was deprecated since v1.10 and removed in v1.12")}
|
return validationError{fmt.Errorf("starting container with HostConfig was deprecated since v1.10 and removed in v1.12")}
|
||||||
}
|
}
|
||||||
|
@ -151,7 +151,8 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon
|
||||||
hostConfig = c
|
hostConfig = c
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.backend.ContainerStart(vars["name"], hostConfig); err != nil {
|
validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
|
||||||
|
if err := s.backend.ContainerStart(vars["name"], hostConfig, validateHostname); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
@ -311,6 +312,7 @@ func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.Respon
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
var updateConfig container.UpdateConfig
|
var updateConfig container.UpdateConfig
|
||||||
|
|
||||||
decoder := json.NewDecoder(r.Body)
|
decoder := json.NewDecoder(r.Body)
|
||||||
|
@ -324,7 +326,8 @@ func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.Respon
|
||||||
}
|
}
|
||||||
|
|
||||||
name := vars["name"]
|
name := vars["name"]
|
||||||
warnings, err := s.backend.ContainerUpdate(name, hostConfig)
|
validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
|
||||||
|
warnings, err := s.backend.ContainerUpdate(name, hostConfig, validateHostname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -351,13 +354,14 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
adjustCPUShares := versions.LessThan(version, "1.19")
|
adjustCPUShares := versions.LessThan(version, "1.19")
|
||||||
|
|
||||||
|
validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
|
||||||
ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
|
ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
|
||||||
Name: name,
|
Name: name,
|
||||||
Config: config,
|
Config: config,
|
||||||
HostConfig: hostConfig,
|
HostConfig: hostConfig,
|
||||||
NetworkingConfig: networkingConfig,
|
NetworkingConfig: networkingConfig,
|
||||||
AdjustCPUShares: adjustCPUShares,
|
AdjustCPUShares: adjustCPUShares,
|
||||||
})
|
}, validateHostname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -81,6 +81,10 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if _, err := n.clusterProvider.GetNetwork(create.Name); err == nil {
|
||||||
|
return libnetwork.NetworkNameError(create.Name)
|
||||||
|
}
|
||||||
|
|
||||||
nw, err := n.backend.CreateNetwork(create)
|
nw, err := n.backend.CreateNetwork(create)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
|
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
|
||||||
|
|
|
@ -116,7 +116,7 @@ type Backend interface {
|
||||||
// ContainerAttachRaw attaches to container.
|
// ContainerAttachRaw attaches to container.
|
||||||
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
|
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
|
||||||
// ContainerCreate creates a new Docker container and returns potential warnings
|
// ContainerCreate creates a new Docker container and returns potential warnings
|
||||||
ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
|
ContainerCreate(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error)
|
||||||
// ContainerRm removes a container specified by `id`.
|
// ContainerRm removes a container specified by `id`.
|
||||||
ContainerRm(name string, config *types.ContainerRmConfig) error
|
ContainerRm(name string, config *types.ContainerRmConfig) error
|
||||||
// Commit creates a new Docker image from an existing Docker container.
|
// Commit creates a new Docker image from an existing Docker container.
|
||||||
|
@ -124,7 +124,7 @@ type Backend interface {
|
||||||
// ContainerKill stops the container execution abruptly.
|
// ContainerKill stops the container execution abruptly.
|
||||||
ContainerKill(containerID string, sig uint64) error
|
ContainerKill(containerID string, sig uint64) error
|
||||||
// ContainerStart starts a new container
|
// ContainerStart starts a new container
|
||||||
ContainerStart(containerID string, hostConfig *container.HostConfig) error
|
ContainerStart(containerID string, hostConfig *container.HostConfig, validateHostname bool) error
|
||||||
// ContainerWait stops processing until the given container is stopped.
|
// ContainerWait stops processing until the given container is stopped.
|
||||||
ContainerWait(containerID string, timeout time.Duration) (int, error)
|
ContainerWait(containerID string, timeout time.Duration) (int, error)
|
||||||
// ContainerUpdateCmdOnBuild updates container.Path and container.Args
|
// ContainerUpdateCmdOnBuild updates container.Path and container.Args
|
||||||
|
|
|
@ -181,7 +181,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalD
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
|
container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -421,7 +421,7 @@ func (b *Builder) processImageFrom(img builder.Image) error {
|
||||||
fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
|
fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
|
// Copy the ONBUILD triggers, and remove them from the config, since the config will be comitted.
|
||||||
onBuildTriggers := b.runConfig.OnBuild
|
onBuildTriggers := b.runConfig.OnBuild
|
||||||
b.runConfig.OnBuild = []string{}
|
b.runConfig.OnBuild = []string{}
|
||||||
|
|
||||||
|
@ -508,7 +508,7 @@ func (b *Builder) create() (string, error) {
|
||||||
c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
|
c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
|
||||||
Config: b.runConfig,
|
Config: b.runConfig,
|
||||||
HostConfig: hostConfig,
|
HostConfig: hostConfig,
|
||||||
})
|
}, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
@ -552,7 +552,7 @@ func (b *Builder) run(cID string) (err error) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err := b.docker.ContainerStart(cID, nil); err != nil {
|
if err := b.docker.ContainerStart(cID, nil, true); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -7,7 +7,7 @@ import (
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NoArgs validate args and returns an error if there are any args
|
// NoArgs validates args and returns an error if there are any args
|
||||||
func NoArgs(cmd *cobra.Command, args []string) error {
|
func NoArgs(cmd *cobra.Command, args []string) error {
|
||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -195,7 +195,7 @@ func (s *State) ExitCode() int {
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetExitCode set current exitcode for the state. Take lock before if state
|
// SetExitCode sets current exitcode for the state. Take lock before if state
|
||||||
// may be shared.
|
// may be shared.
|
||||||
func (s *State) SetExitCode(ec int) {
|
func (s *State) SetExitCode(ec int) {
|
||||||
s.exitCode = ec
|
s.exitCode = ec
|
||||||
|
@ -214,7 +214,7 @@ func (s *State) SetRunning(pid int, initial bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetStoppedLocking locks the container state is sets it to "stopped".
|
// SetStoppedLocking locks the container state and sets it to "stopped".
|
||||||
func (s *State) SetStoppedLocking(exitStatus *ExitStatus) {
|
func (s *State) SetStoppedLocking(exitStatus *ExitStatus) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
s.SetStopped(exitStatus)
|
s.SetStopped(exitStatus)
|
||||||
|
@ -290,7 +290,7 @@ func (s *State) SetRemovalInProgress() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResetRemovalInProgress make the RemovalInProgress state to false.
|
// ResetRemovalInProgress makes the RemovalInProgress state to false.
|
||||||
func (s *State) ResetRemovalInProgress() {
|
func (s *State) ResetRemovalInProgress() {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
s.RemovalInProgress = false
|
s.RemovalInProgress = false
|
||||||
|
|
|
@ -21,6 +21,8 @@
|
||||||
# setting environment variables.
|
# setting environment variables.
|
||||||
#
|
#
|
||||||
# DOCKER_COMPLETION_SHOW_NETWORK_IDS
|
# DOCKER_COMPLETION_SHOW_NETWORK_IDS
|
||||||
|
# DOCKER_COMPLETION_SHOW_NODE_IDS
|
||||||
|
# DOCKER_COMPLETION_SHOW_SERVICE_IDS
|
||||||
# "no" - Show names only (default)
|
# "no" - Show names only (default)
|
||||||
# "yes" - Show names and ids
|
# "yes" - Show names and ids
|
||||||
#
|
#
|
||||||
|
@ -197,53 +199,84 @@ __docker_complete_runtimes() {
|
||||||
COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") )
|
COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") )
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Returns a list of all nodes. Additional arguments to `docker node`
|
||||||
|
# may be specified in order to filter the node list, e.g.
|
||||||
|
# `__docker_nodes --filter role=manager`
|
||||||
|
# By default, only node names are completed.
|
||||||
|
# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs.
|
||||||
|
# An optional first argument `--id|--name` may be used to limit
|
||||||
|
# the output to the IDs or names of matching nodes. This setting takes
|
||||||
|
# precedence over the environment setting.
|
||||||
__docker_nodes() {
|
__docker_nodes() {
|
||||||
local fields='$1,$2' # node names & IDs
|
local fields='$2' # default: node name only
|
||||||
__docker_q node ls | sed -e 's/\*//g' | awk "NR>1 {print $fields}"
|
[ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name
|
||||||
|
|
||||||
|
if [ "$1" = "--id" ] ; then
|
||||||
|
fields='$1' # IDs only
|
||||||
|
shift
|
||||||
|
elif [ "$1" = "--name" ] ; then
|
||||||
|
fields='$2' # names only
|
||||||
|
shift
|
||||||
|
fi
|
||||||
|
__docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Applies completion of nodes based on the current value of `$cur` or
|
||||||
|
# the value of the optional first argument `--cur`, if given.
|
||||||
|
# Additional filters may be appended, see `__docker_nodes`.
|
||||||
__docker_complete_nodes() {
|
__docker_complete_nodes() {
|
||||||
COMPREPLY=( $(compgen -W "$(__docker_nodes $1)" -- "$cur") )
|
local current=$cur
|
||||||
|
if [ "$1" = "--cur" ] ; then
|
||||||
|
current="$2"
|
||||||
|
shift 2
|
||||||
|
fi
|
||||||
|
COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") )
|
||||||
}
|
}
|
||||||
|
|
||||||
__docker_complete_nodes_plus_self() {
|
__docker_complete_nodes_plus_self() {
|
||||||
COMPREPLY=( $(compgen -W "$(__docker_nodes $1) self" -- "$cur") )
|
__docker_complete_nodes "$@"
|
||||||
}
|
COMPREPLY+=( self )
|
||||||
|
|
||||||
__docker_pending_nodes() {
|
|
||||||
local fields='$1' # node ID
|
|
||||||
__docker_q node ls --filter membership=pending | awk "NR>1 {print $fields}"
|
|
||||||
}
|
|
||||||
|
|
||||||
__docker_complete_pending_nodes() {
|
|
||||||
COMPREPLY=( $(compgen -W "$(__docker_pending_nodes $1)" -- "$cur") )
|
|
||||||
}
|
|
||||||
|
|
||||||
__docker_manager_nodes() {
|
|
||||||
local fields='$1,$2' # node names & IDs
|
|
||||||
__docker_q node ls --filter role=manager | awk "NR>1 {print $fields}"
|
|
||||||
}
|
|
||||||
|
|
||||||
__docker_complete_manager_nodes() {
|
|
||||||
COMPREPLY=( $(compgen -W "$(__docker_manager_nodes $1)" -- "$cur") )
|
|
||||||
}
|
|
||||||
|
|
||||||
__docker_worker_nodes() {
|
|
||||||
local fields='$1,$2' # node names & IDs
|
|
||||||
__docker_q node ls --filter role=worker | awk "NR>1 {print $fields}"
|
|
||||||
}
|
|
||||||
|
|
||||||
__docker_complete_worker_nodes() {
|
|
||||||
COMPREPLY=( $(compgen -W "$(__docker_worker_nodes $1)" -- "$cur") )
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Returns a list of all services. Additional arguments to `docker service ls`
|
||||||
|
# may be specified in order to filter the service list, e.g.
|
||||||
|
# `__docker_services --filter name=xxx`
|
||||||
|
# By default, only node names are completed.
|
||||||
|
# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete service IDs.
|
||||||
|
# An optional first argument `--id|--name` may be used to limit
|
||||||
|
# the output to the IDs or names of matching services. This setting takes
|
||||||
|
# precedence over the environment setting.
|
||||||
__docker_services() {
|
__docker_services() {
|
||||||
local fields='$1,$2' # service names & IDs
|
local fields='$2' # default: service name only
|
||||||
__docker_q service ls | awk "NR>1 {print $fields}"
|
[ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name
|
||||||
|
|
||||||
|
if [ "$1" = "--id" ] ; then
|
||||||
|
fields='$1' # IDs only
|
||||||
|
shift
|
||||||
|
elif [ "$1" = "--name" ] ; then
|
||||||
|
fields='$2' # names only
|
||||||
|
shift
|
||||||
|
fi
|
||||||
|
__docker_q service ls "$@" | awk "NR>1 {print $fields}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Applies completion of services based on the current value of `$cur` or
|
||||||
|
# the value of the optional first argument `--cur`, if given.
|
||||||
|
# Additional filters may be appended, see `__docker_services`.
|
||||||
__docker_complete_services() {
|
__docker_complete_services() {
|
||||||
COMPREPLY=( $(compgen -W "$(__docker_services $1)" -- "$cur") )
|
local current=$cur
|
||||||
|
if [ "$1" = "--cur" ] ; then
|
||||||
|
current="$2"
|
||||||
|
shift 2
|
||||||
|
fi
|
||||||
|
COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") )
|
||||||
|
}
|
||||||
|
|
||||||
|
# Appends the word passed as an argument to every word in `$COMPREPLY`.
|
||||||
|
# Normally you do this with `compgen -S`. This function exists so that you can use
|
||||||
|
# the __docker_complete_XXX functions in cases where you need a suffix.
|
||||||
|
__docker_append_to_completions() {
|
||||||
|
COMPREPLY=( ${COMPREPLY[@]/%/"$1"} )
|
||||||
}
|
}
|
||||||
|
|
||||||
# Finds the position of the first word that is neither option nor an option's argument.
|
# Finds the position of the first word that is neither option nor an option's argument.
|
||||||
|
@ -1579,15 +1612,29 @@ _docker_service_list() {
|
||||||
}
|
}
|
||||||
|
|
||||||
_docker_service_ls() {
|
_docker_service_ls() {
|
||||||
|
local key=$(__docker_map_key_of_current_option '--filter|-f')
|
||||||
|
case "$key" in
|
||||||
|
id)
|
||||||
|
__docker_complete_services --cur "${cur##*=}" --id
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
name)
|
||||||
|
__docker_complete_services --cur "${cur##*=}" --name
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
case "$prev" in
|
case "$prev" in
|
||||||
--format|-f)
|
--filter|-f)
|
||||||
|
COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) )
|
||||||
|
__docker_nospace
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
case "$cur" in
|
case "$cur" in
|
||||||
-*)
|
-*)
|
||||||
COMPREPLY=( $( compgen -W "-f --filter --help --quiet -q" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
@ -1612,15 +1659,30 @@ _docker_service_scale() {
|
||||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
COMPREPLY=( $(compgen -S "=" -W "$(__docker_services $1)" -- "$cur") )
|
__docker_complete_services
|
||||||
|
__docker_append_to_completions "="
|
||||||
__docker_nospace
|
__docker_nospace
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
_docker_service_tasks() {
|
_docker_service_tasks() {
|
||||||
|
local key=$(__docker_map_key_of_current_option '--filter|-f')
|
||||||
|
case "$key" in
|
||||||
|
desired-state)
|
||||||
|
COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) )
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
name)
|
||||||
|
__docker_complete_services --cur "${cur##*=}" --name
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
case "$prev" in
|
case "$prev" in
|
||||||
--format|-f)
|
--filter|-f)
|
||||||
|
COMPREPLY=( $( compgen -W "desired-state id name" -S = -- "$cur" ) )
|
||||||
|
__docker_nospace
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
@ -1630,7 +1692,11 @@ _docker_service_tasks() {
|
||||||
COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve -n" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve -n" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
__docker_complete_services
|
local counter=$(__docker_pos_first_nonflag '--filter|-f')
|
||||||
|
if [ $cword -eq $counter ]; then
|
||||||
|
__docker_complete_services
|
||||||
|
fi
|
||||||
|
;;
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1701,7 +1767,7 @@ _docker_service_update() {
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
--restart-condition)
|
--restart-condition)
|
||||||
COMPREPLY=( $( compgen -W "any none on_failure" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) )
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
--user|-u)
|
--user|-u)
|
||||||
|
@ -1750,7 +1816,13 @@ _docker_swarm_init() {
|
||||||
COMPREPLY=( $( compgen -W "manager none worker" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "manager none worker" -- "$cur" ) )
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
--listen-addr|--secret)
|
--listen-addr)
|
||||||
|
if [[ $cur == *: ]] ; then
|
||||||
|
COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
|
||||||
|
fi
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
--secret)
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
@ -1778,7 +1850,13 @@ _docker_swarm_inspect() {
|
||||||
|
|
||||||
_docker_swarm_join() {
|
_docker_swarm_join() {
|
||||||
case "$prev" in
|
case "$prev" in
|
||||||
--ca-hash|--listen-addr|--secret)
|
--ca-hash|--secret)
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
--listen-addr)
|
||||||
|
if [[ $cur == *: ]] ; then
|
||||||
|
COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
|
||||||
|
fi
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
@ -1787,6 +1865,9 @@ _docker_swarm_join() {
|
||||||
-*)
|
-*)
|
||||||
COMPREPLY=( $( compgen -W "--ca-hash --help --listen-addr --manager --secret" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--ca-hash --help --listen-addr --manager --secret" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
|
*:)
|
||||||
|
COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
|
||||||
|
;;
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1845,7 +1926,7 @@ _docker_node_accept() {
|
||||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
__docker_complete_pending_nodes
|
__docker_complete_nodes --id --filter membership=pending
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1855,7 +1936,7 @@ _docker_node_demote() {
|
||||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
__docker_complete_manager_nodes
|
__docker_complete_nodes --filter role=manager
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1880,8 +1961,22 @@ _docker_node_list() {
|
||||||
}
|
}
|
||||||
|
|
||||||
_docker_node_ls() {
|
_docker_node_ls() {
|
||||||
|
local key=$(__docker_map_key_of_current_option '--filter|-f')
|
||||||
|
case "$key" in
|
||||||
|
id)
|
||||||
|
__docker_complete_nodes --cur "${cur##*=}" --id
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
name)
|
||||||
|
__docker_complete_nodes --cur "${cur##*=}" --name
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
case "$prev" in
|
case "$prev" in
|
||||||
--filter|-f)
|
--filter|-f)
|
||||||
|
COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) )
|
||||||
|
__docker_nospace
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
@ -1899,7 +1994,7 @@ _docker_node_promote() {
|
||||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
__docker_complete_worker_nodes
|
__docker_complete_nodes --filter role=worker
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1918,8 +2013,22 @@ _docker_node_rm() {
|
||||||
}
|
}
|
||||||
|
|
||||||
_docker_node_tasks() {
|
_docker_node_tasks() {
|
||||||
|
local key=$(__docker_map_key_of_current_option '--filter|-f')
|
||||||
|
case "$key" in
|
||||||
|
desired-state)
|
||||||
|
COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) )
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
name)
|
||||||
|
__docker_complete_services --cur "${cur##*=}" --name
|
||||||
|
return
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
case "$prev" in
|
case "$prev" in
|
||||||
--filter|-f)
|
--filter|-f)
|
||||||
|
COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) )
|
||||||
|
__docker_nospace
|
||||||
return
|
return
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
@ -1929,7 +2038,11 @@ _docker_node_tasks() {
|
||||||
COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve -n" -- "$cur" ) )
|
COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve -n" -- "$cur" ) )
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
__docker_complete_nodes_plus_self
|
local counter=$(__docker_pos_first_nonflag '--filter|-f')
|
||||||
|
if [ $cword -eq $counter ]; then
|
||||||
|
__docker_complete_nodes_plus_self
|
||||||
|
fi
|
||||||
|
;;
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -299,6 +299,17 @@ __docker_complete_pid() {
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
|
__docker_complete_runtimes() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
emulate -L zsh
|
||||||
|
setopt extendedglob
|
||||||
|
local -a runtimes_opts
|
||||||
|
runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}})
|
||||||
|
_describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0
|
||||||
|
}
|
||||||
|
|
||||||
__docker_complete_ps_filters() {
|
__docker_complete_ps_filters() {
|
||||||
[[ $PREFIX = -* ]] && return 1
|
[[ $PREFIX = -* ]] && return 1
|
||||||
integer ret=1
|
integer ret=1
|
||||||
|
@ -630,6 +641,602 @@ __docker_network_subcommand() {
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# BO node
|
||||||
|
|
||||||
|
__docker_node_complete_ls_filters() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
if compset -P '*='; then
|
||||||
|
case "${${words[-1]%=*}#*=}" in
|
||||||
|
(id)
|
||||||
|
__docker_complete_nodes_ids && ret=0
|
||||||
|
;;
|
||||||
|
(membership)
|
||||||
|
membership_opts=('accepted' 'pending' 'rejected')
|
||||||
|
_describe -t membership-opts "membership options" membership_opts && ret=0
|
||||||
|
;;
|
||||||
|
(name)
|
||||||
|
__docker_complete_nodes_names && ret=0
|
||||||
|
;;
|
||||||
|
(role)
|
||||||
|
role_opts=('manager' 'worker')
|
||||||
|
_describe -t role-opts "role options" role_opts && ret=0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_message 'value' && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
else
|
||||||
|
opts=('id' 'label' 'membership' 'name' 'role')
|
||||||
|
_describe -t filter-opts "filter options" opts -qS "=" && ret=0
|
||||||
|
fi
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_node_complete_tasks_filters() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
if compset -P '*='; then
|
||||||
|
case "${${words[-1]%=*}#*=}" in
|
||||||
|
(desired-state)
|
||||||
|
state_opts=('accepted' 'running')
|
||||||
|
_describe -t state-opts "desired state options" state_opts && ret=0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_message 'value' && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
else
|
||||||
|
opts=('desired-state' 'id' 'label' 'name')
|
||||||
|
_describe -t filter-opts "filter options" opts -qS "=" && ret=0
|
||||||
|
fi
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_nodes() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
local line s
|
||||||
|
declare -a lines nodes args
|
||||||
|
|
||||||
|
type=$1; shift
|
||||||
|
filter=$1; shift
|
||||||
|
[[ $filter != "none" ]] && args=("-f $filter")
|
||||||
|
|
||||||
|
lines=(${(f)"$(_call_program commands docker $docker_options node ls $args)"})
|
||||||
|
|
||||||
|
# Parse header line to find columns
|
||||||
|
local i=1 j=1 k header=${lines[1]}
|
||||||
|
declare -A begin end
|
||||||
|
while (( j < ${#header} - 1 )); do
|
||||||
|
i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
|
||||||
|
j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
|
||||||
|
k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
|
||||||
|
begin[${header[$i,$((j-1))]}]=$i
|
||||||
|
end[${header[$i,$((j-1))]}]=$k
|
||||||
|
done
|
||||||
|
end[${header[$i,$((j-1))]}]=-1
|
||||||
|
lines=(${lines[2,-1]})
|
||||||
|
|
||||||
|
# Node ID
|
||||||
|
if [[ $type = (ids|all) ]]; then
|
||||||
|
for line in $lines; do
|
||||||
|
s="${line[${begin[ID]},${end[ID]}]%% ##}"
|
||||||
|
nodes=($nodes $s)
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Names
|
||||||
|
if [[ $type = (names|all) ]]; then
|
||||||
|
for line in $lines; do
|
||||||
|
s="${line[${begin[NAME]},${end[NAME]}]%% ##}"
|
||||||
|
nodes=($nodes $s)
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
_describe -t nodes-list "nodes" nodes "$@" && ret=0
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_nodes() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_nodes all none "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_nodes_ids() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_nodes ids none "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_nodes_names() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_nodes names none "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_pending_nodes() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_nodes all "membership=pending" "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_manager_nodes() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_nodes all "role=manager" "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_worker_nodes() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_nodes all "role=worker" "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_node_commands() {
|
||||||
|
local -a _docker_node_subcommands
|
||||||
|
_docker_node_subcommands=(
|
||||||
|
"accept:Accept a node in the swarm"
|
||||||
|
"demote:Demote a node as manager in the swarm"
|
||||||
|
"inspect:Display detailed information on one or more nodes"
|
||||||
|
"ls:List nodes in the swarm"
|
||||||
|
"promote:Promote a node as manager in the swarm"
|
||||||
|
"rm:Remove a node from the swarm"
|
||||||
|
"tasks:List tasks running on a node"
|
||||||
|
"update:Update a node"
|
||||||
|
)
|
||||||
|
_describe -t docker-node-commands "docker node command" _docker_node_subcommands
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_node_subcommand() {
|
||||||
|
local -a _command_args opts_help
|
||||||
|
local expl help="--help"
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
opts_help=("(: -)--help[Print usage]")
|
||||||
|
|
||||||
|
case "$words[1]" in
|
||||||
|
(accept|rm|remove)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -)*:node:__docker_complete_pending_nodes" && ret=0
|
||||||
|
;;
|
||||||
|
(demote)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -)*:node:__docker_complete_manager_nodes" && ret=0
|
||||||
|
;;
|
||||||
|
(inspect)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
|
||||||
|
"($help -p --pretty)"{-p,--pretty}"[Print the information in a human friendly format]" \
|
||||||
|
"($help -)*:node:__docker_complete_nodes" && ret=0
|
||||||
|
;;
|
||||||
|
(ls|list)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \
|
||||||
|
"($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0
|
||||||
|
case $state in
|
||||||
|
(filter-options)
|
||||||
|
__docker_node_complete_ls_filters && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
(promote)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -)*:node:__docker_complete_worker_nodes" && ret=0
|
||||||
|
;;
|
||||||
|
(tasks)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -a --all)"{-a,--all}"[Display all instances]" \
|
||||||
|
"($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \
|
||||||
|
"($help -n --no-resolve)"{-n,--no-resolve}"[Do not map IDs to Names]" \
|
||||||
|
"($help -)1:node:__docker_complete_nodes" && ret=0
|
||||||
|
case $state in
|
||||||
|
(filter-options)
|
||||||
|
__docker_node_complete_tasks_filters && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
(update)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help)--availability=[Availability of the node]:availability:(active pause drain)" \
|
||||||
|
"($help)--membership=[Membership of the node]:membership:(accepted rejected)" \
|
||||||
|
"($help)--role=[Role of the node]:role:(manager worker)" \
|
||||||
|
"($help -)1:node:__docker_complete_nodes" && ret=0
|
||||||
|
;;
|
||||||
|
(help)
|
||||||
|
_arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
# EO node
|
||||||
|
|
||||||
|
# BO plugin
|
||||||
|
|
||||||
|
__docker_complete_plugins() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
local line s
|
||||||
|
declare -a lines plugins
|
||||||
|
|
||||||
|
lines=(${(f)"$(_call_program commands docker $docker_options plugin ls)"})
|
||||||
|
|
||||||
|
# Parse header line to find columns
|
||||||
|
local i=1 j=1 k header=${lines[1]}
|
||||||
|
declare -A begin end
|
||||||
|
while (( j < ${#header} - 1 )); do
|
||||||
|
i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
|
||||||
|
j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
|
||||||
|
k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
|
||||||
|
begin[${header[$i,$((j-1))]}]=$i
|
||||||
|
end[${header[$i,$((j-1))]}]=$k
|
||||||
|
done
|
||||||
|
end[${header[$i,$((j-1))]}]=-1
|
||||||
|
lines=(${lines[2,-1]})
|
||||||
|
|
||||||
|
# Name
|
||||||
|
for line in $lines; do
|
||||||
|
s="${line[${begin[NAME]},${end[NAME]}]%% ##}"
|
||||||
|
s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}"
|
||||||
|
plugins=($plugins $s)
|
||||||
|
done
|
||||||
|
|
||||||
|
_describe -t plugins-list "plugins" plugins "$@" && ret=0
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_plugin_commands() {
|
||||||
|
local -a _docker_plugin_subcommands
|
||||||
|
_docker_plugin_subcommands=(
|
||||||
|
"disable:Disable a plugin"
|
||||||
|
"enable:Enable a plugin"
|
||||||
|
"inspect:Return low-level information about a plugin"
|
||||||
|
"install:Install a plugin"
|
||||||
|
"ls:List plugins"
|
||||||
|
"push:Push a plugin"
|
||||||
|
"rm:Remove a plugin"
|
||||||
|
"set:Change settings for a plugin"
|
||||||
|
)
|
||||||
|
_describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_plugin_subcommand() {
|
||||||
|
local -a _command_args opts_help
|
||||||
|
local expl help="--help"
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
opts_help=("(: -)--help[Print usage]")
|
||||||
|
|
||||||
|
case "$words[1]" in
|
||||||
|
(disable|enable|inspect|install|ls|push|rm)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -)1:plugin:__docker_complete_plugins" && ret=0
|
||||||
|
;;
|
||||||
|
(set)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -)1:plugin:__docker_complete_plugins" \
|
||||||
|
"($help-)*:key=value: " && ret=0
|
||||||
|
;;
|
||||||
|
(help)
|
||||||
|
_arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
# EO plugin
|
||||||
|
|
||||||
|
# BO service
|
||||||
|
|
||||||
|
__docker_service_complete_ls_filters() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
if compset -P '*='; then
|
||||||
|
case "${${words[-1]%=*}#*=}" in
|
||||||
|
(id)
|
||||||
|
__docker_complete_services_ids && ret=0
|
||||||
|
;;
|
||||||
|
(name)
|
||||||
|
__docker_complete_services_names && ret=0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_message 'value' && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
else
|
||||||
|
opts=('id' 'label' 'name')
|
||||||
|
_describe -t filter-opts "filter options" opts -qS "=" && ret=0
|
||||||
|
fi
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_service_complete_tasks_filters() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
if compset -P '*='; then
|
||||||
|
case "${${words[-1]%=*}#*=}" in
|
||||||
|
(desired-state)
|
||||||
|
state_opts=('accepted' 'running')
|
||||||
|
_describe -t state-opts "desired state options" state_opts && ret=0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
_message 'value' && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
else
|
||||||
|
opts=('desired-state' 'id' 'label' 'name')
|
||||||
|
_describe -t filter-opts "filter options" opts -qS "=" && ret=0
|
||||||
|
fi
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_services() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
integer ret=1
|
||||||
|
local line s
|
||||||
|
declare -a lines services
|
||||||
|
|
||||||
|
type=$1; shift
|
||||||
|
|
||||||
|
lines=(${(f)"$(_call_program commands docker $docker_options service ls)"})
|
||||||
|
|
||||||
|
# Parse header line to find columns
|
||||||
|
local i=1 j=1 k header=${lines[1]}
|
||||||
|
declare -A begin end
|
||||||
|
while (( j < ${#header} - 1 )); do
|
||||||
|
i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
|
||||||
|
j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
|
||||||
|
k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
|
||||||
|
begin[${header[$i,$((j-1))]}]=$i
|
||||||
|
end[${header[$i,$((j-1))]}]=$k
|
||||||
|
done
|
||||||
|
end[${header[$i,$((j-1))]}]=-1
|
||||||
|
lines=(${lines[2,-1]})
|
||||||
|
|
||||||
|
# Service ID
|
||||||
|
if [[ $type = (ids|all) ]]; then
|
||||||
|
for line in $lines; do
|
||||||
|
s="${line[${begin[ID]},${end[ID]}]%% ##}"
|
||||||
|
s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}"
|
||||||
|
services=($services $s)
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Names
|
||||||
|
if [[ $type = (names|all) ]]; then
|
||||||
|
for line in $lines; do
|
||||||
|
s="${line[${begin[NAME]},${end[NAME]}]%% ##}"
|
||||||
|
s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}"
|
||||||
|
services=($services $s)
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
_describe -t services-list "services" services "$@" && ret=0
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_services() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_services all "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_services_ids() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_services ids "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_complete_services_names() {
|
||||||
|
[[ $PREFIX = -* ]] && return 1
|
||||||
|
__docker_services names "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_service_commands() {
|
||||||
|
local -a _docker_service_subcommands
|
||||||
|
_docker_service_subcommands=(
|
||||||
|
"create:Create a new service"
|
||||||
|
"inspect:Display detailed information on one or more services"
|
||||||
|
"ls:List services"
|
||||||
|
"rm:Remove a service"
|
||||||
|
"scale:Scale one or multiple services"
|
||||||
|
"tasks:List the tasks of a service"
|
||||||
|
"update:Update a service"
|
||||||
|
)
|
||||||
|
_describe -t docker-service-commands "docker service command" _docker_service_subcommands
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_service_subcommand() {
|
||||||
|
local -a _command_args opts_help opts_create_update
|
||||||
|
local expl help="--help"
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
opts_help=("(: -)--help[Print usage]")
|
||||||
|
opts_create_update=(
|
||||||
|
"($help)*--constraint=[Placement constraints]:constraint: "
|
||||||
|
"($help)--endpoint-mode=[Placement constraints]:mode:(VIP DNSRR)"
|
||||||
|
"($help)*"{-e=,--env=}"[Set environment variables]:env: "
|
||||||
|
"($help)*--label=[Service labels]:label: "
|
||||||
|
"($help)--limit-cpu=[Limit CPUs]:value: "
|
||||||
|
"($help)--limit-memory=[Limit Memory]:value: "
|
||||||
|
"($help)--mode=[Limit Memory]:mode:(global replicated)"
|
||||||
|
"($help)*"{-m=,--mount=}"[Attach a mount to the service]:mount: "
|
||||||
|
"($help)--name=[Service name]:name: "
|
||||||
|
"($help)*--network=[Network attachments]:network: "
|
||||||
|
"($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: "
|
||||||
|
"($help)--replicas=[Number of tasks]:replicas: "
|
||||||
|
"($help)--reserve-cpu=[Reserve CPUs]:value: "
|
||||||
|
"($help)--reserve-memory=[Reserve Memory]:value: "
|
||||||
|
"($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)"
|
||||||
|
"($help)--restart-delay=[Delay between restart attempts]:delay: "
|
||||||
|
"($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: "
|
||||||
|
"($help)--restart-window=[Window used to evaluate the restart policy]:window: "
|
||||||
|
"($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: "
|
||||||
|
"($help)--update-delay=[Delay between updates]:delay: "
|
||||||
|
"($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: "
|
||||||
|
"($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users"
|
||||||
|
"($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories"
|
||||||
|
)
|
||||||
|
|
||||||
|
case "$words[1]" in
|
||||||
|
(create)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
$opts_create_update \
|
||||||
|
"($help -): :__docker_images" \
|
||||||
|
"($help -):command: _command_names -e" \
|
||||||
|
"($help -)*::arguments: _normal" && ret=0
|
||||||
|
;;
|
||||||
|
(inspect)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
|
||||||
|
"($help -p --pretty)"{-p,--pretty}"[Print the information in a human friendly format]" \
|
||||||
|
"($help -)*:service:__docker_complete_services" && ret=0
|
||||||
|
;;
|
||||||
|
(ls|list)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:->filter-options" \
|
||||||
|
"($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0
|
||||||
|
case $state in
|
||||||
|
(filter-options)
|
||||||
|
__docker_service_complete_ls_filters && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
(rm|remove)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -)*:service:__docker_complete_services" && ret=0
|
||||||
|
;;
|
||||||
|
(scale)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -)*:service:->values" && ret=0
|
||||||
|
case $state in
|
||||||
|
(values)
|
||||||
|
if compset -P '*='; then
|
||||||
|
_message 'replicas' && ret=0
|
||||||
|
else
|
||||||
|
__docker_complete_services -qS "="
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
(tasks)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -a --all)"{-a,--all}"[Display all tasks]" \
|
||||||
|
"($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \
|
||||||
|
"($help -n --no-resolve)"{-n,--no-resolve}"[Do not map IDs to Names]" \
|
||||||
|
"($help -)1:service:__docker_complete_services" && ret=0
|
||||||
|
case $state in
|
||||||
|
(filter-options)
|
||||||
|
__docker_service_complete_tasks_filters && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
|
(update)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
$opts_create_update \
|
||||||
|
"($help)--arg=[Service command args]:arguments: _normal" \
|
||||||
|
"($help)--command=[Service command]:command: _command_names -e" \
|
||||||
|
"($help)--image=[Service image tag]:image:__docker_repositories" \
|
||||||
|
"($help -)1:service:__docker_complete_services" && ret=0
|
||||||
|
;;
|
||||||
|
(help)
|
||||||
|
_arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
# EO service
|
||||||
|
|
||||||
|
# BO swarm
|
||||||
|
|
||||||
|
__docker_swarm_commands() {
|
||||||
|
local -a _docker_swarm_subcommands
|
||||||
|
_docker_swarm_subcommands=(
|
||||||
|
"init:Initialize a Swarm"
|
||||||
|
"inspect:Inspect the Swarm"
|
||||||
|
"join:Join a Swarm as a node and/or manager"
|
||||||
|
"leave:Leave a Swarm"
|
||||||
|
"update:Update the Swarm"
|
||||||
|
)
|
||||||
|
_describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands
|
||||||
|
}
|
||||||
|
|
||||||
|
__docker_swarm_subcommand() {
|
||||||
|
local -a _command_args opts_help
|
||||||
|
local expl help="--help"
|
||||||
|
integer ret=1
|
||||||
|
|
||||||
|
opts_help=("(: -)--help[Print usage]")
|
||||||
|
|
||||||
|
case "$words[1]" in
|
||||||
|
(init)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help)--auto-accept=[Acceptance policy]:policy:(manager none worker)" \
|
||||||
|
"($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \
|
||||||
|
"($help)--force-new-cluster[Force create a new cluster from current state]" \
|
||||||
|
"($help)--listen-addr[Listen address]:ip\:port: " \
|
||||||
|
"($help)--secret[Set secret value needed to accept nodes into cluster]:secret: " && ret=0
|
||||||
|
;;
|
||||||
|
(inspect)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0
|
||||||
|
;;
|
||||||
|
(join)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help)--ca-hash=[Hash of the Root Certificate Authority certificate used for trusted join]:hash: " \
|
||||||
|
"($help)--listen-addr[Listen address]:ip\:port: " \
|
||||||
|
"($help)--manager[Try joining as a manager]" \
|
||||||
|
"($help)--secret[Secret for node acceptance]:secret: " \
|
||||||
|
"($help -):host\:port: " && ret=0
|
||||||
|
;;
|
||||||
|
(leave)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help && ret=0
|
||||||
|
;;
|
||||||
|
(update)
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help)--auto-accept=[Acceptance policy]:policy:(manager none worker)" \
|
||||||
|
"($help)--cert-expiry=[Validity period for node certificates]:duration: " \
|
||||||
|
"($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \
|
||||||
|
"($help)--secret[Set secret value needed to accept nodes into cluster]:secret: " \
|
||||||
|
"($help)--task-history-limit[Task history retention limit]:limit: " && ret=0
|
||||||
|
;;
|
||||||
|
(help)
|
||||||
|
_arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
# EO swarm
|
||||||
|
|
||||||
__docker_volume_complete_ls_filters() {
|
__docker_volume_complete_ls_filters() {
|
||||||
[[ $PREFIX = -* ]] && return 1
|
[[ $PREFIX = -* ]] && return 1
|
||||||
integer ret=1
|
integer ret=1
|
||||||
|
@ -933,6 +1540,7 @@ __docker_subcommand() {
|
||||||
(daemon)
|
(daemon)
|
||||||
_arguments $(__docker_arguments) \
|
_arguments $(__docker_arguments) \
|
||||||
$opts_help \
|
$opts_help \
|
||||||
|
"($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \
|
||||||
"($help)--api-cors-header=[CORS headers in the remote API]:CORS headers: " \
|
"($help)--api-cors-header=[CORS headers in the remote API]:CORS headers: " \
|
||||||
"($help)*--authorization-plugin=[Authorization plugins to load]" \
|
"($help)*--authorization-plugin=[Authorization plugins to load]" \
|
||||||
"($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \
|
"($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \
|
||||||
|
@ -1162,6 +1770,23 @@ __docker_subcommand() {
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
;;
|
;;
|
||||||
|
(node)
|
||||||
|
local curcontext="$curcontext" state
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -): :->command" \
|
||||||
|
"($help -)*:: :->option-or-argument" && ret=0
|
||||||
|
|
||||||
|
case $state in
|
||||||
|
(command)
|
||||||
|
__docker_node_commands && ret=0
|
||||||
|
;;
|
||||||
|
(option-or-argument)
|
||||||
|
curcontext=${curcontext%:*:*}:docker-${words[-1]}:
|
||||||
|
__docker_node_subcommand && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
(pause|unpause)
|
(pause|unpause)
|
||||||
_arguments $(__docker_arguments) \
|
_arguments $(__docker_arguments) \
|
||||||
$opts_help \
|
$opts_help \
|
||||||
|
@ -1251,6 +1876,7 @@ __docker_subcommand() {
|
||||||
"($help)--health-timeout=[Maximum time to allow one check to run]:time: " \
|
"($help)--health-timeout=[Maximum time to allow one check to run]:time: " \
|
||||||
"($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \
|
"($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \
|
||||||
"($help)--rm[Remove intermediate containers when it exits]" \
|
"($help)--rm[Remove intermediate containers when it exits]" \
|
||||||
|
"($help)--runtime=[Name of the runtime to be used for that container]:runtime:__docker_complete_runtimes" \
|
||||||
"($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \
|
"($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \
|
||||||
"($help)--stop-signal=[Signal to kill a container]:signal:_signals" \
|
"($help)--stop-signal=[Signal to kill a container]:signal:_signals" \
|
||||||
"($help)--storage-opt=[Set storage driver options per container]:storage options:->storage-opt" \
|
"($help)--storage-opt=[Set storage driver options per container]:storage options:->storage-opt" \
|
||||||
|
@ -1297,6 +1923,23 @@ __docker_subcommand() {
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
;;
|
;;
|
||||||
|
(service)
|
||||||
|
local curcontext="$curcontext" state
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -): :->command" \
|
||||||
|
"($help -)*:: :->option-or-argument" && ret=0
|
||||||
|
|
||||||
|
case $state in
|
||||||
|
(command)
|
||||||
|
__docker_service_commands && ret=0
|
||||||
|
;;
|
||||||
|
(option-or-argument)
|
||||||
|
curcontext=${curcontext%:*:*}:docker-${words[-1]}:
|
||||||
|
__docker_service_subcommand && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
(start)
|
(start)
|
||||||
_arguments $(__docker_arguments) \
|
_arguments $(__docker_arguments) \
|
||||||
$opts_help \
|
$opts_help \
|
||||||
|
@ -1312,6 +1955,23 @@ __docker_subcommand() {
|
||||||
"($help)--no-stream[Disable streaming stats and only pull the first result]" \
|
"($help)--no-stream[Disable streaming stats and only pull the first result]" \
|
||||||
"($help -)*:containers:__docker_runningcontainers" && ret=0
|
"($help -)*:containers:__docker_runningcontainers" && ret=0
|
||||||
;;
|
;;
|
||||||
|
(swarm)
|
||||||
|
local curcontext="$curcontext" state
|
||||||
|
_arguments $(__docker_arguments) \
|
||||||
|
$opts_help \
|
||||||
|
"($help -): :->command" \
|
||||||
|
"($help -)*:: :->option-or-argument" && ret=0
|
||||||
|
|
||||||
|
case $state in
|
||||||
|
(command)
|
||||||
|
__docker_swarm_commands && ret=0
|
||||||
|
;;
|
||||||
|
(option-or-argument)
|
||||||
|
curcontext=${curcontext%:*:*}:docker-${words[-1]}:
|
||||||
|
__docker_swarm_subcommand && ret=0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
;;
|
||||||
(tag)
|
(tag)
|
||||||
_arguments $(__docker_arguments) \
|
_arguments $(__docker_arguments) \
|
||||||
$opts_help \
|
$opts_help \
|
||||||
|
|
|
@ -11,8 +11,10 @@ Type=notify
|
||||||
# for containers run by docker
|
# for containers run by docker
|
||||||
ExecStart=/usr/bin/dockerd -H fd://
|
ExecStart=/usr/bin/dockerd -H fd://
|
||||||
ExecReload=/bin/kill -s HUP $MAINPID
|
ExecReload=/bin/kill -s HUP $MAINPID
|
||||||
LimitNOFILE=1048576
|
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
||||||
LimitNPROC=1048576
|
# in the kernel. We recommend using cgroups to do container-local accounting.
|
||||||
|
LimitNOFILE=infinity
|
||||||
|
LimitNPROC=infinity
|
||||||
LimitCORE=infinity
|
LimitCORE=infinity
|
||||||
# Uncomment TasksMax if your systemd version supports it.
|
# Uncomment TasksMax if your systemd version supports it.
|
||||||
# Only systemd 226 and above support this version.
|
# Only systemd 226 and above support this version.
|
||||||
|
|
|
@ -22,6 +22,7 @@ import (
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/runconfig"
|
"github.com/docker/docker/runconfig"
|
||||||
apitypes "github.com/docker/engine-api/types"
|
apitypes "github.com/docker/engine-api/types"
|
||||||
|
"github.com/docker/engine-api/types/filters"
|
||||||
types "github.com/docker/engine-api/types/swarm"
|
types "github.com/docker/engine-api/types/swarm"
|
||||||
swarmagent "github.com/docker/swarmkit/agent"
|
swarmagent "github.com/docker/swarmkit/agent"
|
||||||
swarmapi "github.com/docker/swarmkit/api"
|
swarmapi "github.com/docker/swarmkit/api"
|
||||||
|
@ -411,18 +412,18 @@ func (c *Cluster) Leave(force bool) error {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if active && reachable-2 <= unreachable {
|
if active && reachable-2 <= unreachable {
|
||||||
if reachable == 1 && unreachable == 0 {
|
if reachable == 1 && unreachable == 0 {
|
||||||
msg += "Leaving last manager will remove all current state of the cluster. Use `--force` to ignore this message. "
|
msg += "Removing the last manager will erase all current state of the cluster. Use `--force` to ignore this message. "
|
||||||
c.Unlock()
|
c.Unlock()
|
||||||
return fmt.Errorf(msg)
|
return fmt.Errorf(msg)
|
||||||
}
|
}
|
||||||
msg += fmt.Sprintf("Leaving cluster will leave you with %v managers out of %v. This means Raft quorum will be lost and your cluster will become inaccessible. ", reachable-1, reachable+unreachable)
|
msg += fmt.Sprintf("Leaving the cluster will leave you with %v managers out of %v. This means Raft quorum will be lost and your cluster will become inaccessible. ", reachable-1, reachable+unreachable)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
msg += "Doing so may lose the consensus of your cluster. "
|
msg += "Doing so may lose the consensus of your cluster. "
|
||||||
}
|
}
|
||||||
|
|
||||||
msg += "Only way to restore a cluster that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to ignore this message."
|
msg += "The only way to restore a cluster that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to ignore this message."
|
||||||
c.Unlock()
|
c.Unlock()
|
||||||
return fmt.Errorf(msg)
|
return fmt.Errorf(msg)
|
||||||
}
|
}
|
||||||
|
@ -653,7 +654,7 @@ func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Serv
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var services []types.Service
|
services := []types.Service{}
|
||||||
|
|
||||||
for _, service := range r.Services {
|
for _, service := range r.Services {
|
||||||
services = append(services, convert.ServiceFromGRPC(*service))
|
services = append(services, convert.ServiceFromGRPC(*service))
|
||||||
|
@ -724,6 +725,13 @@ func (c *Cluster) UpdateService(serviceID string, version uint64, spec types.Ser
|
||||||
return c.errNoManager()
|
return c.errNoManager()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx := c.getRequestContext()
|
||||||
|
|
||||||
|
err := populateNetworkID(ctx, c.client, &spec)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
serviceSpec, err := convert.ServiceSpecToGRPC(spec)
|
serviceSpec, err := convert.ServiceSpecToGRPC(spec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -884,7 +892,33 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro
|
||||||
return nil, c.errNoManager()
|
return nil, c.errNoManager()
|
||||||
}
|
}
|
||||||
|
|
||||||
filters, err := newListTasksFilters(options.Filter)
|
byName := func(filter filters.Args) error {
|
||||||
|
if filter.Include("service") {
|
||||||
|
serviceFilters := filter.Get("service")
|
||||||
|
for _, serviceFilter := range serviceFilters {
|
||||||
|
service, err := c.GetService(serviceFilter)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
filter.Del("service", serviceFilter)
|
||||||
|
filter.Add("service", service.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if filter.Include("node") {
|
||||||
|
nodeFilters := filter.Get("node")
|
||||||
|
for _, nodeFilter := range nodeFilters {
|
||||||
|
node, err := c.GetNode(nodeFilter)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
filter.Del("node", nodeFilter)
|
||||||
|
filter.Add("node", node.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
filters, err := newListTasksFilters(options.Filter, byName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -1031,7 +1065,7 @@ func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*s
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := len(rl.Networks); l > 1 {
|
if l := len(rl.Networks); l > 1 {
|
||||||
return nil, fmt.Errorf("network %s is ambigious (%d matches found)", input, l)
|
return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l)
|
||||||
}
|
}
|
||||||
|
|
||||||
return rl.Networks[0], nil
|
return rl.Networks[0], nil
|
||||||
|
|
|
@ -26,7 +26,7 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
|
||||||
Target: m.Target,
|
Target: m.Target,
|
||||||
Source: m.Source,
|
Source: m.Source,
|
||||||
Type: types.MountType(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])),
|
Type: types.MountType(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])),
|
||||||
Writable: m.Writable,
|
ReadOnly: m.ReadOnly,
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.BindOptions != nil {
|
if m.BindOptions != nil {
|
||||||
|
@ -37,8 +37,8 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
|
||||||
|
|
||||||
if m.VolumeOptions != nil {
|
if m.VolumeOptions != nil {
|
||||||
mount.VolumeOptions = &types.VolumeOptions{
|
mount.VolumeOptions = &types.VolumeOptions{
|
||||||
Populate: m.VolumeOptions.Populate,
|
NoCopy: m.VolumeOptions.NoCopy,
|
||||||
Labels: m.VolumeOptions.Labels,
|
Labels: m.VolumeOptions.Labels,
|
||||||
}
|
}
|
||||||
if m.VolumeOptions.DriverConfig != nil {
|
if m.VolumeOptions.DriverConfig != nil {
|
||||||
mount.VolumeOptions.DriverConfig = &types.Driver{
|
mount.VolumeOptions.DriverConfig = &types.Driver{
|
||||||
|
@ -77,7 +77,7 @@ func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
|
||||||
mount := swarmapi.Mount{
|
mount := swarmapi.Mount{
|
||||||
Target: m.Target,
|
Target: m.Target,
|
||||||
Source: m.Source,
|
Source: m.Source,
|
||||||
Writable: m.Writable,
|
ReadOnly: m.ReadOnly,
|
||||||
}
|
}
|
||||||
|
|
||||||
if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
|
if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
|
||||||
|
@ -98,8 +98,8 @@ func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
|
||||||
|
|
||||||
if m.VolumeOptions != nil {
|
if m.VolumeOptions != nil {
|
||||||
mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
|
mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
|
||||||
Populate: m.VolumeOptions.Populate,
|
NoCopy: m.VolumeOptions.NoCopy,
|
||||||
Labels: m.VolumeOptions.Labels,
|
Labels: m.VolumeOptions.Labels,
|
||||||
}
|
}
|
||||||
if m.VolumeOptions.DriverConfig != nil {
|
if m.VolumeOptions.DriverConfig != nil {
|
||||||
mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
|
mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
|
||||||
|
|
|
@ -219,7 +219,8 @@ func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error
|
||||||
var rp *swarmapi.RestartPolicy
|
var rp *swarmapi.RestartPolicy
|
||||||
if p != nil {
|
if p != nil {
|
||||||
rp = &swarmapi.RestartPolicy{}
|
rp = &swarmapi.RestartPolicy{}
|
||||||
if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[strings.ToUpper(string(p.Condition))]; ok {
|
sanatizedCondition := strings.ToUpper(strings.Replace(string(p.Condition), "-", "_", -1))
|
||||||
|
if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[sanatizedCondition]; ok {
|
||||||
rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition)
|
rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition)
|
||||||
} else if string(p.Condition) == "" {
|
} else if string(p.Condition) == "" {
|
||||||
rp.Condition = swarmapi.RestartOnAny
|
rp.Condition = swarmapi.RestartOnAny
|
||||||
|
|
|
@ -136,13 +136,13 @@ func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolic
|
||||||
hashPwd, _ = bcrypt.GenerateFromPassword([]byte(*p.Secret), 0)
|
hashPwd, _ = bcrypt.GenerateFromPassword([]byte(*p.Secret), 0)
|
||||||
hashs[*p.Secret] = hashPwd
|
hashs[*p.Secret] = hashPwd
|
||||||
}
|
}
|
||||||
policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
|
policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
|
||||||
Data: hashPwd,
|
Data: hashPwd,
|
||||||
Alg: "bcrypt",
|
Alg: "bcrypt",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if oldSecret := getOldSecret(oldSpec, policy.Role); oldSecret != nil { // else use the old one.
|
} else if oldSecret := getOldSecret(oldSpec, policy.Role); oldSecret != nil { // else use the old one.
|
||||||
policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
|
policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
|
||||||
Data: oldSecret.Data,
|
Data: oldSecret.Data,
|
||||||
Alg: oldSecret.Alg,
|
Alg: oldSecret.Alg,
|
||||||
}
|
}
|
||||||
|
@ -153,7 +153,7 @@ func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolic
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getOldSecret(oldSpec *swarmapi.ClusterSpec, role swarmapi.NodeRole) *swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret {
|
func getOldSecret(oldSpec *swarmapi.ClusterSpec, role swarmapi.NodeRole) *swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret {
|
||||||
if oldSpec == nil {
|
if oldSpec == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,10 +2,13 @@ package executor
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
clustertypes "github.com/docker/docker/daemon/cluster/provider"
|
clustertypes "github.com/docker/docker/daemon/cluster/provider"
|
||||||
"github.com/docker/engine-api/types"
|
"github.com/docker/engine-api/types"
|
||||||
"github.com/docker/engine-api/types/container"
|
"github.com/docker/engine-api/types/container"
|
||||||
|
"github.com/docker/engine-api/types/events"
|
||||||
|
"github.com/docker/engine-api/types/filters"
|
||||||
"github.com/docker/engine-api/types/network"
|
"github.com/docker/engine-api/types/network"
|
||||||
"github.com/docker/libnetwork/cluster"
|
"github.com/docker/libnetwork/cluster"
|
||||||
networktypes "github.com/docker/libnetwork/types"
|
networktypes "github.com/docker/libnetwork/types"
|
||||||
|
@ -18,8 +21,8 @@ type Backend interface {
|
||||||
DeleteManagedNetwork(name string) error
|
DeleteManagedNetwork(name string) error
|
||||||
SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error
|
SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error
|
||||||
PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
|
PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
|
||||||
CreateManagedContainer(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
|
CreateManagedContainer(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error)
|
||||||
ContainerStart(name string, hostConfig *container.HostConfig) error
|
ContainerStart(name string, hostConfig *container.HostConfig, validateHostname bool) error
|
||||||
ContainerStop(name string, seconds int) error
|
ContainerStop(name string, seconds int) error
|
||||||
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||||
UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error
|
UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error
|
||||||
|
@ -33,4 +36,6 @@ type Backend interface {
|
||||||
SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error
|
SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error
|
||||||
SetClusterProvider(provider cluster.Provider)
|
SetClusterProvider(provider cluster.Provider)
|
||||||
IsSwarmCompatible() error
|
IsSwarmCompatible() error
|
||||||
|
SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{})
|
||||||
|
UnsubscribeFromEvents(listener chan interface{})
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,10 +7,14 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/docker/api/server/httputils"
|
||||||
executorpkg "github.com/docker/docker/daemon/cluster/executor"
|
executorpkg "github.com/docker/docker/daemon/cluster/executor"
|
||||||
"github.com/docker/engine-api/types"
|
"github.com/docker/engine-api/types"
|
||||||
|
"github.com/docker/engine-api/types/events"
|
||||||
|
"github.com/docker/engine-api/types/versions"
|
||||||
"github.com/docker/libnetwork"
|
"github.com/docker/libnetwork"
|
||||||
"github.com/docker/swarmkit/api"
|
"github.com/docker/swarmkit/api"
|
||||||
"github.com/docker/swarmkit/log"
|
"github.com/docker/swarmkit/log"
|
||||||
|
@ -115,13 +119,16 @@ func (c *containerAdapter) removeNetworks(ctx context.Context) error {
|
||||||
func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backend) error {
|
func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backend) error {
|
||||||
var cr types.ContainerCreateResponse
|
var cr types.ContainerCreateResponse
|
||||||
var err error
|
var err error
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
|
||||||
|
|
||||||
if cr, err = backend.CreateManagedContainer(types.ContainerCreateConfig{
|
if cr, err = backend.CreateManagedContainer(types.ContainerCreateConfig{
|
||||||
Name: c.container.name(),
|
Name: c.container.name(),
|
||||||
Config: c.container.config(),
|
Config: c.container.config(),
|
||||||
HostConfig: c.container.hostConfig(),
|
HostConfig: c.container.hostConfig(),
|
||||||
// Use the first network in container create
|
// Use the first network in container create
|
||||||
NetworkingConfig: c.container.createNetworkingConfig(),
|
NetworkingConfig: c.container.createNetworkingConfig(),
|
||||||
}); err != nil {
|
}, validateHostname); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -145,7 +152,9 @@ func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backe
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *containerAdapter) start(ctx context.Context) error {
|
func (c *containerAdapter) start(ctx context.Context) error {
|
||||||
return c.backend.ContainerStart(c.container.name(), nil)
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
|
||||||
|
return c.backend.ContainerStart(c.container.name(), nil, validateHostname)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
|
func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
|
||||||
|
@ -161,9 +170,40 @@ func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, er
|
||||||
|
|
||||||
// events issues a call to the events API and returns a channel with all
|
// events issues a call to the events API and returns a channel with all
|
||||||
// events. The stream of events can be shutdown by cancelling the context.
|
// events. The stream of events can be shutdown by cancelling the context.
|
||||||
//
|
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
|
||||||
// A chan struct{} is returned that will be closed if the event processing
|
log.G(ctx).Debugf("waiting on events")
|
||||||
// fails and needs to be restarted.
|
buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
|
||||||
|
eventsq := make(chan events.Message, len(buffer))
|
||||||
|
|
||||||
|
for _, event := range buffer {
|
||||||
|
eventsq <- event
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer c.backend.UnsubscribeFromEvents(l)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case ev := <-l:
|
||||||
|
jev, ok := ev.(events.Message)
|
||||||
|
if !ok {
|
||||||
|
log.G(ctx).Warnf("unexpected event message: %q", ev)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case eventsq <- jev:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return eventsq
|
||||||
|
}
|
||||||
|
|
||||||
func (c *containerAdapter) wait(ctx context.Context) error {
|
func (c *containerAdapter) wait(ctx context.Context) error {
|
||||||
return c.backend.ContainerWaitWithContext(ctx, c.container.name())
|
return c.backend.ContainerWaitWithContext(ctx, c.container.name())
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,6 +13,8 @@ import (
|
||||||
"github.com/docker/docker/reference"
|
"github.com/docker/docker/reference"
|
||||||
"github.com/docker/engine-api/types"
|
"github.com/docker/engine-api/types"
|
||||||
enginecontainer "github.com/docker/engine-api/types/container"
|
enginecontainer "github.com/docker/engine-api/types/container"
|
||||||
|
"github.com/docker/engine-api/types/events"
|
||||||
|
"github.com/docker/engine-api/types/filters"
|
||||||
"github.com/docker/engine-api/types/network"
|
"github.com/docker/engine-api/types/network"
|
||||||
"github.com/docker/swarmkit/agent/exec"
|
"github.com/docker/swarmkit/agent/exec"
|
||||||
"github.com/docker/swarmkit/api"
|
"github.com/docker/swarmkit/api"
|
||||||
|
@ -88,21 +90,6 @@ func (c *containerConfig) image() string {
|
||||||
return reference.WithDefaultTag(ref).String()
|
return reference.WithDefaultTag(ref).String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *containerConfig) volumes() map[string]struct{} {
|
|
||||||
r := make(map[string]struct{})
|
|
||||||
|
|
||||||
for _, mount := range c.spec().Mounts {
|
|
||||||
// pick off all the volume mounts.
|
|
||||||
if mount.Type != api.MountTypeVolume {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
r[fmt.Sprintf("%s:%s", mount.Target, getMountMask(&mount))] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *containerConfig) config() *enginecontainer.Config {
|
func (c *containerConfig) config() *enginecontainer.Config {
|
||||||
config := &enginecontainer.Config{
|
config := &enginecontainer.Config{
|
||||||
Labels: c.labels(),
|
Labels: c.labels(),
|
||||||
|
@ -160,26 +147,67 @@ func (c *containerConfig) labels() map[string]string {
|
||||||
return labels
|
return labels
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *containerConfig) bindMounts() []string {
|
// volumes gets placed into the Volumes field on the containerConfig.
|
||||||
var r []string
|
func (c *containerConfig) volumes() map[string]struct{} {
|
||||||
|
r := make(map[string]struct{})
|
||||||
for _, val := range c.spec().Mounts {
|
// Volumes *only* creates anonymous volumes. The rest is mixed in with
|
||||||
mask := getMountMask(&val)
|
// binds, which aren't actually binds. Basically, any volume that
|
||||||
if val.Type == api.MountTypeBind {
|
// results in a single component must be added here.
|
||||||
r = append(r, fmt.Sprintf("%s:%s:%s", val.Source, val.Target, mask))
|
//
|
||||||
|
// This is reversed engineered from the behavior of the engine API.
|
||||||
|
for _, mount := range c.spec().Mounts {
|
||||||
|
if mount.Type == api.MountTypeVolume && mount.Source == "" {
|
||||||
|
r[mount.Target] = struct{}{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *containerConfig) tmpfs() map[string]string {
|
||||||
|
r := make(map[string]string)
|
||||||
|
|
||||||
|
for _, spec := range c.spec().Mounts {
|
||||||
|
if spec.Type != api.MountTypeTmpfs {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
r[spec.Target] = getMountMask(&spec)
|
||||||
|
}
|
||||||
|
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *containerConfig) binds() []string {
|
||||||
|
var r []string
|
||||||
|
for _, mount := range c.spec().Mounts {
|
||||||
|
if mount.Type == api.MountTypeBind || (mount.Type == api.MountTypeVolume && mount.Source != "") {
|
||||||
|
spec := fmt.Sprintf("%s:%s", mount.Source, mount.Target)
|
||||||
|
mask := getMountMask(&mount)
|
||||||
|
if mask != "" {
|
||||||
|
spec = fmt.Sprintf("%s:%s", spec, mask)
|
||||||
|
}
|
||||||
|
r = append(r, spec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
func getMountMask(m *api.Mount) string {
|
func getMountMask(m *api.Mount) string {
|
||||||
maskOpts := []string{"ro"}
|
var maskOpts []string
|
||||||
if m.Writable {
|
if m.ReadOnly {
|
||||||
maskOpts[0] = "rw"
|
maskOpts = append(maskOpts, "ro")
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.BindOptions != nil {
|
switch m.Type {
|
||||||
|
case api.MountTypeVolume:
|
||||||
|
if m.VolumeOptions != nil && m.VolumeOptions.NoCopy {
|
||||||
|
maskOpts = append(maskOpts, "nocopy")
|
||||||
|
}
|
||||||
|
case api.MountTypeBind:
|
||||||
|
if m.BindOptions == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
switch m.BindOptions.Propagation {
|
switch m.BindOptions.Propagation {
|
||||||
case api.MountPropagationPrivate:
|
case api.MountPropagationPrivate:
|
||||||
maskOpts = append(maskOpts, "private")
|
maskOpts = append(maskOpts, "private")
|
||||||
|
@ -194,21 +222,66 @@ func getMountMask(m *api.Mount) string {
|
||||||
case api.MountPropagationRSlave:
|
case api.MountPropagationRSlave:
|
||||||
maskOpts = append(maskOpts, "rslave")
|
maskOpts = append(maskOpts, "rslave")
|
||||||
}
|
}
|
||||||
}
|
case api.MountTypeTmpfs:
|
||||||
|
if m.TmpfsOptions == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
if m.VolumeOptions != nil {
|
if m.TmpfsOptions.Mode != 0 {
|
||||||
if !m.VolumeOptions.Populate {
|
maskOpts = append(maskOpts, fmt.Sprintf("mode=%o", m.TmpfsOptions.Mode))
|
||||||
maskOpts = append(maskOpts, "nocopy")
|
}
|
||||||
|
|
||||||
|
if m.TmpfsOptions.SizeBytes != 0 {
|
||||||
|
// calculate suffix here, making this linux specific, but that is
|
||||||
|
// okay, since API is that way anyways.
|
||||||
|
|
||||||
|
// we do this by finding the suffix that divides evenly into the
|
||||||
|
// value, returing the value itself, with no suffix, if it fails.
|
||||||
|
//
|
||||||
|
// For the most part, we don't enforce any semantic to this values.
|
||||||
|
// The operating system will usually align this and enforce minimum
|
||||||
|
// and maximums.
|
||||||
|
var (
|
||||||
|
size = m.TmpfsOptions.SizeBytes
|
||||||
|
suffix string
|
||||||
|
)
|
||||||
|
for _, r := range []struct {
|
||||||
|
suffix string
|
||||||
|
divisor int64
|
||||||
|
}{
|
||||||
|
{"g", 1 << 30},
|
||||||
|
{"m", 1 << 20},
|
||||||
|
{"k", 1 << 10},
|
||||||
|
} {
|
||||||
|
if size%r.divisor == 0 {
|
||||||
|
size = size / r.divisor
|
||||||
|
suffix = r.suffix
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
maskOpts = append(maskOpts, fmt.Sprintf("size=%d%s", size, suffix))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return strings.Join(maskOpts, ",")
|
return strings.Join(maskOpts, ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
|
func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
|
||||||
return &enginecontainer.HostConfig{
|
hc := &enginecontainer.HostConfig{
|
||||||
Resources: c.resources(),
|
Resources: c.resources(),
|
||||||
Binds: c.bindMounts(),
|
Binds: c.binds(),
|
||||||
|
Tmpfs: c.tmpfs(),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.task.LogDriver != nil {
|
||||||
|
hc.LogConfig = enginecontainer.LogConfig{
|
||||||
|
Type: c.task.LogDriver.Name,
|
||||||
|
Config: c.task.LogDriver.Options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return hc
|
||||||
}
|
}
|
||||||
|
|
||||||
// This handles the case of volumes that are defined inside a service Mount
|
// This handles the case of volumes that are defined inside a service Mount
|
||||||
|
@ -421,3 +494,11 @@ func (c *containerConfig) networkCreateRequest(name string) (clustertypes.Networ
|
||||||
|
|
||||||
return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
|
return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c containerConfig) eventFilter() filters.Args {
|
||||||
|
filter := filters.NewArgs()
|
||||||
|
filter.Add("type", events.ContainerEventType)
|
||||||
|
filter.Add("name", c.name())
|
||||||
|
filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID))
|
||||||
|
return filter
|
||||||
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
|
|
||||||
executorpkg "github.com/docker/docker/daemon/cluster/executor"
|
executorpkg "github.com/docker/docker/daemon/cluster/executor"
|
||||||
"github.com/docker/engine-api/types"
|
"github.com/docker/engine-api/types"
|
||||||
|
"github.com/docker/engine-api/types/events"
|
||||||
"github.com/docker/swarmkit/agent/exec"
|
"github.com/docker/swarmkit/agent/exec"
|
||||||
"github.com/docker/swarmkit/api"
|
"github.com/docker/swarmkit/api"
|
||||||
"github.com/docker/swarmkit/log"
|
"github.com/docker/swarmkit/log"
|
||||||
|
@ -150,20 +151,39 @@ func (r *controller) Wait(pctx context.Context) error {
|
||||||
ctx, cancel := context.WithCancel(pctx)
|
ctx, cancel := context.WithCancel(pctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
healthErr := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
ectx, cancel := context.WithCancel(ctx) // cancel event context on first event
|
||||||
|
defer cancel()
|
||||||
|
if err := r.checkHealth(ectx); err == ErrContainerUnhealthy {
|
||||||
|
healthErr <- ErrContainerUnhealthy
|
||||||
|
if err := r.Shutdown(ectx); err != nil {
|
||||||
|
log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
err := r.adapter.wait(ctx)
|
err := r.adapter.wait(ctx)
|
||||||
if ctx.Err() != nil {
|
if ctx.Err() != nil {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ee := &exitError{}
|
ee := &exitError{}
|
||||||
if err.Error() != "" {
|
|
||||||
ee.cause = err
|
|
||||||
}
|
|
||||||
if ec, ok := err.(exec.ExitCoder); ok {
|
if ec, ok := err.(exec.ExitCoder); ok {
|
||||||
ee.code = ec.ExitCode()
|
ee.code = ec.ExitCode()
|
||||||
}
|
}
|
||||||
|
select {
|
||||||
|
case e := <-healthErr:
|
||||||
|
ee.cause = e
|
||||||
|
default:
|
||||||
|
if err.Error() != "" {
|
||||||
|
ee.cause = err
|
||||||
|
}
|
||||||
|
}
|
||||||
return ee
|
return ee
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -247,6 +267,21 @@ func (r *controller) Close() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *controller) matchevent(event events.Message) bool {
|
||||||
|
if event.Type != events.ContainerEventType {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(stevvooe): Filter based on ID matching, in addition to name.
|
||||||
|
|
||||||
|
// Make sure the events are for this container.
|
||||||
|
if event.Actor.Attributes["name"] != r.adapter.container.name() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func (r *controller) checkClosed() error {
|
func (r *controller) checkClosed() error {
|
||||||
select {
|
select {
|
||||||
case <-r.closed:
|
case <-r.closed:
|
||||||
|
@ -286,3 +321,26 @@ func (e *exitError) ExitCode() int {
|
||||||
func (e *exitError) Cause() error {
|
func (e *exitError) Cause() error {
|
||||||
return e.cause
|
return e.cause
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// checkHealth blocks until unhealthy container is detected or ctx exits
|
||||||
|
func (r *controller) checkHealth(ctx context.Context) error {
|
||||||
|
eventq := r.adapter.events(ctx)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil
|
||||||
|
case <-r.closed:
|
||||||
|
return nil
|
||||||
|
case event := <-eventq:
|
||||||
|
if !r.matchevent(event) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch event.Action {
|
||||||
|
case "health_status: unhealthy":
|
||||||
|
return ErrContainerUnhealthy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -9,4 +9,7 @@ var (
|
||||||
// ErrContainerDestroyed returned when a container is prematurely destroyed
|
// ErrContainerDestroyed returned when a container is prematurely destroyed
|
||||||
// during a wait call.
|
// during a wait call.
|
||||||
ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed")
|
ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed")
|
||||||
|
|
||||||
|
// ErrContainerUnhealthy returned if controller detects the health check failure
|
||||||
|
ErrContainerUnhealthy = fmt.Errorf("dockerexec: unhealthy container")
|
||||||
)
|
)
|
||||||
|
|
102
daemon/cluster/executor/container/health_test.go
Normal file
102
daemon/cluster/executor/container/health_test.go
Normal file
|
@ -0,0 +1,102 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/docker/container"
|
||||||
|
"github.com/docker/docker/daemon"
|
||||||
|
"github.com/docker/docker/daemon/events"
|
||||||
|
containertypes "github.com/docker/engine-api/types/container"
|
||||||
|
"github.com/docker/swarmkit/api"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHealthStates(t *testing.T) {
|
||||||
|
|
||||||
|
// set up environment: events, task, container ....
|
||||||
|
e := events.New()
|
||||||
|
_, l, _ := e.Subscribe()
|
||||||
|
defer e.Evict(l)
|
||||||
|
|
||||||
|
task := &api.Task{
|
||||||
|
ID: "id",
|
||||||
|
ServiceID: "sid",
|
||||||
|
Spec: api.TaskSpec{
|
||||||
|
Runtime: &api.TaskSpec_Container{
|
||||||
|
Container: &api.ContainerSpec{
|
||||||
|
Image: "image_name",
|
||||||
|
Labels: map[string]string{
|
||||||
|
"com.docker.swarm.task.id": "id",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Annotations: api.Annotations{Name: "name"},
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &container.Container{
|
||||||
|
CommonContainer: container.CommonContainer{
|
||||||
|
ID: "id",
|
||||||
|
Name: "name",
|
||||||
|
Config: &containertypes.Config{
|
||||||
|
Image: "image_name",
|
||||||
|
Labels: map[string]string{
|
||||||
|
"com.docker.swarm.task.id": "id",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
daemon := &daemon.Daemon{
|
||||||
|
EventsService: e,
|
||||||
|
}
|
||||||
|
|
||||||
|
controller, err := newController(daemon, task)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("create controller fail %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
errChan := make(chan error, 1)
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// fire checkHealth
|
||||||
|
go func() {
|
||||||
|
err := controller.checkHealth(ctx)
|
||||||
|
select {
|
||||||
|
case errChan <- err:
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// send an event and expect to get expectedErr
|
||||||
|
// if expectedErr is nil, shouldn't get any error
|
||||||
|
logAndExpect := func(msg string, expectedErr error) {
|
||||||
|
daemon.LogContainerEvent(c, msg)
|
||||||
|
|
||||||
|
timer := time.NewTimer(1 * time.Second)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case err := <-errChan:
|
||||||
|
if err != expectedErr {
|
||||||
|
t.Fatalf("expect error %v, but get %v", expectedErr, err)
|
||||||
|
}
|
||||||
|
case <-timer.C:
|
||||||
|
if expectedErr != nil {
|
||||||
|
t.Fatalf("time limit exceeded, didn't get expected error")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// events that are ignored by checkHealth
|
||||||
|
logAndExpect("health_status: running", nil)
|
||||||
|
logAndExpect("health_status: healthy", nil)
|
||||||
|
logAndExpect("die", nil)
|
||||||
|
|
||||||
|
// unhealthy event will be caught by checkHealth
|
||||||
|
logAndExpect("health_status: unhealthy", ErrContainerUnhealthy)
|
||||||
|
}
|
|
@ -61,18 +61,23 @@ func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newListTasksFilters(filter filters.Args) (*swarmapi.ListTasksRequest_Filters, error) {
|
func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) {
|
||||||
accepted := map[string]bool{
|
accepted := map[string]bool{
|
||||||
"name": true,
|
"name": true,
|
||||||
"id": true,
|
"id": true,
|
||||||
"label": true,
|
"label": true,
|
||||||
"service": true,
|
"service": true,
|
||||||
"node": true,
|
"node": true,
|
||||||
"desired_state": true,
|
"desired-state": true,
|
||||||
}
|
}
|
||||||
if err := filter.Validate(accepted); err != nil {
|
if err := filter.Validate(accepted); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if transformFunc != nil {
|
||||||
|
if err := transformFunc(filter); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
f := &swarmapi.ListTasksRequest_Filters{
|
f := &swarmapi.ListTasksRequest_Filters{
|
||||||
Names: filter.Get("name"),
|
Names: filter.Get("name"),
|
||||||
IDPrefixes: filter.Get("id"),
|
IDPrefixes: filter.Get("id"),
|
||||||
|
@ -81,11 +86,11 @@ func newListTasksFilters(filter filters.Args) (*swarmapi.ListTasksRequest_Filter
|
||||||
NodeIDs: filter.Get("node"),
|
NodeIDs: filter.Get("node"),
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, s := range filter.Get("desired_state") {
|
for _, s := range filter.Get("desired-state") {
|
||||||
if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok {
|
if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok {
|
||||||
f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state))
|
f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state))
|
||||||
} else if s != "" {
|
} else if s != "" {
|
||||||
return nil, fmt.Errorf("Invalid desired_state filter: '%s'", s)
|
return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -42,7 +42,7 @@ func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swar
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := len(rl.Nodes); l > 1 {
|
if l := len(rl.Nodes); l > 1 {
|
||||||
return nil, fmt.Errorf("node %s is ambigious (%d matches found)", input, l)
|
return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l)
|
||||||
}
|
}
|
||||||
|
|
||||||
return rl.Nodes[0], nil
|
return rl.Nodes[0], nil
|
||||||
|
@ -70,7 +70,7 @@ func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*s
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := len(rl.Services); l > 1 {
|
if l := len(rl.Services); l > 1 {
|
||||||
return nil, fmt.Errorf("service %s is ambigious (%d matches found)", input, l)
|
return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l)
|
||||||
}
|
}
|
||||||
|
|
||||||
return rl.Services[0], nil
|
return rl.Services[0], nil
|
||||||
|
@ -99,7 +99,7 @@ func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swar
|
||||||
}
|
}
|
||||||
|
|
||||||
if l := len(rl.Tasks); l > 1 {
|
if l := len(rl.Tasks); l > 1 {
|
||||||
return nil, fmt.Errorf("task %s is ambigious (%d matches found)", input, l)
|
return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l)
|
||||||
}
|
}
|
||||||
|
|
||||||
return rl.Tasks[0], nil
|
return rl.Tasks[0], nil
|
||||||
|
|
|
@ -271,7 +271,7 @@ func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Conf
|
||||||
}
|
}
|
||||||
|
|
||||||
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
|
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
|
||||||
// are not overriden by default truthy values from the flags that were not explicitly set.
|
// are not overridden by default truthy values from the flags that were not explicitly set.
|
||||||
// See https://github.com/docker/docker/issues/20289 for an example.
|
// See https://github.com/docker/docker/issues/20289 for an example.
|
||||||
//
|
//
|
||||||
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
|
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
|
||||||
|
|
|
@ -203,7 +203,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
|
||||||
|
|
||||||
// verifyContainerSettings performs validation of the hostconfig and config
|
// verifyContainerSettings performs validation of the hostconfig and config
|
||||||
// structures.
|
// structures.
|
||||||
func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
|
func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool, validateHostname bool) ([]string, error) {
|
||||||
|
|
||||||
// First perform verification of settings common across all platforms.
|
// First perform verification of settings common across all platforms.
|
||||||
if config != nil {
|
if config != nil {
|
||||||
|
@ -222,10 +222,10 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant.
|
// Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant.
|
||||||
if len(config.Hostname) > 0 {
|
if validateHostname && len(config.Hostname) > 0 {
|
||||||
// RFC1123 specifies that 63 bytes is the maximium length
|
// RFC1123 specifies that 63 bytes is the maximium length
|
||||||
// Windows has the limitation of 63 bytes in length
|
// Windows has the limitation of 63 bytes in length
|
||||||
// Linux hostname is limited to HOST_NAME_MAX=64, not not including the terminating null byte.
|
// Linux hostname is limited to HOST_NAME_MAX=64, not including the terminating null byte.
|
||||||
// We limit the length to 63 bytes here to match RFC1035 and RFC1123.
|
// We limit the length to 63 bytes here to match RFC1035 and RFC1123.
|
||||||
matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", config.Hostname)
|
matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", config.Hostname)
|
||||||
if len(config.Hostname) > 63 || !matched {
|
if len(config.Hostname) > 63 || !matched {
|
||||||
|
|
|
@ -328,7 +328,7 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func errClusterNetworkOnRun(n string) error {
|
func errClusterNetworkOnRun(n string) error {
|
||||||
return fmt.Errorf("swarm-scoped network (%s) is not compatible with `docker create` or `docker run`. This network can be only used docker service", n)
|
return fmt.Errorf("swarm-scoped network (%s) is not compatible with `docker create` or `docker run`. This network can only be used by a docker service", n)
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateContainerNetworkSettings update the network settings
|
// updateContainerNetworkSettings update the network settings
|
||||||
|
|
|
@ -20,21 +20,21 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// CreateManagedContainer creates a container that is managed by a Service
|
// CreateManagedContainer creates a container that is managed by a Service
|
||||||
func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
|
func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) {
|
||||||
return daemon.containerCreate(params, true)
|
return daemon.containerCreate(params, true, validateHostname)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContainerCreate creates a regular container
|
// ContainerCreate creates a regular container
|
||||||
func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
|
func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) {
|
||||||
return daemon.containerCreate(params, false)
|
return daemon.containerCreate(params, false, validateHostname)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (types.ContainerCreateResponse, error) {
|
func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool, validateHostname bool) (types.ContainerCreateResponse, error) {
|
||||||
if params.Config == nil {
|
if params.Config == nil {
|
||||||
return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container")
|
return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container")
|
||||||
}
|
}
|
||||||
|
|
||||||
warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false)
|
warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false, validateHostname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.ContainerCreateResponse{Warnings: warnings}, err
|
return types.ContainerCreateResponse{Warnings: warnings}, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -324,7 +324,7 @@ func (daemon *Daemon) waitForNetworks(c *container.Container) {
|
||||||
}
|
}
|
||||||
// Make sure if the container has a network that requires discovery that the discovery service is available before starting
|
// Make sure if the container has a network that requires discovery that the discovery service is available before starting
|
||||||
for netName := range c.NetworkSettings.Networks {
|
for netName := range c.NetworkSettings.Networks {
|
||||||
// If we get `ErrNoSuchNetwork` here, it can assumed that it is due to discovery not being ready
|
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
|
||||||
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
|
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
|
||||||
if _, err := daemon.netController.NetworkByName(netName); err != nil {
|
if _, err := daemon.netController.NetworkByName(netName); err != nil {
|
||||||
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
|
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
|
||||||
|
@ -365,7 +365,7 @@ func (daemon *Daemon) registerLink(parent, child *container.Container, alias str
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetClusterProvider sets a component for quering the current cluster state.
|
// SetClusterProvider sets a component for querying the current cluster state.
|
||||||
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
|
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
|
||||||
daemon.clusterProvider = clusterProvider
|
daemon.clusterProvider = clusterProvider
|
||||||
daemon.netController.SetClusterProvider(clusterProvider)
|
daemon.netController.SetClusterProvider(clusterProvider)
|
||||||
|
@ -611,10 +611,10 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
|
||||||
// To terminate a process in freezer cgroup, we should send
|
// To terminate a process in freezer cgroup, we should send
|
||||||
// SIGTERM to this process then unfreeze it, and the process will
|
// SIGTERM to this process then unfreeze it, and the process will
|
||||||
// force to terminate immediately.
|
// force to terminate immediately.
|
||||||
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpause it", c.ID)
|
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
|
||||||
sig, ok := signal.SignalMap["TERM"]
|
sig, ok := signal.SignalMap["TERM"]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("System doesn not support SIGTERM")
|
return fmt.Errorf("System does not support SIGTERM")
|
||||||
}
|
}
|
||||||
if err := daemon.kill(c, int(sig)); err != nil {
|
if err := daemon.kill(c, int(sig)); err != nil {
|
||||||
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
|
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
|
||||||
|
@ -623,7 +623,7 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
|
||||||
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
|
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
|
||||||
}
|
}
|
||||||
if _, err := c.WaitStop(10 * time.Second); err != nil {
|
if _, err := c.WaitStop(10 * time.Second); err != nil {
|
||||||
logrus.Debugf("container %s failed to exit in 10 second of SIGTERM, sending SIGKILL to force", c.ID)
|
logrus.Debugf("container %s failed to exit in 10 seconds of SIGTERM, sending SIGKILL to force", c.ID)
|
||||||
sig, ok := signal.SignalMap["KILL"]
|
sig, ok := signal.SignalMap["KILL"]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("System does not support SIGKILL")
|
return fmt.Errorf("System does not support SIGKILL")
|
||||||
|
@ -637,7 +637,7 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
|
||||||
}
|
}
|
||||||
// If container failed to exit in 10 seconds of SIGTERM, then using the force
|
// If container failed to exit in 10 seconds of SIGTERM, then using the force
|
||||||
if err := daemon.containerStop(c, 10); err != nil {
|
if err := daemon.containerStop(c, 10); err != nil {
|
||||||
return fmt.Errorf("Stop container %s with error: %v", c.ID, err)
|
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.WaitStop(-1 * time.Second)
|
c.WaitStop(-1 * time.Second)
|
||||||
|
@ -650,8 +650,12 @@ func (daemon *Daemon) Shutdown() error {
|
||||||
// Keep mounts and networking running on daemon shutdown if
|
// Keep mounts and networking running on daemon shutdown if
|
||||||
// we are to keep containers running and restore them.
|
// we are to keep containers running and restore them.
|
||||||
if daemon.configStore.LiveRestore {
|
if daemon.configStore.LiveRestore {
|
||||||
return nil
|
// check if there are any running containers, if none we should do some cleanup
|
||||||
|
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if daemon.containers != nil {
|
if daemon.containers != nil {
|
||||||
logrus.Debug("starting clean shutdown of all containers...")
|
logrus.Debug("starting clean shutdown of all containers...")
|
||||||
daemon.containers.ApplyAll(func(c *container.Container) {
|
daemon.containers.ApplyAll(func(c *container.Container) {
|
||||||
|
@ -681,6 +685,8 @@ func (daemon *Daemon) Shutdown() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pluginShutdown()
|
||||||
|
|
||||||
if err := daemon.cleanupMounts(); err != nil {
|
if err := daemon.cleanupMounts(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,8 +2,15 @@
|
||||||
|
|
||||||
package daemon
|
package daemon
|
||||||
|
|
||||||
import "github.com/docker/engine-api/types/container"
|
import (
|
||||||
|
"github.com/docker/docker/plugin"
|
||||||
|
"github.com/docker/engine-api/types/container"
|
||||||
|
)
|
||||||
|
|
||||||
func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
|
func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func pluginShutdown() {
|
||||||
|
plugin.GetManager().Shutdown()
|
||||||
|
}
|
||||||
|
|
|
@ -7,3 +7,6 @@ import "github.com/docker/engine-api/types/container"
|
||||||
func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
|
func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func pluginShutdown() {
|
||||||
|
}
|
||||||
|
|
|
@ -579,7 +579,11 @@ func verifyDaemonSettings(config *Config) error {
|
||||||
if config.Runtimes == nil {
|
if config.Runtimes == nil {
|
||||||
config.Runtimes = make(map[string]types.Runtime)
|
config.Runtimes = make(map[string]types.Runtime)
|
||||||
}
|
}
|
||||||
config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary}
|
stockRuntimeOpts := []string{}
|
||||||
|
if UsingSystemd(config) {
|
||||||
|
stockRuntimeOpts = append(stockRuntimeOpts, "--systemd-cgroup=true")
|
||||||
|
}
|
||||||
|
config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -10,6 +10,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path"
|
"path"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
|
@ -21,6 +22,7 @@ import (
|
||||||
"github.com/docker/docker/pkg/directory"
|
"github.com/docker/docker/pkg/directory"
|
||||||
"github.com/docker/docker/pkg/idtools"
|
"github.com/docker/docker/pkg/idtools"
|
||||||
"github.com/docker/docker/pkg/mount"
|
"github.com/docker/docker/pkg/mount"
|
||||||
|
"github.com/docker/docker/pkg/parsers"
|
||||||
"github.com/docker/docker/pkg/parsers/kernel"
|
"github.com/docker/docker/pkg/parsers/kernel"
|
||||||
|
|
||||||
"github.com/opencontainers/runc/libcontainer/label"
|
"github.com/opencontainers/runc/libcontainer/label"
|
||||||
|
@ -92,6 +94,10 @@ func init() {
|
||||||
// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
|
// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
|
||||||
// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
|
// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
|
||||||
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
|
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
|
||||||
|
opts, err := parseOptions(options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if err := supportsOverlay(); err != nil {
|
if err := supportsOverlay(); err != nil {
|
||||||
return nil, graphdriver.ErrNotSupported
|
return nil, graphdriver.ErrNotSupported
|
||||||
|
@ -103,7 +109,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
|
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
|
||||||
return nil, graphdriver.ErrNotSupported
|
if !opts.overrideKernelCheck {
|
||||||
|
return nil, graphdriver.ErrNotSupported
|
||||||
|
}
|
||||||
|
logrus.Warnf("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
|
||||||
}
|
}
|
||||||
|
|
||||||
fsMagic, err := graphdriver.GetFSMagic(home)
|
fsMagic, err := graphdriver.GetFSMagic(home)
|
||||||
|
@ -144,6 +153,31 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
|
||||||
return d, nil
|
return d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type overlayOptions struct {
|
||||||
|
overrideKernelCheck bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseOptions(options []string) (*overlayOptions, error) {
|
||||||
|
o := &overlayOptions{}
|
||||||
|
for _, option := range options {
|
||||||
|
key, val, err := parsers.ParseKeyValueOpt(option)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
key = strings.ToLower(key)
|
||||||
|
switch key {
|
||||||
|
case "overlay2.override_kernel_check":
|
||||||
|
o.overrideKernelCheck, err = strconv.ParseBool(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("overlay2: Unknown option %s\n", key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
func supportsOverlay() error {
|
func supportsOverlay() error {
|
||||||
// We can try to modprobe overlay first before looking at
|
// We can try to modprobe overlay first before looking at
|
||||||
// proc/filesystems for when overlay is supported
|
// proc/filesystems for when overlay is supported
|
||||||
|
|
|
@ -167,7 +167,7 @@ func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Imag
|
||||||
} else {
|
} else {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
} else if danglingOnly {
|
} else if danglingOnly && len(newImage.RepoTags) > 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -51,6 +51,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
||||||
meminfo, err := system.ReadMemInfo()
|
meminfo, err := system.ReadMemInfo()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("Could not read system memory info: %v", err)
|
logrus.Errorf("Could not read system memory info: %v", err)
|
||||||
|
meminfo = &system.MemInfo{}
|
||||||
}
|
}
|
||||||
|
|
||||||
sysInfo := sysinfo.New(true)
|
sysInfo := sysinfo.New(true)
|
||||||
|
@ -71,7 +72,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
||||||
if sysInfo.AppArmor {
|
if sysInfo.AppArmor {
|
||||||
securityOptions = append(securityOptions, "apparmor")
|
securityOptions = append(securityOptions, "apparmor")
|
||||||
}
|
}
|
||||||
if sysInfo.Seccomp {
|
if sysInfo.Seccomp && supportsSeccomp {
|
||||||
securityOptions = append(securityOptions, "seccomp")
|
securityOptions = append(securityOptions, "seccomp")
|
||||||
}
|
}
|
||||||
if selinuxEnabled() {
|
if selinuxEnabled() {
|
||||||
|
|
|
@ -150,6 +150,10 @@ func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) err
|
||||||
cfg.Type = daemon.defaultLogConfig.Type
|
cfg.Type = daemon.defaultLogConfig.Type
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cfg.Config == nil {
|
||||||
|
cfg.Config = make(map[string]string)
|
||||||
|
}
|
||||||
|
|
||||||
if cfg.Type == daemon.defaultLogConfig.Type {
|
if cfg.Type == daemon.defaultLogConfig.Type {
|
||||||
for k, v := range daemon.defaultLogConfig.Config {
|
for k, v := range daemon.defaultLogConfig.Config {
|
||||||
if _, ok := cfg.Config[k]; !ok {
|
if _, ok := cfg.Config[k]; !ok {
|
||||||
|
|
15
daemon/logs_test.go
Normal file
15
daemon/logs_test.go
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
package daemon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
containertypes "github.com/docker/engine-api/types/container"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) {
|
||||||
|
d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}}
|
||||||
|
cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type}
|
||||||
|
if err := d.mergeAndVerifyLogConfig(&cfg); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
|
@ -127,6 +127,14 @@ func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nod
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Cleanup any stale endpoints that might be left over during previous iterations
|
||||||
|
epList := n.Endpoints()
|
||||||
|
for _, ep := range epList {
|
||||||
|
if err := ep.Delete(true); err != nil {
|
||||||
|
logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if err := n.Delete(); err != nil {
|
if err := n.Delete(); err != nil {
|
||||||
logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err)
|
logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err)
|
||||||
return
|
return
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// +build !seccomp,!windows
|
// +build linux,!seccomp
|
||||||
|
|
||||||
package daemon
|
package daemon
|
||||||
|
|
||||||
|
@ -9,6 +9,8 @@ import (
|
||||||
"github.com/opencontainers/specs/specs-go"
|
"github.com/opencontainers/specs/specs-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var supportsSeccomp = false
|
||||||
|
|
||||||
func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error {
|
func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error {
|
||||||
if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" {
|
if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" {
|
||||||
return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile")
|
return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile")
|
||||||
|
|
|
@ -11,6 +11,8 @@ import (
|
||||||
"github.com/opencontainers/specs/specs-go"
|
"github.com/opencontainers/specs/specs-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var supportsSeccomp = true
|
||||||
|
|
||||||
func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error {
|
func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error {
|
||||||
var profile *specs.Seccomp
|
var profile *specs.Seccomp
|
||||||
var err error
|
var err error
|
||||||
|
|
5
daemon/seccomp_unsupported.go
Normal file
5
daemon/seccomp_unsupported.go
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
// +build !linux
|
||||||
|
|
||||||
|
package daemon
|
||||||
|
|
||||||
|
var supportsSeccomp = false
|
|
@ -18,7 +18,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// ContainerStart starts a container.
|
// ContainerStart starts a container.
|
||||||
func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig) error {
|
func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, validateHostname bool) error {
|
||||||
container, err := daemon.GetContainer(name)
|
container, err := daemon.GetContainer(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -68,7 +68,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
|
||||||
|
|
||||||
// check if hostConfig is in line with the current system settings.
|
// check if hostConfig is in line with the current system settings.
|
||||||
// It may happen cgroups are umounted or the like.
|
// It may happen cgroups are umounted or the like.
|
||||||
if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil {
|
if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false, validateHostname); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Adapt for old containers in case we have updates in this function and
|
// Adapt for old containers in case we have updates in this function and
|
||||||
|
|
|
@ -5,12 +5,82 @@ package daemon
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/engine-api/types"
|
"github.com/docker/engine-api/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func validatePSArgs(psArgs string) error {
|
||||||
|
// NOTE: \\s does not detect unicode whitespaces.
|
||||||
|
// So we use fieldsASCII instead of strings.Fields in parsePSOutput.
|
||||||
|
// See https://github.com/docker/docker/pull/24358
|
||||||
|
re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)")
|
||||||
|
for _, group := range re.FindAllStringSubmatch(psArgs, -1) {
|
||||||
|
if len(group) >= 3 {
|
||||||
|
k := group[1]
|
||||||
|
v := group[2]
|
||||||
|
if k != "pid" {
|
||||||
|
return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces
|
||||||
|
func fieldsASCII(s string) []string {
|
||||||
|
fn := func(r rune) bool {
|
||||||
|
switch r {
|
||||||
|
case '\t', '\n', '\f', '\r', ' ':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.FieldsFunc(s, fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, error) {
|
||||||
|
procList := &types.ContainerProcessList{}
|
||||||
|
|
||||||
|
lines := strings.Split(string(output), "\n")
|
||||||
|
procList.Titles = fieldsASCII(lines[0])
|
||||||
|
|
||||||
|
pidIndex := -1
|
||||||
|
for i, name := range procList.Titles {
|
||||||
|
if name == "PID" {
|
||||||
|
pidIndex = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pidIndex == -1 {
|
||||||
|
return nil, fmt.Errorf("Couldn't find PID field in ps output")
|
||||||
|
}
|
||||||
|
|
||||||
|
// loop through the output and extract the PID from each line
|
||||||
|
for _, line := range lines[1:] {
|
||||||
|
if len(line) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fields := fieldsASCII(line)
|
||||||
|
p, err := strconv.Atoi(fields[pidIndex])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pid := range pids {
|
||||||
|
if pid == p {
|
||||||
|
// Make sure number of fields equals number of header titles
|
||||||
|
// merging "overhanging" fields
|
||||||
|
process := fields[:len(procList.Titles)-1]
|
||||||
|
process = append(process, strings.Join(fields[len(procList.Titles)-1:], " "))
|
||||||
|
procList.Processes = append(procList.Processes, process)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return procList, nil
|
||||||
|
}
|
||||||
|
|
||||||
// ContainerTop lists the processes running inside of the given
|
// ContainerTop lists the processes running inside of the given
|
||||||
// container by calling ps with the given args, or with the flags
|
// container by calling ps with the given args, or with the flags
|
||||||
// "-ef" if no args are given. An error is returned if the container
|
// "-ef" if no args are given. An error is returned if the container
|
||||||
|
@ -21,6 +91,10 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
|
||||||
psArgs = "-ef"
|
psArgs = "-ef"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := validatePSArgs(psArgs); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
container, err := daemon.GetContainer(name)
|
container, err := daemon.GetContainer(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -43,42 +117,9 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Error running ps: %v", err)
|
return nil, fmt.Errorf("Error running ps: %v", err)
|
||||||
}
|
}
|
||||||
|
procList, err := parsePSOutput(output, pids)
|
||||||
procList := &types.ContainerProcessList{}
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
lines := strings.Split(string(output), "\n")
|
|
||||||
procList.Titles = strings.Fields(lines[0])
|
|
||||||
|
|
||||||
pidIndex := -1
|
|
||||||
for i, name := range procList.Titles {
|
|
||||||
if name == "PID" {
|
|
||||||
pidIndex = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pidIndex == -1 {
|
|
||||||
return nil, fmt.Errorf("Couldn't find PID field in ps output")
|
|
||||||
}
|
|
||||||
|
|
||||||
// loop through the output and extract the PID from each line
|
|
||||||
for _, line := range lines[1:] {
|
|
||||||
if len(line) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
p, err := strconv.Atoi(fields[pidIndex])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, pid := range pids {
|
|
||||||
if pid == p {
|
|
||||||
// Make sure number of fields equals number of header titles
|
|
||||||
// merging "overhanging" fields
|
|
||||||
process := fields[:len(procList.Titles)-1]
|
|
||||||
process = append(process, strings.Join(fields[len(procList.Titles)-1:], " "))
|
|
||||||
procList.Processes = append(procList.Processes, process)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
daemon.LogContainerEvent(container, "top")
|
daemon.LogContainerEvent(container, "top")
|
||||||
return procList, nil
|
return procList, nil
|
||||||
|
|
76
daemon/top_unix_test.go
Normal file
76
daemon/top_unix_test.go
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
//+build !windows
|
||||||
|
|
||||||
|
package daemon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestContainerTopValidatePSArgs(t *testing.T) {
|
||||||
|
tests := map[string]bool{
|
||||||
|
"ae -o uid=PID": true,
|
||||||
|
"ae -o \"uid= PID\"": true, // ascii space (0x20)
|
||||||
|
"ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83)
|
||||||
|
"ae o uid=PID": true,
|
||||||
|
"aeo uid=PID": true,
|
||||||
|
"ae -O uid=PID": true,
|
||||||
|
"ae -o pid=PID2 -o uid=PID": true,
|
||||||
|
"ae -o pid=PID": false,
|
||||||
|
"ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this
|
||||||
|
"aeo pid=PID": false,
|
||||||
|
"ae": false,
|
||||||
|
"": false,
|
||||||
|
}
|
||||||
|
for psArgs, errExpected := range tests {
|
||||||
|
err := validatePSArgs(psArgs)
|
||||||
|
t.Logf("tested %q, got err=%v", psArgs, err)
|
||||||
|
if errExpected && err == nil {
|
||||||
|
t.Fatalf("expected error, got %v (%q)", err, psArgs)
|
||||||
|
}
|
||||||
|
if !errExpected && err != nil {
|
||||||
|
t.Fatalf("expected nil, got %v (%q)", err, psArgs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestContainerTopParsePSOutput(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
output []byte
|
||||||
|
pids []int
|
||||||
|
errExpected bool
|
||||||
|
}{
|
||||||
|
{[]byte(` PID COMMAND
|
||||||
|
42 foo
|
||||||
|
43 bar
|
||||||
|
100 baz
|
||||||
|
`), []int{42, 43}, false},
|
||||||
|
{[]byte(` UID COMMAND
|
||||||
|
42 foo
|
||||||
|
43 bar
|
||||||
|
100 baz
|
||||||
|
`), []int{42, 43}, true},
|
||||||
|
// unicode space (U+2003, 0xe2 0x80 0x83)
|
||||||
|
{[]byte(` PID COMMAND
|
||||||
|
42 foo
|
||||||
|
43 bar
|
||||||
|
100 baz
|
||||||
|
`), []int{42, 43}, true},
|
||||||
|
// the first space is U+2003, the second one is ascii.
|
||||||
|
{[]byte(` PID COMMAND
|
||||||
|
42 foo
|
||||||
|
43 bar
|
||||||
|
100 baz
|
||||||
|
`), []int{42, 43}, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range tests {
|
||||||
|
_, err := parsePSOutput(f.output, f.pids)
|
||||||
|
t.Logf("tested %q, got err=%v", string(f.output), err)
|
||||||
|
if f.errExpected && err == nil {
|
||||||
|
t.Fatalf("expected error, got %v (%q)", err, string(f.output))
|
||||||
|
}
|
||||||
|
if !f.errExpected && err != nil {
|
||||||
|
t.Fatalf("expected nil, got %v (%q)", err, string(f.output))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -7,10 +7,10 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// ContainerUpdate updates configuration of the container
|
// ContainerUpdate updates configuration of the container
|
||||||
func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) {
|
func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig, validateHostname bool) ([]string, error) {
|
||||||
var warnings []string
|
var warnings []string
|
||||||
|
|
||||||
warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
|
warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true, validateHostname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return warnings, err
|
return warnings, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -89,7 +89,7 @@ func retryOnError(err error) error {
|
||||||
}
|
}
|
||||||
case errcode.Error:
|
case errcode.Error:
|
||||||
switch v.Code {
|
switch v.Code {
|
||||||
case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests:
|
case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown:
|
||||||
return xfer.DoNotRetry{Err: err}
|
return xfer.DoNotRetry{Err: err}
|
||||||
}
|
}
|
||||||
case *url.Error:
|
case *url.Error:
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/docker/distribution/digest"
|
||||||
"github.com/docker/docker/api"
|
"github.com/docker/docker/api"
|
||||||
"github.com/docker/docker/distribution/metadata"
|
"github.com/docker/docker/distribution/metadata"
|
||||||
"github.com/docker/docker/distribution/xfer"
|
"github.com/docker/docker/distribution/xfer"
|
||||||
|
@ -203,3 +204,22 @@ func ValidateRepoName(name string) error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, imageID image.ID) error {
|
||||||
|
dgstRef, err := reference.WithDigest(ref, dgst)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldTagImageID, err := store.Get(dgstRef); err == nil {
|
||||||
|
if oldTagImageID != imageID {
|
||||||
|
// Updating digests not supported by reference store
|
||||||
|
logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagImageID, imageID)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
} else if err != reference.ErrDoesNotExist {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return store.AddDigest(dgstRef, imageID, true)
|
||||||
|
}
|
||||||
|
|
|
@ -393,7 +393,7 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
|
||||||
oldTagImageID, err := p.config.ReferenceStore.Get(ref)
|
oldTagImageID, err := p.config.ReferenceStore.Get(ref)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if oldTagImageID == imageID {
|
if oldTagImageID == imageID {
|
||||||
return false, nil
|
return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID)
|
||||||
}
|
}
|
||||||
} else if err != reference.ErrDoesNotExist {
|
} else if err != reference.ErrDoesNotExist {
|
||||||
return false, err
|
return false, err
|
||||||
|
@ -403,10 +403,14 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
|
||||||
if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
|
if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
|
} else {
|
||||||
return false, err
|
if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -200,6 +200,11 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, ima
|
||||||
|
|
||||||
manifestDigest := digest.FromBytes(canonicalManifest)
|
manifestDigest := digest.FromBytes(canonicalManifest)
|
||||||
progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
|
progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
|
||||||
|
|
||||||
|
if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// Signal digest to the trust client so it can sign the
|
// Signal digest to the trust client so it can sign the
|
||||||
// push, if appropriate.
|
// push, if appropriate.
|
||||||
progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)})
|
progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)})
|
||||||
|
|
|
@ -73,7 +73,7 @@ The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE
|
||||||
|
|
||||||
This solution increases the volume size by first cloning it, then resizing it
|
This solution increases the volume size by first cloning it, then resizing it
|
||||||
using a disk partitioning tool. We recommend
|
using a disk partitioning tool. We recommend
|
||||||
[GParted](http://gparted.sourceforge.net/download.php/index.php). The tool comes
|
[GParted](https://sourceforge.net/projects/gparted/files/). The tool comes
|
||||||
as a bootable ISO, is a free download, and works well with VirtualBox.
|
as a bootable ISO, is a free download, and works well with VirtualBox.
|
||||||
|
|
||||||
1. Stop Boot2Docker
|
1. Stop Boot2Docker
|
||||||
|
@ -102,7 +102,7 @@ as a bootable ISO, is a free download, and works well with VirtualBox.
|
||||||
|
|
||||||
5. Download a disk partitioning tool ISO
|
5. Download a disk partitioning tool ISO
|
||||||
|
|
||||||
To resize the volume, we'll use [GParted](http://gparted.sourceforge.net/download.php/).
|
To resize the volume, we'll use [GParted](https://sourceforge.net/projects/gparted/files/).
|
||||||
Once you've downloaded the tool, add the ISO to the Boot2Docker VM IDE bus.
|
Once you've downloaded the tool, add the ISO to the Boot2Docker VM IDE bus.
|
||||||
You might need to create the bus before you can add the ISO.
|
You might need to create the bus before you can add the ISO.
|
||||||
|
|
||||||
|
|
|
@ -84,5 +84,6 @@ and removed when the service is stopped.
|
||||||
[Service]
|
[Service]
|
||||||
...
|
...
|
||||||
ExecStart=/usr/bin/docker run --env foo=bar --name redis_server redis
|
ExecStart=/usr/bin/docker run --env foo=bar --name redis_server redis
|
||||||
ExecStop=/usr/bin/docker stop -t 2 redis_server ; /usr/bin/docker rm -f redis_server
|
ExecStop=/usr/bin/docker stop -t 2 redis_server
|
||||||
|
ExecStopPost=/usr/bin/docker rm -f redis_server
|
||||||
...
|
...
|
||||||
|
|
|
@ -34,7 +34,7 @@ Otherwise, the user must specify which project to log to using the `--gcp-projec
|
||||||
log option and Docker will attempt to obtain credentials from the
|
log option and Docker will attempt to obtain credentials from the
|
||||||
<a href="https://developers.google.com/identity/protocols/application-default-credentials" target="_blank">Google Application Default Credential</a>.
|
<a href="https://developers.google.com/identity/protocols/application-default-credentials" target="_blank">Google Application Default Credential</a>.
|
||||||
The `--gcp-project` takes precedence over information discovered from the metadata server
|
The `--gcp-project` takes precedence over information discovered from the metadata server
|
||||||
so a Docker daemon running in a Google Cloud Project can be overriden to log to a different
|
so a Docker daemon running in a Google Cloud Project can be overridden to log to a different
|
||||||
Google Cloud Project using `--gcp-project`.
|
Google Cloud Project using `--gcp-project`.
|
||||||
|
|
||||||
## gcplogs options
|
## gcplogs options
|
||||||
|
|
|
@ -25,7 +25,7 @@ supported:
|
||||||
| `json-file` | Default logging driver for Docker. Writes JSON messages to file. |
|
| `json-file` | Default logging driver for Docker. Writes JSON messages to file. |
|
||||||
| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. |
|
| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. |
|
||||||
| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. |
|
| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. |
|
||||||
| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. |
|
| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. |
|
||||||
| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). |
|
| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). |
|
||||||
| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs. |
|
| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs. |
|
||||||
| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using HTTP Event Collector. |
|
| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using HTTP Event Collector. |
|
||||||
|
@ -155,7 +155,7 @@ option is ignored if the address protocol is not `tcp+tls`.
|
||||||
is ignored if the address protocol is not `tcp+tls`.
|
is ignored if the address protocol is not `tcp+tls`.
|
||||||
|
|
||||||
`syslog-tls-skip-verify` configures the TLS verification. This verification is
|
`syslog-tls-skip-verify` configures the TLS verification. This verification is
|
||||||
enabled by default, but it can be overriden by setting this option to `true`.
|
enabled by default, but it can be overridden by setting this option to `true`.
|
||||||
This option is ignored if the address protocol is not `tcp+tls`.
|
This option is ignored if the address protocol is not `tcp+tls`.
|
||||||
|
|
||||||
`tag` configures a string that is appended to the APP-NAME in the syslog
|
`tag` configures a string that is appended to the APP-NAME in the syslog
|
||||||
|
|
|
@ -17,15 +17,15 @@ The following list of features are deprecated in Engine.
|
||||||
### Three argument form in `docker import`
|
### Three argument form in `docker import`
|
||||||
**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)**
|
**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)**
|
||||||
|
|
||||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
The `docker import` command format 'file|URL|- [REPOSITORY [TAG]]' is deprecated since November 2013. It's no more supported.
|
The `docker import` command format 'file|URL|- [REPOSITORY [TAG]]' is deprecated since November 2013. It's no more supported.
|
||||||
|
|
||||||
### `-h` shorthand for `--help`
|
### `-h` shorthand for `--help`
|
||||||
|
|
||||||
**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
**Target For Removal In Release: [v1.14.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Target For Removal In Release: [v1.14.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
The shorthand (`-h`) is less common than `--help` on Linux and cannot be used
|
The shorthand (`-h`) is less common than `--help` on Linux and cannot be used
|
||||||
on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on
|
on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on
|
||||||
|
@ -48,9 +48,9 @@ The flag `--security-opt` doesn't use the colon separator(`:`) anymore to divide
|
||||||
|
|
||||||
### `/containers/(id or name)/copy` endpoint
|
### `/containers/(id or name)/copy` endpoint
|
||||||
|
|
||||||
**Deprecated In Release: v1.8**
|
**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)**
|
||||||
|
|
||||||
**Removed In Release: v1.12.0**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`.
|
The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`.
|
||||||
|
|
||||||
|
@ -63,14 +63,14 @@ See the events API documentation for the new format.
|
||||||
### `-f` flag on `docker tag`
|
### `-f` flag on `docker tag`
|
||||||
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
|
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
|
||||||
|
|
||||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is not longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use.
|
To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is not longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use.
|
||||||
|
|
||||||
### HostConfig at API container start
|
### HostConfig at API container start
|
||||||
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
|
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
|
||||||
|
|
||||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of
|
Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of
|
||||||
defining it at container creation (`POST /containers/create`).
|
defining it at container creation (`POST /containers/create`).
|
||||||
|
@ -79,14 +79,14 @@ defining it at container creation (`POST /containers/create`).
|
||||||
|
|
||||||
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
|
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
|
||||||
|
|
||||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
The `docker ps --before` and `docker ps --since` options are deprecated.
|
The `docker ps --before` and `docker ps --since` options are deprecated.
|
||||||
Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead.
|
Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead.
|
||||||
|
|
||||||
### Docker search 'automated' and 'stars' options
|
### Docker search 'automated' and 'stars' options
|
||||||
|
|
||||||
**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
**Target For Removal In Release: v1.14**
|
**Target For Removal In Release: v1.14**
|
||||||
|
|
||||||
|
@ -96,7 +96,7 @@ Use `docker search --filter=is-automated=...` and `docker search --filter=stars=
|
||||||
### Driver Specific Log Tags
|
### Driver Specific Log Tags
|
||||||
**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
|
**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
|
||||||
|
|
||||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
Log tags are now generated in a standard way across different logging drivers.
|
Log tags are now generated in a standard way across different logging drivers.
|
||||||
Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and
|
Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and
|
||||||
|
@ -162,7 +162,7 @@ The following double-dash options are deprecated and have no replacement:
|
||||||
|
|
||||||
**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)**
|
**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)**
|
||||||
|
|
||||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
The single-dash (`-help`) was removed, in favor of the double-dash `--help`
|
The single-dash (`-help`) was removed, in favor of the double-dash `--help`
|
||||||
|
|
||||||
|
@ -177,7 +177,7 @@ Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the d
|
||||||
### Docker Content Trust ENV passphrase variables name change
|
### Docker Content Trust ENV passphrase variables name change
|
||||||
**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
|
**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**
|
||||||
|
|
||||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/)**
|
||||||
|
|
||||||
Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables
|
Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables
|
||||||
|
|
||||||
|
|
|
@ -93,8 +93,8 @@ too.
|
||||||
**Option 5** creates a custom network of APT proxy server and Debian-based containers:
|
**Option 5** creates a custom network of APT proxy server and Debian-based containers:
|
||||||
|
|
||||||
$ docker network create mynetwork
|
$ docker network create mynetwork
|
||||||
$ docker run -d -p 3142:3142 --net=mynetwork --name test_apt_cacher_ng eg_apt_cacher_ng
|
$ docker run -d -p 3142:3142 --network=mynetwork --name test_apt_cacher_ng eg_apt_cacher_ng
|
||||||
$ docker run --rm -it --net=mynetwork -e http_proxy=http://test_apt_cacher_ng:3142/ debian bash
|
$ docker run --rm -it --network=mynetwork -e http_proxy=http://test_apt_cacher_ng:3142/ debian bash
|
||||||
|
|
||||||
Apt-cacher-ng has some tools that allow you to manage the repository,
|
Apt-cacher-ng has some tools that allow you to manage the repository,
|
||||||
and they can be used by leveraging the `VOLUME`
|
and they can be used by leveraging the `VOLUME`
|
||||||
|
|
|
@ -18,6 +18,17 @@ LibNetwork, which shares plugin infrastructure with Engine. Effectively, network
|
||||||
driver plugins are activated in the same way as other plugins, and use the same
|
driver plugins are activated in the same way as other plugins, and use the same
|
||||||
kind of protocol.
|
kind of protocol.
|
||||||
|
|
||||||
|
## Network driver plugins and swarm mode
|
||||||
|
|
||||||
|
Docker 1.12 adds support for cluster management and orchestration called
|
||||||
|
[swarm mode](../swarm/index.md). Docker Engine running in swarm mode currently
|
||||||
|
only supports the built-in overlay driver for networking. Therefore existing
|
||||||
|
networking plugins will not work in swarm mode.
|
||||||
|
|
||||||
|
When you run Docker Engine outside of swarm mode, all networking plugins that
|
||||||
|
worked in Docker 1.11 will continue to function normally. They do not require
|
||||||
|
any modification.
|
||||||
|
|
||||||
## Using network driver plugins
|
## Using network driver plugins
|
||||||
|
|
||||||
The means of installing and running a network driver plugin depend on the
|
The means of installing and running a network driver plugin depend on the
|
||||||
|
@ -35,7 +46,7 @@ Some network driver plugins are listed in [plugins](plugins.md)
|
||||||
The `mynet` network is now owned by `weave`, so subsequent commands
|
The `mynet` network is now owned by `weave`, so subsequent commands
|
||||||
referring to that network will be sent to the plugin,
|
referring to that network will be sent to the plugin,
|
||||||
|
|
||||||
$ docker run --net=mynet busybox top
|
$ docker run --network=mynet busybox top
|
||||||
|
|
||||||
|
|
||||||
## Write a network plugin
|
## Write a network plugin
|
||||||
|
|
|
@ -130,11 +130,13 @@ directory named `docker` in your current location.
|
||||||
$ tar -xvzf docker-latest.tgz
|
$ tar -xvzf docker-latest.tgz
|
||||||
|
|
||||||
docker/
|
docker/
|
||||||
docker/docker-containerd-ctr
|
|
||||||
docker/docker
|
docker/docker
|
||||||
docker/docker-containerd
|
docker/docker-containerd
|
||||||
docker/docker-runc
|
docker/docker-containerd-ctr
|
||||||
docker/docker-containerd-shim
|
docker/docker-containerd-shim
|
||||||
|
docker/docker-proxy
|
||||||
|
docker/docker-runc
|
||||||
|
docker/dockerd
|
||||||
```
|
```
|
||||||
|
|
||||||
Engine requires these binaries to be installed in your host's `$PATH`.
|
Engine requires these binaries to be installed in your host's `$PATH`.
|
||||||
|
@ -154,7 +156,7 @@ $ mv docker/* /usr/bin/
|
||||||
You can manually start the Engine in daemon mode using:
|
You can manually start the Engine in daemon mode using:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ sudo docker daemon &
|
$ sudo dockerd &
|
||||||
```
|
```
|
||||||
|
|
||||||
The GitHub repository provides samples of init-scripts you can use to control
|
The GitHub repository provides samples of init-scripts you can use to control
|
||||||
|
|
|
@ -133,9 +133,12 @@ This section lists each version from latest to oldest. Each listing includes a
|
||||||
* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version.
|
* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version.
|
||||||
* API errors are now returned as JSON instead of plain text.
|
* API errors are now returned as JSON instead of plain text.
|
||||||
* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container.
|
* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container.
|
||||||
* `POST /v1.23/containers/<container ID>/exec` and `POST /v1.23/exec/<exec ID>/start`
|
* `POST /containers/<container ID>/exec` and `POST /exec/<exec ID>/start`
|
||||||
no longer expects a "Container" field to be present. This property was not used
|
no longer expects a "Container" field to be present. This property was not used
|
||||||
and is no longer sent by the docker client.
|
and is no longer sent by the docker client.
|
||||||
|
* `POST /containers/create/` now validates the hostname (should be a valid RFC 1123 hostname).
|
||||||
|
* `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:<name|id>`,
|
||||||
|
to have the container join the PID namespace of an existing container.
|
||||||
|
|
||||||
### v1.23 API changes
|
### v1.23 API changes
|
||||||
|
|
||||||
|
|
|
@ -169,6 +169,7 @@ Create a container
|
||||||
"MemorySwap": 0,
|
"MemorySwap": 0,
|
||||||
"CpuShares": 512,
|
"CpuShares": 512,
|
||||||
"CpusetCpus": "0,1",
|
"CpusetCpus": "0,1",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
||||||
"PublishAllPorts": false,
|
"PublishAllPorts": false,
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
|
@ -242,6 +243,9 @@ Json Parameters:
|
||||||
- **CpuShares** - An integer value containing the CPU Shares for container
|
- **CpuShares** - An integer value containing the CPU Shares for container
|
||||||
(ie. the relative weight vs other containers).
|
(ie. the relative weight vs other containers).
|
||||||
- **CpusetCpus** - String value containing the cgroups CpusetCpus to use.
|
- **CpusetCpus** - String value containing the cgroups CpusetCpus to use.
|
||||||
|
- **PidMode** - Set the PID (Process) Namespace mode for the container;
|
||||||
|
`"container:<name|id>"`: joins another container's PID namespace
|
||||||
|
`"host"`: use the host's PID namespace inside the container
|
||||||
- **PortBindings** - A map of exposed container ports and the host port they
|
- **PortBindings** - A map of exposed container ports and the host port they
|
||||||
should map to. It should be specified in the form
|
should map to. It should be specified in the form
|
||||||
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
||||||
|
@ -373,6 +377,7 @@ Return low-level information on the container `id`
|
||||||
"Memory": 0,
|
"Memory": 0,
|
||||||
"MemorySwap": 0,
|
"MemorySwap": 0,
|
||||||
"NetworkMode": "bridge",
|
"NetworkMode": "bridge",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": {},
|
"PortBindings": {},
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
"ReadonlyRootfs": false,
|
"ReadonlyRootfs": false,
|
||||||
|
|
|
@ -176,6 +176,7 @@ Create a container
|
||||||
"CpusetMems": "0,1",
|
"CpusetMems": "0,1",
|
||||||
"BlkioWeight": 300,
|
"BlkioWeight": 300,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
||||||
"PublishAllPorts": false,
|
"PublishAllPorts": false,
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
|
@ -253,6 +254,9 @@ Json Parameters:
|
||||||
- **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
|
- **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
|
||||||
- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
|
- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
|
||||||
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
||||||
|
- **PidMode** - Set the PID (Process) Namespace mode for the container;
|
||||||
|
`"container:<name|id>"`: joins another container's PID namespace
|
||||||
|
`"host"`: use the host's PID namespace inside the container
|
||||||
- **PortBindings** - A map of exposed container ports and the host port they
|
- **PortBindings** - A map of exposed container ports and the host port they
|
||||||
should map to. A JSON object in the form
|
should map to. A JSON object in the form
|
||||||
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
||||||
|
@ -388,6 +392,7 @@ Return low-level information on the container `id`
|
||||||
"MemorySwap": 0,
|
"MemorySwap": 0,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"NetworkMode": "bridge",
|
"NetworkMode": "bridge",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": {},
|
"PortBindings": {},
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
"ReadonlyRootfs": false,
|
"ReadonlyRootfs": false,
|
||||||
|
|
|
@ -177,6 +177,7 @@ Create a container
|
||||||
"BlkioWeight": 300,
|
"BlkioWeight": 300,
|
||||||
"MemorySwappiness": 60,
|
"MemorySwappiness": 60,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
||||||
"PublishAllPorts": false,
|
"PublishAllPorts": false,
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
|
@ -256,6 +257,9 @@ Json Parameters:
|
||||||
- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
|
- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
|
||||||
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
||||||
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
||||||
|
- **PidMode** - Set the PID (Process) Namespace mode for the container;
|
||||||
|
`"container:<name|id>"`: joins another container's PID namespace
|
||||||
|
`"host"`: use the host's PID namespace inside the container
|
||||||
- **PortBindings** - A map of exposed container ports and the host port they
|
- **PortBindings** - A map of exposed container ports and the host port they
|
||||||
should map to. A JSON object in the form
|
should map to. A JSON object in the form
|
||||||
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
||||||
|
@ -391,6 +395,7 @@ Return low-level information on the container `id`
|
||||||
"MemorySwap": 0,
|
"MemorySwap": 0,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"NetworkMode": "bridge",
|
"NetworkMode": "bridge",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": {},
|
"PortBindings": {},
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
"ReadonlyRootfs": false,
|
"ReadonlyRootfs": false,
|
||||||
|
|
|
@ -184,6 +184,7 @@ Create a container
|
||||||
"BlkioWeight": 300,
|
"BlkioWeight": 300,
|
||||||
"MemorySwappiness": 60,
|
"MemorySwappiness": 60,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
||||||
"PublishAllPorts": false,
|
"PublishAllPorts": false,
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
|
@ -271,6 +272,9 @@ Json Parameters:
|
||||||
- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
|
- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
|
||||||
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
||||||
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
||||||
|
- **PidMode** - Set the PID (Process) Namespace mode for the container;
|
||||||
|
`"container:<name|id>"`: joins another container's PID namespace
|
||||||
|
`"host"`: use the host's PID namespace inside the container
|
||||||
- **PortBindings** - A map of exposed container ports and the host port they
|
- **PortBindings** - A map of exposed container ports and the host port they
|
||||||
should map to. A JSON object in the form
|
should map to. A JSON object in the form
|
||||||
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
||||||
|
@ -414,6 +418,7 @@ Return low-level information on the container `id`
|
||||||
"KernelMemory": 0,
|
"KernelMemory": 0,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"NetworkMode": "bridge",
|
"NetworkMode": "bridge",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": {},
|
"PortBindings": {},
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
"ReadonlyRootfs": false,
|
"ReadonlyRootfs": false,
|
||||||
|
|
|
@ -277,6 +277,7 @@ Create a container
|
||||||
"MemorySwappiness": 60,
|
"MemorySwappiness": 60,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"OomScoreAdj": 500,
|
"OomScoreAdj": 500,
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
||||||
"PublishAllPorts": false,
|
"PublishAllPorts": false,
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
|
@ -382,6 +383,9 @@ Json Parameters:
|
||||||
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
||||||
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
||||||
- **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
|
- **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
|
||||||
|
- **PidMode** - Set the PID (Process) Namespace mode for the container;
|
||||||
|
`"container:<name|id>"`: joins another container's PID namespace
|
||||||
|
`"host"`: use the host's PID namespace inside the container
|
||||||
- **PortBindings** - A map of exposed container ports and the host port they
|
- **PortBindings** - A map of exposed container ports and the host port they
|
||||||
should map to. A JSON object in the form
|
should map to. A JSON object in the form
|
||||||
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
`{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
|
||||||
|
@ -534,6 +538,7 @@ Return low-level information on the container `id`
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"OomScoreAdj": 500,
|
"OomScoreAdj": 500,
|
||||||
"NetworkMode": "bridge",
|
"NetworkMode": "bridge",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": {},
|
"PortBindings": {},
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
"ReadonlyRootfs": false,
|
"ReadonlyRootfs": false,
|
||||||
|
|
|
@ -296,6 +296,7 @@ Create a container
|
||||||
"MemorySwappiness": 60,
|
"MemorySwappiness": 60,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"OomScoreAdj": 500,
|
"OomScoreAdj": 500,
|
||||||
|
"PidMode": "",
|
||||||
"PidsLimit": -1,
|
"PidsLimit": -1,
|
||||||
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
||||||
"PublishAllPorts": false,
|
"PublishAllPorts": false,
|
||||||
|
@ -402,6 +403,9 @@ Json Parameters:
|
||||||
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
||||||
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
||||||
- **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
|
- **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
|
||||||
|
- **PidMode** - Set the PID (Process) Namespace mode for the container;
|
||||||
|
`"container:<name|id>"`: joins another container's PID namespace
|
||||||
|
`"host"`: use the host's PID namespace inside the container
|
||||||
- **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
|
- **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
|
||||||
- **PortBindings** - A map of exposed container ports and the host port they
|
- **PortBindings** - A map of exposed container ports and the host port they
|
||||||
should map to. A JSON object in the form
|
should map to. A JSON object in the form
|
||||||
|
@ -557,6 +561,7 @@ Return low-level information on the container `id`
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"OomScoreAdj": 500,
|
"OomScoreAdj": 500,
|
||||||
"NetworkMode": "bridge",
|
"NetworkMode": "bridge",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": {},
|
"PortBindings": {},
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
"ReadonlyRootfs": false,
|
"ReadonlyRootfs": false,
|
||||||
|
|
|
@ -310,6 +310,7 @@ Create a container
|
||||||
"MemorySwappiness": 60,
|
"MemorySwappiness": 60,
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"OomScoreAdj": 500,
|
"OomScoreAdj": 500,
|
||||||
|
"PidMode": "",
|
||||||
"PidsLimit": -1,
|
"PidsLimit": -1,
|
||||||
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
|
||||||
"PublishAllPorts": false,
|
"PublishAllPorts": false,
|
||||||
|
@ -361,7 +362,7 @@ Create a container
|
||||||
**JSON parameters**:
|
**JSON parameters**:
|
||||||
|
|
||||||
- **Hostname** - A string value containing the hostname to use for the
|
- **Hostname** - A string value containing the hostname to use for the
|
||||||
container.
|
container. This must be a valid RFC 1123 hostname.
|
||||||
- **Domainname** - A string value containing the domain name to use
|
- **Domainname** - A string value containing the domain name to use
|
||||||
for the container.
|
for the container.
|
||||||
- **User** - A string value specifying the user inside the container.
|
- **User** - A string value specifying the user inside the container.
|
||||||
|
@ -421,6 +422,9 @@ Create a container
|
||||||
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
- **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
|
||||||
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
|
||||||
- **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
|
- **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
|
||||||
|
- **PidMode** - Set the PID (Process) Namespace mode for the container;
|
||||||
|
`"container:<name|id>"`: joins another container's PID namespace
|
||||||
|
`"host"`: use the host's PID namespace inside the container
|
||||||
- **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
|
- **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
|
||||||
- **PortBindings** - A map of exposed container ports and the host port they
|
- **PortBindings** - A map of exposed container ports and the host port they
|
||||||
should map to. A JSON object in the form
|
should map to. A JSON object in the form
|
||||||
|
@ -583,6 +587,7 @@ Return low-level information on the container `id`
|
||||||
"OomKillDisable": false,
|
"OomKillDisable": false,
|
||||||
"OomScoreAdj": 500,
|
"OomScoreAdj": 500,
|
||||||
"NetworkMode": "bridge",
|
"NetworkMode": "bridge",
|
||||||
|
"PidMode": "",
|
||||||
"PortBindings": {},
|
"PortBindings": {},
|
||||||
"Privileged": false,
|
"Privileged": false,
|
||||||
"ReadonlyRootfs": false,
|
"ReadonlyRootfs": false,
|
||||||
|
@ -2194,7 +2199,6 @@ Display system-wide information
|
||||||
"DockerRootDir": "/var/lib/docker",
|
"DockerRootDir": "/var/lib/docker",
|
||||||
"Driver": "btrfs",
|
"Driver": "btrfs",
|
||||||
"DriverStatus": [[""]],
|
"DriverStatus": [[""]],
|
||||||
"ExecutionDriver": "native-0.1",
|
|
||||||
"ExperimentalBuild": false,
|
"ExperimentalBuild": false,
|
||||||
"HttpProxy": "http://test:test@localhost:8080",
|
"HttpProxy": "http://test:test@localhost:8080",
|
||||||
"HttpsProxy": "https://test:test@localhost:8080",
|
"HttpsProxy": "https://test:test@localhost:8080",
|
||||||
|
@ -3986,11 +3990,11 @@ JSON Parameters:
|
||||||
- **Target** – Container path.
|
- **Target** – Container path.
|
||||||
- **Source** – Mount source (e.g. a volume name, a host path).
|
- **Source** – Mount source (e.g. a volume name, a host path).
|
||||||
- **Type** – The mount type (`bind`, or `volume`).
|
- **Type** – The mount type (`bind`, or `volume`).
|
||||||
- **Writable** – A boolean indicating whether the mount should be writable.
|
- **ReadOnly** – A boolean indicating whether the mount should be read-only.
|
||||||
- **BindOptions** - Optional configuration for the `bind` type.
|
- **BindOptions** - Optional configuration for the `bind` type.
|
||||||
- **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
|
- **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
|
||||||
- **VolumeOptions** – Optional configuration for the `volume` type.
|
- **VolumeOptions** – Optional configuration for the `volume` type.
|
||||||
- **Populate** – A boolean indicating if volume should be
|
- **NoCopy** – A boolean indicating if volume should be
|
||||||
populated with the data from the target. (Default false)
|
populated with the data from the target. (Default false)
|
||||||
- **Labels** – User-defined name and labels for the volume.
|
- **Labels** – User-defined name and labels for the volume.
|
||||||
- **DriverConfig** – Map of driver-specific options.
|
- **DriverConfig** – Map of driver-specific options.
|
||||||
|
@ -4008,7 +4012,7 @@ JSON Parameters:
|
||||||
- **Memory** – Memory reservation
|
- **Memory** – Memory reservation
|
||||||
- **RestartPolicy** – Specification for the restart policy which applies to containers created
|
- **RestartPolicy** – Specification for the restart policy which applies to containers created
|
||||||
as part of this service.
|
as part of this service.
|
||||||
- **Condition** – Condition for restart (`none`, `on_failure`, or `any`).
|
- **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
|
||||||
- **Delay** – Delay between restart attempts.
|
- **Delay** – Delay between restart attempts.
|
||||||
- **Attempts** – Maximum attempts to restart a given container before giving up (default value
|
- **Attempts** – Maximum attempts to restart a given container before giving up (default value
|
||||||
is 0, which is ignored).
|
is 0, which is ignored).
|
||||||
|
@ -4204,11 +4208,12 @@ Update the service `id`.
|
||||||
- **Target** – Container path.
|
- **Target** – Container path.
|
||||||
- **Source** – Mount source (e.g. a volume name, a host path).
|
- **Source** – Mount source (e.g. a volume name, a host path).
|
||||||
- **Type** – The mount type (`bind`, or `volume`).
|
- **Type** – The mount type (`bind`, or `volume`).
|
||||||
- **Writable** – A boolean indicating whether the mount should be writable.
|
- **ReadOnly** – A boolean indicating whether the mount should be read-only.
|
||||||
- **BindOptions** - Optional configuration for the `bind` type
|
- **BindOptions** - Optional configuration for the `bind` type
|
||||||
- **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
|
- **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
|
||||||
- **VolumeOptions** – Optional configuration for the `volume` type.
|
- **VolumeOptions** – Optional configuration for the `volume` type.
|
||||||
- **Populate** – A boolean indicating if volume should be populated with the data from the target. (Default false)
|
- **NoCopy** – A boolean indicating if volume should be
|
||||||
|
populated with the data from the target. (Default false)
|
||||||
- **Labels** – User-defined name and labels for the volume.
|
- **Labels** – User-defined name and labels for the volume.
|
||||||
- **DriverConfig** – Map of driver-specific options.
|
- **DriverConfig** – Map of driver-specific options.
|
||||||
- **Name** - Name of the driver to use to create the volume
|
- **Name** - Name of the driver to use to create the volume
|
||||||
|
@ -4225,7 +4230,7 @@ Update the service `id`.
|
||||||
- **Memory** – Memory reservation
|
- **Memory** – Memory reservation
|
||||||
- **RestartPolicy** – Specification for the restart policy which applies to containers created
|
- **RestartPolicy** – Specification for the restart policy which applies to containers created
|
||||||
as part of this service.
|
as part of this service.
|
||||||
- **Condition** – Condition for restart (`none`, `on_failure`, or `any`).
|
- **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
|
||||||
- **Delay** – Delay between restart attempts.
|
- **Delay** – Delay between restart attempts.
|
||||||
- **Attempts** – Maximum attempts to restart a given container before giving up (default value
|
- **Attempts** – Maximum attempts to restart a given container before giving up (default value
|
||||||
is 0, which is ignored).
|
is 0, which is ignored).
|
||||||
|
|
|
@ -502,7 +502,7 @@ default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows)
|
||||||
- `RUN ["executable", "param1", "param2"]` (*exec* form)
|
- `RUN ["executable", "param1", "param2"]` (*exec* form)
|
||||||
|
|
||||||
The `RUN` instruction will execute any commands in a new layer on top of the
|
The `RUN` instruction will execute any commands in a new layer on top of the
|
||||||
current image and commit the results. The resulting committed image will be
|
current image and commit the results. The resulting committed image will be
|
||||||
used for the next step in the `Dockerfile`.
|
used for the next step in the `Dockerfile`.
|
||||||
|
|
||||||
Layering `RUN` instructions and generating commits conforms to the core
|
Layering `RUN` instructions and generating commits conforms to the core
|
||||||
|
@ -544,7 +544,7 @@ RUN /bin/bash -c 'source $HOME/.bashrc ; echo $HOME'
|
||||||
>
|
>
|
||||||
> **Note**:
|
> **Note**:
|
||||||
> In the *JSON* form, it is necessary to escape backslashes. This is
|
> In the *JSON* form, it is necessary to escape backslashes. This is
|
||||||
> particularly relevant on Windows where the backslash is the path seperator.
|
> particularly relevant on Windows where the backslash is the path separator.
|
||||||
> The following line would otherwise be treated as *shell* form due to not
|
> The following line would otherwise be treated as *shell* form due to not
|
||||||
> being valid JSON, and fail in an unexpected way:
|
> being valid JSON, and fail in an unexpected way:
|
||||||
> `RUN ["c:\windows\system32\tasklist.exe"]`
|
> `RUN ["c:\windows\system32\tasklist.exe"]`
|
||||||
|
@ -572,7 +572,7 @@ The cache for `RUN` instructions can be invalidated by `ADD` instructions. See
|
||||||
For systems that have recent aufs version (i.e., `dirperm1` mount option can
|
For systems that have recent aufs version (i.e., `dirperm1` mount option can
|
||||||
be set), docker will attempt to fix the issue automatically by mounting
|
be set), docker will attempt to fix the issue automatically by mounting
|
||||||
the layers with `dirperm1` option. More details on `dirperm1` option can be
|
the layers with `dirperm1` option. More details on `dirperm1` option can be
|
||||||
found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html)
|
found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs)
|
||||||
|
|
||||||
If your system doesn't have support for `dirperm1`, the issue describes a workaround.
|
If your system doesn't have support for `dirperm1`, the issue describes a workaround.
|
||||||
|
|
||||||
|
@ -1292,8 +1292,9 @@ subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is
|
||||||
defined and the `what_user` value was passed on the command line. Prior to its definition by an
|
defined and the `what_user` value was passed on the command line. Prior to its definition by an
|
||||||
`ARG` instruction, any use of a variable results in an empty string.
|
`ARG` instruction, any use of a variable results in an empty string.
|
||||||
|
|
||||||
> **Note:** It is not recommended to use build-time variables for
|
> **Warning:** It is not recommended to use build-time variables for
|
||||||
> passing secrets like github keys, user credentials etc.
|
> passing secrets like github keys, user credentials etc. Build-time variable
|
||||||
|
> values are visible to any user of the image with the `docker history` command.
|
||||||
|
|
||||||
You can use an `ARG` or an `ENV` instruction to specify variables that are
|
You can use an `ARG` or an `ENV` instruction to specify variables that are
|
||||||
available to the `RUN` instruction. Environment variables defined using the
|
available to the `RUN` instruction. Environment variables defined using the
|
||||||
|
|
|
@ -10,14 +10,17 @@ parent = "smn_cli"
|
||||||
|
|
||||||
# attach
|
# attach
|
||||||
|
|
||||||
Usage: docker attach [OPTIONS] CONTAINER
|
```markdown
|
||||||
|
Usage: docker attach [OPTIONS] CONTAINER
|
||||||
|
|
||||||
Attach to a running container
|
Attach to a running container
|
||||||
|
|
||||||
--detach-keys="<sequence>" Set up escape key sequence
|
Options:
|
||||||
--help Print usage
|
--detach-keys string Override the key sequence for detaching a container
|
||||||
--no-stdin Do not attach STDIN
|
--help Print usage
|
||||||
--sig-proxy=true Proxy all received signals to the process
|
--no-stdin Do not attach STDIN
|
||||||
|
--sig-proxy Proxy all received signals to the process (default true)
|
||||||
|
```
|
||||||
|
|
||||||
The `docker attach` command allows you to attach to a running container using
|
The `docker attach` command allows you to attach to a running container using
|
||||||
the container's ID or name, either to view its ongoing output or to control it
|
the container's ID or name, either to view its ongoing output or to control it
|
||||||
|
|
|
@ -10,32 +10,38 @@ parent = "smn_cli"
|
||||||
|
|
||||||
# build
|
# build
|
||||||
|
|
||||||
Usage: docker build [OPTIONS] PATH | URL | -
|
```markdown
|
||||||
|
Usage: docker build [OPTIONS] PATH | URL | -
|
||||||
|
|
||||||
Build a new image from the source code at PATH
|
Build an image from a Dockerfile
|
||||||
|
|
||||||
--build-arg=[] Set build-time variables
|
Options:
|
||||||
--cpu-shares CPU Shares (relative weight)
|
--build-arg value Set build-time variables (default [])
|
||||||
--cgroup-parent="" Optional parent cgroup for the container
|
--cgroup-parent string Optional parent cgroup for the container
|
||||||
--cpu-period=0 Limit the CPU CFS (Completely Fair Scheduler) period
|
--cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period
|
||||||
--cpu-quota=0 Limit the CPU CFS (Completely Fair Scheduler) quota
|
--cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota
|
||||||
--cpuset-cpus="" CPUs in which to allow execution, e.g. `0-3`, `0,1`
|
-c, --cpu-shares int CPU shares (relative weight)
|
||||||
--cpuset-mems="" MEMs in which to allow execution, e.g. `0-3`, `0,1`
|
--cpuset-cpus string CPUs in which to allow execution (0-3, 0,1)
|
||||||
--disable-content-trust=true Skip image verification
|
--cpuset-mems string MEMs in which to allow execution (0-3, 0,1)
|
||||||
-f, --file="" Name of the Dockerfile (Default is 'PATH/Dockerfile')
|
--disable-content-trust Skip image verification (default true)
|
||||||
--force-rm Always remove intermediate containers
|
-f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile')
|
||||||
--help Print usage
|
--force-rm Always remove intermediate containers
|
||||||
--isolation="" Container isolation technology
|
--help Print usage
|
||||||
--label=[] Set metadata for an image
|
--isolation string Container isolation technology
|
||||||
-m, --memory="" Memory limit for all build containers
|
--label value Set metadata for an image (default [])
|
||||||
--memory-swap="" A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap.
|
-m, --memory string Memory limit
|
||||||
--no-cache Do not use cache when building the image
|
--memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap
|
||||||
--pull Always attempt to pull a newer version of the image
|
--no-cache Do not use cache when building the image
|
||||||
-q, --quiet Suppress the build output and print image ID on success
|
--pull Always attempt to pull a newer version of the image
|
||||||
--rm=true Remove intermediate containers after a successful build
|
-q, --quiet Suppress the build output and print image ID on success
|
||||||
--shm-size=[] Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
|
--rm Remove intermediate containers after a successful build (default true)
|
||||||
-t, --tag=[] Name and optionally a tag in the 'name:tag' format
|
--shm-size string Size of /dev/shm, default value is 64MB.
|
||||||
--ulimit=[] Ulimit options
|
The format is `<number><unit>`. `number` must be greater than `0`.
|
||||||
|
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
|
||||||
|
or `g` (gigabytes). If you omit the unit, the system uses bytes.
|
||||||
|
-t, --tag value Name and optionally a tag in the 'name:tag' format (default [])
|
||||||
|
--ulimit value Ulimit options (default [])
|
||||||
|
```
|
||||||
|
|
||||||
Builds Docker images from a Dockerfile and a "context". A build's context is
|
Builds Docker images from a Dockerfile and a "context". A build's context is
|
||||||
the files located in the specified `PATH` or `URL`. The build process can refer
|
the files located in the specified `PATH` or `URL`. The build process can refer
|
||||||
|
|
|
@ -14,16 +14,31 @@ weight = -2
|
||||||
To list available commands, either run `docker` with no parameters
|
To list available commands, either run `docker` with no parameters
|
||||||
or execute `docker help`:
|
or execute `docker help`:
|
||||||
|
|
||||||
$ docker
|
```bash
|
||||||
Usage: docker [OPTIONS] COMMAND [arg...]
|
$ docker
|
||||||
docker daemon [ --help | ... ]
|
Usage: docker [OPTIONS] COMMAND [arg...]
|
||||||
docker [ --help | -v | --version ]
|
docker [ --help | -v | --version ]
|
||||||
|
|
||||||
-H, --host=[]: The socket(s) to talk to the Docker daemon in the format of tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd.
|
A self-sufficient runtime for containers.
|
||||||
|
|
||||||
A self-sufficient runtime for Linux containers.
|
Options:
|
||||||
|
|
||||||
...
|
--config=~/.docker Location of client config files
|
||||||
|
-D, --debug Enable debug mode
|
||||||
|
-H, --host=[] Daemon socket(s) to connect to
|
||||||
|
-h, --help Print usage
|
||||||
|
-l, --log-level=info Set the logging level
|
||||||
|
--tls Use TLS; implied by --tlsverify
|
||||||
|
--tlscacert=~/.docker/ca.pem Trust certs signed only by this CA
|
||||||
|
--tlscert=~/.docker/cert.pem Path to TLS certificate file
|
||||||
|
--tlskey=~/.docker/key.pem Path to TLS key file
|
||||||
|
--tlsverify Use TLS and verify the remote
|
||||||
|
-v, --version Print version information and quit
|
||||||
|
|
||||||
|
Commands:
|
||||||
|
attach Attach to a running container
|
||||||
|
# […]
|
||||||
|
```
|
||||||
|
|
||||||
Depending on your Docker system configuration, you may be required to preface
|
Depending on your Docker system configuration, you may be required to preface
|
||||||
each `docker` command with `sudo`. To avoid having to use `sudo` with the
|
each `docker` command with `sudo`. To avoid having to use `sudo` with the
|
||||||
|
|
|
@ -10,15 +10,18 @@ parent = "smn_cli"
|
||||||
|
|
||||||
# commit
|
# commit
|
||||||
|
|
||||||
Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
|
```markdown
|
||||||
|
Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
|
||||||
|
|
||||||
Create a new image from a container's changes
|
Create a new image from a container's changes
|
||||||
|
|
||||||
-a, --author="" Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
|
Options:
|
||||||
-c, --change=[] Apply specified Dockerfile instructions while committing the image
|
-a, --author string Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
|
||||||
--help Print usage
|
-c, --change value Apply Dockerfile instruction to the created image (default [])
|
||||||
-m, --message="" Commit message
|
--help Print usage
|
||||||
-p, --pause=true Pause container during commit
|
-m, --message string Commit message
|
||||||
|
-p, --pause Pause container during commit (default true)
|
||||||
|
```
|
||||||
|
|
||||||
It can be useful to commit a container's file changes or settings into a new
|
It can be useful to commit a container's file changes or settings into a new
|
||||||
image. This allows you to debug a container by running an interactive shell, or to
|
image. This allows you to debug a container by running an interactive shell, or to
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue