Merge pull request #31134 from thaJeztah/update-swagger
[1.13.x] swagger cherry-picks
Commit d4e2280b0f
8 changed files with 275 additions and 132 deletions
@@ -65,7 +65,8 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
 	rawVersion := r.URL.Query().Get("version")
 	version, err := strconv.ParseUint(rawVersion, 10, 64)
 	if err != nil {
-		return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error())
+		err := fmt.Errorf("invalid swarm version '%s': %v", rawVersion, err)
+		return errors.NewBadRequestError(err)
 	}

 	var flags types.UpdateFlags
@@ -73,7 +74,8 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
 	if value := r.URL.Query().Get("rotateWorkerToken"); value != "" {
 		rot, err := strconv.ParseBool(value)
 		if err != nil {
-			return fmt.Errorf("invalid value for rotateWorkerToken: %s", value)
+			err := fmt.Errorf("invalid value for rotateWorkerToken: %s", value)
+			return errors.NewBadRequestError(err)
 		}

 		flags.RotateWorkerToken = rot
@@ -82,7 +84,8 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
 	if value := r.URL.Query().Get("rotateManagerToken"); value != "" {
 		rot, err := strconv.ParseBool(value)
 		if err != nil {
-			return fmt.Errorf("invalid value for rotateManagerToken: %s", value)
+			err := fmt.Errorf("invalid value for rotateManagerToken: %s", value)
+			return errors.NewBadRequestError(err)
 		}

 		flags.RotateManagerToken = rot
@@ -91,7 +94,7 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
 	if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" {
 		rot, err := strconv.ParseBool(value)
 		if err != nil {
-			return fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value)
+			return errors.NewBadRequestError(fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value))
 		}

 		flags.RotateManagerUnlockKey = rot
@@ -184,7 +187,8 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
 	rawVersion := r.URL.Query().Get("version")
 	version, err := strconv.ParseUint(rawVersion, 10, 64)
 	if err != nil {
-		return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error())
+		err := fmt.Errorf("invalid service version '%s': %v", rawVersion, err)
+		return errors.NewBadRequestError(err)
 	}

 	// Get returns "" if the header does not exist
@@ -294,7 +298,8 @@ func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r
 	rawVersion := r.URL.Query().Get("version")
 	version, err := strconv.ParseUint(rawVersion, 10, 64)
 	if err != nil {
-		return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error())
+		err := fmt.Errorf("invalid node version '%s': %v", rawVersion, err)
+		return errors.NewBadRequestError(err)
 	}

 	if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
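These hunks swap bare `fmt.Errorf` returns for `errors.NewBadRequestError`, so a malformed query parameter surfaces as HTTP 400 instead of a generic 500. A minimal sketch of that pattern, assuming a wrapper that exposes a status code through an `HTTPErrorStatusCode()` method; the names below are illustrative, not the exact docker/docker implementation:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// badRequestError is a sketch of an error wrapper that carries an HTTP status
// alongside the underlying error (illustrative stand-in for the api/errors
// helper used in the hunks above).
type badRequestError struct{ err error }

func (e badRequestError) Error() string            { return e.err.Error() }
func (e badRequestError) HTTPErrorStatusCode() int { return http.StatusBadRequest }

func newBadRequestError(err error) error { return badRequestError{err: err} }

// parseVersion mirrors the pattern in updateCluster/updateService/updateNode:
// an unparseable ?version= value is a client error, not a server error.
func parseVersion(rawVersion string) (uint64, error) {
	version, err := strconv.ParseUint(rawVersion, 10, 64)
	if err != nil {
		return 0, newBadRequestError(fmt.Errorf("invalid version '%s': %v", rawVersion, err))
	}
	return version, nil
}

// writeError shows how an HTTP layer could pick the response status: use the
// wrapper's code when present, otherwise fall back to 500.
func writeError(w http.ResponseWriter, err error) {
	status := http.StatusInternalServerError
	if coded, ok := err.(interface{ HTTPErrorStatusCode() int }); ok {
		status = coded.HTTPErrorStatusCode()
	}
	http.Error(w, err.Error(), status)
}

func main() {
	if _, err := parseVersion("not-a-number"); err != nil {
		fmt.Println(err.(interface{ HTTPErrorStatusCode() int }).HTTPErrorStatusCode(), err) // 400 ...
	}
}
```

With this mapping in place, the integration-test hunks at the bottom of this diff can assert `http.StatusBadRequest` instead of `http.StatusInternalServerError`.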
api/swagger.yaml (201 changed lines)
@@ -1066,6 +1066,8 @@ definitions:
         $ref: "#/definitions/IPAM"
       Internal:
         type: "boolean"
+      Attachable:
+        type: "boolean"
       Containers:
         type: "object"
         additionalProperties:
@@ -1093,6 +1095,7 @@ definitions:
         Options:
           foo: "bar"
         Internal: false
+        Attachable: false
         Containers:
           19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
             Name: "test"
@@ -2453,22 +2456,21 @@ paths:
         - name: "filters"
          in: "query"
          description: |
-            Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers.
+            Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters:

-            Available filters:
-
-            - `exited=<int>` containers with exit code of `<int>`
-            - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
-            - `label=key` or `label="key=value"` of a container label
-            - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
-            - `id=<ID>` a container's ID
-            - `name=<name>` a container's name
-            - `is-task=`(`true`|`false`)
             - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
             - `before`=(`<container id>` or `<container name>`)
-            - `since`=(`<container id>` or `<container name>`)
-            - `volume`=(`<volume name>` or `<mount point destination>`)
-            - `network`=(`<network id>` or `<network name>`)
+            - `exited=<int>` containers with exit code of `<int>`
+            - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
+            - `id=<ID>` a container's ID
+            - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+            - `is-task=`(`true`|`false`)
+            - `label=key` or `label="key=value"` of a container label
+            - `name=<name>` a container's name
+            - `network`=(`<network id>` or `<network name>`)
+            - `since`=(`<container id>` or `<container name>`)
+            - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
+            - `volume`=(`<volume name>` or `<mount point destination>`)
          type: "string"
       responses:
         200:
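As the description says, list-endpoint filters travel as a JSON-encoded `map[string][]string` in the `filters` query parameter. A small sketch of building that query with the standard library (the example label value is made up, and the HTTP transport/socket setup is whatever client you already use; only the encoding is the point here):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Only paused containers that carry a given label.
	filters := map[string][]string{
		"status": {"paused"},
		"label":  {"com.example.team=infra"},
	}

	raw, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}

	q := url.Values{}
	q.Set("all", "1")
	q.Set("filters", string(raw))

	// GET /containers/json?all=1&filters=%7B...%7D
	fmt.Println("/containers/json?" + q.Encode())
}
```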
@@ -4014,6 +4016,13 @@ paths:
           examples:
             application/json:
               message: "No such container: c2ada9df5af8"
+        409:
+          description: "conflict"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or use -f"
         500:
           description: "server error"
           schema:
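The new 409 response documents the case where the container is still running. A hedged sketch of how a caller might react to the documented status codes (the 204 success code and the `force` query parameter are the usual API behaviour, but treat them as assumptions outside this hunk):

```go
package main

import (
	"fmt"
	"net/http"
)

// removeOutcome interprets the status codes documented for DELETE /containers/{id}.
func removeOutcome(status int) string {
	switch status {
	case http.StatusNoContent: // success code for this endpoint (not part of this hunk)
		return "removed"
	case http.StatusNotFound:
		return "no such container"
	case http.StatusConflict:
		// Documented above: the container is running. Stop it first, or retry
		// with the force option (an assumption, not shown in this hunk).
		return "conflict: stop the container or force removal"
	default:
		return fmt.Sprintf("unexpected status %d", status)
	}
}

func main() {
	for _, s := range []int{204, 404, 409, 500} {
		fmt.Printf("%d -> %s\n", s, removeOutcome(s))
	}
}
```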
@@ -4265,14 +4274,13 @@ paths:
        - name: "filters"
          in: "query"
          description: |
-            A JSON encoded value of the filters (a `map[string][]string`) to process on the images list.
+            A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:

-            Available filters:
+            - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
             - `dangling=true`
             - `label=key` or `label="key=value"` of an image label
-            - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
-            - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
             - `reference`=(`<image-name>[:<tag>]`)
+            - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
          type: "string"
        - name: "digests"
          in: "query"
@@ -4846,9 +4854,9 @@ paths:
          description: |
            A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:

-            - `stars=<number>`
             - `is-automated=(true|false)`
             - `is-official=(true|false)`
+            - `stars=<number>` Matches images that has at least 'number' stars.
          type: "string"
       tags: ["Image"]
   /images/prune:
@@ -4861,9 +4869,8 @@ paths:
        - name: "filters"
          in: "query"
          description: |
-            Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+            Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:

-            Available filters:
             - `dangling=<boolean>` When set to `true` (or `1`), prune only
                unused *and* untagged images. When set to `false`
                (or `0`), all unused images are pruned.
@@ -5275,7 +5282,7 @@ paths:

         Various objects within Docker report events when something happens to them.

-        Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update`
+        Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update`

         Images report these events: `delete, import, load, pull, push, save, tag, untag`

@@ -5329,6 +5336,10 @@ paths:
               image: "alpine"
               name: "my-container"
               time: 1461943101
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
         500:
           description: "server error"
           schema:
@@ -5348,13 +5359,14 @@ paths:
            A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:

             - `container=<string>` container name or ID
+            - `daemon=<string>` daemon name or ID
             - `event=<string>` event type
             - `image=<string>` image name or ID
             - `label=<string>` image or container label
+            - `network=<string>` network name or ID
+            - `plugin`=<string> plugin name or ID
             - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon`
             - `volume=<string>` volume name or ID
-            - `network=<string>` network name or ID
-            - `daemon=<string>` daemon name or ID
          type: "string"
       tags: ["System"]
   /system/df:
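The events endpoint streams JSON objects, and its filters are passed the same way as for the list endpoints; `health_status` is among the newly documented container events above. A sketch of tailing container events over the daemon's default Unix socket with only the standard library (the socket path, the dummy `docker` host, and the loosely typed message are assumptions for illustration):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

func main() {
	// Talk to the local daemon over its default Unix socket (adjust for your
	// environment). The "docker" host below is a placeholder; the dialer
	// ignores it and always connects to the socket.
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}
	client := &http.Client{Transport: tr}

	filters, _ := json.Marshal(map[string][]string{
		"type":  {"container"},
		"event": {"start", "stop", "die"},
	})
	u := "http://docker/events?filters=" + url.QueryEscape(string(filters))

	resp, err := client.Get(u)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The stream is a sequence of JSON documents; decode them one by one.
	dec := json.NewDecoder(resp.Body)
	for {
		var msg map[string]interface{}
		if err := dec.Decode(&msg); err != nil {
			break
		}
		fmt.Println(msg["Type"], msg["Action"], msg["Actor"])
	}
}
```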
@@ -5828,13 +5840,14 @@ paths:
            JSON encoded value of the filters (a `map[string][]string`) to
            process on the volumes list. Available filters:

-            - `name=<volume-name>` Matches all or part of a volume name.
             - `dangling=<boolean>` When set to `true` (or `1`), returns all
                volumes that are not in use by a container. When set to `false`
                (or `0`), only volumes that are in use by one or more
                containers are returned.
-            - `driver=<volume-driver-name>` Matches all or part of a volume
-              driver name.
+            - `driver=<volume-driver-name>` Matches volumes based on their driver.
+            - `label=<key>` or `label=<key>:<value>` Matches volumes based on
+              the presence of a `label` alone or a `label` and a value.
+            - `name=<volume-name>` Matches all or part of a volume name.
          type: "string"
          format: "json"
       tags: ["Volume"]
@@ -6002,6 +6015,7 @@ paths:
              Driver: "bridge"
              EnableIPv6: false
              Internal: false
+              Attachable: false
              IPAM:
                Driver: "default"
                Config:
@@ -6027,6 +6041,7 @@ paths:
              Driver: "null"
              EnableIPv6: false
              Internal: false
+              Attachable: false
              IPAM:
                Driver: "default"
                Config: []
@@ -6039,6 +6054,7 @@ paths:
              Driver: "host"
              EnableIPv6: false
              Internal: false
+              Attachable: false
              IPAM:
                Driver: "default"
                Config: []
@@ -6163,6 +6179,9 @@ paths:
              Internal:
                description: "Restrict external access to the network."
                type: "boolean"
+              Attachable:
+                description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode."
+                type: "boolean"
              IPAM:
                description: "Optional custom IP scheme for the network."
                $ref: "#/definitions/IPAM"
@@ -6195,6 +6214,7 @@ paths:
            Options:
              foo: "bar"
            Internal: true
+            Attachable: false
            Options:
              com.docker.network.bridge.default_bridge: "true"
              com.docker.network.bridge.enable_icc: "true"
@@ -6557,6 +6577,10 @@ paths:
      responses:
        200:
          description: "no error"
+        404:
+          description: "plugin is not installed"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
        500:
          description: "server error"
          schema:
@@ -6580,6 +6604,10 @@ paths:
      responses:
        200:
          description: "no error"
+        404:
+          description: "plugin is not installed"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
        500:
          description: "server error"
          schema:
@@ -6746,10 +6774,22 @@ paths:
              type: "array"
              items:
                $ref: "#/definitions/Node"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such node"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
        500:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "filters"
          in: "query"
@@ -6781,6 +6821,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -6802,6 +6846,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -6829,6 +6877,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -6893,10 +6945,18 @@ paths:
              UpdatedAt: "2016-08-15T16:32:09.623207604Z"
              Version:
                Index: 51
+        404:
+          description: "no such swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
        500:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      tags: ["Swarm"]
  /swarm/init:
    post:
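The swarm inspect example above ends with a `Version.Index` field, and that index is exactly what the update endpoints expect in their `version` query parameter (it is what the `strconv.ParseUint` hunks at the top of this diff parse). A sketch of the read-modify-write cycle with the HTTP plumbing left out; the JSON input below is made-up sample data, only `Index: 51` is taken from the example in this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

// swarmInfo models just enough of the GET /swarm response to read the
// version index shown in the example above.
type swarmInfo struct {
	ID      string
	Version struct {
		Index uint64
	}
	Spec json.RawMessage // the spec is sent back, possibly modified, on update
}

func main() {
	// Pretend this came from GET /swarm.
	body := []byte(`{"ID":"example","Version":{"Index":51},"Spec":{"Name":"default"}}`)

	var info swarmInfo
	if err := json.Unmarshal(body, &info); err != nil {
		panic(err)
	}

	// POST /swarm/update?version=<current index>; the router parses this with
	// strconv.ParseUint and rejects anything else as a bad request (400).
	q := url.Values{}
	q.Set("version", fmt.Sprintf("%d", info.Version.Index))
	q.Set("rotateWorkerToken", "false")

	fmt.Println("/swarm/update?" + q.Encode())
}
```

Sending a stale index is how concurrent updates get detected, which is why the parameter is mandatory rather than defaulted.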
@@ -6916,14 +6976,14 @@ paths:
          description: "bad parameter"
          schema:
            $ref: "#/definitions/ErrorResponse"
-        406:
-          description: "node is already part of a swarm"
-          schema:
-            $ref: "#/definitions/ErrorResponse"
        500:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is already part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "body"
          in: "body"
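A recurring change in this file: swarm-scoped endpoints now document 503 for "node is not part of a swarm", and `/swarm/init` uses 503 rather than 406 for "node is already part of a swarm". A client-side sketch of mapping those statuses to readable errors; the status set comes from these hunks, everything else is an assumption:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

var (
	errNotInSwarm     = errors.New("this node is not part of a swarm")
	errAlreadyInSwarm = errors.New("this node is already part of a swarm")
)

// checkSwarmStatus translates the status codes documented for the swarm
// endpoints in this diff into sentinel errors a CLI could print directly.
func checkSwarmStatus(endpoint string, status int) error {
	switch status {
	case http.StatusOK:
		return nil
	case http.StatusBadRequest:
		return errors.New("bad parameter")
	case http.StatusServiceUnavailable:
		if endpoint == "/swarm/init" {
			return errAlreadyInSwarm
		}
		return errNotInSwarm
	case http.StatusInternalServerError:
		return errors.New("server error")
	default:
		return fmt.Errorf("unexpected status %d from %s", status, endpoint)
	}
}

func main() {
	fmt.Println(checkSwarmStatus("/swarm/init", 503))
	fmt.Println(checkSwarmStatus("/services/create", 503))
}
```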
@@ -7089,6 +7149,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      tags: ["Swarm"]
  /swarm/unlock:
    post:
@@ -7117,6 +7181,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      tags: ["Swarm"]
  /services:
    get:
@@ -7133,6 +7201,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "filters"
          in: "query"
@@ -7141,8 +7213,8 @@ paths:
            A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters:

            - `id=<service id>`
-            - `name=<service name>`
            - `label=<service label>`
+            - `name=<service name>`
      tags: ["Service"]
  /services/create:
    post:
@@ -7167,6 +7239,10 @@ paths:
            example:
              ID: "ak7w3gjqoa3kuz8xcpnyy0pvl"
              Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
        403:
          description: "network is not eligible for services"
          schema:
@@ -7180,7 +7256,7 @@ paths:
          schema:
            $ref: "#/definitions/ErrorResponse"
        503:
-          description: "server error or node is not part of a swarm"
+          description: "node is not part of a swarm"
          schema:
            $ref: "#/definitions/ErrorResponse"
      parameters:
@@ -7262,6 +7338,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -7283,6 +7363,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -7301,6 +7385,10 @@ paths:
          description: "no error"
          schema:
            $ref: "#/definitions/ImageDeleteResponse"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
        404:
          description: "no such service"
          schema:
@@ -7309,6 +7397,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -7397,6 +7489,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -7566,11 +7662,14 @@ paths:
              Gateway: "10.255.0.1"
              Addresses:
                - "10.255.0.5/16"
-
        500:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "filters"
          in: "query"
@@ -7578,12 +7677,12 @@ paths:
          description: |
            A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters:

-            - `id=<task id>`
-            - `name=<task name>`
-            - `service=<service name>`
-            - `node=<node id or name>`
-            - `label=key` or `label="key=value"`
            - `desired-state=(running | shutdown | accepted)`
+            - `id=<task id>`
+            - `label=key` or `label="key=value"`
+            - `name=<task name>`
+            - `node=<node id or name>`
+            - `service=<service name>`
      tags: ["Task"]
  /tasks/{id}:
    get:
@@ -7604,6 +7703,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -7636,6 +7739,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "filters"
          in: "query"
@@ -7664,10 +7771,6 @@ paths:
              type: "string"
            example:
              ID: "ktnbjxoalbkvbvedmg1urrz8h"
-        406:
-          description: "server error or node is not part of a swarm"
-          schema:
-            $ref: "#/definitions/ErrorResponse"
        409:
          description: "name conflicts with an existing object"
          schema:
@@ -7676,6 +7779,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "body"
          in: "body"
@@ -7712,14 +7819,14 @@ paths:
          description: "secret not found"
          schema:
            $ref: "#/definitions/ErrorResponse"
-        406:
-          description: "node is not part of a swarm"
-          schema:
-            $ref: "#/definitions/ErrorResponse"
        500:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -7743,6 +7850,10 @@ paths:
          description: "server error"
          schema:
            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
      parameters:
        - name: "id"
          in: "path"
@@ -419,7 +419,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {

 	if err := validateAndSanitizeInitRequest(&req); err != nil {
 		c.Unlock()
-		return "", err
+		return "", apierrors.NewBadRequestError(err)
 	}

 	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
@@ -506,7 +506,7 @@ func (c *Cluster) Join(req types.JoinRequest) error {
 	}
 	if err := validateAndSanitizeJoinRequest(&req); err != nil {
 		c.Unlock()
-		return err
+		return apierrors.NewBadRequestError(err)
 	}

 	listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
@@ -803,7 +803,7 @@ func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlag
 	// will be used to swarmkit.
 	clusterSpec, err := convert.SwarmSpecToGRPC(spec)
 	if err != nil {
-		return err
+		return apierrors.NewBadRequestError(err)
 	}

 	_, err = c.client.UpdateCluster(
@@ -1085,7 +1085,7 @@ func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apity

 	serviceSpec, err := convert.ServiceSpecToGRPC(s)
 	if err != nil {
-		return nil, err
+		return nil, apierrors.NewBadRequestError(err)
 	}

 	ctnr := serviceSpec.Task.GetContainer()
@@ -1168,7 +1168,7 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec typ

 	serviceSpec, err := convert.ServiceSpecToGRPC(spec)
 	if err != nil {
-		return nil, err
+		return nil, apierrors.NewBadRequestError(err)
 	}

 	currentService, err := getService(ctx, c.client, serviceIDOrName)
@@ -1383,7 +1383,7 @@ func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, erro
 	return nodes, nil
 }

-// GetNode returns a node based on an ID or name.
+// GetNode returns a node based on an ID.
 func (c *Cluster) GetNode(input string) (types.Node, error) {
 	c.RLock()
 	defer c.RUnlock()
@@ -1413,7 +1413,7 @@ func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec)

 	nodeSpec, err := convert.NodeSpecToGRPC(spec)
 	if err != nil {
-		return err
+		return apierrors.NewBadRequestError(err)
 	}

 	ctx, cancel := c.getRequestContext()
@@ -3,6 +3,7 @@ package cluster
 import (
 	"fmt"

+	"github.com/docker/docker/api/errors"
 	swarmapi "github.com/docker/swarmkit/api"
 	"golang.org/x/net/context"
 )
@@ -14,7 +15,7 @@ func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster,
 	}

 	if len(rl.Clusters) == 0 {
-		return nil, fmt.Errorf("swarm not found")
+		return nil, errors.NewRequestNotFoundError(ErrNoSwarm)
 	}

 	// TODO: assume one cluster only
@@ -38,7 +39,8 @@ func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swar
 	}

 	if len(rl.Nodes) == 0 {
-		return nil, fmt.Errorf("node %s not found", input)
+		err := fmt.Errorf("node %s not found", input)
+		return nil, errors.NewRequestNotFoundError(err)
 	}

 	if l := len(rl.Nodes); l > 1 {
@@ -66,7 +68,8 @@ func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*s
 	}

 	if len(rl.Services) == 0 {
-		return nil, fmt.Errorf("service %s not found", input)
+		err := fmt.Errorf("service %s not found", input)
+		return nil, errors.NewRequestNotFoundError(err)
 	}

 	if l := len(rl.Services); l > 1 {
@@ -95,7 +98,8 @@ func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swar
 	}

 	if len(rl.Tasks) == 0 {
-		return nil, fmt.Errorf("task %s not found", input)
+		err := fmt.Errorf("task %s not found", input)
+		return nil, errors.NewRequestNotFoundError(err)
 	}

 	if l := len(rl.Tasks); l > 1 {
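getSwarm, getNode, getService, and getTask all share a shape: look the input up, return a typed not-found error when nothing matches (now wrapped so the API answers 404), and, per the surrounding context lines, refuse ambiguous lookups that match more than one object. A condensed sketch of that shape, with made-up types standing in for the swarmkit responses:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// notFoundError marks an error that an HTTP layer could map to 404, in the
// spirit of errors.NewRequestNotFoundError above (illustrative name only).
type notFoundError struct{ msg string }

func (e notFoundError) Error() string { return e.msg }

// resolveNode mimics the shape of getNode: resolve the input as an ID prefix
// or name, fail with a typed not-found error on zero matches, and reject
// prefixes that match more than one object.
func resolveNode(input string, nodes []string) (string, error) {
	var matches []string
	for _, n := range nodes {
		if strings.HasPrefix(n, input) {
			matches = append(matches, n)
		}
	}
	switch len(matches) {
	case 0:
		return "", notFoundError{msg: fmt.Sprintf("node %s not found", input)}
	case 1:
		return matches[0], nil
	default:
		return "", fmt.Errorf("node %s is ambiguous (%d matches found)", input, len(matches))
	}
}

func main() {
	nodes := []string{"node-1", "node-2", "worker-a"}
	_, err := resolveNode("missing", nodes)
	var nf notFoundError
	fmt.Println(errors.As(err, &nf), err) // true node missing not found
}
```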
@@ -2632,6 +2632,7 @@ Docker daemon report the following event:
 **Status codes**:

 - **200** – no error
+- **400** - bad parameter
 - **500** – server error

 #### Get a tarball containing all images in a repository
@@ -3784,6 +3785,7 @@ Content-Type: text/plain; charset=utf-8
 **Status codes**:

 - **200** - no error
+- **404** - plugin not installed
 - **500** - plugin is already enabled

 #### Disable a plugin
@@ -3812,6 +3814,7 @@ Content-Type: text/plain; charset=utf-8
 **Status codes**:

 - **200** - no error
+- **404** - plugin not installed
 - **500** - plugin is already disabled

 #### Remove a plugin
@@ -48,7 +48,7 @@ Docker networks report the following events:

     create, connect, disconnect, destroy

-Docker daemon report the following events:
+Docker daemon reports the following events:

     reload

@@ -82,14 +82,14 @@ container container 588a23dac085 *AND* the event type is *start*
 The currently supported filters are:

 * container (`container=<name or id>`)
+* daemon (`daemon=<name or id>`)
 * event (`event=<event action>`)
 * image (`image=<tag or id>`)
-* plugin (experimental) (`plugin=<name or id>`)
 * label (`label=<key>` or `label=<key>=<value>`)
+* network (`network=<name or id>`)
+* plugin (`plugin=<name or id>`)
 * type (`type=<container or image or volume or network or daemon>`)
 * volume (`volume=<name or id>`)
-* network (`network=<name or id>`)
-* daemon (`daemon=<name or id>`)

 ## Format

@@ -111,91 +111,108 @@ You'll need two shells for this example.

 **Shell 2: Start and Stop containers:**

-    $ docker start 4386fb97867d
-    $ docker stop 4386fb97867d
-    $ docker stop 7805c1d35632
+    $ docker create --name test alpine:latest top
+    $ docker start test
+    $ docker stop test

 **Shell 1: (Again .. now showing events):**

-    2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
-    2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+    2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+    2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)

 **Show events in the past from a specified time:**

-    $ docker events --since 1378216169
-    2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
-    2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    $ docker events --since 1483283804
+    2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+    2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+    2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+    2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+    2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)

-    $ docker events --since '2013-09-03'
-    2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
-    2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    $ docker events --since '2017-01-05'
+    2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+    2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+    2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+    2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+    2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)

     $ docker events --since '2013-09-03T15:49:29'
-    2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
-    2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+    2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+    2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+    2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+    2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)

 This example outputs all events that were generated in the last 3 minutes,
 relative to the current time on the client machine:

-    $ docker events --since '3m'
-    2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
-    2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    $ docker events --since '10m'
+    2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+    2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+    2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+    2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+    2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+    2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)

 **Filter events:**

     $ docker events --filter 'event=stop'
-    2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain)

-    $ docker events --filter 'image=ubuntu-1:14.04'
-    2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04)
-    2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
+    $ docker events --filter 'image=alpine'
+    2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner)
+    2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+    2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15)
+    2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9)
+    2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner)
+    2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner)

-    $ docker events --filter 'container=7805c1d35632'
-    2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
-    2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8)
+    $ docker events --filter 'container=test'
+    2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+    2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+    2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)

-    $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d'
-    2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
-    2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    $ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8'
+    2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+    2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+    2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9)
+    2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test)
+    2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test)

-    $ docker events --filter 'container=7805c1d35632' --filter 'event=stop'
-    2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
-
-    $ docker events --filter 'container=container_1' --filter 'container=container_2'
-    2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
-    2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
-    2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8)
-    2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+    $ docker events --filter 'container=test' --filter 'event=stop'
+    2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test)

     $ docker events --filter 'type=volume'
     2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
-    2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate)
-    2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local)
+    2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate)
+    2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local)
     2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)

     $ docker events --filter 'type=network'
-    2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge)
-    2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge)
+    2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge)
+    2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge)

     $ docker events --filter 'type=plugin' (experimental)
-    2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
-    2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
+    2016-07-25T17:30:14.825557616Z plugin pull ec7b...993f (name=tiborvass/sample-volume-plugin:latest)
+    2016-07-25T17:30:14.888127370Z plugin enable ec7b...993f (name=tiborvass/sample-volume-plugin:latest)

 **Format:**

@@ -23,17 +23,20 @@ List containers
 Options:
   -a, --all             Show all containers (default shows just running)
   -f, --filter value    Filter output based on conditions provided (default [])
-                        - exited=<int> an exit code of <int>
-                        - label=<key> or label=<key>=<value>
-                        - status=(created|restarting|removing|running|paused|exited)
-                        - name=<string> a container's name
-                        - id=<ID> a container's ID
-                        - before=(<container-name>|<container-id>)
-                        - since=(<container-name>|<container-id>)
                         - ancestor=(<image-name>[:tag]|<image-id>|<image@digest>)
                           containers created from an image or a descendant.
-                        - is-task=(true|false)
+                        - before=(<container-name>|<container-id>)
+                        - exited=<int> an exit code of <int>
+                        - health=(starting|healthy|unhealthy|none)
+                        - id=<ID> a container's ID
+                        - isolation=(`default`|`process`|`hyperv`) (Windows daemon only)
+                        - is-task=(true|false)
+                        - label=<key> or label=<key>=<value>
+                        - name=<string> a container's name
+                        - network=(<network-id>|<network-name>)
+                        - since=(<container-name>|<container-id>)
+                        - status=(created|restarting|removing|running|paused|exited)
                         - volume=(<volume name>|<mount point destination>)
       --format string   Pretty-print containers using a Go template
       --help            Print usage
   -n, --last int        Show n last created containers (includes all states) (default -1)
@@ -966,7 +966,7 @@ func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
 	}
 	status, _, err := d.SockRequest("POST", "/swarm/init", req)
 	c.Assert(err, checker.IsNil)
-	c.Assert(status, checker.Equals, http.StatusInternalServerError)
+	c.Assert(status, checker.Equals, http.StatusBadRequest)

 	req2 := swarm.JoinRequest{
 		ListenAddr: "0.0.0.0:2377",
@@ -974,7 +974,7 @@ func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
 	}
 	status, _, err = d.SockRequest("POST", "/swarm/join", req2)
 	c.Assert(err, checker.IsNil)
-	c.Assert(status, checker.Equals, http.StatusInternalServerError)
+	c.Assert(status, checker.Equals, http.StatusBadRequest)
 }

 func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {