Compare commits

34 commits:

89658bed64, d1c9e9cfe9, c4bd13b650, 90d35abf7b, 947cc7b591, e11e119022,
a4e352ccb0, 8c532a6822, 9986762bc7, 26d3cebaba, bcbfaec7c6, a3600db8c5,
fa4b810b41, c57fdb2a14, eaffbcb25d, 99ad8af0f2, 8414a0d02e, df996a9581,
aea00bf675, 7e06704af9, d17b69c5ae, 2d8ebec4a8, de8fbb4812, dbd2c592df,
4c29313d4c, 2c09664574, caafc0d5aa, 2f24f3a90b, d1ab56925b, 13eac920b4,
b569b8674c, 2878a859b5, a631e3b88a, 29368ac533

38 changed files with 493 additions and 165 deletions
CHANGELOG.md (86 changes)

@@ -5,6 +5,92 @@ information on the list of deprecated flags and APIs please have a look at
https://docs.docker.com/engine/deprecated/ where target removal dates can also
be found.

## 17.05.0-ce (2017-05-04)

### Builder

+ Add multi-stage build support [#31257](https://github.com/docker/docker/pull/31257) [#32063](https://github.com/docker/docker/pull/32063)
+ Allow using build-time args (`ARG`) in `FROM` [#31352](https://github.com/docker/docker/pull/31352)
+ Add an option for specifying build target [#32496](https://github.com/docker/docker/pull/32496)
* Accept `-f -` to read Dockerfile from `stdin`, but use local context for building [#31236](https://github.com/docker/docker/pull/31236)
* The values of default build-time arguments (e.g. `HTTP_PROXY`) are no longer displayed in docker image history unless a corresponding `ARG` instruction is written in the Dockerfile. [#31584](https://github.com/docker/docker/pull/31584)
- Fix setting command if a custom shell is used in a parent image [#32236](https://github.com/docker/docker/pull/32236)
- Fix `docker build --label` when the label includes single quotes and a space [#31750](https://github.com/docker/docker/pull/31750)

### Client

* Add `--mount` flag to `docker run` and `docker create` [#32251](https://github.com/docker/docker/pull/32251)
* Add `--type=secret` to `docker inspect` [#32124](https://github.com/docker/docker/pull/32124)
* Add `--format` option to `docker secret ls` [#31552](https://github.com/docker/docker/pull/31552)
* Add `--filter` option to `docker secret ls` [#30810](https://github.com/docker/docker/pull/30810)
* Add `--filter scope=<swarm|local>` to `docker network ls` [#31529](https://github.com/docker/docker/pull/31529)
* Add `--cpus` support to `docker update` [#31148](https://github.com/docker/docker/pull/31148)
* Add label filter to `docker system prune` and other `prune` commands [#30740](https://github.com/docker/docker/pull/30740)
* `docker stack rm` now accepts multiple stacks as input [#32110](https://github.com/docker/docker/pull/32110)
* Improve `docker version --format` option when the client has downgraded the API version [#31022](https://github.com/docker/docker/pull/31022)
* Prompt when using an encrypted client certificate to connect to a docker daemon [#31364](https://github.com/docker/docker/pull/31364)
* Display created tags on successful `docker build` [#32077](https://github.com/docker/docker/pull/32077)
* Clean up compose convert error messages [#32087](https://github.com/moby/moby/pull/32087)

### Contrib

+ Add support for building docker debs for Ubuntu 17.04 Zesty on amd64 [#32435](https://github.com/docker/docker/pull/32435)

### Daemon

- Fix `--api-cors-header` being ignored if `--api-enable-cors` is not set [#32174](https://github.com/docker/docker/pull/32174)
- Clean up docker tmp dir on start [#31741](https://github.com/docker/docker/pull/31741)
- Deprecate `--graph` flag in favor of `--data-root` [#28696](https://github.com/docker/docker/pull/28696)

### Logging

+ Add support for logging driver plugins [#28403](https://github.com/docker/docker/pull/28403)
* Add support for showing logs of individual tasks to `docker service logs`, and add `/task/{id}/logs` REST endpoint [#32015](https://github.com/docker/docker/pull/32015)
* Add `--log-opt env-regex` option to match environment variables using a regular expression [#27565](https://github.com/docker/docker/pull/27565)

### Networking

+ Allow users to replace and customize the ingress network [#31714](https://github.com/docker/docker/pull/31714)
- Fix UDP traffic in containers not working after the container is restarted [#32505](https://github.com/docker/docker/pull/32505)
- Fix files being written to `/var/lib/docker` if a different data-root is set [#32505](https://github.com/docker/docker/pull/32505)

### Runtime

- Ensure health probe is stopped when a container exits [#32274](https://github.com/docker/docker/pull/32274)

### Swarm Mode

+ Add update/rollback order for services (`--update-order` / `--rollback-order`) [#30261](https://github.com/docker/docker/pull/30261)
+ Add support for synchronous `service create` and `service update` [#31144](https://github.com/docker/docker/pull/31144)
+ Add support for "grace periods" on healthchecks through the `HEALTHCHECK --start-period` and `--health-start-period` flag to
  `docker service create`, `docker service update`, `docker create`, and `docker run` to support containers with an initial startup
  time [#28938](https://github.com/docker/docker/pull/28938) (see the sketch after this diff)
* `docker service create` now omits fields that are not specified by the user, when possible. This will allow defaults to be applied inside the manager [#32284](https://github.com/docker/docker/pull/32284)
* `docker service inspect` now shows default values for fields that are not specified by the user [#32284](https://github.com/docker/docker/pull/32284)
* Move `docker service logs` out of experimental [#32462](https://github.com/docker/docker/pull/32462)
* Add support for Credential Spec and SELinux to services to the API [#32339](https://github.com/docker/docker/pull/32339)
* Add `--entrypoint` flag to `docker service create` and `docker service update` [#29228](https://github.com/docker/docker/pull/29228)
* Add `--network-add` and `--network-rm` to `docker service update` [#32062](https://github.com/docker/docker/pull/32062)
* Add `--credential-spec` flag to `docker service create` and `docker service update` [#32339](https://github.com/docker/docker/pull/32339)
* Add `--filter mode=<global|replicated>` to `docker service ls` [#31538](https://github.com/docker/docker/pull/31538)
* Resolve network IDs on the client side, instead of in the daemon, when creating services [#32062](https://github.com/docker/docker/pull/32062)
* Add `--format` option to `docker node ls` [#30424](https://github.com/docker/docker/pull/30424)
* Add `--prune` option to `docker stack deploy` to remove services that are no longer defined in the docker-compose file [#31302](https://github.com/docker/docker/pull/31302)
* Add `PORTS` column for `docker service ls` when using `ingress` mode [#30813](https://github.com/docker/docker/pull/30813)
- Fix unnecessary re-deploying of tasks when environment variables are used [#32364](https://github.com/docker/docker/pull/32364)
- Fix `docker stack deploy` not supporting `endpoint_mode` when deploying from a docker compose file [#32333](https://github.com/docker/docker/pull/32333)
- Proceed with startup if the cluster component cannot be created, to allow recovering from a broken swarm setup [#31631](https://github.com/docker/docker/pull/31631)

### Security

* Allow setting SELinux type or MCS labels when using `--ipc=container:` or `--ipc=host` [#30652](https://github.com/docker/docker/pull/30652)

### Deprecation

- Deprecate `--api-enable-cors` daemon flag. This flag was marked deprecated in Docker 1.6.0 but not listed in deprecated features [#32352](https://github.com/docker/docker/pull/32352)
- Remove Ubuntu 12.04 (Precise Pangolin) as a supported platform. Ubuntu 12.04 is EOL, and no longer receives updates [#32520](https://github.com/docker/docker/pull/32520)

## 17.04.0-ce (2017-04-05)

### Builder
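The `HEALTHCHECK --start-period` entry above corresponds to a new `StartPeriod` field on the API's `HealthConfig` type. A minimal sketch of setting it through the Go API types in `github.com/docker/docker/api/types/container` (the image name and durations are illustrative assumptions, not values from this changelog):

```go
package main

import (
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Probe the container every 10s, allow 5s per probe, retry 3 times,
	// and ignore failing probes during the first 30s of startup.
	cfg := &container.Config{
		Image: "nginx:alpine",
		Healthcheck: &container.HealthConfig{
			Test:        []string{"CMD-SHELL", "wget -q -O /dev/null http://localhost/ || exit 1"},
			Interval:    10 * time.Second,
			Timeout:     5 * time.Second,
			Retries:     3,
			StartPeriod: 30 * time.Second, // new in API v1.29 / Docker 17.05
		},
	}
	_ = cfg
}
```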
VERSION (2 changes)

@@ -1 +1 @@
-17.05.0-dev
+17.05.0-ce
@@ -377,7 +377,4 @@ type HostConfig struct {

 	// Run a custom init inside the container, if null, use the daemon's configured settings
 	Init *bool `json:",omitempty"`
-
-	// Custom init path
-	InitPath string `json:",omitempty"`
 }
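For context, the surviving `Init` field is consumed at container-create time. A hedged sketch using the Go client of this era (the `ContainerCreate` signature shown is assumed to be the 17.05-era one; check your vendored client version):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	useInit := true // nil would mean "use the daemon's configured default"
	resp, err := cli.ContainerCreate(context.Background(),
		&container.Config{Image: "busybox", Cmd: []string{"top"}},
		&container.HostConfig{Init: &useInit},
		nil, // networking config
		"",  // let the daemon pick a name
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", resp.ID)
}
```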
@@ -408,9 +408,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
 	// that starts with "foo=abc" to be considered part of a build-time env var.
 	saveCmd := config.Cmd
 	if len(cmdBuildEnv) > 0 {
-		sort.Strings(cmdBuildEnv)
-		tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
-		saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...))
+		saveCmd = prependEnvOnCmd(b.buildArgs, cmdBuildEnv, saveCmd)
 	}

 	b.runConfig.Cmd = saveCmd

@@ -445,26 +443,24 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
 	// properly match it.
 	b.runConfig.Env = env

-	// remove builtinAllowedBuildArgs (see: builder.go) from the saveCmd
-	// these args are transparent so resulting image should be the same regardless of the value
-	if len(cmdBuildEnv) > 0 {
-		saveCmd = config.Cmd
-		tmpBuildEnv := make([]string, len(cmdBuildEnv))
-		copy(tmpBuildEnv, cmdBuildEnv)
-		for i, env := range tmpBuildEnv {
-			key := strings.SplitN(env, "=", 2)[0]
-			if b.buildArgs.IsUnreferencedBuiltin(key) {
-				tmpBuildEnv = append(tmpBuildEnv[:i], tmpBuildEnv[i+1:]...)
-			}
-		}
-		sort.Strings(tmpBuildEnv)
-		tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
-		saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...))
-	}
-	b.runConfig.Cmd = saveCmd
 	return b.commit(cID, cmd, "run")
 }

+func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice {
+	var tmpBuildEnv []string
+	for _, env := range buildArgVars {
+		key := strings.SplitN(env, "=", 2)[0]
+		if !buildArgs.IsUnreferencedBuiltin(key) {
+			tmpBuildEnv = append(tmpBuildEnv, env)
+		}
+	}
+
+	sort.Strings(tmpBuildEnv)
+	tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
+	return strslice.StrSlice(append(tmpEnv, cmd...))
+}
+
 // CMD foo
 //
 // Set the default command to run in the container (which may be empty).
@@ -118,7 +118,6 @@ type containerOptions struct {
 	runtime    string
 	autoRemove bool
 	init       bool
-	initPath   string

 	Image string
 	Args  []string

@@ -284,8 +283,6 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {

 	flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes")
 	flags.SetAnnotation("init", "version", []string{"1.25"})
-	flags.StringVar(&copts.initPath, "init-path", "", "Path to the docker-init binary")
-	flags.SetAnnotation("init-path", "version", []string{"1.25"})
 	return copts
 }
@@ -10,7 +10,7 @@ import (

 // ImageTag tags an image in the docker host
 func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
-	if _, err := reference.ParseNormalizedNamed(source); err != nil {
+	if _, err := reference.ParseAnyReference(source); err != nil {
 		return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
 	}
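The change swaps `ParseNormalizedNamed` for `ParseAnyReference` so a bare image ID can be used as the tag source. A quick sketch of the difference, using `github.com/docker/distribution/reference` (the package this client vendors); the ID is the one from the test below:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	id := "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d"

	// Rejects a bare 64-hex image ID: it is not a valid repository name.
	if _, err := reference.ParseNormalizedNamed(id); err != nil {
		fmt.Println("ParseNormalizedNamed:", err)
	}

	// Accepts it as a digest-style reference, which is what ImageTag needs.
	if ref, err := reference.ParseAnyReference(id); err == nil {
		fmt.Println("ParseAnyReference:", ref.String())
	}
}
```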
@@ -46,6 +46,17 @@ func TestImageTagInvalidSourceImageName(t *testing.T) {
 	}
 }

+func TestImageTagHexSource(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusOK, "OK")),
+	}
+
+	err := client.ImageTag(context.Background(), "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "repo:tag")
+	if err != nil {
+		t.Fatalf("got error: %v", err)
+	}
+}
+
 func TestImageTag(t *testing.T) {
 	expectedURL := "/images/image_id/tag"
 	tagCases := []struct {
@@ -463,6 +463,16 @@ __docker_complete_services() {
 	COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") )
 }

+# __docker_tasks returns a list of all task IDs.
+__docker_tasks() {
+	__docker_q service ps --format '{{.ID}}' ""
+}
+
+# __docker_complete_services_and_tasks applies completion of services and task IDs.
+__docker_complete_services_and_tasks() {
+	COMPREPLY=( $(compgen -W "$(__docker_services "$@") $(__docker_tasks)" -- "$cur") )
+}
+
 # __docker_append_to_completions appends the word passed as an argument to every
 # word in `$COMPREPLY`.
 # Normally you do this with `compgen -S` while generating the completions.

@@ -1502,7 +1512,6 @@ _docker_container_run_and_create() {
 			--expose
 			--group-add
 			--hostname -h
-			--init-path
 			--ip
 			--ip6
 			--ipc

@@ -1626,7 +1635,7 @@ _docker_container_run_and_create() {
 			__docker_complete_capabilities_droppable
 			return
 			;;
-		--cidfile|--env-file|--init-path|--label-file)
+		--cidfile|--env-file|--label-file)
 			_filedir
 			return
 			;;

@@ -2258,7 +2267,7 @@ _docker_image_build() {
 			__docker_complete_containers_all --cur "${cur#*:}"
 			;;
 		*)
-			COMPREPLY=( $( compgen -W "$(__docker_plugins --type Network) $(__docker_networks) container:" -- "$cur") )
+			COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") )
			if [ "${COMPREPLY[*]}" = "container:" ] ; then
 				__docker_nospace
 			fi

@@ -2830,13 +2839,13 @@ _docker_service() {
 	local subcommands="
 		create
 		inspect
+		logs
 		ls
 		rm
 		scale
 		ps
 		update
 	"
-	__docker_daemon_is_experimental && subcommands+="logs"

 	local aliases="
 		list

@@ -2888,7 +2897,7 @@ _docker_service_logs() {
 		*)
 			local counter=$(__docker_pos_first_nonflag '--since|--tail')
 			if [ $cword -eq $counter ]; then
-				__docker_complete_services
+				__docker_complete_services_and_tasks
 			fi
 			;;
 	esac
@@ -214,6 +214,8 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
 		}
 	}

+	label.Relabel(localMountPath, c.MountLabel, false)
+
 	// remount secrets ro
 	if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil {
 		return errors.Wrap(err, "unable to remount secret dir as readonly")
@@ -624,7 +624,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
 		(c.HostConfig.Init == nil && daemon.configStore.Init) {
 		s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...)
 		var path string
-		if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" {
+		if daemon.configStore.InitPath == "" {
 			path, err = exec.LookPath(daemonconfig.DefaultInitBinary)
 			if err != nil {
 				return err

@@ -633,9 +633,6 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
 		if daemon.configStore.InitPath != "" {
 			path = daemon.configStore.InitPath
 		}
-		if c.HostConfig.InitPath != "" {
-			path = c.HostConfig.InitPath
-		}
 		s.Mounts = append(s.Mounts, specs.Mount{
 			Destination: "/dev/init",
 			Type:        "bind",
@@ -23,6 +23,7 @@ keywords: "API, Docker, rcli, REST, documentation"
* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
* `POST /containers/create`, `POST /services/create` and `POST /services/(id or name)/update` now take the field `StartPeriod` as a part of the `HealthConfig`, allowing specification of a period during which the container should not be considered unhealthy even if health checks do not pass.
* `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output.

## v1.28 API changes
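The `insertDefaults` query parameter surfaces in the Go client as `types.ServiceInspectOptions`. A sketch, assuming the 17.05-era `ServiceInspectWithRaw` signature and a hypothetical service name:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// With InsertDefaults set, fields the user never specified come back
	// filled with the daemon-side defaults instead of being omitted.
	svc, _, err := cli.ServiceInspectWithRaw(context.Background(), "my-service",
		types.ServiceInspectOptions{InsertDefaults: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.Spec.Name)
}
```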
@@ -20,6 +20,17 @@ The following list of features are deprecated in Engine.
 To learn more about Docker Engine's deprecation policy,
 see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).

+### Asynchronous `service create` and `service update`
+
+**Deprecated In Release: v17.05.0**
+
+**Disabled by default in release: v17.09**
+
+Docker 17.05.0 added an optional `--detach=false` option to make
+`docker service create` and `docker service update` work synchronously. This
+option will be enabled by default in Docker 17.09, at which point the `--detach`
+flag can be used to restore the previous (asynchronous) behavior.
+
 ### `-g` and `--graph` flags on `dockerd`

 **Deprecated In Release: v17.05.0**
@@ -66,7 +66,6 @@ Options:
       --help                          Print usage
   -h, --hostname string               Container host name
       --init                          Run an init inside the container that forwards signals and reaps processes
-      --init-path string              Path to the docker-init binary
   -i, --interactive                   Keep STDIN open even if not attached
       --io-maxbandwidth string        Maximum IO bandwidth limit for the system drive (Windows only)
       --io-maxiops uint               Maximum IOps limit for the system drive (Windows only)
@@ -70,7 +70,6 @@ Options:
       --help                          Print usage
   -h, --hostname string               Container host name
       --init                          Run an init inside the container that forwards signals and reaps processes
-      --init-path string              Path to the docker-init binary
   -i, --interactive                   Keep STDIN open even if not attached
       --io-maxbandwidth string        Maximum IO bandwidth limit for the system drive
                                       (Windows only). The format is `<number><unit>`.
@@ -145,6 +145,25 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 }

+func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
+	// TODO: when root rotation is in, convert to a series of root rotation tests instead.
+	// currently just makes sure that we don't have to provide a CA certificate when
+	// providing an external CA
+	d1 := s.AddDaemon(c, false, false)
+	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
+	d1.UpdateSwarm(c, func(s *swarm.Spec) {
+		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
+			{
+				Protocol: swarm.ExternalCAProtocolCFSSL,
+				URL:      "https://thishasnoca.org",
+			},
+		}
+	})
+	info, err := d1.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 1)
+}
+
 func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
 	d1 := s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, false, false)
@@ -4344,23 +4344,37 @@ func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) {
 		ARG %s
 		ARG %s
 		RUN echo "Testing Build Args!"`, envKey, explicitProxyKey)
-	buildImage(imgName,
-		cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
-			"--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal),
-			"--build-arg", proxy),
-		build.WithDockerfile(dockerfile),
-	).Assert(c, icmd.Success)
-
-	out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
+
+	buildImage := func(imgName string) string {
+		cli.BuildCmd(c, imgName,
+			cli.WithFlags("--build-arg", "https_proxy=https://proxy.example.com",
+				"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
+				"--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal),
+				"--build-arg", proxy),
+			build.WithDockerfile(dockerfile),
+		)
+		return getIDByName(c, imgName)
+	}
+
+	origID := buildImage(imgName)
+	result := cli.DockerCmd(c, "history", "--no-trunc", imgName)
+	out := result.Stdout()

 	if strings.Contains(out, proxy) {
 		c.Fatalf("failed to exclude proxy settings from history!")
 	}
+	if strings.Contains(out, "https_proxy") {
+		c.Fatalf("failed to exclude proxy settings from history!")
+	}
+	if !strings.Contains(out, fmt.Sprintf("%s=%s", envKey, envVal)) {
+		c.Fatalf("explicitly defined ARG %s is not in output", explicitProxyKey)
+	}
 	if !strings.Contains(out, fmt.Sprintf("%s=%s", envKey, envVal)) {
 		c.Fatalf("missing build arguments from output")
 	}
+
+	cacheID := buildImage(imgName + "-two")
+	c.Assert(origID, checker.Equals, cacheID)
 }

 func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {

@@ -6156,7 +6170,7 @@ func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) {
 func (s *DockerSuite) TestBuildCopyFromForbidWindowsSystemPaths(c *check.C) {
 	testRequires(c, DaemonIsWindows)
 	dockerfile := `
		FROM ` + testEnv.MinimalBaseImage() + `
		FROM ` + testEnv.MinimalBaseImage() + `
		COPY --from=0 %s c:\\oscopy
		`

@@ -6173,7 +6187,7 @@ func (s *DockerSuite) TestBuildCopyFromForbidWindowsSystemPaths(c *check.C) {
 func (s *DockerSuite) TestBuildCopyFromForbidWindowsRelativePaths(c *check.C) {
 	testRequires(c, DaemonIsWindows)
 	dockerfile := `
		FROM ` + testEnv.MinimalBaseImage() + `
		FROM ` + testEnv.MinimalBaseImage() + `
		COPY --from=0 %s c:\\oscopy
		`

@@ -6192,7 +6206,7 @@ func (s *DockerSuite) TestBuildCopyFromWindowsIsCaseInsensitive(c *check.C) {
 	testRequires(c, DaemonIsWindows)
 	dockerfile := `
		FROM ` + testEnv.MinimalBaseImage() + `
		COPY foo /
		FROM ` + testEnv.MinimalBaseImage() + `
		COPY --from=0 c:\\fOo c:\\copied
		RUN type c:\\copied
@@ -88,8 +88,8 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) {

 	s.d.Restart(c)

-	if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
-		c.Fatal(err)
+	if out, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
+		c.Fatal(err, out)
 	}

 	if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil {
@@ -15,6 +15,7 @@ import (
 	"strings"
 	"time"

+	"github.com/cloudflare/cfssl/helpers"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/integration-cli/checker"

@@ -50,6 +51,13 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) {
 	c.Assert(out, checker.Contains, "minimum certificate expiry time")
 	spec = getSpec()
 	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
+
+	// passing an external CA (this is without starting a root rotation) does not fail
+	out, err = d.Cmd("swarm", "update", "--external-ca", "protocol=cfssl,url=https://something.org")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
+
+	spec = getSpec()
+	c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 1)
 }

 func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {

@@ -60,12 +68,14 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {
 		return sw.Spec
 	}

-	cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s"),
+	cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s",
+		"--external-ca", "protocol=cfssl,url=https://something.org"),
 		cli.Daemon(d.Daemon)).Assert(c, icmd.Success)

 	spec := getSpec()
 	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
 	c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second)
+	c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 1)

 	c.Assert(d.Leave(true), checker.IsNil)
 	time.Sleep(500 * time.Millisecond) // https://github.com/docker/swarmkit/issues/1421

@@ -1212,10 +1222,6 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 		c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
 	}

-	// get d3's cert
-	d3cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
-	c.Assert(err, checker.IsNil)
-
 	// demote manager back to worker - workers are not locked
 	outs, err = d1.Cmd("node", "demote", d3.Info.NodeID)
 	c.Assert(err, checker.IsNil)

@@ -1228,12 +1234,16 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 	// is set to autolock)
 	waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False)
 	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
-		cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
+		certBytes, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
 		if err != nil {
 			return "", check.Commentf("error: %v", err)
 		}
-		return string(cert), check.Commentf("cert: %v", string(cert))
-	}, checker.Not(checker.Equals), string(d3cert))
+		certs, err := helpers.ParseCertificatesPEM(certBytes)
+		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
+			return certs[0].Subject.OrganizationalUnit[0], nil
+		}
+		return "", check.Commentf("could not get organizational unit from certificate")
+	}, checker.Equals, "swarm-worker")

 	// by now, it should *never* be locked on restart
 	d3.Restart(c)
@@ -43,7 +43,6 @@ docker-run - Run a command in a new container
 [**-h**|**--hostname**[=*HOSTNAME*]]
 [**--help**]
 [**--init**]
-[**--init-path**[=*[]*]]
 [**-i**|**--interactive**]
 [**--ip**[=*IPv4-ADDRESS*]]
 [**--ip6**[=*IPv6-ADDRESS*]]

@@ -327,9 +326,6 @@ redirection on the host system.
 **--init**
    Run an init inside the container that forwards signals and reaps processes

-**--init-path**=""
-   Path to the docker-init binary
-
 **-i**, **--interactive**=*true*|*false*
    Keep STDIN open even if not attached. The default is *false*.
@@ -45,4 +45,5 @@ const (
 	RELATIME    = 0
 	REMOUNT     = 0
 	STRICTATIME = 0
+	mntDetach   = 0
 )

@@ -82,4 +82,6 @@ const (
 	// it possible for the kernel to default to relatime or noatime but still
 	// allow userspace to override it.
 	STRICTATIME = syscall.MS_STRICTATIME
+
+	mntDetach = syscall.MNT_DETACH
 )

@@ -27,4 +27,5 @@ const (
 	STRICTATIME = 0
 	SYNCHRONOUS = 0
 	RDONLY      = 0
+	mntDetach   = 0
 )
@@ -1,9 +1,5 @@
 package mount

-import (
-	"time"
-)
-
 // GetMounts retrieves a list of mounts for the current running process.
 func GetMounts() ([]*Info, error) {
 	return parseMountTable()

@@ -49,23 +45,11 @@ func ForceMount(device, target, mType, options string) error {
 	return mount(device, target, mType, uintptr(flag), data)
 }

-// Unmount will unmount the target filesystem, so long as it is mounted.
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
 func Unmount(target string) error {
 	if mounted, err := Mounted(target); err != nil || !mounted {
 		return err
 	}
-	return ForceUnmount(target)
-}
-
-// ForceUnmount will force an unmount of the target filesystem, regardless if
-// it is mounted or not.
-func ForceUnmount(target string) (err error) {
-	// Simple retry logic for unmount
-	for i := 0; i < 10; i++ {
-		if err = unmount(target, 0); err == nil {
-			return nil
-		}
-		time.Sleep(100 * time.Millisecond)
-	}
-	return
+	return unmount(target, mntDetach)
 }
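The rewrite drops the retry loop in favor of a lazy unmount. The kernel-level idea, in a standalone sketch (Linux-only; the target path is a made-up example): `MNT_DETACH` detaches the mount point immediately and lets the kernel finish the unmount once it is no longer busy, which is why the old `ForceUnmount` retry loop became unnecessary.

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
)

// lazyUnmount detaches target even if it is still busy; the kernel
// completes the unmount when the last reference goes away.
func lazyUnmount(target string) error {
	if err := syscall.Unmount(target, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("lazy unmount of %s failed: %v", target, err)
	}
	return nil
}

func main() {
	if err := lazyUnmount("/mnt/example"); err != nil {
		fmt.Println(err)
	}
}
```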
@@ -648,7 +648,7 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
 func getMounts(root string) ([]string, error) {
 	infos, err := mount.GetMounts()
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to read mount table while performing recursive unmount")
+		return nil, errors.Wrap(err, "failed to read mount table")
 	}

 	var mounts []string
@@ -199,9 +199,17 @@ func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobs

 	pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
 	orig := filepath.Join(pdir, "rootfs")
+
+	// Make sure nothing is mounted.
+	// This could happen if the plugin was disabled with `-f` with active mounts.
+	// If anything in `orig` is still mounted, this should error out.
+	if err := recursiveUnmount(orig); err != nil {
+		return err
+	}
+
 	backup := orig + "-old"
 	if err := os.Rename(orig, backup); err != nil {
-		return err
+		return errors.Wrap(err, "error backing up plugin data before upgrade")
 	}

 	defer func() {
@@ -24,7 +24,7 @@ github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
 github.com/imdario/mergo 0.2.1

 #get libnetwork packages
-github.com/docker/libnetwork b13e0604016a4944025aaff521d9c125850b0d04
+github.com/docker/libnetwork 5d4e5de2f9962c2de8a7872128e2cc09dfdd99aa
 github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec

@@ -105,7 +105,7 @@ github.com/docker/containerd 9048e5e50717ea4497b757314bad98ea3763c145
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4

 # cluster
-github.com/docker/swarmkit d5232280c510d70755ab11305d46a5704735371a
+github.com/docker/swarmkit ae52d9de97b91eee978bc2fe411bc85b33eb82dd
 github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
 github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
vendor/github.com/docker/libnetwork/agent.go (generated, vendored; 7 changes)

@@ -187,6 +187,13 @@ func (c *controller) agentSetup() error {
 	clusterProvider := c.cfg.Daemon.ClusterProvider
 	agent := c.agent
 	c.Unlock()
+
+	if clusterProvider == nil {
+		msg := "Aborting initialization of Libnetwork Agent because cluster provider is now unset"
+		logrus.Errorf(msg)
+		return fmt.Errorf(msg)
+	}
+
 	bindAddr := clusterProvider.GetLocalAddress()
 	advAddr := clusterProvider.GetAdvertiseAddress()
 	remote := clusterProvider.GetRemoteAddress()
vendor/github.com/docker/libnetwork/default_gateway.go (generated, vendored; 13 changes)

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"strings"

+	"github.com/Sirupsen/logrus"
 	"github.com/docker/libnetwork/netlabel"
 	"github.com/docker/libnetwork/types"
 )

@@ -72,9 +73,19 @@ func (sb *sandbox) setupDefaultGW() error {
 	if err != nil {
 		return fmt.Errorf("container %s: endpoint create on GW Network failed: %v", sb.containerID, err)
 	}
+
+	defer func() {
+		if err != nil {
+			if err2 := newEp.Delete(true); err2 != nil {
+				logrus.Warnf("Failed to remove gw endpoint for container %s after failing to join the gateway network: %v",
+					sb.containerID, err2)
+			}
+		}
+	}()
+
 	epLocal := newEp.(*endpoint)

-	if err := epLocal.sbJoin(sb); err != nil {
+	if err = epLocal.sbJoin(sb); err != nil {
 		return fmt.Errorf("container %s: endpoint join on GW Network failed: %v", sb.containerID, err)
 	}
vendor/github.com/docker/libnetwork/endpoint.go (generated, vendored; 22 changes)

@@ -427,7 +427,7 @@ func (ep *endpoint) Join(sbox Sandbox, options ...EndpointOption) error {
 	return ep.sbJoin(sb, options...)
 }

-func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
+func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) (err error) {
 	n, err := ep.getNetworkFromStore()
 	if err != nil {
 		return fmt.Errorf("failed to get network from store during join: %v", err)

@@ -462,7 +462,7 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {

 	d, err := n.driver(true)
 	if err != nil {
-		return fmt.Errorf("failed to join endpoint: %v", err)
+		return fmt.Errorf("failed to get driver during join: %v", err)
 	}

 	err = d.Join(nid, epid, sb.Key(), ep, sb.Labels())

@@ -471,8 +471,8 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
 	}
 	defer func() {
 		if err != nil {
-			if err := d.Leave(nid, epid); err != nil {
-				logrus.Warnf("driver leave failed while rolling back join: %v", err)
+			if e := d.Leave(nid, epid); e != nil {
+				logrus.Warnf("driver leave failed while rolling back join: %v", e)
 			}
 		}
 	}()

@@ -538,11 +538,11 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
 		logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
 		extN, err := extEp.getNetworkFromStore()
 		if err != nil {
-			return fmt.Errorf("failed to get network from store during join: %v", err)
+			return fmt.Errorf("failed to get network from store for revoking external connectivity during join: %v", err)
 		}
 		extD, err := extN.driver(true)
 		if err != nil {
-			return fmt.Errorf("failed to join endpoint: %v", err)
+			return fmt.Errorf("failed to get driver for revoking external connectivity during join: %v", err)
 		}
 		if err = extD.RevokeExternalConnectivity(extEp.network.ID(), extEp.ID()); err != nil {
 			return types.InternalErrorf(

@@ -570,9 +570,9 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
 	}

 	if !sb.needDefaultGW() {
-		if err := sb.clearDefaultGW(); err != nil {
+		if e := sb.clearDefaultGW(); e != nil {
 			logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
-				sb.ID(), sb.ContainerID(), err)
+				sb.ID(), sb.ContainerID(), e)
 		}
 	}

@@ -705,7 +705,7 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)

 	d, err := n.driver(!force)
 	if err != nil {
-		return fmt.Errorf("failed to leave endpoint: %v", err)
+		return fmt.Errorf("failed to get driver during endpoint leave: %v", err)
 	}

 	ep.Lock()

@@ -765,11 +765,11 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
 		logrus.Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
 		extN, err := extEp.getNetworkFromStore()
 		if err != nil {
-			return fmt.Errorf("failed to get network from store during leave: %v", err)
+			return fmt.Errorf("failed to get network from store for programming external connectivity during leave: %v", err)
 		}
 		extD, err := extN.driver(true)
 		if err != nil {
-			return fmt.Errorf("failed to leave endpoint: %v", err)
+			return fmt.Errorf("failed to get driver for programming external connectivity during leave: %v", err)
 		}
 		if err := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); err != nil {
 			logrus.Warnf("driver failed programming external connectivity on endpoint %s: (%s) %v",
vendor/github.com/docker/libnetwork/networkdb/broadcast.go (generated, vendored; 9 changes)

@@ -86,6 +86,15 @@ func (nDB *NetworkDB) sendNodeEvent(event NodeEvent_Type) error {
 		notify: notifyCh,
 	})

+	nDB.RLock()
+	noPeers := len(nDB.nodes) <= 1
+	nDB.RUnlock()
+
+	// Message enqueued, do not wait for a send if no peer is present
+	if noPeers {
+		return nil
+	}
+
 	// Wait for the broadcast
 	select {
 	case <-notifyCh:
vendor/github.com/docker/libnetwork/networkdb/cluster.go (generated, vendored; 2 changes)

@@ -17,7 +17,7 @@ import (
 )

 const (
-	reapInterval     = 60 * time.Second
+	reapInterval     = 30 * time.Minute
 	reapPeriod       = 5 * time.Second
 	retryInterval    = 1 * time.Second
 	nodeReapInterval = 24 * time.Hour
vendor/github.com/docker/swarmkit/agent/worker.go (generated, vendored; 22 changes)

@@ -426,14 +426,19 @@ func (w *worker) Listen(ctx context.Context, reporter StatusReporter) {
 }

 func (w *worker) startTask(ctx context.Context, tx *bolt.Tx, task *api.Task) error {
-	w.taskevents.Publish(task.Copy())
 	_, err := w.taskManager(ctx, tx, task) // side-effect taskManager creation.

 	if err != nil {
 		log.G(ctx).WithError(err).Error("failed to start taskManager")
+		// we ignore this error: it gets reported in the taskStatus within
+		// `newTaskManager`. We log it here and move on. If there is an
+		// attempted restart, the lack of taskManager will have this retry
+		// again.
+		return nil
 	}

 	// TODO(stevvooe): Add start method for taskmanager
+	// only publish if controller resolution was successful.
+	w.taskevents.Publish(task.Copy())
 	return nil
 }

@@ -464,7 +469,7 @@ func (w *worker) newTaskManager(ctx context.Context, tx *bolt.Tx, task *api.Task
 	}

 	if err != nil {
-		log.G(ctx).Error("controller resolution failed")
+		log.G(ctx).WithError(err).Error("controller resolution failed")
 		return nil, err
 	}

@@ -568,9 +573,14 @@ func (w *worker) Subscribe(ctx context.Context, subscription *api.SubscriptionMe
 		case v := <-ch:
 			task := v.(*api.Task)
 			if match(task) {
-				w.mu.Lock()
-				go w.taskManagers[task.ID].Logs(ctx, *subscription.Options, publisher)
-				w.mu.Unlock()
+				w.mu.RLock()
+				tm, ok := w.taskManagers[task.ID]
+				w.mu.RUnlock()
+				if !ok {
+					continue
+				}
+
+				go tm.Logs(ctx, *subscription.Options, publisher)
 			}
 		case <-ctx.Done():
 			return ctx.Err()
vendor/github.com/docker/swarmkit/manager/allocator/network.go (generated, vendored; 19 changes)

@@ -164,7 +164,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {

 	var allocatedServices []*api.Service
 	for _, s := range services {
-		if nc.nwkAllocator.IsServiceAllocated(s, networkallocator.OnInit) {
+		if !nc.nwkAllocator.ServiceNeedsAllocation(s, networkallocator.OnInit) {
 			continue
 		}

@@ -317,7 +317,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
 			break
 		}

-		if nc.nwkAllocator.IsServiceAllocated(s) {
+		if !nc.nwkAllocator.ServiceNeedsAllocation(s) {
 			break
 		}

@@ -345,7 +345,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
 			break
 		}

-		if nc.nwkAllocator.IsServiceAllocated(s) {
+		if !nc.nwkAllocator.ServiceNeedsAllocation(s) {
 			if nc.nwkAllocator.PortsAllocatedInHostPublishMode(s) {
 				break
 			}

@@ -544,7 +544,7 @@ func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bo
 	// network configured or service endpoints have been
 	// allocated.
 	return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) &&
-		(s == nil || nc.nwkAllocator.IsServiceAllocated(s))
+		(s == nil || !nc.nwkAllocator.ServiceNeedsAllocation(s))
 }

 func taskUpdateNetworks(t *api.Task, networks []*api.NetworkAttachment) {

@@ -886,7 +886,7 @@ func (a *Allocator) allocateTask(ctx context.Context, t *api.Task) (err error) {
 		return
 	}

-	if !nc.nwkAllocator.IsServiceAllocated(s) {
+	if nc.nwkAllocator.ServiceNeedsAllocation(s) {
 		err = fmt.Errorf("service %s to which this task %s belongs has pending allocations", s.ID, t.ID)
 		return
 	}

@@ -1000,7 +1000,7 @@ func (a *Allocator) procUnallocatedServices(ctx context.Context) {
 	nc := a.netCtx
 	var allocatedServices []*api.Service
 	for _, s := range nc.unallocatedServices {
-		if !nc.nwkAllocator.IsServiceAllocated(s) {
+		if nc.nwkAllocator.ServiceNeedsAllocation(s) {
 			if err := a.allocateService(ctx, s); err != nil {
 				log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated service %s", s.ID)
 				continue

@@ -1089,12 +1089,7 @@ func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {

 // IsIngressNetwork returns whether the passed network is an ingress network.
 func IsIngressNetwork(nw *api.Network) bool {
-	if nw.Spec.Ingress {
-		return true
-	}
-	// Check if legacy defined ingress network
-	_, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]
-	return ok && nw.Spec.Annotations.Name == "ingress"
+	return networkallocator.IsIngressNetwork(nw)
 }

 // GetIngressNetwork fetches the ingress network from store.
vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go (generated, vendored; 141 changes)

@@ -153,7 +153,7 @@ func (na *NetworkAllocator) Deallocate(n *api.Network) error {
 // IP and ports needed by the service.
 func (na *NetworkAllocator) ServiceAllocate(s *api.Service) (err error) {
 	if err = na.portAllocator.serviceAllocatePorts(s); err != nil {
-		return
+		return err
 	}
 	defer func() {
 		if err != nil {

@@ -183,14 +183,7 @@ func (na *NetworkAllocator) ServiceAllocate(s *api.Service) (err error) {
 		}

 		delete(na.services, s.ID)
-		return
-	}
-
-	// First allocate VIPs for all the pre-populated endpoint attachments
-	for _, eAttach := range s.Endpoint.VirtualIPs {
-		if err = na.allocateVIP(eAttach); err != nil {
-			return
-		}
+		return nil
 	}

 	// Always prefer NetworkAttachmentConfig in the TaskSpec

@@ -199,24 +192,57 @@ func (na *NetworkAllocator) ServiceAllocate(s *api.Service) (err error) {
 		specNetworks = s.Spec.Networks
 	}

-outer:
+	// Allocate VIPs for all the pre-populated endpoint attachments
+	eVIPs := s.Endpoint.VirtualIPs[:0]
+
+vipLoop:
+	for _, eAttach := range s.Endpoint.VirtualIPs {
+		if na.IsVIPOnIngressNetwork(eAttach) {
+			if err = na.allocateVIP(eAttach); err != nil {
+				return err
+			}
+			eVIPs = append(eVIPs, eAttach)
+			continue vipLoop
+		}
+		for _, nAttach := range specNetworks {
+			if nAttach.Target == eAttach.NetworkID {
+				if err = na.allocateVIP(eAttach); err != nil {
+					return err
+				}
+				eVIPs = append(eVIPs, eAttach)
+				continue vipLoop
+			}
+		}
+		// If the network of the VIP is not part of the service spec,
+		// deallocate the vip
+		na.deallocateVIP(eAttach)
+	}
+
+networkLoop:
 	for _, nAttach := range specNetworks {
 		for _, vip := range s.Endpoint.VirtualIPs {
 			if vip.NetworkID == nAttach.Target {
-				continue outer
+				continue networkLoop
 			}
 		}

 		vip := &api.Endpoint_VirtualIP{NetworkID: nAttach.Target}
 		if err = na.allocateVIP(vip); err != nil {
-			return
+			return err
 		}

-		s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, vip)
+		eVIPs = append(eVIPs, vip)
 	}

-	na.services[s.ID] = struct{}{}
-	return
+	if len(eVIPs) > 0 {
+		na.services[s.ID] = struct{}{}
+	} else {
+		delete(na.services, s.ID)
+	}
+
+	s.Endpoint.VirtualIPs = eVIPs
+	return nil
 }

 // ServiceDeallocate de-allocates all the network resources such as

@@ -234,6 +260,7 @@ func (na *NetworkAllocator) ServiceDeallocate(s *api.Service) error {
 				WithField("vip.addr", vip.Addr).Error("error deallocating vip")
 		}
 	}
+	s.Endpoint.VirtualIPs = nil

 	na.portAllocator.serviceDeallocatePorts(s)
 	delete(na.services, s.ID)

@@ -300,41 +327,78 @@ func OnInit(options *ServiceAllocationOpts) {
 	options.OnInit = true
 }

-// IsServiceAllocated returns if the passed service has its network resources allocated or not.
-// init bool indicates if the func is called during allocator initialization stage.
-func (na *NetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*ServiceAllocationOpts)) bool {
+// ServiceNeedsAllocation returns true if the passed service needs to have network resources allocated/updated.
+func (na *NetworkAllocator) ServiceNeedsAllocation(s *api.Service, flags ...func(*ServiceAllocationOpts)) bool {
 	var options ServiceAllocationOpts
 	for _, flag := range flags {
 		flag(&options)
 	}

+	// Always prefer NetworkAttachmentConfig in the TaskSpec
+	specNetworks := s.Spec.Task.Networks
+	if len(specNetworks) == 0 && len(s.Spec.Networks) != 0 {
+		specNetworks = s.Spec.Networks
+	}
+
 	// If endpoint mode is VIP and allocator does not have the
-	// service in VIP allocated set then it is not allocated.
-	if (len(s.Spec.Task.Networks) != 0 || len(s.Spec.Networks) != 0) &&
+	// service in VIP allocated set then it needs to be allocated.
+	if len(specNetworks) != 0 &&
 		(s.Spec.Endpoint == nil ||
 			s.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP) {

 		if _, ok := na.services[s.ID]; !ok {
-			return false
+			return true
 		}

+		if s.Endpoint == nil || len(s.Endpoint.VirtualIPs) == 0 {
+			return true
+		}
+
+		// If the spec has networks which don't have a corresponding VIP,
+		// the service needs to be allocated.
+	networkLoop:
+		for _, net := range specNetworks {
+			for _, vip := range s.Endpoint.VirtualIPs {
+				if vip.NetworkID == net.Target {
+					continue networkLoop
+				}
+			}
+			return true
+		}
+	}
+
+	// If the spec no longer has networks attached and has a vip allocated
+	// from a previous spec, the service needs to be allocated.
+	if s.Endpoint != nil {
+	vipLoop:
+		for _, vip := range s.Endpoint.VirtualIPs {
+			if na.IsVIPOnIngressNetwork(vip) {
+				continue vipLoop
+			}
+			for _, net := range specNetworks {
+				if vip.NetworkID == net.Target {
+					continue vipLoop
+				}
+			}
+			return true
+		}
+	}
+
 	// If the endpoint mode is DNSRR and allocator has the service
-	// in VIP allocated set then we return not allocated to make
+	// in VIP allocated set then we return true to make
 	// sure the allocator triggers networkallocator to free up the
 	// resources if any.
 	if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {
 		if _, ok := na.services[s.ID]; ok {
-			return false
+			return true
 		}
 	}

 	if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||
 		(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {
-		return na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
+		return !na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
 	}

-	return true
+	return false
 }

 // IsNodeAllocated returns if the passed node has its network resources allocated or not.

@@ -828,3 +892,26 @@ func initializeDrivers(reg *drvregistry.DrvRegistry) error {
 	}
 	return nil
 }
+
+// IsVIPOnIngressNetwork checks whether the vip is on the ingress network
+func (na *NetworkAllocator) IsVIPOnIngressNetwork(vip *api.Endpoint_VirtualIP) bool {
+	if vip == nil {
+		return false
+	}
+
+	localNet := na.getNetwork(vip.NetworkID)
+	if localNet != nil && localNet.nw != nil {
+		return IsIngressNetwork(localNet.nw)
+	}
+	return false
+}
+
+// IsIngressNetwork checks whether the network is an ingress network
+func IsIngressNetwork(nw *api.Network) bool {
+	if nw.Spec.Ingress {
+		return true
+	}
+	// Check if legacy defined ingress network
+	_, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]
+	return ok && nw.Spec.Annotations.Name == "ingress"
+}
vendor/github.com/docker/swarmkit/manager/controlapi/ca_rotation.go (generated, vendored; 14 changes)

@@ -148,14 +148,16 @@ func validateHasAtLeastOneExternalCA(ctx context.Context, externalCAs map[string

 // validates that the list of external CAs have valid certs associated with them, and produce a mapping of subject/pubkey:external
 // for later validation of required external CAs
-func getNormalizedExtCAs(caConfig *api.CAConfig) (map[string][]*api.ExternalCA, error) {
+func getNormalizedExtCAs(caConfig *api.CAConfig, normalizedCurrentRootCACert []byte) (map[string][]*api.ExternalCA, error) {
 	extCAs := make(map[string][]*api.ExternalCA)

 	for _, extCA := range caConfig.ExternalCAs {
-		if len(extCA.CACert) == 0 {
-			return nil, grpc.Errorf(codes.InvalidArgument, "must specify CA certificate for each external CA")
+		associatedCert := normalizedCurrentRootCACert
+		// if no associated cert is provided, assume it's the current root cert
+		if len(extCA.CACert) > 0 {
+			associatedCert = ca.NormalizePEMs(extCA.CACert)
 		}
-		certKey := string(ca.NormalizePEMs(extCA.CACert))
+		certKey := string(associatedCert)
 		extCAs[certKey] = append(extCAs[certKey], extCA)
 	}

@@ -191,12 +193,12 @@ func validateCAConfig(ctx context.Context, securityConfig *ca.SecurityConfig, cl
 		return nil, grpc.Errorf(codes.InvalidArgument, "if a signing CA key is provided, the signing CA cert must also be provided")
 	}

-	extCAs, err := getNormalizedExtCAs(newConfig) // validate that the list of external CAs is not malformed
+	normalizedRootCA := ca.NormalizePEMs(cluster.RootCA.CACert)
+	extCAs, err := getNormalizedExtCAs(newConfig, normalizedRootCA) // validate that the list of external CAs is not malformed
 	if err != nil {
 		return nil, err
 	}

-	normalizedRootCA := ca.NormalizePEMs(cluster.RootCA.CACert)
 	var oldCertExtCAs []*api.ExternalCA
 	if !hasSigningKey(&cluster.RootCA) {
 		oldCertExtCAs, err = validateHasAtLeastOneExternalCA(ctx, extCAs, securityConfig, normalizedRootCA, "current")
vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go (generated, vendored; 4 changes)

@@ -297,8 +297,9 @@ func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []strin
 	updates := make(map[*api.Service][]orchestrator.Slot)

 	_, err := g.store.Batch(func(batch *store.Batch) error {
-		var updateTasks []orchestrator.Slot
 		for _, serviceID := range serviceIDs {
+			var updateTasks []orchestrator.Slot
+
 			if _, exists := nodeTasks[serviceID]; !exists {
 				continue
 			}

@@ -352,7 +353,6 @@ func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []strin
 	for service, updateTasks := range updates {
 		g.updater.Update(ctx, g.cluster, service, updateTasks)
 	}
-
 }

 // updateNode updates g.nodes based on the current node value
vendor/github.com/docker/swarmkit/node/node.go (generated, vendored; 60 changes)

@@ -133,6 +133,30 @@ type Node struct {
 	manager          *manager.Manager
 	notifyNodeChange chan *agent.NodeChanges // used by the agent to relay node updates from the dispatcher Session stream to (*Node).run
 	unlockKey        []byte
+
+	// lastNodeRole is the last-seen value of Node.Role, used to make role
+	// changes "edge triggered" and avoid renewal loops.
+	lastNodeRole lastSeenRole
+	// lastNodeDesiredRole is the last-seen value of Node.Spec.DesiredRole,
+	// used to make role changes "edge triggered" and avoid renewal loops.
+	// This exists in addition to lastNodeRole to support older CAs that
+	// only fill in the DesiredRole field.
+	lastNodeDesiredRole lastSeenRole
+}
+
+type lastSeenRole struct {
+	role *api.NodeRole
+}
+
+// observe notes the latest value of this node role, and returns true if it
+// is the first seen value, or is different from the most recently seen value.
+func (l *lastSeenRole) observe(newRole api.NodeRole) bool {
+	changed := l.role == nil || *l.role != newRole
+	if l.role == nil {
+		l.role = new(api.NodeRole)
+	}
+	*l.role = newRole
+	return changed
 }

 // RemoteAPIAddr returns address on which remote manager api listens.

@@ -279,17 +303,35 @@ func (n *Node) run(ctx context.Context) (err error) {
 			return
 		case nodeChanges := <-n.notifyNodeChange:
 			n.Lock()
-			currentRole := n.role
+			currentRole := api.NodeRoleWorker
+			if n.role == ca.ManagerRole {
+				currentRole = api.NodeRoleManager
+			}
 			n.Unlock()

 			if nodeChanges.Node != nil {
-				role := ca.WorkerRole
-				if nodeChanges.Node.Role == api.NodeRoleManager {
-					role = ca.ManagerRole
-				}
-
-				// If the server is sending us a ForceRenewal State, or if the new node role doesn't match our current role, renew
-				if currentRole != role || nodeChanges.Node.Certificate.Status.State == api.IssuanceStateRotate {
+				// This is a bit complex to be backward compatible with older CAs that
+				// don't support the Node.Role field. They only use what's presently
+				// called DesiredRole.
+				// 1) If we haven't seen the node object before, and the desired role
+				//    is different from our current role, renew the cert. This covers
+				//    the case of starting up after a role change.
+				// 2) If we have seen the node before, the desired role is
+				//    different from our current role, and either the actual role or
+				//    desired role has changed relative to the last values we saw in
+				//    those fields, renew the cert. This covers the case of the role
+				//    changing while this node is running, but prevents getting into a
+				//    rotation loop if Node.Role isn't what we expect (because it's
+				//    unset). We may renew the certificate an extra time (first when
+				//    DesiredRole changes, and then again when Role changes).
+				// 3) If the server is sending us IssuanceStateRotate, renew the cert as
+				//    requested by the CA.
+				roleChanged := n.lastNodeRole.observe(nodeChanges.Node.Role)
+				desiredRoleChanged := n.lastNodeDesiredRole.observe(nodeChanges.Node.Spec.DesiredRole)
+				if (currentRole != nodeChanges.Node.Spec.DesiredRole &&
+					((roleChanged && currentRole != nodeChanges.Node.Role) ||
+						desiredRoleChanged)) ||
+					nodeChanges.Node.Certificate.Status.State == api.IssuanceStateRotate {
 					renewCert()
 				}
 			}

@@ -298,7 +340,7 @@ func (n *Node) run(ctx context.Context) (err error) {
 			// We only want to update the root CA if this is a worker node. Manager nodes directly watch the raft
 			// store and update the root CA, with the necessary signer, from the raft store (since the managers
 			// need the CA key as well to potentially issue new TLS certificates).
-			if currentRole == ca.ManagerRole || bytes.Equal(nodeChanges.RootCert, securityConfig.RootCA().Certs) {
+			if currentRole == api.NodeRoleManager || bytes.Equal(nodeChanges.RootCert, securityConfig.RootCA().Certs) {
 				continue
 			}
 			newRootCA, err := ca.NewRootCA(nodeChanges.RootCert, nil, nil, ca.DefaultNodeCertExpiration, nil)
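A standalone toy version of the `observe` edge-trigger above, to show why it only fires on the first sighting and on changes (types simplified to `int`; this is an illustration, not the swarmkit code):

```go
package main

import "fmt"

type lastSeen struct{ role *int }

// observe records the latest value and reports whether it is the first
// value seen or differs from the previous one ("edge triggered").
func (l *lastSeen) observe(newRole int) bool {
	changed := l.role == nil || *l.role != newRole
	if l.role == nil {
		l.role = new(int)
	}
	*l.role = newRole
	return changed
}

func main() {
	var l lastSeen
	fmt.Println(l.observe(1)) // true: first observation
	fmt.Println(l.observe(1)) // false: no change, so no renewal is triggered
	fmt.Println(l.observe(2)) // true: role changed
}
```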
@@ -218,6 +218,14 @@ func (r *Root) Remove(v volume.Volume) error {
 		return fmt.Errorf("unknown volume type %T", v)
 	}

+	if lv.active.count > 0 {
+		return fmt.Errorf("volume has active mounts")
+	}
+
+	if err := lv.unmount(); err != nil {
+		return err
+	}
+
 	realPath, err := filepath.EvalSymlinks(lv.path)
 	if err != nil {
 		if !os.IsNotExist(err) {

@@ -306,6 +314,7 @@ func (v *localVolume) Path() string {
 }

 // Mount implements the localVolume interface, returning the data location.
+// If there are any provided mount options, the resources will be mounted at this point
 func (v *localVolume) Mount(id string) (string, error) {
 	v.m.Lock()
 	defer v.m.Unlock()

@@ -321,19 +330,35 @@ func (v *localVolume) Mount(id string) (string, error) {
 	return v.path, nil
 }

-// Umount is for satisfying the localVolume interface and does not do anything in this driver.
+// Unmount dereferences the id, and if it is the last reference will unmount any resources
+// that were previously mounted.
 func (v *localVolume) Unmount(id string) error {
 	v.m.Lock()
 	defer v.m.Unlock()

+	// Always decrement the count, even if the unmount fails
+	// Essentially docker doesn't care if this fails, it will send an error, but
+	// ultimately there's nothing that can be done. If we don't decrement the count
+	// this volume can never be removed until a daemon restart occurs.
 	if v.opts != nil {
 		v.active.count--
-		if v.active.count == 0 {
-			if err := mount.Unmount(v.path); err != nil {
-				v.active.count++
-			}
-			v.active.mounted = false
-		}
 	}
+
+	if v.active.count > 0 {
+		return nil
+	}
+
+	return v.unmount()
+}
+
+func (v *localVolume) unmount() error {
+	if v.opts != nil {
+		if err := mount.Unmount(v.path); err != nil {
+			if mounted, mErr := mount.Mounted(v.path); mounted || mErr != nil {
+				return errors.Wrapf(err, "error while unmounting volume path '%s'", v.path)
+			}
+		}
+		v.active.mounted = false
+	}
 	return nil
 }
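The local-volume change is a reference-counting pattern: decrement on every `Unmount`, only touch the kernel mount when the count reaches zero, and tolerate a failed unmount if the path turns out not to be mounted. A minimal standalone sketch of that pattern (a hypothetical helper, not the driver code itself):

```go
package main

import (
	"fmt"
	"sync"
)

// refMount tracks how many users share one underlying mount.
type refMount struct {
	mu        sync.Mutex
	count     int
	unmountFn func() error // e.g. would wrap mount.Unmount(path)
}

func (m *refMount) acquire() { m.mu.Lock(); m.count++; m.mu.Unlock() }

// release decrements first, mirroring the driver change: a leaked count
// would make the volume unremovable until a daemon restart.
func (m *refMount) release() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.count > 0 {
		m.count--
	}
	if m.count > 0 {
		return nil // other users still need the mount
	}
	return m.unmountFn()
}

func main() {
	m := &refMount{unmountFn: func() error { fmt.Println("unmounted"); return nil }}
	m.acquire()
	m.acquire()
	m.release() // still mounted: one reference remains
	m.release() // prints "unmounted"
}
```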