Compare commits

...

34 commits

Author SHA1 Message Date
Victor Vieux
89658bed64 bump to GA
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-05-04 13:56:32 -07:00
Brian Goff
d1c9e9cfe9 Ensure unmount before removing local volume.
When there is an error unmounting a local volume, it is still possible
to call `Remove()` on the volume causing removal of the mounted
resources which is generally not desirable.

This ensures that resources are unmounted before attempting removal.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit db3576f8a0)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-05-04 13:56:32 -07:00
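The ordering this commit enforces can be sketched in a few lines of Go (a hypothetical, simplified volume type; the real change to the local driver appears in the diffs at the bottom of this page): refuse removal while references are live, and unmount before deleting the backing directory.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/mount"
)

// vol is a hypothetical stand-in for the local driver's volume type.
type vol struct {
	path  string
	count int // live mount references
}

// remove mirrors the ordering described above: never delete the backing
// directory while it may still be mounted, otherwise the mounted
// resources themselves get removed.
func (v *vol) remove() error {
	if v.count > 0 {
		return fmt.Errorf("volume has active mounts")
	}
	if err := mount.Unmount(v.path); err != nil { // unmount first
		return err
	}
	return os.RemoveAll(v.path)
}

func main() {
	v := &vol{path: "/var/lib/docker/volumes/example/_data"}
	fmt.Println(v.remove())
}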
Brian Goff
c4bd13b650 Use lazy unmount for local volume driver unmount
This fixes issues where the underlying filesystem may be disconnected and
attempting to unmount may cause a hang.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit acbfe6bc56)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-05-04 13:56:31 -07:00
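The mechanism is Linux's lazy unmount: MNT_DETACH detaches the mount point immediately and lets the kernel clean up references later, so the call cannot block on an unreachable filesystem (for example a dead NFS server). A minimal sketch with the raw syscall, assuming a hypothetical target path; the actual patch routes this through pkg/mount, as the diffs further down show:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	target := "/var/lib/docker/volumes/example/_data" // hypothetical mount point
	// MNT_DETACH (Linux) detaches immediately and defers cleanup,
	// avoiding hangs when the underlying filesystem is disconnected.
	if err := syscall.Unmount(target, syscall.MNT_DETACH); err != nil {
		fmt.Println("unmount failed:", err)
	}
}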
Victor Vieux
90d35abf7b bump to rc3
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-05-01 22:49:10 -07:00
Victor Vieux
947cc7b591 Merge pull request #32950 from dnephin/cherry-pick-build-arg-fixes
[17.05.x] Cherry pick build arg fixes
2017-05-01 22:45:51 -07:00
Sebastiaan van Stijn
e11e119022 Deprecate "asynchronous" service create and service update
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit f32b90f463)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-05-01 16:42:22 -07:00
Daniel Nephin
a4e352ccb0 Fix cache miss when builtin build args are used.
Signed-off-by: Daniel Nephin <dnephin@docker.com>
2017-05-01 18:20:32 -04:00
Dave Tucker
8c532a6822 builder: Make builtin arg pruning work with > 1 arg
The previous implementation would error out with "Unexpected EOF" which
was caused by an underlying "array index out-of-bounds" error.
The root cause was deleting items from the same array that was being
iterated over. The iteration was unaware that the array size had
changed, resulting in an error.

The new implementation builds a new array instead of mutating a copy of
the old one.

Fixes: #32744

Signed-off-by: Dave Tucker <dt@docker.com>
2017-05-01 18:18:01 -04:00
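The bug class is easy to reproduce in Go (a minimal sketch, not the builder's actual code): deleting from a slice while ranging over it leaves the range walking the original length, so a later s[i+1:] can exceed the shrunken slice and panic once more than one element matches. Building a fresh slice, as the fix does, avoids this entirely.

package main

import "fmt"

// buggyPrune mutates the slice it is ranging over. With two or more
// matches the slice shrinks while the range still visits the original
// indexes, and s[i+1:] eventually exceeds len(s) -- a bounds panic.
func buggyPrune(s []string, drop func(string) bool) []string {
	for i, v := range s {
		if drop(v) {
			s = append(s[:i], s[i+1:]...)
		}
	}
	return s
}

// fixedPrune builds a new slice instead, mirroring the approach the
// commit message describes.
func fixedPrune(s []string, drop func(string) bool) []string {
	out := make([]string, 0, len(s))
	for _, v := range s {
		if !drop(v) {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	isBuiltin := func(v string) bool { return v == "HTTP_PROXY=a" || v == "https_proxy=b" }
	fmt.Println(fixedPrune([]string{"FOO=1", "HTTP_PROXY=a", "https_proxy=b"}, isBuiltin))
	// buggyPrune would panic on the same input, since two elements match.
}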
Harald Albers
9986762bc7 Remove bash completion for run|create --init-path
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit b7a32e1780)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-05-01 15:02:04 -07:00
Victor Vieux
26d3cebaba Merge pull request #32929 from aaronlehmann/vendor-swarmkit-ae52d9d
[17.05] Vendor swarmkit ae52d9d - fix service port publishing regression
2017-05-01 14:54:58 -07:00
Aaron Lehmann
bcbfaec7c6 Vendor swarmkit ae52d9d
Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
2017-04-28 18:35:29 -07:00
Aaron Lehmann
a3600db8c5 Merge pull request #32905 from dperny/17.05.x-fix-agent-logs-segfault
[17.05.x] Cherry-pick: Fix a rare segfault that can occur in service logs
2017-04-28 18:34:08 -07:00
Drew Erny
fa4b810b41 Fix a rare segfault that can occur in service logs
Revendors swarmkit with a change that fixes a rare segfault that can
occur when following logs on a brand new service with bad bind mount
options.

Fixes docker/swarmkit#2147

Signed-off-by: Drew Erny <drew.erny@docker.com>
2017-04-28 11:43:00 -07:00
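The crash pattern, sketched under the assumption of a map of per-task managers (the actual fix is visible in the swarmkit worker diff near the end of this page): a Go map lookup for a missing key returns the zero value, so spawning a goroutine that calls a method on the resulting nil pointer dereferences nil. Checking the second return value first avoids it.

package main

import "fmt"

type taskManager struct{ id string }

// Logs dereferences the receiver, so calling it on a nil *taskManager
// panics with a nil pointer dereference.
func (tm *taskManager) Logs() { fmt.Println("logs for", tm.id) }

func main() {
	managers := map[string]*taskManager{}
	// Unchecked, go managers["missing"].Logs() would crash the process.
	if tm, ok := managers["missing"]; ok {
		go tm.Logs() // only start the goroutine when the manager exists
	} else {
		fmt.Println("no task manager yet; skipping")
	}
}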
Victor Vieux
c57fdb2a14 bump to rc2
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-26 15:08:31 -07:00
Victor Vieux
eaffbcb25d Merge pull request #32848 from aaronlehmann/vendor-swarmkit-78db8a5
[17.05] Vendor swarmkit 78db8a5
2017-04-26 15:07:09 -07:00
Aaron Lehmann
99ad8af0f2 [17.05] Vendor swarmkit 78db8a5
Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
2017-04-26 10:58:25 -07:00
Victor Vieux
8414a0d02e Merge pull request #32819 from aboch/17.05.x
[17.05.x] Vendoring libnetwork @5d4e5de
2017-04-25 17:07:12 -07:00
Antonio Murdaca
df996a9581 daemon: relabel secrets path
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
(cherry picked from commit b11af7b2f6)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-25 16:41:51 -07:00
Aaron Lehmann
aea00bf675 client: Allow hex strings as source references for ImageTag
The source of a tag operation is allowed to be a 64-character hex
string. This means it should use ParseAnyReference for validation
instead of ParseNormalizedNamed.

This fixes a regression that happened in 17.04.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
(cherry picked from commit 4a0704cdbd)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-25 16:41:30 -07:00
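The distinction, sketched with the vendored reference package (github.com/docker/distribution/reference): ParseNormalizedNamed deliberately rejects a bare 64-character hex string, while ParseAnyReference also accepts plain image IDs.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	id := "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d"

	// Rejected: a bare image ID cannot be normalized into a repository name.
	_, err := reference.ParseNormalizedNamed(id)
	fmt.Println("ParseNormalizedNamed:", err)

	// Accepted: ParseAnyReference also recognizes plain 64-char hex IDs.
	ref, err := reference.ParseAnyReference(id)
	fmt.Println("ParseAnyReference:", ref, err)
}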
Corey Farrell
7e06704af9 Fix bash-completion script.
bash-completion script for 'docker build --network' calls
__docker_plugins, the correct name for this function is
__docker_plugins_bundled.

Closes #32588

Signed-off-by: Corey Farrell <git@cfware.com>
(cherry picked from commit eede2056fe)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-25 16:41:14 -07:00
Harald Albers
d17b69c5ae Bash completion treats service logs as stable
Implements the following new CLI features:
- service logs is no longer experimental
- service logs also accepts task IDs

Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 47615c9b9b)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-25 16:40:59 -07:00
Brian Goff
2d8ebec4a8 Make sure plugin rootfs is unmounted on upgrade
In some cases, if a user specifies `-f` when disabling a plugin mounts
can still exist on the plugin rootfs.
This can cause problems during upgrade where the rootfs is removed and
may cause data loss.

To resolve this, ensure the rootfs is unmounted
before performing an upgrade.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit 83f44d232d)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-25 16:40:22 -07:00
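A hedged sketch of the safeguard (hypothetical helper; the plugin manager's recursiveUnmount differs in details such as unmount ordering and retries): scan the mount table and unmount everything at or below the plugin rootfs before the directory is renamed away.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/mount"
)

// recursiveUnmountSketch unmounts every mount at or below root.
func recursiveUnmountSketch(root string) error {
	infos, err := mount.GetMounts()
	if err != nil {
		return err
	}
	for _, m := range infos {
		if m.Mountpoint == root || strings.HasPrefix(m.Mountpoint, root+"/") {
			if err := mount.Unmount(m.Mountpoint); err != nil {
				return fmt.Errorf("unmount %s: %v", m.Mountpoint, err)
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(recursiveUnmountSketch("/var/lib/docker/plugins/<id>/rootfs"))
}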
Antonio Murdaca
de8fbb4812 remove --init-path from client
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
(cherry picked from commit a18d103b5e)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-25 16:39:55 -07:00
Alessandro Boch
dbd2c592df [17.05.x] Vendoring libnetwork @5d4e5de
Signed-off-by: Alessandro Boch <aboch@docker.com>
2017-04-25 11:19:51 -07:00
Vincent Demeester
4c29313d4c Merge pull request #32679 from aaronlehmann/test-wait-for-role-change
[17.05] integration-cli: Have TestSwarmJoinPromoteLocked wait for the role to…
2017-04-20 14:55:15 +02:00
Aaron Lehmann
2c09664574 integration-cli: Have TestSwarmJoinPromoteLocked wait for the role to change in the certificate
Since the certificate may be renewed multiple times, this check is
necessary.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
(cherry picked from commit 2b5ef9bfef)
2017-04-17 22:22:39 -07:00
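Sketched below with the standard library (the test itself uses cfssl's helpers.ParseCertificatesPEM, as the diff further down shows): since the certificate bytes change on every renewal, the robust check parses the certificate and asserts on the OrganizationalUnit that encodes the swarm role, rather than comparing raw bytes.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
)

// roleFromCert extracts the swarm role ("swarm-worker" or "swarm-manager")
// from a node certificate's OrganizationalUnit.
func roleFromCert(path string) (string, error) {
	pemBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return "", fmt.Errorf("no PEM data in %s", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", err
	}
	if len(cert.Subject.OrganizationalUnit) == 0 {
		return "", fmt.Errorf("certificate has no OU")
	}
	return cert.Subject.OrganizationalUnit[0], nil
}

func main() {
	fmt.Println(roleFromCert("/var/lib/docker/swarm/certificates/swarm-node.crt"))
}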
Victor Vieux
caafc0d5aa Merge pull request #32627 from aaronlehmann/vendor-swarmkit-78685cf
[17.05] Vendor swarmkit 78685cf
2017-04-15 11:57:35 -05:00
Aaron Lehmann
2f24f3a90b Vendor swarmkit 78685cf
Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
2017-04-14 10:22:17 -07:00
Vincent Demeester
d1ab56925b Merge pull request #32581 from cyli/re-vendor-swarmkit-17.05
Re vendor swarmkit for 17.05.x
2017-04-13 09:56:01 +02:00
Ying Li
13eac920b4 Add tests to ensure we can add an external CA to the cluster without error.

Signed-off-by: Ying Li <ying.li@docker.com>
2017-04-12 16:53:07 -07:00
Ying Li
b569b8674c Re-vendor swarmkit to a version which does not require all cluster updates to include an external CA certificate when updating external CAs.

Signed-off-by: Ying Li <ying.li@docker.com>
2017-04-12 16:53:06 -07:00
Victor Vieux
2878a859b5 bump to 17.05.0-rc1
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-11 11:17:25 -07:00
Victor Vieux
a631e3b88a Merge pull request #32498 from thaJeztah/17.05.0-rc1-changelog
17.05.0 rc1 changelog
2017-04-11 11:16:20 -07:00
Sebastiaan van Stijn
29368ac533 Update changelog for 17.05.0-ce-rc1
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-04-11 18:16:49 +02:00
38 changed files with 493 additions and 165 deletions

View file

@@ -5,6 +5,92 @@ information on the list of deprecated flags and APIs please have a look at
https://docs.docker.com/engine/deprecated/ where target removal dates can also
be found.
## 17.05.0-ce (2017-05-04)
### Builder
+ Add multi-stage build support [#31257](https://github.com/docker/docker/pull/31257) [#32063](https://github.com/docker/docker/pull/32063)
+ Allow using build-time args (`ARG`) in `FROM` [#31352](https://github.com/docker/docker/pull/31352)
+ Add an option for specifying build target [#32496](https://github.com/docker/docker/pull/32496)
* Accept `-f -` to read Dockerfile from `stdin`, but use local context for building [#31236](https://github.com/docker/docker/pull/31236)
* The values of default build-time arguments (e.g. `HTTP_PROXY`) are no longer displayed in docker image history unless a corresponding `ARG` instruction is written in the Dockerfile. [#31584](https://github.com/docker/docker/pull/31584)
- Fix setting command if a custom shell is used in a parent image [#32236](https://github.com/docker/docker/pull/32236)
- Fix `docker build --label` when the label includes single quotes and a space [#31750](https://github.com/docker/docker/pull/31750)
### Client
* Add `--mount` flag to `docker run` and `docker create` [#32251](https://github.com/docker/docker/pull/32251)
* Add `--type=secret` to `docker inspect` [#32124](https://github.com/docker/docker/pull/32124)
* Add `--format` option to `docker secret ls` [#31552](https://github.com/docker/docker/pull/31552)
* Add `--filter` option to `docker secret ls` [#30810](https://github.com/docker/docker/pull/30810)
* Add `--filter scope=<swarm|local>` to `docker network ls` [#31529](https://github.com/docker/docker/pull/31529)
* Add `--cpus` support to `docker update` [#31148](https://github.com/docker/docker/pull/31148)
* Add label filter to `docker system prune` and other `prune` commands [#30740](https://github.com/docker/docker/pull/30740)
* `docker stack rm` now accepts multiple stacks as input [#32110](https://github.com/docker/docker/pull/32110)
* Improve `docker version --format` option when the client has downgraded the API version [#31022](https://github.com/docker/docker/pull/31022)
* Prompt when using an encrypted client certificate to connect to a docker daemon [#31364](https://github.com/docker/docker/pull/31364)
* Display created tags on successful `docker build` [#32077](https://github.com/docker/docker/pull/32077)
* Cleanup compose convert error messages [#32087](https://github.com/moby/moby/pull/32087)
### Contrib
+ Add support for building docker debs for Ubuntu 17.04 Zesty on amd64 [#32435](https://github.com/docker/docker/pull/32435)
### Daemon
- Fix `--api-cors-header` being ignored if `--api-enable-cors` is not set [#32174](https://github.com/docker/docker/pull/32174)
- Cleanup docker tmp dir on start [#31741](https://github.com/docker/docker/pull/31741)
- Deprecate `--graph` flag in favor of `--data-root` [#28696](https://github.com/docker/docker/pull/28696)
### Logging
+ Add support for logging driver plugins [#28403](https://github.com/docker/docker/pull/28403)
* Add support for showing logs of individual tasks to `docker service logs`, and add `/task/{id}/logs` REST endpoint [#32015](https://github.com/docker/docker/pull/32015)
* Add `--log-opt env-regex` option to match environment variables using a regular expression [#27565](https://github.com/docker/docker/pull/27565)
### Networking
+ Allow users to replace and customize the ingress network [#31714](https://github.com/docker/docker/pull/31714)
- Fix UDP traffic in containers not working after the container is restarted [#32505](https://github.com/docker/docker/pull/32505)
- Fix files being written to `/var/lib/docker` if a different data-root is set [#32505](https://github.com/docker/docker/pull/32505)
### Runtime
- Ensure health probe is stopped when a container exits [#32274](https://github.com/docker/docker/pull/32274)
### Swarm Mode
+ Add update/rollback order for services (`--update-order` / `--rollback-order`) [#30261](https://github.com/docker/docker/pull/30261)
+ Add support for synchronous `service create` and `service update` [#31144](https://github.com/docker/docker/pull/31144)
+ Add support for "grace periods" on healthchecks through the `HEALTHCHECK --start-period` and `--health-start-period` flag to
`docker service create`, `docker service update`, `docker create`, and `docker run` to support containers with an initial startup
time [#28938](https://github.com/docker/docker/pull/28938)
* `docker service create` now omits fields that are not specified by the user, when possible. This will allow defaults to be applied inside the manager [#32284](https://github.com/docker/docker/pull/32284)
* `docker service inspect` now shows default values for fields that are not specified by the user [#32284](https://github.com/docker/docker/pull/32284)
* Move `docker service logs` out of experimental [#32462](https://github.com/docker/docker/pull/32462)
* Add support for Credential Spec and SELinux to services to the API [#32339](https://github.com/docker/docker/pull/32339)
* Add `--entrypoint` flag to `docker service create` and `docker service update` [#29228](https://github.com/docker/docker/pull/29228)
* Add `--network-add` and `--network-rm` to `docker service update` [#32062](https://github.com/docker/docker/pull/32062)
* Add `--credential-spec` flag to `docker service create` and `docker service update` [#32339](https://github.com/docker/docker/pull/32339)
* Add `--filter mode=<global|replicated>` to `docker service ls` [#31538](https://github.com/docker/docker/pull/31538)
* Resolve network IDs on the client side, instead of in the daemon when creating services [#32062](https://github.com/docker/docker/pull/32062)
* Add `--format` option to `docker node ls` [#30424](https://github.com/docker/docker/pull/30424)
* Add `--prune` option to `docker stack deploy` to remove services that are no longer defined in the docker-compose file [#31302](https://github.com/docker/docker/pull/31302)
* Add `PORTS` column for `docker service ls` when using `ingress` mode [#30813](https://github.com/docker/docker/pull/30813)
- Fix unnecessary re-deployment of tasks when environment variables are used [#32364](https://github.com/docker/docker/pull/32364)
- Fix `docker stack deploy` not supporting `endpoint_mode` when deploying from a docker compose file [#32333](https://github.com/docker/docker/pull/32333)
- Proceed with startup if cluster component cannot be created to allow recovering from a broken swarm setup [#31631](https://github.com/docker/docker/pull/31631)
### Security
* Allow setting SELinux type or MCS labels when using `--ipc=container:` or `--ipc=host` [#30652](https://github.com/docker/docker/pull/30652)
### Deprecation
- Deprecate `--api-enable-cors` daemon flag. This flag was marked deprecated in Docker 1.6.0 but not listed in deprecated features [#32352](https://github.com/docker/docker/pull/32352)
- Remove Ubuntu 12.04 (Precise Pangolin) as supported platform. Ubuntu 12.04 is EOL, and no longer receives updates [#32520](https://github.com/docker/docker/pull/32520)
## 17.04.0-ce (2017-04-05)
### Builder

View file

@@ -1 +1 @@
17.05.0-dev
17.05.0-ce

View file

@@ -377,7 +377,4 @@ type HostConfig struct {
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
// Custom init path
InitPath string `json:",omitempty"`
}

View file

@@ -408,9 +408,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
// that starts with "foo=abc" to be considered part of a build-time env var.
saveCmd := config.Cmd
if len(cmdBuildEnv) > 0 {
sort.Strings(cmdBuildEnv)
tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...))
saveCmd = prependEnvOnCmd(b.buildArgs, cmdBuildEnv, saveCmd)
}
b.runConfig.Cmd = saveCmd
@@ -445,26 +443,24 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
// properly match it.
b.runConfig.Env = env
// remove builtinAllowedBuildArgs (see: builder.go) from the saveCmd
// these args are transparent so resulting image should be the same regardless of the value
if len(cmdBuildEnv) > 0 {
saveCmd = config.Cmd
tmpBuildEnv := make([]string, len(cmdBuildEnv))
copy(tmpBuildEnv, cmdBuildEnv)
for i, env := range tmpBuildEnv {
key := strings.SplitN(env, "=", 2)[0]
if b.buildArgs.IsUnreferencedBuiltin(key) {
tmpBuildEnv = append(tmpBuildEnv[:i], tmpBuildEnv[i+1:]...)
}
}
sort.Strings(tmpBuildEnv)
tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...))
}
b.runConfig.Cmd = saveCmd
return b.commit(cID, cmd, "run")
}
func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice {
var tmpBuildEnv []string
for _, env := range buildArgVars {
key := strings.SplitN(env, "=", 2)[0]
if !buildArgs.IsUnreferencedBuiltin(key) {
tmpBuildEnv = append(tmpBuildEnv, env)
}
}
sort.Strings(tmpBuildEnv)
tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
return strslice.StrSlice(append(tmpEnv, cmd...))
}
// CMD foo
//
// Set the default command to run in the container (which may be empty).

View file

@@ -118,7 +118,6 @@ type containerOptions struct {
runtime string
autoRemove bool
init bool
initPath string
Image string
Args []string
@@ -284,8 +283,6 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes")
flags.SetAnnotation("init", "version", []string{"1.25"})
flags.StringVar(&copts.initPath, "init-path", "", "Path to the docker-init binary")
flags.SetAnnotation("init-path", "version", []string{"1.25"})
return copts
}

View file

@@ -10,7 +10,7 @@ import (
// ImageTag tags an image in the docker host
func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
if _, err := reference.ParseNormalizedNamed(source); err != nil {
if _, err := reference.ParseAnyReference(source); err != nil {
return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
}

View file

@@ -46,6 +46,17 @@ func TestImageTagInvalidSourceImageName(t *testing.T) {
}
}
func TestImageTagHexSource(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusOK, "OK")),
}
err := client.ImageTag(context.Background(), "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "repo:tag")
if err != nil {
t.Fatalf("got error: %v", err)
}
}
func TestImageTag(t *testing.T) {
expectedURL := "/images/image_id/tag"
tagCases := []struct {

View file

@@ -463,6 +463,16 @@ __docker_complete_services() {
COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") )
}
# __docker_tasks returns a list of all task IDs.
__docker_tasks() {
__docker_q service ps --format '{{.ID}}' ""
}
# __docker_complete_services_and_tasks applies completion of services and task IDs.
__docker_complete_services_and_tasks() {
COMPREPLY=( $(compgen -W "$(__docker_services "$@") $(__docker_tasks)" -- "$cur") )
}
# __docker_append_to_completions appends the word passed as an argument to every
# word in `$COMPREPLY`.
# Normally you do this with `compgen -S` while generating the completions.
@@ -1502,7 +1512,6 @@ _docker_container_run_and_create() {
--expose
--group-add
--hostname -h
--init-path
--ip
--ip6
--ipc
@@ -1626,7 +1635,7 @@ _docker_container_run_and_create() {
__docker_complete_capabilities_droppable
return
;;
--cidfile|--env-file|--init-path|--label-file)
--cidfile|--env-file|--label-file)
_filedir
return
;;
@@ -2258,7 +2267,7 @@ _docker_image_build() {
__docker_complete_containers_all --cur "${cur#*:}"
;;
*)
COMPREPLY=( $( compgen -W "$(__docker_plugins --type Network) $(__docker_networks) container:" -- "$cur") )
COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") )
if [ "${COMPREPLY[*]}" = "container:" ] ; then
__docker_nospace
fi
@@ -2830,13 +2839,13 @@ _docker_service() {
local subcommands="
create
inspect
logs
ls
rm
scale
ps
update
"
__docker_daemon_is_experimental && subcommands+="logs"
local aliases="
list
@@ -2888,7 +2897,7 @@ _docker_service_logs() {
*)
local counter=$(__docker_pos_first_nonflag '--since|--tail')
if [ $cword -eq $counter ]; then
__docker_complete_services
__docker_complete_services_and_tasks
fi
;;
esac

View file

@@ -214,6 +214,8 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
}
}
label.Relabel(localMountPath, c.MountLabel, false)
// remount secrets ro
if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil {
return errors.Wrap(err, "unable to remount secret dir as readonly")
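For context, the remount step uses the usual two-phase tmpfs pattern: mount writable, populate the secret files, then remount read-only in place. A raw-syscall sketch, assuming a hypothetical secrets path and ownership options (the daemon goes through pkg/mount with tmpfsOwnership, as above):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	dir := "/var/run/docker/containers/<id>/secrets" // hypothetical path
	// After populating the tmpfs while writable, flip it read-only
	// without unmounting: MS_REMOUNT preserves the mount, MS_RDONLY
	// drops write access.
	if err := syscall.Mount("tmpfs", dir, "tmpfs",
		syscall.MS_REMOUNT|syscall.MS_RDONLY, "uid=0,gid=0"); err != nil {
		fmt.Println("remount failed:", err)
	}
}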

View file

@@ -624,7 +624,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
(c.HostConfig.Init == nil && daemon.configStore.Init) {
s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...)
var path string
if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" {
if daemon.configStore.InitPath == "" {
path, err = exec.LookPath(daemonconfig.DefaultInitBinary)
if err != nil {
return err
@@ -633,9 +633,6 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
if daemon.configStore.InitPath != "" {
path = daemon.configStore.InitPath
}
if c.HostConfig.InitPath != "" {
path = c.HostConfig.InitPath
}
s.Mounts = append(s.Mounts, specs.Mount{
Destination: "/dev/init",
Type: "bind",

View file

@@ -23,6 +23,7 @@ keywords: "API, Docker, rcli, REST, documentation"
* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
* `POST /containers/create`, `POST /service/create` and `POST /services/(id or name)/update` now takes the field `StartPeriod` as a part of the `HealthConfig` allowing for specification of a period during which the container should not be considered unhealthy even if health checks do not pass.
* `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output.
## v1.28 API changes

View file

@@ -20,6 +20,17 @@ The following list of features are deprecated in Engine.
To learn more about Docker Engine's deprecation policy,
see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
### Asynchronous `service create` and `service update`
**Deprecated In Release: v17.05.0**
**Disabled by default in release: v17.09**
Docker 17.05.0 added an optional `--detach=false` option to make the
`docker service create` and `docker service update` commands work synchronously. This
option will be enabled by default in Docker 17.09, at which point the `--detach`
flag can be used to keep the previous (asynchronous) behavior.
### `-g` and `--graph` flags on `dockerd`
**Deprecated In Release: v17.05.0**

View file

@@ -66,7 +66,6 @@ Options:
--help Print usage
-h, --hostname string Container host name
--init Run an init inside the container that forwards signals and reaps processes
--init-path string Path to the docker-init binary
-i, --interactive Keep STDIN open even if not attached
--io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only)
--io-maxiops uint Maximum IOps limit for the system drive (Windows only)

View file

@@ -70,7 +70,6 @@ Options:
--help Print usage
-h, --hostname string Container host name
--init Run an init inside the container that forwards signals and reaps processes
--init-path string Path to the docker-init binary
-i, --interactive Keep STDIN open even if not attached
--io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only)
(Windows only). The format is `<number><unit>`.

View file

@@ -145,6 +145,25 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
// TODO: when root rotation is in, convert to a series of root rotation tests instead.
// currently just makes sure that we don't have to provide a CA certificate when
// providing an external CA
d1 := s.AddDaemon(c, false, false)
c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
d1.UpdateSwarm(c, func(s *swarm.Spec) {
s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
{
Protocol: swarm.ExternalCAProtocolCFSSL,
URL: "https://thishasnoca.org",
},
}
})
info, err := d1.SwarmInfo()
c.Assert(err, checker.IsNil)
c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 1)
}
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, false, false)

View file

@@ -4344,23 +4344,37 @@ func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) {
ARG %s
ARG %s
RUN echo "Testing Build Args!"`, envKey, explicitProxyKey)
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
buildImage := func(imgName string) string {
cli.BuildCmd(c, imgName,
cli.WithFlags("--build-arg", "https_proxy=https://proxy.example.com",
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
"--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal),
"--build-arg", proxy),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Success)
)
return getIDByName(c, imgName)
}
origID := buildImage(imgName)
result := cli.DockerCmd(c, "history", "--no-trunc", imgName)
out := result.Stdout()
out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
if strings.Contains(out, proxy) {
c.Fatalf("failed to exclude proxy settings from history!")
}
if strings.Contains(out, "https_proxy") {
c.Fatalf("failed to exclude proxy settings from history!")
}
if !strings.Contains(out, fmt.Sprintf("%s=%s", envKey, envVal)) {
c.Fatalf("explicitly defined ARG %s is not in output", explicitProxyKey)
}
if !strings.Contains(out, fmt.Sprintf("%s=%s", envKey, envVal)) {
c.Fatalf("missing build arguments from output")
}
cacheID := buildImage(imgName + "-two")
c.Assert(origID, checker.Equals, cacheID)
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {

View file

@@ -88,8 +88,8 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) {
s.d.Restart(c)
if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
c.Fatal(err)
if out, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
c.Fatal(err, out)
}
if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil {

View file

@@ -15,6 +15,7 @@ import (
"strings"
"time"
"github.com/cloudflare/cfssl/helpers"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/integration-cli/checker"
@@ -50,6 +51,13 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) {
c.Assert(out, checker.Contains, "minimum certificate expiry time")
spec = getSpec()
c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
// passing an external CA (this is without starting a root rotation) does not fail
out, err = d.Cmd("swarm", "update", "--external-ca", "protocol=cfssl,url=https://something.org")
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
spec = getSpec()
c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 1)
}
func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {
@@ -60,12 +68,14 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {
return sw.Spec
}
cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s"),
cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s",
"--external-ca", "protocol=cfssl,url=https://something.org"),
cli.Daemon(d.Daemon)).Assert(c, icmd.Success)
spec := getSpec()
c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second)
c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 1)
c.Assert(d.Leave(true), checker.IsNil)
time.Sleep(500 * time.Millisecond) // https://github.com/docker/swarmkit/issues/1421
@@ -1212,10 +1222,6 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
}
// get d3's cert
d3cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
c.Assert(err, checker.IsNil)
// demote manager back to worker - workers are not locked
outs, err = d1.Cmd("node", "demote", d3.Info.NodeID)
c.Assert(err, checker.IsNil)
@@ -1228,12 +1234,16 @@
// is set to autolock)
waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False)
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
certBytes, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
if err != nil {
return "", check.Commentf("error: %v", err)
}
return string(cert), check.Commentf("cert: %v", string(cert))
}, checker.Not(checker.Equals), string(d3cert))
certs, err := helpers.ParseCertificatesPEM(certBytes)
if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
return certs[0].Subject.OrganizationalUnit[0], nil
}
return "", check.Commentf("could not get organizational unit from certificate")
}, checker.Equals, "swarm-worker")
// by now, it should *never* be locked on restart
d3.Restart(c)

View file

@@ -43,7 +43,6 @@ docker-run - Run a command in a new container
[**-h**|**--hostname**[=*HOSTNAME*]]
[**--help**]
[**--init**]
[**--init-path**[=*[]*]]
[**-i**|**--interactive**]
[**--ip**[=*IPv4-ADDRESS*]]
[**--ip6**[=*IPv6-ADDRESS*]]
@@ -327,9 +326,6 @@ redirection on the host system.
**--init**
Run an init inside the container that forwards signals and reaps processes
**--init-path**=""
Path to the docker-init binary
**-i**, **--interactive**=*true*|*false*
Keep STDIN open even if not attached. The default is *false*.

View file

@@ -45,4 +45,5 @@ const (
RELATIME = 0
REMOUNT = 0
STRICTATIME = 0
mntDetach = 0
)

View file

@@ -82,4 +82,6 @@ const (
// it possible for the kernel to default to relatime or noatime but still
// allow userspace to override it.
STRICTATIME = syscall.MS_STRICTATIME
mntDetach = syscall.MNT_DETACH
)

View file

@@ -27,4 +27,5 @@ const (
STRICTATIME = 0
SYNCHRONOUS = 0
RDONLY = 0
mntDetach = 0
)

View file

@@ -1,9 +1,5 @@
package mount
import (
"time"
)
// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
return parseMountTable()
@@ -49,23 +45,11 @@ func ForceMount(device, target, mType, options string) error {
return mount(device, target, mType, uintptr(flag), data)
}
// Unmount will unmount the target filesystem, so long as it is mounted.
// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
if mounted, err := Mounted(target); err != nil || !mounted {
return err
}
return ForceUnmount(target)
}
// ForceUnmount will force an unmount of the target filesystem, regardless if
// it is mounted or not.
func ForceUnmount(target string) (err error) {
// Simple retry logic for unmount
for i := 0; i < 10; i++ {
if err = unmount(target, 0); err == nil {
return nil
}
time.Sleep(100 * time.Millisecond)
}
return
return unmount(target, mntDetach)
}

View file

@@ -648,7 +648,7 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
func getMounts(root string) ([]string, error) {
infos, err := mount.GetMounts()
if err != nil {
return nil, errors.Wrap(err, "failed to read mount table while performing recursive unmount")
return nil, errors.Wrap(err, "failed to read mount table")
}
var mounts []string

View file

@@ -199,9 +199,17 @@ func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobs
pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
orig := filepath.Join(pdir, "rootfs")
// Make sure nothing is mounted
// This could happen if the plugin was disabled with `-f` with active mounts.
// If anything in `orig` is still mounted, this should error out.
if err := recursiveUnmount(orig); err != nil {
return err
}
backup := orig + "-old"
if err := os.Rename(orig, backup); err != nil {
return err
return errors.Wrap(err, "error backing up plugin data before upgrade")
}
defer func() {

View file

@@ -24,7 +24,7 @@ github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
github.com/imdario/mergo 0.2.1
#get libnetwork packages
github.com/docker/libnetwork b13e0604016a4944025aaff521d9c125850b0d04
github.com/docker/libnetwork 5d4e5de2f9962c2de8a7872128e2cc09dfdd99aa
github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -105,7 +105,7 @@ github.com/docker/containerd 9048e5e50717ea4497b757314bad98ea3763c145
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
# cluster
github.com/docker/swarmkit d5232280c510d70755ab11305d46a5704735371a
github.com/docker/swarmkit ae52d9de97b91eee978bc2fe411bc85b33eb82dd
github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e

View file

@@ -187,6 +187,13 @@ func (c *controller) agentSetup() error {
clusterProvider := c.cfg.Daemon.ClusterProvider
agent := c.agent
c.Unlock()
if clusterProvider == nil {
msg := "Aborting initialization of Libnetwork Agent because cluster provider is now unset"
logrus.Errorf(msg)
return fmt.Errorf(msg)
}
bindAddr := clusterProvider.GetLocalAddress()
advAddr := clusterProvider.GetAdvertiseAddress()
remote := clusterProvider.GetRemoteAddress()

View file

@@ -4,6 +4,7 @@ import (
"fmt"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/types"
)
@@ -72,9 +73,19 @@ func (sb *sandbox) setupDefaultGW() error {
if err != nil {
return fmt.Errorf("container %s: endpoint create on GW Network failed: %v", sb.containerID, err)
}
defer func() {
if err != nil {
if err2 := newEp.Delete(true); err2 != nil {
logrus.Warnf("Failed to remove gw endpoint for container %s after failing to join the gateway network: %v",
sb.containerID, err2)
}
}
}()
epLocal := newEp.(*endpoint)
if err := epLocal.sbJoin(sb); err != nil {
if err = epLocal.sbJoin(sb); err != nil {
return fmt.Errorf("container %s: endpoint join on GW Network failed: %v", sb.containerID, err)
}

View file

@@ -427,7 +427,7 @@ func (ep *endpoint) Join(sbox Sandbox, options ...EndpointOption) error {
return ep.sbJoin(sb, options...)
}
func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) (err error) {
n, err := ep.getNetworkFromStore()
if err != nil {
return fmt.Errorf("failed to get network from store during join: %v", err)
@@ -462,7 +462,7 @@
d, err := n.driver(true)
if err != nil {
return fmt.Errorf("failed to join endpoint: %v", err)
return fmt.Errorf("failed to get driver during join: %v", err)
}
err = d.Join(nid, epid, sb.Key(), ep, sb.Labels())
@@ -471,8 +471,8 @@
}
defer func() {
if err != nil {
if err := d.Leave(nid, epid); err != nil {
logrus.Warnf("driver leave failed while rolling back join: %v", err)
if e := d.Leave(nid, epid); e != nil {
logrus.Warnf("driver leave failed while rolling back join: %v", e)
}
}
}()
@@ -538,11 +538,11 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
extN, err := extEp.getNetworkFromStore()
if err != nil {
return fmt.Errorf("failed to get network from store during join: %v", err)
return fmt.Errorf("failed to get network from store for revoking external connectivity during join: %v", err)
}
extD, err := extN.driver(true)
if err != nil {
return fmt.Errorf("failed to join endpoint: %v", err)
return fmt.Errorf("failed to get driver for revoking external connectivity during join: %v", err)
}
if err = extD.RevokeExternalConnectivity(extEp.network.ID(), extEp.ID()); err != nil {
return types.InternalErrorf(
@@ -570,9 +570,9 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
}
if !sb.needDefaultGW() {
if err := sb.clearDefaultGW(); err != nil {
if e := sb.clearDefaultGW(); e != nil {
logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
sb.ID(), sb.ContainerID(), err)
sb.ID(), sb.ContainerID(), e)
}
}
@@ -705,7 +705,7 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
d, err := n.driver(!force)
if err != nil {
return fmt.Errorf("failed to leave endpoint: %v", err)
return fmt.Errorf("failed to get driver during endpoint leave: %v", err)
}
ep.Lock()
@@ -765,11 +765,11 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
logrus.Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
extN, err := extEp.getNetworkFromStore()
if err != nil {
return fmt.Errorf("failed to get network from store during leave: %v", err)
return fmt.Errorf("failed to get network from store for programming external connectivity during leave: %v", err)
}
extD, err := extN.driver(true)
if err != nil {
return fmt.Errorf("failed to leave endpoint: %v", err)
return fmt.Errorf("failed to get driver for programming external connectivity during leave: %v", err)
}
if err := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); err != nil {
logrus.Warnf("driver failed programming external connectivity on endpoint %s: (%s) %v",

View file

@@ -86,6 +86,15 @@ func (nDB *NetworkDB) sendNodeEvent(event NodeEvent_Type) error {
notify: notifyCh,
})
nDB.RLock()
noPeers := len(nDB.nodes) <= 1
nDB.RUnlock()
// Message enqueued, do not wait for a send if no peer is present
if noPeers {
return nil
}
// Wait for the broadcast
select {
case <-notifyCh:

View file

@@ -17,7 +17,7 @@ import (
)
const (
reapInterval = 60 * time.Second
reapInterval = 30 * time.Minute
reapPeriod = 5 * time.Second
retryInterval = 1 * time.Second
nodeReapInterval = 24 * time.Hour

View file

@@ -426,14 +426,19 @@ func (w *worker) Listen(ctx context.Context, reporter StatusReporter) {
}
func (w *worker) startTask(ctx context.Context, tx *bolt.Tx, task *api.Task) error {
w.taskevents.Publish(task.Copy())
_, err := w.taskManager(ctx, tx, task) // side-effect taskManager creation.
if err != nil {
log.G(ctx).WithError(err).Error("failed to start taskManager")
// we ignore this error: it gets reported in the taskStatus within
// `newTaskManager`. We log it here and move on. If there is an
// attempted restart, the lack of taskManager will have this retry
// again.
return nil
}
// TODO(stevvooe): Add start method for taskmanager
// only publish if controller resolution was successful.
w.taskevents.Publish(task.Copy())
return nil
}
@@ -464,7 +469,7 @@ func (w *worker) newTaskManager(ctx context.Context, tx *bolt.Tx, task *api.Task
}
if err != nil {
log.G(ctx).Error("controller resolution failed")
log.G(ctx).WithError(err).Error("controller resolution failed")
return nil, err
}
@@ -568,9 +573,14 @@ func (w *worker) Subscribe(ctx context.Context, subscription *api.SubscriptionMe
case v := <-ch:
task := v.(*api.Task)
if match(task) {
w.mu.Lock()
go w.taskManagers[task.ID].Logs(ctx, *subscription.Options, publisher)
w.mu.Unlock()
w.mu.RLock()
tm, ok := w.taskManagers[task.ID]
w.mu.RUnlock()
if !ok {
continue
}
go tm.Logs(ctx, *subscription.Options, publisher)
}
case <-ctx.Done():
return ctx.Err()

View file

@@ -164,7 +164,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
var allocatedServices []*api.Service
for _, s := range services {
if nc.nwkAllocator.IsServiceAllocated(s, networkallocator.OnInit) {
if !nc.nwkAllocator.ServiceNeedsAllocation(s, networkallocator.OnInit) {
continue
}
@@ -317,7 +317,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
break
}
if nc.nwkAllocator.IsServiceAllocated(s) {
if !nc.nwkAllocator.ServiceNeedsAllocation(s) {
break
}
@@ -345,7 +345,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
break
}
if nc.nwkAllocator.IsServiceAllocated(s) {
if !nc.nwkAllocator.ServiceNeedsAllocation(s) {
if nc.nwkAllocator.PortsAllocatedInHostPublishMode(s) {
break
}
@@ -544,7 +544,7 @@ func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bo
// network configured or service endpoints have been
// allocated.
return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) &&
(s == nil || nc.nwkAllocator.IsServiceAllocated(s))
(s == nil || !nc.nwkAllocator.ServiceNeedsAllocation(s))
}
func taskUpdateNetworks(t *api.Task, networks []*api.NetworkAttachment) {
@@ -886,7 +886,7 @@ func (a *Allocator) allocateTask(ctx context.Context, t *api.Task) (err error) {
return
}
if !nc.nwkAllocator.IsServiceAllocated(s) {
if nc.nwkAllocator.ServiceNeedsAllocation(s) {
err = fmt.Errorf("service %s to which this task %s belongs has pending allocations", s.ID, t.ID)
return
}
@@ -1000,7 +1000,7 @@ func (a *Allocator) procUnallocatedServices(ctx context.Context) {
nc := a.netCtx
var allocatedServices []*api.Service
for _, s := range nc.unallocatedServices {
if !nc.nwkAllocator.IsServiceAllocated(s) {
if nc.nwkAllocator.ServiceNeedsAllocation(s) {
if err := a.allocateService(ctx, s); err != nil {
log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated service %s", s.ID)
continue
@@ -1089,12 +1089,7 @@ func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
// IsIngressNetwork returns whether the passed network is an ingress network.
func IsIngressNetwork(nw *api.Network) bool {
if nw.Spec.Ingress {
return true
}
// Check if legacy defined ingress network
_, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]
return ok && nw.Spec.Annotations.Name == "ingress"
return networkallocator.IsIngressNetwork(nw)
}
// GetIngressNetwork fetches the ingress network from store.

View file

@@ -153,7 +153,7 @@
// IP and ports needed by the service.
func (na *NetworkAllocator) ServiceAllocate(s *api.Service) (err error) {
if err = na.portAllocator.serviceAllocatePorts(s); err != nil {
return
return err
}
defer func() {
if err != nil {
@@ -183,14 +183,7 @@
}
delete(na.services, s.ID)
return
}
// First allocate VIPs for all the pre-populated endpoint attachments
for _, eAttach := range s.Endpoint.VirtualIPs {
if err = na.allocateVIP(eAttach); err != nil {
return
}
return nil
}
// Always prefer NetworkAttachmentConfig in the TaskSpec
@@ -199,24 +192,57 @@
specNetworks = s.Spec.Networks
}
outer:
// Allocate VIPs for all the pre-populated endpoint attachments
eVIPs := s.Endpoint.VirtualIPs[:0]
vipLoop:
for _, eAttach := range s.Endpoint.VirtualIPs {
if na.IsVIPOnIngressNetwork(eAttach) {
if err = na.allocateVIP(eAttach); err != nil {
return err
}
eVIPs = append(eVIPs, eAttach)
continue vipLoop
}
for _, nAttach := range specNetworks {
if nAttach.Target == eAttach.NetworkID {
if err = na.allocateVIP(eAttach); err != nil {
return err
}
eVIPs = append(eVIPs, eAttach)
continue vipLoop
}
}
// If the network of the VIP is not part of the service spec,
// deallocate the vip
na.deallocateVIP(eAttach)
}
networkLoop:
for _, nAttach := range specNetworks {
for _, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == nAttach.Target {
continue outer
continue networkLoop
}
}
vip := &api.Endpoint_VirtualIP{NetworkID: nAttach.Target}
if err = na.allocateVIP(vip); err != nil {
return
return err
}
s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, vip)
eVIPs = append(eVIPs, vip)
}
if len(eVIPs) > 0 {
na.services[s.ID] = struct{}{}
return
} else {
delete(na.services, s.ID)
}
s.Endpoint.VirtualIPs = eVIPs
return nil
}
// ServiceDeallocate de-allocates all the network resources such as
@@ -234,6 +260,7 @@ func (na *NetworkAllocator) ServiceDeallocate(s *api.Service) error {
WithField("vip.addr", vip.Addr).Error("error deallocating vip")
}
}
s.Endpoint.VirtualIPs = nil
na.portAllocator.serviceDeallocatePorts(s)
delete(na.services, s.ID)
@@ -300,41 +327,78 @@ func OnInit(options *ServiceAllocationOpts) {
options.OnInit = true
}
// IsServiceAllocated returns if the passed service has its network resources allocated or not.
// init bool indicates if the func is called during allocator initialization stage.
func (na *NetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*ServiceAllocationOpts)) bool {
// ServiceNeedsAllocation returns true if the passed service needs to have network resources allocated/updated.
func (na *NetworkAllocator) ServiceNeedsAllocation(s *api.Service, flags ...func(*ServiceAllocationOpts)) bool {
var options ServiceAllocationOpts
for _, flag := range flags {
flag(&options)
}
// Always prefer NetworkAttachmentConfig in the TaskSpec
specNetworks := s.Spec.Task.Networks
if len(specNetworks) == 0 && len(s.Spec.Networks) != 0 {
specNetworks = s.Spec.Networks
}
// If endpoint mode is VIP and allocator does not have the
// service in VIP allocated set then it is not allocated.
if (len(s.Spec.Task.Networks) != 0 || len(s.Spec.Networks) != 0) &&
// service in VIP allocated set then it needs to be allocated.
if len(specNetworks) != 0 &&
(s.Spec.Endpoint == nil ||
s.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP) {
if _, ok := na.services[s.ID]; !ok {
return false
return true
}
if s.Endpoint == nil || len(s.Endpoint.VirtualIPs) == 0 {
return true
}
// If the spec has networks which don't have a corresponding VIP,
// the service needs to be allocated.
networkLoop:
for _, net := range specNetworks {
for _, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == net.Target {
continue networkLoop
}
}
return true
}
}
// If the spec no longer has networks attached and has a vip allocated
// from a previous spec, the service needs to be allocated.
if s.Endpoint != nil {
vipLoop:
for _, vip := range s.Endpoint.VirtualIPs {
if na.IsVIPOnIngressNetwork(vip) {
continue vipLoop
}
for _, net := range specNetworks {
if vip.NetworkID == net.Target {
continue vipLoop
}
}
return true
}
}
// If the endpoint mode is DNSRR and allocator has the service
// in VIP allocated set then we return not allocated to make
// in VIP allocated set then we return true (allocation needed) to make
// sure the allocator triggers networkallocator to free up the
// resources if any.
if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {
if _, ok := na.services[s.ID]; ok {
return false
return true
}
}
if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||
(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {
return na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
return !na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
}
return true
return false
}
// IsNodeAllocated returns if the passed node has its network resources allocated or not.
@@ -828,3 +892,26 @@ func initializeDrivers(reg *drvregistry.DrvRegistry) error {
}
return nil
}
// IsVIPOnIngressNetwork check if the vip is in ingress network
func (na *NetworkAllocator) IsVIPOnIngressNetwork(vip *api.Endpoint_VirtualIP) bool {
if vip == nil {
return false
}
localNet := na.getNetwork(vip.NetworkID)
if localNet != nil && localNet.nw != nil {
return IsIngressNetwork(localNet.nw)
}
return false
}
// IsIngressNetwork check if the network is an ingress network
func IsIngressNetwork(nw *api.Network) bool {
if nw.Spec.Ingress {
return true
}
// Check if legacy defined ingress network
_, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]
return ok && nw.Spec.Annotations.Name == "ingress"
}

View file

@@ -148,14 +148,16 @@ func validateHasAtLeastOneExternalCA(ctx context.Context, externalCAs map[string
// validates that the list of external CAs have valid certs associated with them, and produce a mapping of subject/pubkey:external
// for later validation of required external CAs
func getNormalizedExtCAs(caConfig *api.CAConfig) (map[string][]*api.ExternalCA, error) {
func getNormalizedExtCAs(caConfig *api.CAConfig, normalizedCurrentRootCACert []byte) (map[string][]*api.ExternalCA, error) {
extCAs := make(map[string][]*api.ExternalCA)
for _, extCA := range caConfig.ExternalCAs {
if len(extCA.CACert) == 0 {
return nil, grpc.Errorf(codes.InvalidArgument, "must specify CA certificate for each external CA")
associatedCert := normalizedCurrentRootCACert
// if no associated cert is provided, assume it's the current root cert
if len(extCA.CACert) > 0 {
associatedCert = ca.NormalizePEMs(extCA.CACert)
}
certKey := string(ca.NormalizePEMs(extCA.CACert))
certKey := string(associatedCert)
extCAs[certKey] = append(extCAs[certKey], extCA)
}
@@ -191,12 +193,12 @@ func validateCAConfig(ctx context.Context, securityConfig *ca.SecurityConfig, cl
return nil, grpc.Errorf(codes.InvalidArgument, "if a signing CA key is provided, the signing CA cert must also be provided")
}
extCAs, err := getNormalizedExtCAs(newConfig) // validate that the list of external CAs is not malformed
normalizedRootCA := ca.NormalizePEMs(cluster.RootCA.CACert)
extCAs, err := getNormalizedExtCAs(newConfig, normalizedRootCA) // validate that the list of external CAs is not malformed
if err != nil {
return nil, err
}
normalizedRootCA := ca.NormalizePEMs(cluster.RootCA.CACert)
var oldCertExtCAs []*api.ExternalCA
if !hasSigningKey(&cluster.RootCA) {
oldCertExtCAs, err = validateHasAtLeastOneExternalCA(ctx, extCAs, securityConfig, normalizedRootCA, "current")

View file

@@ -297,8 +297,9 @@ func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []strin
updates := make(map[*api.Service][]orchestrator.Slot)
_, err := g.store.Batch(func(batch *store.Batch) error {
var updateTasks []orchestrator.Slot
for _, serviceID := range serviceIDs {
var updateTasks []orchestrator.Slot
if _, exists := nodeTasks[serviceID]; !exists {
continue
}
@@ -352,7 +353,6 @@ func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []strin
for service, updateTasks := range updates {
g.updater.Update(ctx, g.cluster, service, updateTasks)
}
}
// updateNode updates g.nodes based on the current node value

View file

@@ -133,6 +133,30 @@ type Node struct {
manager *manager.Manager
notifyNodeChange chan *agent.NodeChanges // used by the agent to relay node updates from the dispatcher Session stream to (*Node).run
unlockKey []byte
// lastNodeRole is the last-seen value of Node.Role, used to make role
// changes "edge triggered" and avoid renewal loops.
lastNodeRole lastSeenRole
// lastNodeDesiredRole is the last-seen value of Node.Spec.DesiredRole,
// used to make role changes "edge triggered" and avoid renewal loops.
// This exists in addition to lastNodeRole to support older CAs that
// only fill in the DesiredRole field.
lastNodeDesiredRole lastSeenRole
}
type lastSeenRole struct {
role *api.NodeRole
}
// observe notes the latest value of this node role, and returns true if it
// is the first seen value, or is different from the most recently seen value.
func (l *lastSeenRole) observe(newRole api.NodeRole) bool {
changed := l.role == nil || *l.role != newRole
if l.role == nil {
l.role = new(api.NodeRole)
}
*l.role = newRole
return changed
}
// RemoteAPIAddr returns address on which remote manager api listens.
@@ -279,17 +303,35 @@ func (n *Node) run(ctx context.Context) (err error) {
return
case nodeChanges := <-n.notifyNodeChange:
n.Lock()
currentRole := n.role
currentRole := api.NodeRoleWorker
if n.role == ca.ManagerRole {
currentRole = api.NodeRoleManager
}
n.Unlock()
if nodeChanges.Node != nil {
role := ca.WorkerRole
if nodeChanges.Node.Role == api.NodeRoleManager {
role = ca.ManagerRole
}
// If the server is sending us a ForceRenewal State, or if the new node role doesn't match our current role, renew
if currentRole != role || nodeChanges.Node.Certificate.Status.State == api.IssuanceStateRotate {
// This is a bit complex to be backward compatible with older CAs that
// don't support the Node.Role field. They only use what's presently
// called DesiredRole.
// 1) If we haven't seen the node object before, and the desired role
// is different from our current role, renew the cert. This covers
// the case of starting up after a role change.
// 2) If we have seen the node before, the desired role is
// different from our current role, and either the actual role or
// desired role has changed relative to the last values we saw in
// those fields, renew the cert. This covers the case of the role
// changing while this node is running, but prevents getting into a
// rotation loop if Node.Role isn't what we expect (because it's
// unset). We may renew the certificate an extra time (first when
// DesiredRole changes, and then again when Role changes).
// 3) If the server is sending us IssuanceStateRotate, renew the cert as
// requested by the CA.
roleChanged := n.lastNodeRole.observe(nodeChanges.Node.Role)
desiredRoleChanged := n.lastNodeDesiredRole.observe(nodeChanges.Node.Spec.DesiredRole)
if (currentRole != nodeChanges.Node.Spec.DesiredRole &&
((roleChanged && currentRole != nodeChanges.Node.Role) ||
desiredRoleChanged)) ||
nodeChanges.Node.Certificate.Status.State == api.IssuanceStateRotate {
renewCert()
}
}
@@ -298,7 +340,7 @@ func (n *Node) run(ctx context.Context) (err error) {
// We only want to update the root CA if this is a worker node. Manager nodes directly watch the raft
// store and update the root CA, with the necessary signer, from the raft store (since the managers
// need the CA key as well to potentially issue new TLS certificates).
if currentRole == ca.ManagerRole || bytes.Equal(nodeChanges.RootCert, securityConfig.RootCA().Certs) {
if currentRole == api.NodeRoleManager || bytes.Equal(nodeChanges.RootCert, securityConfig.RootCA().Certs) {
continue
}
newRootCA, err := ca.NewRootCA(nodeChanges.RootCert, nil, nil, ca.DefaultNodeCertExpiration, nil)

View file

@@ -218,6 +218,14 @@ func (r *Root) Remove(v volume.Volume) error {
return fmt.Errorf("unknown volume type %T", v)
}
if lv.active.count > 0 {
return fmt.Errorf("volume has active mounts")
}
if err := lv.unmount(); err != nil {
return err
}
realPath, err := filepath.EvalSymlinks(lv.path)
if err != nil {
if !os.IsNotExist(err) {
@@ -306,6 +314,7 @@ func (v *localVolume) Path() string {
}
// Mount implements the localVolume interface, returning the data location.
// If there are any provided mount options, the resources will be mounted at this point
func (v *localVolume) Mount(id string) (string, error) {
v.m.Lock()
defer v.m.Unlock()
@@ -321,19 +330,35 @@ func (v *localVolume) Mount(id string) (string, error) {
return v.path, nil
}
// Umount is for satisfying the localVolume interface and does not do anything in this driver.
// Unmount dereferences the id, and if it is the last reference will unmount any resources
// that were previously mounted.
func (v *localVolume) Unmount(id string) error {
v.m.Lock()
defer v.m.Unlock()
// Always decrement the count, even if the unmount fails
// Essentially docker doesn't care if this fails, it will send an error, but
// ultimately there's nothing that can be done. If we don't decrement the count
// this volume can never be removed until a daemon restart occurs.
if v.opts != nil {
v.active.count--
if v.active.count == 0 {
}
if v.active.count > 0 {
return nil
}
return v.unmount()
}
func (v *localVolume) unmount() error {
if v.opts != nil {
if err := mount.Unmount(v.path); err != nil {
v.active.count++
if mounted, mErr := mount.Mounted(v.path); mounted || mErr != nil {
return errors.Wrapf(err, "error while unmounting volume path '%s'", v.path)
}
v.active.mounted = false
}
v.active.mounted = false
}
return nil
}