Merge pull request #21822 from tiborvass/cherry-picks-1.11.0-rc4

Cherry picks 1.11.0 rc4

Commit a573ab1f81
38 changed files with 788 additions and 152 deletions

.mailmap (82 changes)
@@ -93,7 +93,8 @@ Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
<alexl@redhat.com> <alexander.larsson@gmail.com>
Alexandr Morozov <lk4d4math@gmail.com>
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
Alexander Morozov <lk4d4@docker.com>
<git.nivoc@neverbox.com> <kuehnle@online.de>
O.S. Tezer <ostezer@gmail.com>
<ostezer@gmail.com> <ostezer@users.noreply.github.com>

@@ -106,7 +107,9 @@ Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
Liang-Chi Hsieh <viirya@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Aleksa Sarai <asarai@suse.de>
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
Will Weaver <monkey@buildingbananas.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>

@@ -117,24 +120,27 @@ Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
<marc@marc-abramowitz.com> <msabramo@gmail.com>
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
<bernat@luffy.cx> <vincent@bernat.im>
<bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
<p@pwaller.net> <peter@scraperwiki.com>
<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
Francisco Carriedo <fcarriedo@gmail.com>
<julienbordellier@gmail.com> <git@julienbordellier.com>
<ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
<lk4d4@docker.com> <lk4d4math@gmail.com>
<arnaud.porterie@docker.com> <icecrime@gmail.com>
<baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
Brian Goff <cpuguy83@gmail.com>
<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
<ewindisch@docker.com> <eric@windisch.us>
<eric@windisch.us> <ewindisch@docker.com>
<frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Hollie Teal <hollie@docker.com>
<hollie@docker.com> <hollie.teal@docker.com>
<hollie@docker.com> <hollietealok@users.noreply.github.com>
<huu@prismskylabs.com> <whoshuu@gmail.com>
Jessica Frazelle <jess@docker.com> Jessie Frazelle <jfrazelle@users.noreply.github.com>
<jess@docker.com> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jess@mesosphere.com>
Jessica Frazelle <jess@mesosphere.com> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jess@mesosphere.com> <acidburn@docker.com>
Jessica Frazelle <jess@mesosphere.com> <jess@docker.com>
Jessica Frazelle <jess@mesosphere.com> <princess@docker.com>
<konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
<tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
<estesp@linux.vnet.ibm.com> <estesp@gmail.com>

@@ -142,6 +148,8 @@ Jessica Frazelle <jess@docker.com> Jessie Frazelle <jfrazelle@users.noreply.gith
Thomas LEVEIL <thomasleveil@gmail.com> Thomas LÉVEIL <thomasleveil@users.noreply.github.com>
<oi@truffles.me.uk> <timruffles@googlemail.com>
<Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>

@@ -151,8 +159,9 @@ Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
<jess@docker.com> <princess@docker.com>
John Howard (VM) <John.Howard@microsoft.com> John Howard <jhoward@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>

@@ -169,3 +178,60 @@ bin liu <liubin0329@users.noreply.github.com> <liubin0329@gmail.com>
John Howard (VM) <John.Howard@microsoft.com> jhowardmsft <jhoward@microsoft.com>
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
Tangi COLIN <tangicolin@gmail.com> tangicolin <tangicolin@gmail.com>
Allen Sun <allen.sun@daocloud.io>
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
<aanm90@gmail.com> <martins@noironetworks.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
Chander G <chandergovind@gmail.com>
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
Ying Li <cyli@twistedmatrix.com>
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
<dqminh@cloudflare.com> <dqminh89@gmail.com>
Daniel, Dao Quang Minh <dqminh@cloudflare.com>
Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
Doug Tangren <d.tangren@gmail.com>
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
Ben Golub <ben.golub@dotcloud.com>
Harold Cooper <hrldcpr@gmail.com>
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
Justin Cormack <justin.cormack@docker.com>
<justin.cormack@docker.com> <justin.cormack@unikernel.com>
<justin.cormack@docker.com> <justin@specialbusservice.com>
Kamil Domański <kamil@domanski.co>
Lei Jitang <leijitang@huawei.com>
<leijitang@huawei.com> <leijitang@gmail.com>
Linus Heckemann <lheckemann@twig-world.com>
<lheckemann@twig-world.com> <anonymouse2048@gmail.com>
Lynda O'Leary <lyndaoleary29@gmail.com>
<lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
Marianna Tessel <mtesselh@gmail.com>
Michael Huettermann <michael@huettermann.net>
Moysés Borges <moysesb@gmail.com>
<moysesb@gmail.com> <moyses.furtado@wplex.com.br>
Nigel Poulton <nigelpoulton@hotmail.com>
Qiang Huang <h.huangqiang@huawei.com>
<h.huangqiang@huawei.com> <qhuang@10.0.2.15>
Boaz Shuster <ripcurld.github@gmail.com>
Shuwei Hao <haosw@cn.ibm.com>
<haosw@cn.ibm.com> <haoshuwei24@gmail.com>
Soshi Katsuta <soshi.katsuta@gmail.com>
<soshi.katsuta@gmail.com> <katsuta_soshi@cyberagent.co.jp>
Stefan Berger <stefanb@linux.vnet.ibm.com>
<stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
Stephen Day <stephen.day@docker.com>
<stephen.day@docker.com> <stevvooe@users.noreply.github.com>
Toli Kuznets <toli@docker.com>
Tristan Carel <tristan@cogniteev.com>
<tristan@cogniteev.com> <tristan.carel@gmail.com>
Vincent Demeester <vincent@sbr.pm>
<vincent@sbr.pm> <vincent+github@demeester.fr>
Vishnu Kannan <vishnuk@google.com>
xlgao-zju <xlgao@zju.edu.cn> xlgao <xlgao@zju.edu.cn>
yuchangchun <yuchangchun1@huawei.com> y00277921 <yuchangchun1@huawei.com>
<zij@case.edu> <zjaffee@us.ibm.com>
@@ -248,7 +248,7 @@ RUN set -x \
&& rm -rf "$GOPATH"

# Install runc
ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
ENV RUNC_COMMIT 6c88a526cdd74aab90cc88018368c452c7294a06
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \

@@ -181,7 +181,7 @@ RUN set -x \
&& rm -rf "$GOPATH"

# Install runc
ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
ENV RUNC_COMMIT 6c88a526cdd74aab90cc88018368c452c7294a06
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \

@@ -198,7 +198,7 @@ RUN set -x \
&& rm -rf "$GOPATH"

# Install runc
ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
ENV RUNC_COMMIT 6c88a526cdd74aab90cc88018368c452c7294a06
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \

@@ -74,13 +74,12 @@ WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor seccomp selinux

# Install runc
ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
ENV RUNC_COMMIT 6c88a526cdd74aab90cc88018368c452c7294a06
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
&& cd "$GOPATH/src/github.com/opencontainers/runc" \
&& git checkout -q "$RUNC_COMMIT" \
&& (find . -name "*_ffjson.go" | xargs rm) \
&& make static BUILDTAGS="seccomp apparmor selinux" \
&& cp runc /usr/local/bin/docker-runc

@@ -199,7 +199,7 @@ RUN set -x \
&& rm -rf "$GOPATH"

# Install runc
ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
ENV RUNC_COMMIT 6c88a526cdd74aab90cc88018368c452c7294a06
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \

@@ -178,7 +178,7 @@ RUN set -x \
&& rm -rf "$GOPATH"

# Install runc
ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
ENV RUNC_COMMIT 6c88a526cdd74aab90cc88018368c452c7294a06
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \

@@ -30,7 +30,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& rm -rf /var/lib/apt/lists/*

# Install runc
ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
ENV RUNC_COMMIT 6c88a526cdd74aab90cc88018368c452c7294a06
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -102,7 +102,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
// Save container state to disk. So that if error happens before
// container meta file got removed from disk, then a restart of
// docker should not make a dead container alive.
if err := container.ToDiskLocking(); err != nil {
if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) {
logrus.Errorf("Error saving dying container to disk: %v", err)
}

@@ -123,10 +123,14 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
}

metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
layer.LogReleaseMetadata(metadata)
if err != nil && err != layer.ErrMountDoesNotExist {
return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err)
// When container creation fails and `RWLayer` has not been created yet, we
// do not call `ReleaseRWLayer`
if container.RWLayer != nil {
metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
layer.LogReleaseMetadata(metadata)
if err != nil && err != layer.ErrMountDoesNotExist {
return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err)
}
}

return nil
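Both changes in this hunk are about tolerating a partially removed container: a missing state file is not worth logging, and a never-created RWLayer is not released. Below is a minimal, self-contained Go sketch of the first pattern; `persistState` and its callback are illustrative stand-ins, not Docker's real container API.

```go
package main

import (
	"fmt"
	"os"
)

// persistState ignores "file does not exist" failures, mirroring the
// err != nil && !os.IsNotExist(err) guard above. saveToDisk is a stand-in
// for container.ToDiskLocking, not a Docker API.
func persistState(saveToDisk func() error) {
	if err := saveToDisk(); err != nil && !os.IsNotExist(err) {
		fmt.Printf("Error saving dying container to disk: %v\n", err)
	}
}

func main() {
	// The container directory is already gone: nothing is logged.
	persistState(func() error { return os.ErrNotExist })
	// Any other failure is still surfaced.
	persistState(func() error { return fmt.Errorf("disk full") })
}
```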
@@ -7,6 +7,10 @@ package btrfs
#include <dirent.h>
#include <btrfs/ioctl.h>
#include <btrfs/ctree.h>

static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
}
*/
import "C"

@@ -159,9 +163,10 @@ func subvolSnapshot(src, dest, name string) error {

var args C.struct_btrfs_ioctl_vol_args_v2
args.fd = C.__s64(getDirFd(srcDir))
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}

var cs = C.CString(name)
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs))

_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
@@ -22,6 +22,7 @@ import (
"github.com/Sirupsen/logrus"

"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/devicemapper"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/loopback"

@@ -1656,7 +1657,12 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {

// https://github.com/docker/docker/issues/4036
if supported := devicemapper.UdevSetSyncSupport(true); !supported {
logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
if dockerversion.IAmStatic == "true" {
logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
} else {
logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
}

if !devices.overrideUdevSyncCheck {
return graphdriver.ErrNotSupported
}
@@ -536,6 +536,8 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c
}
}
}
s.Linux.ReadonlyPaths = nil
s.Linux.MaskedPaths = nil
}

// TODO: until a kernel/mount solution exists for handling remount in a user namespace,

@@ -660,10 +662,10 @@ func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, e

if apparmor.IsEnabled() {
appArmorProfile := "docker-default"
if c.HostConfig.Privileged {
appArmorProfile = "unconfined"
} else if len(c.AppArmorProfile) > 0 {
if len(c.AppArmorProfile) > 0 {
appArmorProfile = c.AppArmorProfile
} else if c.HostConfig.Privileged {
appArmorProfile = "unconfined"
}
s.Process.ApparmorProfile = appArmorProfile
}
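The createSpec hunk reorders the AppArmor selection so that a profile requested explicitly wins even when the container is privileged. A small hedged sketch of that precedence, using illustrative parameters rather than the daemon's container struct:

```go
package main

import "fmt"

// selectAppArmorProfile applies the new precedence: an explicit profile
// first, then "unconfined" for privileged containers, else the default.
func selectAppArmorProfile(requested string, privileged bool) string {
	profile := "docker-default"
	if len(requested) > 0 {
		profile = requested
	} else if privileged {
		profile = "unconfined"
	}
	return profile
}

func main() {
	fmt.Println(selectAppArmorProfile("", true))          // unconfined
	fmt.Println(selectAppArmorProfile("my-profile", true)) // my-profile: explicit choice wins
	fmt.Println(selectAppArmorProfile("", false))         // docker-default
}
```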
@@ -169,9 +169,10 @@ Responds with a list of Docker subsystems which this plugin implements.
After activation, the plugin will then be sent events from this subsystem.

Possible values are:
- [`authz`](plugins_authorization.md)
- [`NetworkDriver`](plugins_network.md)
- [`VolumeDriver`](plugins_volume.md)

* [`authz`](plugins_authorization.md)
* [`NetworkDriver`](plugins_network.md)
* [`VolumeDriver`](plugins_volume.md)

## Plugin retries
@@ -154,14 +154,9 @@ Create a container
"com.example.license": "GPL",
"com.example.version": "1.0"
},
"Mounts": [
{
"Source": "/data",
"Destination": "/data",
"Mode": "ro,Z",
"RW": false
}
],
"Volumes": {
"/volumes/data": {}
}
"WorkingDir": "",
"NetworkDisabled": false,
"MacAddress": "12:34:56:78:9a:bc",

@@ -245,7 +240,8 @@ Json Parameters:
- **Entrypoint** - Set the entry point for the container as a string or an array
of strings.
- **Image** - A string specifying the image name to use for the container.
- **Mounts** - An array of mount points in the container.
- **Volumes** - An object mapping mount point paths (strings) inside the
container to empty objects.
- **WorkingDir** - A string specifying the working directory for commands to
run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
@@ -158,14 +158,9 @@ Create a container
"com.example.license": "GPL",
"com.example.version": "1.0"
},
"Mounts": [
{
"Source": "/data",
"Destination": "/data",
"Mode": "ro,Z",
"RW": false
}
],
"Volumes": {
"/volumes/data": {}
}
"WorkingDir": "",
"NetworkDisabled": false,
"MacAddress": "12:34:56:78:9a:bc",

@@ -256,7 +251,8 @@ Json Parameters:
- **Entrypoint** - Set the entry point for the container as a string or an array
of strings.
- **Image** - A string specifying the image name to use for the container.
- **Mounts** - An array of mount points in the container.
- **Volumes** - An object mapping mount point paths (strings) inside the
container to empty objects.
- **WorkingDir** - A string specifying the working directory for commands to
run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
@@ -246,17 +246,6 @@ Create a container
"com.example.license": "GPL",
"com.example.version": "1.0"
},
"Mounts": [
{
"Name": "fac362...80535",
"Source": "/data",
"Destination": "/data",
"Driver": "local",
"Mode": "ro,Z",
"RW": false,
"Propagation": ""
}
],
"Volumes": {
"/volumes/data": {}
}

@@ -366,7 +355,8 @@ Json Parameters:
- **Entrypoint** - Set the entry point for the container as a string or an array
of strings.
- **Image** - A string specifying the image name to use for the container.
- **Mounts** - An array of mount points in the container.
- **Volumes** - An object mapping mount point paths (strings) inside the
container to empty objects.
- **WorkingDir** - A string specifying the working directory for commands to
run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
@@ -265,17 +265,6 @@ Create a container
"com.example.license": "GPL",
"com.example.version": "1.0"
},
"Mounts": [
{
"Name": "fac362...80535",
"Source": "/data",
"Destination": "/data",
"Driver": "local",
"Mode": "ro,Z",
"RW": false,
"Propagation": ""
}
],
"Volumes": {
"/volumes/data": {}
}

@@ -386,7 +375,8 @@ Json Parameters:
- **Entrypoint** - Set the entry point for the container as a string or an array
of strings.
- **Image** - A string specifying the image name to use for the container.
- **Mounts** - An array of mount points in the container.
- **Volumes** - An object mapping mount point paths (strings) inside the
container to empty objects.
- **WorkingDir** - A string specifying the working directory for commands to
run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
@@ -1174,8 +1174,10 @@ To use these, simply pass them on the command line using the `--build-arg
`ARG` variables are not persisted into the built image as `ENV` variables are.
However, `ARG` variables do impact the build cache in similar ways. If a
Dockerfile defines an `ARG` variable whose value is different from a previous
build, then a "cache miss" occurs upon its first usage, not its declaration.
For example, consider this Dockerfile:
build, then a "cache miss" occurs upon first use of the `ARG` variable. The
declaration of the `ARG` variable does not count as a use.

For example, consider these two Dockerfile:

```
1 FROM ubuntu

@@ -1183,12 +1185,17 @@ For example, consider this Dockerfile:
3 RUN echo $CONT_IMG_VER
```

If you specify `--build-arg CONT_IMG_VER=<value>` on the command line the
specification on line 2 does not cause a cache miss; line 3 does cause a cache
miss. The definition on line 2 has no impact on the resulting image. The `RUN`
on line 3 executes a command and in doing so defines a set of environment
variables, including `CONT_IMG_VER`. At that point, the `ARG` variable may
impact the resulting image, so a cache miss occurs.
```
1 FROM ubuntu
2 ARG CONT_IMG_VER
3 RUN echo hello
```

If you specify `--build-arg CONT_IMG_VER=<value>` on the command line, in both
cases, the specification on line 2 does not cause a cache miss; line 3 does
cause a cache miss.`ARG CONT_IMG_VER` causes the RUN line to be identified
as the same as running `CONT_IMG_VER=<value>` echo hello, so if the `<value>`
changes, we get a cache miss.

Consider another example under the same command line:

@@ -1203,6 +1210,20 @@ the variable's value in the `ENV` references the `ARG` variable and that
variable is changed through the command line. In this example, the `ENV`
command causes the image to include the value.

If an `ENV` instruction overrides an `ARG` instruction of the same name, like
this Dockerfile:

```
1 FROM ubuntu
2 ARG CONT_IMG_VER
3 ENV CONT_IMG_VER hello
4 RUN echo $CONT_IMG_VER
```

Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a
constant (`hello`). As a result, the environment variables and values used on
the `RUN` (line 4) doesn't change between builds.

## ONBUILD

ONBUILD [INSTRUCTION]
@@ -1056,7 +1056,7 @@ Both flags take limits in the `<device-path>:<limit>` format. Both read and
write rates must be a positive integer.

## Additional groups
--group-add: Add Linux capabilities
--group-add: Add additional groups to run as

By default, the docker container process runs with the supplementary groups looked
up for the specified user. If one wants to add more to that list of groups, then
@@ -29,7 +29,7 @@ clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
clone git github.com/imdario/mergo 0.2.1

#get libnetwork packages
clone git github.com/docker/libnetwork v0.7.0-rc.1
clone git github.com/docker/libnetwork v0.7.0-rc.4
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4

@@ -60,7 +60,7 @@ clone git github.com/docker/go v1.5.1-1-1-gbaf439e
clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c

clone git github.com/opencontainers/runc 7b6c4c418d5090f4f11eee949fdf49afd15838c9 # libcontainer
clone git github.com/opencontainers/specs 3ce138b1934bf227a418e241ead496c383eaba1c # specs
clone git github.com/opencontainers/specs 93ca97e83ca7fb4fba6d9e30d5470f99ddc02d11 # specs
clone git github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1
# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
clone git github.com/coreos/go-systemd v4
@@ -1109,7 +1109,7 @@ func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C
func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) {
// Not applicable for Windows as there is no concept of --privileged
testRequires(c, DaemonIsLinux, NotUserNamespace)
if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "umount /proc/sysrq-trigger && touch /proc/sysrq-trigger"); code != 0 {
if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 {
c.Fatalf("proc should be writable in privileged container")
}
}
@@ -54,6 +54,11 @@ var (
dockerBasePath string
volumesConfigPath string
containerStoragePath string

// daemonStorageDriver is held globally so that tests can know the storage
// driver of the daemon. This is initialized in docker_utils by sending
// a version call to the daemon and examining the response header.
daemonStorageDriver string
)

const (

@@ -86,6 +86,7 @@ func init() {

var info types.Info
err = json.Unmarshal(body, &info)
daemonStorageDriver = info.Driver
dockerBasePath = info.DockerRootDir
volumesConfigPath = filepath.Join(dockerBasePath, "volumes")
containerStoragePath = filepath.Join(dockerBasePath, "containers")

@@ -109,22 +109,14 @@ var (
}
NotOverlay = testRequirement{
func() bool {
cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts")
if err := cmd.Run(); err != nil {
return true
}
return false
return !strings.HasPrefix(daemonStorageDriver, "overlay")
},
"Test requires underlying root filesystem not be backed by overlay.",
}

Devicemapper = testRequirement{
func() bool {
cmd := exec.Command("grep", "^devicemapper / devicemapper", "/proc/mounts")
if err := cmd.Run(); err != nil {
return false
}
return true
return strings.HasPrefix(daemonStorageDriver, "devicemapper")
},
"Test requires underlying root filesystem to be backed by devicemapper.",
}
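These requirement checks now ask the daemon which storage driver it runs instead of grepping /proc/mounts on the client host. A minimal sketch of the same predicate style; the testRequirement type here is a simplified stand-in and daemonStorageDriver is passed in explicitly rather than discovered from a version call:

```go
package main

import (
	"fmt"
	"strings"
)

// testRequirement pairs a condition with the skip message printed when it fails.
type testRequirement struct {
	Condition   func() bool
	SkipMessage string
}

func storageRequirements(daemonStorageDriver string) (notOverlay, devicemapper testRequirement) {
	notOverlay = testRequirement{
		func() bool { return !strings.HasPrefix(daemonStorageDriver, "overlay") },
		"Test requires underlying root filesystem not be backed by overlay.",
	}
	devicemapper = testRequirement{
		func() bool { return strings.HasPrefix(daemonStorageDriver, "devicemapper") },
		"Test requires underlying root filesystem to be backed by devicemapper.",
	}
	return
}

func main() {
	notOverlay, dm := storageRequirements("overlay")
	fmt.Println(notOverlay.Condition(), dm.Condition()) // false false
}
```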
@@ -79,6 +79,20 @@ func DefaultSpec() specs.Spec {
}

s.Linux = specs.Linux{
MaskedPaths: []string{
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_stats",
"/proc/sched_debug",
},
ReadonlyPaths: []string{
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger",
},
Namespaces: []specs.Namespace{
{Type: "mount"},
{Type: "network"},
@@ -1,5 +1,18 @@
# Changelog

## 0.7.0-rc.4 (2016-04-06)
- Fix the handling for default gateway Endpoint join/leave.

## 0.7.0-rc.3 (2016-04-05)
- Revert fix for default gateway endoint join/leave. Needs to be reworked.
- Persist the network internal mode for bridge networks

## 0.7.0-rc.2 (2016-04-05)
- Fixes https://github.com/docker/libnetwork/issues/1070
- Move IPAM resource initialization out of init()
- Initialize overlay driver before network delete
- Fix the handling for default gateway Endpoint join/lean

## 0.7.0-rc.1 (2016-03-30)
- Fixes https://github.com/docker/libnetwork/issues/985
- Fixes https://github.com/docker/libnetwork/issues/945
@@ -65,20 +65,13 @@ func (sb *sandbox) setupDefaultGW() error {
return nil
}

// If present, removes the endpoint connecting the sandbox to the default gw network.
// Unless it is the endpoint designated to provide the external connectivity.
// If the sandbox is being deleted, removes the endpoint unconditionally.
// If present, detach and remove the endpoint connecting the sandbox to the default gw network.
func (sb *sandbox) clearDefaultGW() error {
var ep *endpoint

if ep = sb.getEndpointInGWNetwork(); ep == nil {
return nil
}

if ep == sb.getGatewayEndpoint() && !sb.inDelete {
return nil
}

if err := ep.sbLeave(sb, false); err != nil {
return fmt.Errorf("container %s: endpoint leaving GW Network failed: %v", sb.containerID, err)
}

@@ -88,21 +81,26 @@ func (sb *sandbox) clearDefaultGW() error {
return nil
}

// Evaluate whether the sandbox requires a default gateway based
// on the endpoints to which it is connected. It does not account
// for the default gateway network endpoint.

func (sb *sandbox) needDefaultGW() bool {
var needGW bool

for _, ep := range sb.getConnectedEndpoints() {
if ep.endpointInGWNetwork() {
return false
continue
}
if ep.getNetwork().Type() == "null" || ep.getNetwork().Type() == "host" {
continue
}
if ep.getNetwork().Internal() {
return false
continue
}
if ep.joinInfo.disableGatewayService {
return false
// During stale sandbox cleanup, joinInfo may be nil
if ep.joinInfo != nil && ep.joinInfo.disableGatewayService {
continue
}
// TODO v6 needs to be handled.
if len(ep.Gateway()) > 0 {

@@ -115,6 +113,7 @@ func (sb *sandbox) needDefaultGW() bool {
}
needGW = true
}

return needGW
}
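The needDefaultGW rewrite changes endpoints that cannot use a gateway (gw-network, internal, host/null, gateway service disabled) from vetoing the default gateway for the whole sandbox to simply being skipped. A hedged sketch of that control-flow difference, with simplified stand-in types rather than libnetwork's sandbox and endpoint:

```go
package main

import "fmt"

// endpointInfo is an illustrative stand-in for a connected endpoint.
type endpointInfo struct {
	internal   bool // network is internal (or otherwise cannot use a gateway)
	hasGateway bool // endpoint already provides external connectivity
}

func needDefaultGW(endpoints []endpointInfo) bool {
	needGW := false
	for _, ep := range endpoints {
		if ep.internal {
			continue // previously: return false, vetoing the gateway for every other endpoint
		}
		if ep.hasGateway {
			return false // external connectivity already exists, no default gateway needed
		}
		needGW = true
	}
	return needGW
}

func main() {
	eps := []endpointInfo{{internal: true}, {internal: false, hasGateway: false}}
	fmt.Println(needDefaultGW(eps)) // true: the internal endpoint no longer suppresses the gateway
}
```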
@@ -95,6 +95,7 @@ func (ncfg *networkConfiguration) MarshalJSON() ([]byte, error) {
nMap["EnableIPMasquerade"] = ncfg.EnableIPMasquerade
nMap["EnableICC"] = ncfg.EnableICC
nMap["Mtu"] = ncfg.Mtu
nMap["Internal"] = ncfg.Internal
nMap["DefaultBridge"] = ncfg.DefaultBridge
nMap["DefaultBindingIP"] = ncfg.DefaultBindingIP.String()
nMap["DefaultGatewayIPv4"] = ncfg.DefaultGatewayIPv4.String()

@@ -143,6 +144,9 @@ func (ncfg *networkConfiguration) UnmarshalJSON(b []byte) error {
ncfg.EnableIPMasquerade = nMap["EnableIPMasquerade"].(bool)
ncfg.EnableICC = nMap["EnableICC"].(bool)
ncfg.Mtu = int(nMap["Mtu"].(float64))
if v, ok := nMap["Internal"]; ok {
ncfg.Internal = v.(bool)
}

return nil
}
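Persisting the new Internal flag has to stay compatible with bridge network configurations stored before the field existed, which is why the unmarshal side only type-asserts when the key is present. A small sketch of that guarded-lookup pattern on a plain map, not the driver's own type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// readInternal reads an optional "Internal" key, defaulting to false when a
// stored config predates the field.
func readInternal(raw []byte) (bool, error) {
	var nMap map[string]interface{}
	if err := json.Unmarshal(raw, &nMap); err != nil {
		return false, err
	}
	internal := false
	if v, ok := nMap["Internal"]; ok {
		internal = v.(bool)
	}
	return internal, nil
}

func main() {
	old, _ := readInternal([]byte(`{"EnableICC": true}`))
	updated, _ := readInternal([]byte(`{"EnableICC": true, "Internal": true}`))
	fmt.Println(old, updated) // false true
}
```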
@@ -104,6 +104,11 @@ func (d *driver) DeleteNetwork(nid string) error {
return fmt.Errorf("invalid network id")
}

// Make sure driver resources are initialized before proceeding
if err := d.configure(); err != nil {
return err
}

n := d.network(nid)
if n == nil {
return fmt.Errorf("could not find network with id %s", nid)
@@ -446,7 +446,7 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
return err
}

if sb.needDefaultGW() {
if sb.needDefaultGW() && sb.getEndpointInGWNetwork() == nil {
return sb.setupDefaultGW()
}

@@ -479,7 +479,14 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
}
}

return sb.clearDefaultGW()
if !sb.needDefaultGW() {
if err := sb.clearDefaultGW(); err != nil {
log.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
sb.ID(), sb.ContainerID(), err)
}
}

return nil
}

func (ep *endpoint) rename(name string) error {

@@ -622,10 +629,7 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
}

sb.deleteHostsEntries(n.getSvcRecords(ep))
if !sb.inDelete && sb.needDefaultGW() {
if sb.getEPwithoutGateway() == nil {
return fmt.Errorf("endpoint without GW expected, but not found")
}
if !sb.inDelete && sb.needDefaultGW() && sb.getEndpointInGWNetwork() == nil {
return sb.setupDefaultGW()
}

@@ -639,7 +643,14 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
}
}

return sb.clearDefaultGW()
if !sb.needDefaultGW() {
if err := sb.clearDefaultGW(); err != nil {
log.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
sb.ID(), sb.ContainerID(), err)
}
}

return nil
}

func (n *network) validateForceDelete(locator string) error {
@@ -8,6 +8,7 @@ import (
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/ipam"
"github.com/docker/libnetwork/ipamapi"
"github.com/docker/libnetwork/ipamutils"
)

// Init registers the built-in ipam service with libnetwork

@@ -28,6 +29,9 @@ func Init(ic ipamapi.Callback, l, g interface{}) error {
return fmt.Errorf("incorrect global datastore passed to built-in ipam init")
}
}

ipamutils.InitNetworks()

a, err := ipam.NewAllocator(localDs, globalDs)
if err != nil {
return err
@@ -1,7 +1,10 @@
// Package ipamutils provides utililty functions for ipam management
package ipamutils

import "net"
import (
"net"
"sync"
)

var (
// PredefinedBroadNetworks contains a list of 31 IPv4 private networks with host size 16 and 12

@@ -10,11 +13,16 @@ var (
// PredefinedGranularNetworks contains a list of 64K IPv4 private networks with host size 8
// (10.x.x.x/24) which do not overlap with the networks in `PredefinedBroadNetworks`
PredefinedGranularNetworks []*net.IPNet

initNetworksOnce sync.Once
)

func init() {
PredefinedBroadNetworks = initBroadPredefinedNetworks()
PredefinedGranularNetworks = initGranularPredefinedNetworks()
// InitNetworks initializes the pre-defined networks used by the built-in IP allocator
func InitNetworks() {
initNetworksOnce.Do(func() {
PredefinedBroadNetworks = initBroadPredefinedNetworks()
PredefinedGranularNetworks = initGranularPredefinedNetworks()
})
}

func initBroadPredefinedNetworks() []*net.IPNet {
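Moving the pool construction out of init() and behind sync.Once lets callers such as the built-in IPAM decide when the predefined networks are built, while repeated calls stay safe. A minimal sketch of the same lazy-initialization pattern with illustrative data instead of the real IPNet pools:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	predefinedNetworks []string
	initNetworksOnce   sync.Once
)

// InitNetworks builds the predefined pools exactly once, no matter how many
// callers race to invoke it. The CIDRs below are placeholders.
func InitNetworks() {
	initNetworksOnce.Do(func() {
		predefinedNetworks = []string{"172.17.0.0/16", "172.18.0.0/16"}
	})
}

func main() {
	InitNetworks()
	InitNetworks() // no-op: the Once already fired
	fmt.Println(predefinedNetworks)
}
```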
@@ -22,6 +22,8 @@ func ElectInterfaceAddresses(name string) (*net.IPNet, []*net.IPNet, error) {
err error
)

InitNetworks()

defer osl.InitOSContext()()

link, _ := netlink.LinkByName(name)
@@ -49,8 +49,14 @@ const (
defaultRespSize = 512
maxConcurrent = 50
logInterval = 2 * time.Second
maxDNSID = 65536
)

type clientConn struct {
dnsID uint16
respWriter dns.ResponseWriter
}

type extDNSEntry struct {
ipStr string
extConn net.Conn

@@ -69,6 +75,7 @@ type resolver struct {
count int32
tStamp time.Time
queryLock sync.Mutex
client map[uint16]clientConn
}

func init() {

@@ -78,8 +85,9 @@ func init() {
// NewResolver creates a new instance of the Resolver
func NewResolver(sb *sandbox) Resolver {
return &resolver{
sb: sb,
err: fmt.Errorf("setup not done yet"),
sb: sb,
err: fmt.Errorf("setup not done yet"),
client: make(map[uint16]clientConn),
}
}

@@ -375,7 +383,9 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
extConn.SetDeadline(time.Now().Add(extIOTimeout))
co := &dns.Conn{Conn: extConn}

if r.concurrentQueryInc() == false {
// forwardQueryStart stores required context to mux multiple client queries over
// one connection; and limits the number of outstanding concurrent queries.
if r.forwardQueryStart(w, query) == false {
old := r.tStamp
r.tStamp = time.Now()
if r.tStamp.Sub(old) > logInterval {

@@ -391,18 +401,25 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
}()
err = co.WriteMsg(query)
if err != nil {
r.concurrentQueryDec()
r.forwardQueryEnd(w, query)
log.Debugf("Send to DNS server failed, %s", err)
continue
}

resp, err = co.ReadMsg()
r.concurrentQueryDec()
if err != nil {
r.forwardQueryEnd(w, query)
log.Debugf("Read from DNS server failed, %s", err)
continue
}

// Retrieves the context for the forwarded query and returns the client connection
// to send the reply to
w = r.forwardQueryEnd(w, resp)
if w == nil {
continue
}

resp.Compress = true
break
}

@@ -418,22 +435,71 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
}
}

func (r *resolver) concurrentQueryInc() bool {
func (r *resolver) forwardQueryStart(w dns.ResponseWriter, msg *dns.Msg) bool {
proto := w.LocalAddr().Network()
dnsID := uint16(rand.Intn(maxDNSID))

cc := clientConn{
dnsID: msg.Id,
respWriter: w,
}

r.queryLock.Lock()
defer r.queryLock.Unlock()

if r.count == maxConcurrent {
return false
}
r.count++

switch proto {
case "tcp":
break
case "udp":
for ok := true; ok == true; dnsID = uint16(rand.Intn(maxDNSID)) {
_, ok = r.client[dnsID]
}
log.Debugf("client dns id %v, changed id %v", msg.Id, dnsID)
r.client[dnsID] = cc
msg.Id = dnsID
default:
log.Errorf("Invalid protocol..")
return false
}

return true
}

func (r *resolver) concurrentQueryDec() bool {
func (r *resolver) forwardQueryEnd(w dns.ResponseWriter, msg *dns.Msg) dns.ResponseWriter {
var (
cc clientConn
ok bool
)
proto := w.LocalAddr().Network()

r.queryLock.Lock()
defer r.queryLock.Unlock()

if r.count == 0 {
return false
log.Errorf("Invalid concurrent query count")
} else {
r.count--
}
r.count--
return true

switch proto {
case "tcp":
break
case "udp":
if cc, ok = r.client[msg.Id]; ok == false {
log.Debugf("Can't retrieve client context for dns id %v", msg.Id)
return nil
}
delete(r.client, msg.Id)
msg.Id = cc.dnsID
w = cc.respWriter
default:
log.Errorf("Invalid protocol")
return nil
}
return w
}
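forwardQueryStart/forwardQueryEnd replace the plain concurrency counter with a small mux: each outstanding UDP client query gets a fresh transaction ID on the shared upstream connection, and the original ID and response writer are restored when the reply comes back. A hedged sketch of that ID-remapping idea with simplified stand-in types (no miekg/dns involved):

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
)

type pendingQuery struct {
	clientID uint16 // transaction ID the client used
}

type queryMux struct {
	mu      sync.Mutex
	pending map[uint16]pendingQuery
}

// start picks an unused upstream ID and records the client's original one.
func (m *queryMux) start(clientID uint16) uint16 {
	m.mu.Lock()
	defer m.mu.Unlock()
	var upstreamID uint16
	for {
		upstreamID = uint16(rand.Intn(65536))
		if _, taken := m.pending[upstreamID]; !taken {
			break
		}
	}
	m.pending[upstreamID] = pendingQuery{clientID: clientID}
	return upstreamID
}

// end retrieves and forgets the mapping so the reply can be rewritten back.
func (m *queryMux) end(upstreamID uint16) (uint16, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	p, ok := m.pending[upstreamID]
	if ok {
		delete(m.pending, upstreamID)
	}
	return p.clientID, ok
}

func main() {
	mux := &queryMux{pending: map[uint16]pendingQuery{}}
	up := mux.start(0x1234)
	back, _ := mux.end(up)
	fmt.Printf("client id restored: %#x\n", back)
}
```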
@@ -197,6 +197,10 @@ func (sb *sandbox) delete(force bool) error {
// Detach from all endpoints
retain := false
for _, ep := range sb.getConnectedEndpoints() {
// gw network endpoint detach and removal are automatic
if ep.endpointInGWNetwork() {
continue
}
// Retain the sanbdox if we can't obtain the network from store.
if _, err := c.getNetworkFromStore(ep.getNetwork().ID()); err != nil {
retain = true
@@ -96,9 +96,10 @@ type Mount struct {

// Hook specifies a command that is run at a particular event in the lifecycle of a container
type Hook struct {
Path string `json:"path"`
Args []string `json:"args,omitempty"`
Env []string `json:"env,omitempty"`
Path string `json:"path"`
Args []string `json:"args,omitempty"`
Env []string `json:"env,omitempty"`
Timeout *int `json:"timeout,omitempty"`
}

// Hooks for container setup and teardown

@@ -128,13 +129,17 @@ type Linux struct {
// If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
CgroupsPath *string `json:"cgroupsPath,omitempty"`
// Namespaces contains the namespaces that are created and/or joined by the container
Namespaces []Namespace `json:"namespaces"`
Namespaces []Namespace `json:"namespaces,omitempty"`
// Devices are a list of device nodes that are created for the container
Devices []Device `json:"devices"`
Devices []Device `json:"devices,omitempty"`
// Seccomp specifies the seccomp security settings for the container.
Seccomp *Seccomp `json:"seccomp,omitempty"`
// RootfsPropagation is the rootfs mount propagation mode for the container.
RootfsPropagation string `json:"rootfsPropagation,omitempty"`
// MaskedPaths masks over the provided paths inside the container.
MaskedPaths []string `json:"maskedPaths,omitempty"`
// ReadonlyPaths sets the provided paths as RO inside the container.
ReadonlyPaths []string `json:"readonlyPaths,omitempty"`
}

// Namespace is the configuration for a Linux namespace
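The specs bump mostly adds fields and `omitempty` tags; the practical effect of `omitempty` on the slice fields is that empty values disappear from the serialized spec instead of showing up as null or []. A tiny illustrative sketch with stand-in types, not the real specs.Linux:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// linuxSketch imitates the tag change; it is not the real specs.Linux type.
type linuxSketch struct {
	Namespaces []string `json:"namespaces,omitempty"`
	Devices    []string `json:"devices,omitempty"`
}

func main() {
	with, _ := json.Marshal(linuxSketch{Namespaces: []string{"mount", "network"}})
	without, _ := json.Marshal(linuxSketch{})
	fmt.Println(string(with))    // {"namespaces":["mount","network"]}
	fmt.Println(string(without)) // {}
}
```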
@@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 0
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 4
VersionMinor = 5
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)

// Version is the specification version that the package types support.