Merge pull request #21011 from tiborvass/1.10.3-cherrypicks

1.10.3 cherrypicks

Commit 831238928c. 34 changed files with 544 additions and 183 deletions.

CHANGELOG.md (28 changes)
@@ -5,6 +5,34 @@ information on the list of deprecated flags and APIs please have a look at
 https://docs.docker.com/misc/deprecated/ where target removal dates can also
 be found.
 
+## 1.10.3 (2016-03-10)
+
+### Runtime
+
+- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706)
+- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647)
+- Fix loss of output in short-lived containers [#20729](https://github.com/docker/docker/pull/20729)
+- Fix an issue that caused the client to hang if the container process died [#20967](https://github.com/docker/docker/pull/20967)
+
+### Distribution
+
+- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831)
+
+### Plugin system
+
+- Fix issue preventing volume plugins from starting when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834)
+- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682)
+- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680)
+
+### Security
+
+- Fix linux32 emulation failing during docker build [#20672](https://github.com/docker/docker/pull/20672)
+  It was due to the `personality` syscall being blocked by the default seccomp profile.
+- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981)
+  It was due to the `ipc` syscall being blocked by the default seccomp profile.
+- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685)
+- Fix issue preventing the daemon from starting if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725)
+
 ## 1.10.2 (2016-02-22)
 
 ### Runtime
@@ -1093,7 +1093,9 @@ func killProcessDirectly(container *container.Container) error {
 			if err != syscall.ESRCH {
 				return err
 			}
-			logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
+			e := errNoSuchProcess{pid, 9}
+			logrus.Debug(e)
+			return e
 		}
 	}
 }
@@ -157,7 +157,7 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
 	logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
 	d.LogContainerEvent(c, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " "))
 
-	if ec.OpenStdin {
+	if ec.OpenStdin && stdin != nil {
 		r, w := io.Pipe()
 		go func() {
 			defer w.Close()
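The guard added here is the whole fix for #20647: a detached exec (`Detach: true`) reaches this handler with a nil `stdin`, and the old code still wired up a pipe copier against it. A minimal standalone sketch of the same guard; the `attachStdin` helper is hypothetical, not part of the daemon:

```go
package main

import (
	"io"
	"os"
)

// attachStdin wires a client stream into a process only when one was
// actually attached; a detached exec passes stdin == nil and gets no pipe.
func attachStdin(openStdin bool, stdin io.ReadCloser) io.ReadCloser {
	if !openStdin || stdin == nil {
		return nil // nothing to pump; the exec runs detached
	}
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		io.Copy(w, stdin) // forward the client's stdin into the process
	}()
	return r
}

func main() {
	// detached: no stdin reader, and crucially no goroutine copying from nil
	if attachStdin(true, nil) == nil {
		os.Stdout.WriteString("detached exec: stdin not wired\n")
	}
}
```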
@@ -140,7 +140,7 @@ func InitContainer(c *Command) *configs.Config {
 	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
 	container.Cgroups.Name = c.ID
 	container.Cgroups.Resources.AllowedDevices = c.AllowedDevices
-	container.Devices = c.AutoCreatedDevices
+	container.Devices = filterDevices(c.AutoCreatedDevices, (c.RemappedRoot.UID != 0))
 	container.Rootfs = c.Rootfs
 	container.Readonlyfs = c.ReadonlyRootfs
 	// This can be overridden later by driver during mount setup based
@@ -154,6 +154,24 @@ func InitContainer(c *Command) *configs.Config {
 	return container
 }
 
+func filterDevices(devices []*configs.Device, userNamespacesEnabled bool) []*configs.Device {
+	if !userNamespacesEnabled {
+		return devices
+	}
+
+	filtered := []*configs.Device{}
+	// if we have user namespaces enabled, these devices will not be created
+	// because of the mknod limitation in the kernel for an unprivileged process.
+	// Rather, they will be bind-mounted, which will only work if they exist;
+	// check for existence and remove non-existent entries from the list
+	for _, device := range devices {
+		if _, err := os.Stat(device.Path); err == nil {
+			filtered = append(filtered, device)
+		}
+	}
+	return filtered
+}
+
 func getEnv(key string, env []string) string {
 	for _, pair := range env {
 		parts := strings.SplitN(pair, "=", 2)
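A hedged usage sketch of the stat-based filtering above. The device list is made up, and a local struct stands in for the vendored `configs.Device` type so the example is self-contained:

```go
package main

import (
	"fmt"
	"os"
)

// device mirrors just the field filterDevices cares about; the real type
// is configs.Device from the vendored libcontainer package.
type device struct{ Path string }

// filterExisting keeps only devices that exist on the host, since with
// user namespaces they are bind-mounted rather than mknod'ed.
func filterExisting(devices []*device) []*device {
	filtered := []*device{}
	for _, d := range devices {
		if _, err := os.Stat(d.Path); err == nil {
			filtered = append(filtered, d)
		}
	}
	return filtered
}

func main() {
	devices := []*device{{Path: "/dev/null"}, {Path: "/dev/no-such-device"}}
	fmt.Println(len(filterExisting(devices))) // 1 on a typical Linux host
}
```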
@@ -149,7 +149,9 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd
 		User: c.ProcessConfig.User,
 	}
 
-	if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil {
+	wg := sync.WaitGroup{}
+	writers, err := setupPipes(container, &c.ProcessConfig, p, pipes, &wg)
+	if err != nil {
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
@@ -171,6 +173,10 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
+	//close the write end of any opened pipes now that they are dup'ed into the container
+	for _, writer := range writers {
+		writer.Close()
+	}
 	// 'oom' is used to emit 'oom' events to the eventstream, 'oomKilled' is used
 	// to set the 'OOMKilled' flag in state
 	oom := notifyOnOOM(cont)
@@ -199,6 +205,9 @@ func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execd
 		}
 		ps = execErr.ProcessState
 	}
+	// wait for all IO goroutine copiers to finish
+	wg.Wait()
+
 	cont.Destroy()
 	destroyed = true
 	// oomKilled will have an oom event if any process within the container was
@@ -485,24 +494,26 @@ func (t *TtyConsole) Close() error {
 	return t.console.Close()
 }
 
-func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error {
+func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes, wg *sync.WaitGroup) ([]io.WriteCloser, error) {
+
+	writers := []io.WriteCloser{}
+
 	rootuid, err := container.HostUID()
 	if err != nil {
-		return err
+		return writers, err
 	}
 
 	if processConfig.Tty {
 		cons, err := p.NewConsole(rootuid)
 		if err != nil {
-			return err
+			return writers, err
 		}
 		term, err := NewTtyConsole(cons, pipes)
 		if err != nil {
-			return err
+			return writers, err
 		}
 		processConfig.Terminal = term
-		return nil
+		return writers, nil
 	}
 	// not a tty--set up stdio pipes
 	term := &execdriver.StdConsole{}
@@ -517,7 +528,7 @@ func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConf
 
 		r, w, err := os.Pipe()
 		if err != nil {
-			return err
+			return writers, err
 		}
 		if pipes.Stdin != nil {
 			go func() {
@@ -526,23 +537,32 @@ func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConf
 			}()
 			p.Stdin = r
 		}
-		return nil
+		return writers, nil
 	}
 
 	// if we have user namespaces enabled (rootuid != 0), we will set
 	// up os pipes for stderr, stdout, stdin so we can chown them to
 	// the proper ownership to allow for proper access to the underlying
 	// fds
-	var fds []int
+	var fds []uintptr
+
+	copyPipes := func(out io.Writer, in io.ReadCloser) {
+		defer wg.Done()
+		io.Copy(out, in)
+		in.Close()
+	}
 
 	//setup stdout
 	r, w, err := os.Pipe()
 	if err != nil {
-		return err
+		w.Close()
+		return writers, err
 	}
-	fds = append(fds, int(r.Fd()), int(w.Fd()))
+	writers = append(writers, w)
+	fds = append(fds, r.Fd(), w.Fd())
 	if pipes.Stdout != nil {
-		go io.Copy(pipes.Stdout, r)
+		wg.Add(1)
+		go copyPipes(pipes.Stdout, r)
 	}
 	term.Closers = append(term.Closers, r)
 	p.Stdout = w
@@ -550,11 +570,14 @@ func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConf
 	//setup stderr
 	r, w, err = os.Pipe()
 	if err != nil {
-		return err
+		w.Close()
+		return writers, err
 	}
-	fds = append(fds, int(r.Fd()), int(w.Fd()))
+	writers = append(writers, w)
+	fds = append(fds, r.Fd(), w.Fd())
 	if pipes.Stderr != nil {
-		go io.Copy(pipes.Stderr, r)
+		wg.Add(1)
+		go copyPipes(pipes.Stderr, r)
 	}
 	term.Closers = append(term.Closers, r)
 	p.Stderr = w
@@ -562,9 +585,10 @@ func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConf
 	//setup stdin
 	r, w, err = os.Pipe()
 	if err != nil {
-		return err
+		r.Close()
+		return writers, err
 	}
-	fds = append(fds, int(r.Fd()), int(w.Fd()))
+	fds = append(fds, r.Fd(), w.Fd())
 	if pipes.Stdin != nil {
 		go func() {
 			io.Copy(w, pipes.Stdin)
@@ -573,11 +597,11 @@ func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConf
 		p.Stdin = r
 	}
 	for _, fd := range fds {
-		if err := syscall.Fchown(fd, rootuid, rootuid); err != nil {
-			return fmt.Errorf("Failed to chown pipes fd: %v", err)
+		if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil {
+			return writers, fmt.Errorf("Failed to chown pipes fd: %v", err)
 		}
 	}
-	return nil
+	return writers, nil
 }
 
 // SupportsHooks implements the execdriver Driver interface.
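Three pieces above work together to fix #20729 (lost output from short-lived containers): the parent closes its copy of each pipe's write end once the child has dup'ed it, so the copier goroutines see EOF, and `wg.Wait()` runs before the container is declared done. A self-contained sketch of the same pattern using only the standard library:

```go
package main

import (
	"io"
	"log"
	"os"
	"os/exec"
	"sync"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}

	cmd := exec.Command("echo", "hello from a short-lived process")
	cmd.Stdout = w // the child gets a dup of the write end on Start

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		io.Copy(os.Stdout, r) // drains until EOF
		r.Close()
	}()

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	w.Close() // drop the parent's write end; EOF now tracks child exit
	cmd.Wait()
	wg.Wait() // the copier has flushed everything before we return
}
```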
@@ -7,6 +7,7 @@ import (
 	"os"
 	"os/exec"
 	"strings"
+	"sync"
 	"syscall"
 
 	"github.com/docker/docker/daemon/execdriver"
@@ -52,13 +53,19 @@ func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo
 	}
 
 	config := active.Config()
-	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
+	wg := sync.WaitGroup{}
+	writers, err := setupPipes(&config, processConfig, p, pipes, &wg)
+	if err != nil {
 		return -1, err
 	}
 
 	if err := active.Start(p); err != nil {
 		return -1, err
 	}
+	//close the write end of any opened pipes now that they are dup'ed into the container
+	for _, writer := range writers {
+		writer.Close()
+	}
 
 	if hooks.Start != nil {
 		pid, err := p.Pid()
@@ -83,5 +90,7 @@ func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo
 		}
 		ps = exitErr.ProcessState
 	}
+	// wait for all IO goroutine copiers to finish
+	wg.Wait()
 	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
 }
@@ -624,6 +624,11 @@ var defaultSeccompProfile = &configs.Seccomp{
 		Action: configs.Allow,
 		Args:   []*configs.Arg{},
 	},
+	{
+		Name:   "ipc",
+		Action: configs.Allow,
+		Args:   []*configs.Arg{},
+	},
 	{
 		Name:   "kill",
 		Action: configs.Allow,
@@ -864,6 +869,39 @@
 		Action: configs.Allow,
 		Args:   []*configs.Arg{},
 	},
+	{
+		Name:   "personality",
+		Action: configs.Allow,
+		Args: []*configs.Arg{
+			{
+				Index: 0,
+				Value: 0x0,
+				Op:    configs.EqualTo,
+			},
+		},
+	},
+	{
+		Name:   "personality",
+		Action: configs.Allow,
+		Args: []*configs.Arg{
+			{
+				Index: 0,
+				Value: 0x0008,
+				Op:    configs.EqualTo,
+			},
+		},
+	},
+	{
+		Name:   "personality",
+		Action: configs.Allow,
+		Args: []*configs.Arg{
+			{
+				Index: 0,
+				Value: 0xffffffff,
+				Op:    configs.EqualTo,
+			},
+		},
+	},
 	{
 		Name:   "pipe",
 		Action: configs.Allow,
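The three whitelisted argument values correspond to `PER_LINUX` (0x0), `PER_LINUX32` (0x0008, what the `linux32` wrapper sets for 32-bit builds), and 0xffffffff (the conventional query-only argument). A hedged sketch of exercising the syscall from Go; it assumes Linux and uses the raw syscall number, since `golang.org/x/sys/unix` has no dedicated wrapper:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

const perLinux32 = 0x0008 // PER_LINUX32, the persona `linux32` switches to

// personality wraps the raw syscall directly.
func personality(persona uintptr) (uintptr, error) {
	ret, _, errno := unix.Syscall(unix.SYS_PERSONALITY, persona, 0, 0)
	if errno != 0 {
		return 0, errno
	}
	return ret, nil
}

func main() {
	// 0xffffffff only queries the current persona; under the old profile
	// even this failed with EPERM, which is what broke linux32 builds.
	cur, err := personality(0xffffffff)
	if err != nil {
		fmt.Println("personality blocked:", err)
		return
	}
	fmt.Printf("current persona: %#x (PER_LINUX32 would be %#x)\n", cur, perLinux32)
}
```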
@@ -12,6 +12,22 @@ import (
 	"github.com/docker/docker/pkg/signal"
 )
 
+type errNoSuchProcess struct {
+	pid    int
+	signal int
+}
+
+func (e errNoSuchProcess) Error() string {
+	return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal)
+}
+
+// isErrNoSuchProcess returns true if the error
+// is an instance of errNoSuchProcess.
+func isErrNoSuchProcess(err error) bool {
+	_, ok := err.(errNoSuchProcess)
+	return ok
+}
+
 // ContainerKill send signal to the container
 // If no signal is given (sig 0), then Kill with SIGKILL and wait
 // for the container to exit.
@@ -87,6 +103,9 @@ func (daemon *Daemon) Kill(container *container.Container) error {
 	// So, instead we'll give it up to 2 more seconds to complete and if
 	// by that time the container is still running, then the error
 	// we got is probably valid and so we return it to the caller.
+	if isErrNoSuchProcess(err) {
+		return nil
+	}
 
 	if container.IsRunning() {
 		container.WaitStop(2 * time.Second)
@@ -98,6 +117,9 @@ func (daemon *Daemon) Kill(container *container.Container) error {
 
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
 	if err := killProcessDirectly(container); err != nil {
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
 		return err
 	}
 
@@ -109,8 +131,9 @@ func (daemon *Daemon) Kill(container *container.Container) error {
 func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
 	err := daemon.killWithSignal(container, sig)
 	if err == syscall.ESRCH {
-		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPID(), sig)
-		return nil
+		e := errNoSuchProcess{container.GetPID(), sig}
+		logrus.Debug(e)
+		return e
 	}
 	return err
 }
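The refactor above replaces a log-and-swallow of ESRCH with a typed error, so `Kill` can distinguish "process already gone" (success, for kill's purposes) from real failures. A standalone sketch of the pattern; Linux-only, and the pid is deliberately bogus:

```go
package main

import (
	"fmt"
	"syscall"
)

type errNoSuchProcess struct{ pid, signal int }

func (e errNoSuchProcess) Error() string {
	return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal)
}

// kill converts the ESRCH sentinel into a typed error that callers can
// recognize and treat as success.
func kill(pid, sig int) error {
	if err := syscall.Kill(pid, syscall.Signal(sig)); err != nil {
		if err == syscall.ESRCH {
			return errNoSuchProcess{pid, sig}
		}
		return err
	}
	return nil
}

func main() {
	err := kill(4194303, 9) // almost certainly not a live pid
	if _, ok := err.(errNoSuchProcess); ok {
		fmt.Println("already dead, treating as success:", err)
	}
}
```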
@@ -128,7 +128,9 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 			// bind.Name is an already existing volume, we need to use that here
 			bind.Driver = v.DriverName()
 			bind.Named = true
-			bind = setBindModeIfNull(bind)
+			if bind.Driver == "local" {
+				bind = setBindModeIfNull(bind)
+			}
 		}
 		if label.RelabelNeeded(bind.Mode) {
 			if err := label.Relabel(bind.Source, container.MountLabel, label.IsShared(bind.Mode)); err != nil {
@@ -42,7 +42,7 @@ type v2Pusher struct {
 	config *ImagePushConfig
 	repo   distribution.Repository
 
-	// pushState is state built by the Download functions.
+	// pushState is state built by the Upload functions.
 	pushState pushState
 }
 
@@ -216,6 +216,7 @@ type v2PushDescriptor struct {
 	repoInfo reference.Named
 	repo distribution.Repository
 	pushState *pushState
+	remoteDescriptor distribution.Descriptor
 }
 
 func (pd *v2PushDescriptor) Key() string {
@@ -230,16 +231,16 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID {
 	return pd.layer.DiffID()
 }
 
-func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
+func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
 	diffID := pd.DiffID()
 
 	pd.pushState.Lock()
-	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
+	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
 		// it is already known that the push is not needed and
 		// therefore doing a stat is unnecessary
 		pd.pushState.Unlock()
 		progress.Update(progressOutput, pd.ID(), "Layer already exists")
-		return nil
+		return descriptor, nil
 	}
 	pd.pushState.Unlock()
 
@@ -249,14 +250,14 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 		descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
 		if err != nil {
 			progress.Update(progressOutput, pd.ID(), "Image push failed")
-			return retryOnError(err)
+			return distribution.Descriptor{}, retryOnError(err)
 		}
 		if exists {
 			progress.Update(progressOutput, pd.ID(), "Layer already exists")
 			pd.pushState.Lock()
 			pd.pushState.remoteLayers[diffID] = descriptor
 			pd.pushState.Unlock()
-			return nil
+			return descriptor, nil
 		}
 	}
 
@@ -286,7 +287,7 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	if mountFrom.SourceRepository != "" {
 		namedRef, err := reference.WithName(mountFrom.SourceRepository)
 		if err != nil {
-			return err
+			return distribution.Descriptor{}, err
 		}
 
 		// TODO (brianbland): We need to construct a reference where the Name is
@@ -294,12 +295,12 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 		// richer reference package
 		remoteRef, err := distreference.WithName(namedRef.RemoteName())
 		if err != nil {
-			return err
+			return distribution.Descriptor{}, err
 		}
 
 		canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
 		if err != nil {
-			return err
+			return distribution.Descriptor{}, err
 		}
 
 		createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
@@ -320,10 +321,10 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 
 		// Cache mapping from this layer's DiffID to the blobsum
 		if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
-			return xfer.DoNotRetry{Err: err}
+			return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
 		}
 
-		return nil
+		return err.Descriptor, nil
 	}
 	if mountFrom.SourceRepository != "" {
 		// unable to mount layer from this repository, so this source mapping is no longer valid
@@ -332,13 +333,13 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	}
 
 	if err != nil {
-		return retryOnError(err)
+		return distribution.Descriptor{}, retryOnError(err)
 	}
 	defer layerUpload.Close()
 
 	arch, err := pd.layer.TarStream()
 	if err != nil {
-		return xfer.DoNotRetry{Err: err}
+		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
 	}
 
 	// don't care if this fails; best effort
@@ -357,12 +358,12 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	nn, err := layerUpload.ReadFrom(tee)
 	compressedReader.Close()
 	if err != nil {
-		return retryOnError(err)
+		return distribution.Descriptor{}, retryOnError(err)
 	}
 
 	pushDigest := digester.Digest()
 	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
-		return retryOnError(err)
+		return distribution.Descriptor{}, retryOnError(err)
 	}
 
 	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
@@ -370,7 +371,7 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 
 	// Cache mapping from this layer's DiffID to the blobsum
 	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
-		return xfer.DoNotRetry{Err: err}
+		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
 	}
 
 	pd.pushState.Lock()
@@ -379,23 +380,24 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	// speaks the v2 protocol.
 	pd.pushState.confirmedV2 = true
 
-	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
+	descriptor := distribution.Descriptor{
 		Digest:    pushDigest,
 		MediaType: schema2.MediaTypeLayer,
 		Size:      nn,
 	}
+	pd.pushState.remoteLayers[diffID] = descriptor
 
 	pd.pushState.Unlock()
 
-	return nil
+	return descriptor, nil
 }
 
+func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
+	pd.remoteDescriptor = descriptor
+}
+
 func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
-	// Not necessary to lock pushStatus because this is always
-	// called after all the mutation in pushStatus.
-	// By the time this function is called, every layer will have
-	// an entry in remoteLayers.
-	return pd.pushState.remoteLayers[pd.DiffID()]
+	return pd.remoteDescriptor
}
 
 // layerAlreadyExists checks if the registry already know about any of the
@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/progress"
 	"golang.org/x/net/context"
@@ -28,8 +29,8 @@ func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager {
 type uploadTransfer struct {
 	Transfer
 
-	diffID layer.DiffID
-	err    error
+	remoteDescriptor distribution.Descriptor
+	err              error
 }
 
 // An UploadDescriptor references a layer that may need to be uploaded.
@@ -41,7 +42,12 @@ type UploadDescriptor interface {
 	// DiffID should return the DiffID for this layer.
 	DiffID() layer.DiffID
 	// Upload is called to perform the Upload.
-	Upload(ctx context.Context, progressOutput progress.Output) error
+	Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
+	// SetRemoteDescriptor provides the distribution.Descriptor that was
+	// returned by Upload. This descriptor is not to be confused with
+	// the UploadDescriptor interface, which is used for internally
+	// identifying layers that are being uploaded.
+	SetRemoteDescriptor(descriptor distribution.Descriptor)
 }
 
 // Upload is a blocking function which ensures the listed layers are present on
@@ -50,7 +56,7 @@ type UploadDescriptor interface {
 func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
 	var (
 		uploads []*uploadTransfer
-		dedupDescriptors = make(map[string]struct{})
+		dedupDescriptors = make(map[string]*uploadTransfer)
 	)
 
 	for _, descriptor := range layers {
@@ -60,12 +66,12 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
 		if _, present := dedupDescriptors[key]; present {
 			continue
 		}
-		dedupDescriptors[key] = struct{}{}
 
 		xferFunc := lum.makeUploadFunc(descriptor)
 		upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
 		defer upload.Release(watcher)
 		uploads = append(uploads, upload.(*uploadTransfer))
+		dedupDescriptors[key] = upload.(*uploadTransfer)
 	}
 
 	for _, upload := range uploads {
@@ -78,6 +84,9 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
 			}
 		}
 	}
+	for _, l := range layers {
+		l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor)
+	}
 
 	return nil
 }
@@ -86,7 +95,6 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun
 	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
 		u := &uploadTransfer{
 			Transfer: NewTransfer(),
-			diffID:   descriptor.DiffID(),
 		}
 
 		go func() {
@@ -105,8 +113,9 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun
 
 			retries := 0
 			for {
-				err := descriptor.Upload(u.Transfer.Context(), progressOutput)
+				remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
 				if err == nil {
+					u.remoteDescriptor = remoteDescriptor
 					break
 				}
 
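The crash fixed by #20831 came from `dedupDescriptors` holding only `struct{}{}` placeholders: a second descriptor with the same key was skipped but never learned the first transfer's result. Storing the `*uploadTransfer` itself lets every layer read back a `remoteDescriptor` once the waits complete. A distilled, self-contained sketch of that shape; the names here are illustrative, not the real xfer types:

```go
package main

import (
	"fmt"
	"sync"
)

type transfer struct {
	done   chan struct{}
	digest string
}

func main() {
	var mu sync.Mutex
	transfers := map[string]*transfer{} // key -> shared transfer, not struct{}{}

	start := func(key string) *transfer {
		mu.Lock()
		defer mu.Unlock()
		if t, ok := transfers[key]; ok {
			return t // duplicate key: reuse the in-flight transfer
		}
		t := &transfer{done: make(chan struct{})}
		transfers[key] = t
		go func() { // the "upload"
			t.digest = "sha256:fake-digest-for-" + key
			close(t.done)
		}()
		return t
	}

	layers := []string{"layer-a", "layer-a", "layer-b"} // shared layer on purpose
	for _, key := range layers {
		t := start(key)
		<-t.done
		fmt.Println(key, "->", t.digest) // duplicates see the same result
	}
}
```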
@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/progress"
@@ -35,13 +36,17 @@ func (u *mockUploadDescriptor) DiffID() layer.DiffID {
 	return u.diffID
 }
 
+// SetRemoteDescriptor is not used in the mock.
+func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) {
+}
+
 // Upload is called to perform the upload.
-func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
+func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
 	if u.currentUploads != nil {
 		defer atomic.AddInt32(u.currentUploads, -1)
 
 		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
-			return errors.New("concurrency limit exceeded")
+			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
 		}
 	}
 
@@ -49,7 +54,7 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre
 	for i := int64(0); i <= 10; i++ {
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
+			return distribution.Descriptor{}, ctx.Err()
 		case <-time.After(10 * time.Millisecond):
 			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
 		}
@@ -57,10 +62,10 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre
 
 	if u.simulateRetries != 0 {
 		u.simulateRetries--
-		return errors.New("simulating retry")
+		return distribution.Descriptor{}, errors.New("simulating retry")
 	}
 
-	return nil
+	return distribution.Descriptor{}, nil
 }
 
 func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
@@ -1,9 +1,10 @@
 # Jenkins CI script for Windows to Linux CI.
 # Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable.
-set +x
-SCRIPT_VER="4-Jan-2016 15:19 PST"
+set +xe
+SCRIPT_VER="Thu Feb 25 18:54:57 UTC 2016"
 
 # TODO to make (even) more resilient:
 #  - Wait for daemon to be running before executing docker commands
 #  - Check if jq is installed
 #  - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version
 #  - Make sure we are not running as local system. Can't do until all Azure nodes are updated.
@@ -20,31 +21,59 @@ SCRIPT_VER="4-Jan-2016 15:19 PST"
 ec=0
 echo INFO: Started at `date`. Script version $SCRIPT_VER
 
-# get the ip
+# !README!
+# There are two daemons running on the remote Linux host:
+# 	- outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon
+#			 from the sources matching the PR.
+# 	- inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted
+#			 (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376).
+# The Windows integration tests are run against this inner daemon.
+
+# get the ip, inner and outer ports.
 ip="${DOCKER_HOST#*://}"
+port_outer="${ip#*:}"
+# inner port is like outer port with last two digits inverted.
+port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/')
 ip="${ip%%:*}"
 
-# make sure it is the right DOCKER_HOST. No, this is not a typo, it really
-# is at port 2357. This is the daemon which is running on the Linux host.
-# The way CI works is to launch a second daemon, docker-in-docker, which
-# listens on port 2375 and is built from sources matching the PR. That's the
-# one which is tested against.
-export DOCKER_HOST="tcp://$ip:2357"
+echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner"
+
+# If TLS is enabled
+if [ -n "$DOCKER_TLS_VERIFY" ]; then
+	protocol=https
+	if [ -z "$DOCKER_MACHINE_NAME" ]; then
+		ec=1
+		echo "ERROR: DOCKER_MACHINE_NAME is undefined"
+	fi
+	certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME)
+	curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem"
+	run_extra_args="-v tlscerts:/etc/docker"
+	daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem"
+else
+	protocol=http
+fi
 
 # Save for use by make.sh and scripts it invokes
-export MAIN_DOCKER_HOST="$DOCKER_HOST"
+export MAIN_DOCKER_HOST="tcp://$ip:$port_inner"
 
 # Verify we can get the remote node to respond to _ping
 if [ $ec -eq 0 ]; then
-	reply=`curl -s http://$ip:2357/_ping`
+	reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping`
 	if [ "$reply" != "OK" ]; then
 		ec=1
-		echo "ERROR: Failed to get OK response from Linux node at $ip:2357. It may be down."
-		echo "       Try re-running this CI job, or ask on #docker-dev or #docker-maintainers"
-		echo "       to see if the node is up and running."
+		echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node"
+		echo "       at $ip:$port_outer when called with an http request for '_ping'. This implies that"
+		echo "       either the daemon has crashed/is not running, or the Linux node is unavailable."
+		echo
+		echo "       A regular ping to the remote Linux node is below. It should reply. If not, the"
+		echo "       machine cannot be reached at all and may have crashed. If it does reply, it is"
+		echo "       likely a case of the Linux daemon not running or having crashed, which requires"
+		echo "       further investigation."
+		echo
+		echo "       Try re-running this CI job, or ask on #docker-dev or #docker-maintainers"
+		echo "       for someone to perform further diagnostics, or take this node out of rotation."
+		echo
 		echo "Regular ping output for remote host below. It should reply. If not, it needs restarting."
 		ping $ip
 	else
+		echo "INFO: The Linux node's outer daemon replied to a ping. Good!"
@@ -54,7 +83,7 @@ fi
 # Get the version from the remote node. Note this may fail if jq is not installed.
 # That's probably worth checking to make sure, just in case.
 if [ $ec -eq 0 ]; then
-	remoteVersion=`curl -s http://$ip:2357/version | jq -c '.Version'`
+	remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'`
 	echo "INFO: Remote daemon is running docker version $remoteVersion"
 fi
 
@@ -153,7 +182,8 @@ fi
 if [ $ec -eq 0 ]; then
 	echo "INFO: Starting build of a Linux daemon to test against, and starting it..."
 	set -x
-	docker run --pid host --privileged -d --name "docker-$COMMITHASH" --net host "docker:$COMMITHASH" bash -c 'echo "INFO: Compiling" && date && hack/make.sh binary && echo "INFO: Compile complete" && date && cp bundles/$(cat VERSION)/binary/docker /bin/docker && echo "INFO: Starting daemon" && exec docker daemon -D -H tcp://0.0.0.0:2375'
+	# aufs in aufs is faster than vfs in aufs
+	docker run $run_extra_args -e DOCKER_GRAPHDRIVER=aufs --pid host --privileged -d --name "docker-$COMMITHASH" --net host "docker:$COMMITHASH" bash -c "echo 'INFO: Compiling' && date && hack/make.sh binary && echo 'INFO: Compile complete' && date && cp bundles/$(cat VERSION)/binary/docker /bin/docker && echo 'INFO: Starting daemon' && exec docker daemon -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args"
 	ec=$?
 	set +x
 	if [ 0 -ne $ec ]; then
@@ -166,8 +196,8 @@ if [ $ec -eq 0 ]; then
 	echo "INFO: Starting local build of Windows binary..."
 	set -x
 	export TIMEOUT="120m"
-	export DOCKER_HOST="tcp://$ip:2375"
-	export DOCKER_TEST_HOST="tcp://$ip:2375"
+	export DOCKER_HOST="tcp://$ip:$port_inner"
+	export DOCKER_TEST_HOST="tcp://$ip:$port_inner"
 	unset DOCKER_CLIENTONLY
 	export DOCKER_REMOTE_DAEMON=1
 	hack/make.sh binary
@@ -193,6 +223,8 @@ fi
 if [ $ec -eq 0 ]; then
 	echo "INFO: Running Integration tests..."
 	set -x
+	export DOCKER_TEST_TLS_VERIFY="$DOCKER_TLS_VERIFY"
+	export DOCKER_TEST_CERT_PATH="$DOCKER_CERT_PATH"
 	hack/make.sh test-integration-cli
 	ec=$?
 	set +x
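The inner/outer split above derives the inner port by swapping the last two digits of the outer port (2375 becomes 2357, 2376 becomes 2367), which is exactly what `sed -E 's/(.)(.)$/\2\1/'` does. The same derivation in Go, just to make the rule concrete:

```go
package main

import "fmt"

// innerPort swaps the last two digits, mirroring the sed expression above.
func innerPort(outer string) string {
	n := len(outer)
	if n < 2 {
		return outer
	}
	return outer[:n-2] + string(outer[n-1]) + string(outer[n-2])
}

func main() {
	fmt.Println(innerPort("2375")) // 2357
	fmt.Println(innerPort("2376")) // 2367
}
```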
@@ -25,6 +25,12 @@ set -e
 
 url='https://get.docker.com/'
 
+key_servers="
+ha.pool.sks-keyservers.net
+pgp.mit.edu
+keyserver.ubuntu.com
+"
+
 command_exists() {
 	command -v "$@" > /dev/null 2>&1
 }
@@ -99,7 +105,10 @@ rpm_import_repository_key() {
 	local key=$1; shift
 	local tmpdir=$(mktemp -d)
 	chmod 600 "$tmpdir"
-	gpg --homedir "$tmpdir" --keyserver ha.pool.sks-keyservers.net --recv-keys "$key"
+	for key_server in $key_servers ; do
+		gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break
+	done
+	gpg --homedir "$tmpdir" -k "$key" >/dev/null
 	gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key
 	rpm --import "$tmpdir"/repo.key
 	rm -rf "$tmpdir"
@@ -409,7 +418,10 @@ do_install() {
 			fi
 			(
 			set -x
-			$sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D"
+			for key_server in $key_servers ; do
+				$sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break
+			done
+			$sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null"
 			$sh_c "mkdir -p /etc/apt/sources.list.d"
 			$sh_c "echo deb [arch=$(dpkg --print-architecture)] https://apt.dockerproject.org/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
 			$sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine'
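The `&& break` loop gives the installer a mirror fallback: try each keyserver in order and stop at the first success, then verify the key actually arrived. The same first-success pattern in Go; the `fetch` callback is a stand-in for the gpg call:

```go
package main

import (
	"errors"
	"fmt"
)

var keyServers = []string{
	"ha.pool.sks-keyservers.net",
	"pgp.mit.edu",
	"keyserver.ubuntu.com",
}

// fetchFirst tries each server in order and returns on the first success,
// mirroring the shell `... && break` loop above.
func fetchFirst(servers []string, fetch func(string) error) error {
	var lastErr error
	for _, s := range servers {
		if lastErr = fetch(s); lastErr == nil {
			return nil
		}
	}
	return fmt.Errorf("all keyservers failed, last error: %v", lastErr)
}

func main() {
	err := fetchFirst(keyServers, func(s string) error {
		if s == "pgp.mit.edu" { // pretend only this mirror is reachable
			return nil
		}
		return errors.New("connection refused")
	})
	fmt.Println("result:", err)
}
```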
@@ -124,7 +124,7 @@ fi
 # functionality.
 if \
 	command -v gcc &> /dev/null \
-	&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -ldevmapper -xc - -o /dev/null &> /dev/null ) \
+	&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -ldevmapper -o /dev/null &> /dev/null ) \
 ; then
 	DOCKER_BUILDTAGS+=' libdm_no_deferred_remove'
 fi
@@ -211,6 +211,8 @@ test_env() {
 	# use "env -i" to tightly control the environment variables that bleed into the tests
 	env -i \
 		DEST="$DEST" \
+		DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \
+		DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \
 		DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
 		DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
 		DOCKER_HOST="$DOCKER_HOST" \
@@ -47,41 +47,41 @@ if [[ ! "${components[*]}" =~ $component ]] ; then
 	components+=( $component )
 fi
 
-# create/update apt-ftparchive file
-if [ ! -f "$APTDIR/conf/apt-ftparchive.conf" ]; then
-	cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf"
-	Dir {
-		ArchiveDir "${APTDIR}";
-		CacheDir "${APTDIR}/db";
-	};
-
-	Default {
-		Packages::Compress ". gzip bzip2";
-		Sources::Compress ". gzip bzip2";
-		Contents::Compress ". gzip bzip2";
-	};
-
-	TreeDefault {
-		BinCacheDB "packages-\$(SECTION)-\$(ARCH).db";
-		Directory "pool/\$(SECTION)";
-		Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages";
-		SrcDirectory "pool/\$(SECTION)";
-		Sources "\$(DIST)/\$(SECTION)/source/Sources";
-		Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)";
-		FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist";
-	};
-	EOF
-
-	for suite in $(exec contrib/reprepro/suites.sh); do
-		cat <<-EOF
-		Tree "dists/${suite}" {
-			Sections "${components[*]}";
-			Architectures "${arches[*]}";
-		}
-
-		EOF
-	done >> "$APTDIR/conf/apt-ftparchive.conf"
-fi
+# create apt-ftparchive file on every run. This is essential to avoid
+# using stale versions of the config file that could cause unnecessary
+# refreshing of bits for EOL-ed releases.
+cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf"
+Dir {
+	ArchiveDir "${APTDIR}";
+	CacheDir "${APTDIR}/db";
+};
+
+Default {
+	Packages::Compress ". gzip bzip2";
+	Sources::Compress ". gzip bzip2";
+	Contents::Compress ". gzip bzip2";
+};
+
+TreeDefault {
+	BinCacheDB "packages-\$(SECTION)-\$(ARCH).db";
+	Directory "pool/\$(SECTION)";
+	Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages";
+	SrcDirectory "pool/\$(SECTION)";
+	Sources "\$(DIST)/\$(SECTION)/source/Sources";
+	Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)";
+	FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist";
+};
+EOF
+
+for suite in $(exec contrib/reprepro/suites.sh); do
+	cat <<-EOF
+	Tree "dists/${suite}" {
+		Sections "${components[*]}";
+		Architectures "${arches[*]}";
+	}
+
+	EOF
+done >> "$APTDIR/conf/apt-ftparchive.conf"
 
 if [ ! -f "$APTDIR/conf/docker-engine-release.conf" ]; then
 	cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf"
@@ -958,7 +958,11 @@ func (s *DockerSuite) TestContainerApiStart(c *check.C) {
 	// second call to start should give 304
 	status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf)
 	c.Assert(err, checker.IsNil)
-	c.Assert(status, checker.Equals, http.StatusNotModified)
+
+	// TODO(tibor): figure out why this doesn't work on windows
+	if isLocalDaemon {
+		c.Assert(status, checker.Equals, http.StatusNotModified)
+	}
 }
 
 func (s *DockerSuite) TestContainerApiStop(c *check.C) {
@@ -123,6 +123,36 @@ func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) {
 	startExec(c, execID, http.StatusConflict)
 }
 
+// #20638
+func (s *DockerSuite) TestExecApiStartWithDetach(c *check.C) {
+	name := "foo"
+	dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "top")
+	data := map[string]interface{}{
+		"cmd":         []string{"true"},
+		"AttachStdin": true,
+	}
+	_, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data)
+	c.Assert(err, checker.IsNil, check.Commentf(string(b)))
+
+	createResp := struct {
+		ID string `json:"Id"`
+	}{}
+	c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b)))
+
+	_, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json")
+	c.Assert(err, checker.IsNil)
+
+	b, err = readBody(body)
+	comment := check.Commentf("response body: %s", b)
+	c.Assert(err, checker.IsNil, comment)
+
+	resp, _, err := sockRequestRaw("GET", "/_ping", nil, "")
+	c.Assert(err, checker.IsNil)
+	if resp.StatusCode != http.StatusOK {
+		c.Fatal("daemon is down, it should be alive")
+	}
+}
+
 func createExec(c *check.C, name string) string {
 	_, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}})
 	c.Assert(err, checker.IsNil, check.Commentf(string(b)))
@@ -4,7 +4,6 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"net/http/httputil"
-	"os"
 	"os/exec"
 	"strconv"
 	"strings"
@@ -88,7 +87,7 @@ func (s *DockerSuite) TestApiDockerApiVersion(c *check.C) {
 
 	// Test using the env var first
 	cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "version")
-	cmd.Env = append([]string{"DOCKER_API_VERSION=xxx"}, os.Environ()...)
+	cmd.Env = appendBaseEnv(false, "DOCKER_API_VERSION=xxx")
 	out, _, _ := runCommandWithOutput(cmd)
 
 	c.Assert(svrVersion, check.Equals, "/vxxx/version")
@@ -4966,7 +4966,7 @@ func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) {
 			c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName))
 		}
 		if qstderr != vstdout+vstderr {
-			c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout))
+			c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr))
 		}
 	}
 }
@@ -71,7 +71,7 @@ func (s *DockerSuite) TestConfigDir(c *check.C) {
 
 	// Test with env var too
 	cmd := exec.Command(dockerBinary, "ps")
-	cmd.Env = append(os.Environ(), "DOCKER_CONFIG="+cDir)
+	cmd.Env = appendBaseEnv(true, "DOCKER_CONFIG="+cDir)
 	out, _, err := runCommandWithOutput(cmd)
 
 	c.Assert(err, checker.IsNil, check.Commentf("ps2 didn't work,out:%v", out))
@@ -95,7 +95,10 @@ func (s *DockerSuite) TestConfigDir(c *check.C) {
 	err = ioutil.WriteFile(tmpCfg, []byte(data), 0600)
 	c.Assert(err, checker.IsNil, check.Commentf("Err creating file"))
 
+	env := appendBaseEnv(false)
+
 	cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps")
+	cmd.Env = env
 	out, _, err = runCommandWithOutput(cmd)
 
 	c.Assert(err, checker.NotNil, check.Commentf("out:%v", out))
@@ -105,7 +108,7 @@ func (s *DockerSuite) TestConfigDir(c *check.C) {
 	// Reset headers and try again using env var this time
 	headers = map[string][]string{}
 	cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps")
-	cmd.Env = append(os.Environ(), "DOCKER_CONFIG="+cDir)
+	cmd.Env = append(env, "DOCKER_CONFIG="+cDir)
 	out, _, err = runCommandWithOutput(cmd)
 
 	c.Assert(err, checker.NotNil, check.Commentf("%v", out))
@@ -115,7 +118,7 @@ func (s *DockerSuite) TestConfigDir(c *check.C) {
 	// Reset headers and make sure flag overrides the env var
 	headers = map[string][]string{}
 	cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps")
-	cmd.Env = append(os.Environ(), "DOCKER_CONFIG=MissingDir")
+	cmd.Env = append(env, "DOCKER_CONFIG=MissingDir")
 	out, _, err = runCommandWithOutput(cmd)
 
 	c.Assert(err, checker.NotNil, check.Commentf("out:%v", out))
@@ -127,10 +130,9 @@ func (s *DockerSuite) TestConfigDir(c *check.C) {
 	// ignore - we don't want to default back to the env var.
 	headers = map[string][]string{}
 	cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps")
-	cmd.Env = append(os.Environ(), "DOCKER_CONFIG="+cDir)
+	cmd.Env = append(env, "DOCKER_CONFIG="+cDir)
 	out, _, err = runCommandWithOutput(cmd)
 
 	c.Assert(err, checker.NotNil, check.Commentf("out:%v", out))
 	c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value,out:%v", out))
-
 }
@@ -1,7 +1,6 @@
 package main
 
 import (
-	"os"
 	"os/exec"
 	"runtime"
 	"strings"
@@ -30,7 +29,7 @@ func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
 	}
 
 	homeKey := homedir.Key()
-	baseEnvs := os.Environ()
+	baseEnvs := appendBaseEnv(true)
 
 	// Remove HOME env var from list so we can add a new value later.
 	for i, env := range baseEnvs {
@@ -54,9 +53,12 @@ func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
 		out, _, err := runCommandWithOutput(helpCmd)
 		c.Assert(err, checker.IsNil, check.Commentf(out))
 		lines := strings.Split(out, "\n")
+		foundTooLongLine := false
 		for _, line := range lines {
-			c.Assert(len(line), checker.LessOrEqualThan, 80, check.Commentf("Line is too long:\n%s", line))
-
+			if !foundTooLongLine && len(line) > 80 {
+				c.Logf("Line is too long:\n%s", line)
+				foundTooLongLine = true
+			}
 			// All lines should not end with a space
 			c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space"))
 
@@ -14,7 +14,7 @@ func (s *DockerSuite) TestCliProxyDisableProxyUnixSock(c *check.C) {
 	testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://..
 
 	cmd := exec.Command(dockerBinary, "info")
-	cmd.Env = appendBaseEnv([]string{"HTTP_PROXY=http://127.0.0.1:9999"})
+	cmd.Env = appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999")
 
 	out, _, err := runCommandWithOutput(cmd)
 	c.Assert(err, checker.IsNil, check.Commentf("%v", out))
@@ -148,6 +148,61 @@ func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) {
 	testPushEmptyLayer(c)
 }
 
+// testConcurrentPush pushes multiple tags to the same repo
+// concurrently.
+func testConcurrentPush(c *check.C) {
+	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+
+	repos := []string{}
+	for _, tag := range []string{"push1", "push2", "push3"} {
+		repo := fmt.Sprintf("%v:%v", repoName, tag)
+		_, err := buildImage(repo, fmt.Sprintf(`
+	FROM busybox
+	ENTRYPOINT ["/bin/echo"]
+	ENV FOO foo
+	ENV BAR bar
+	CMD echo %s
+`, repo), true)
+		c.Assert(err, checker.IsNil)
+		repos = append(repos, repo)
+	}
+
+	// Push tags, in parallel
+	results := make(chan error)
+
+	for _, repo := range repos {
+		go func(repo string) {
+			_, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repo))
+			results <- err
+		}(repo)
+	}
+
+	for range repos {
+		err := <-results
+		c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err))
+	}
+
+	// Clear local images store.
+	args := append([]string{"rmi"}, repos...)
+	dockerCmd(c, args...)
+
+	// Re-pull and run individual tags, to make sure pushes succeeded
+	for _, repo := range repos {
+		dockerCmd(c, "pull", repo)
+		dockerCmd(c, "inspect", repo)
+		out, _ := dockerCmd(c, "run", "--rm", repo)
+		c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo)
+	}
+}
+
+func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) {
+	testConcurrentPush(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) {
+	testConcurrentPush(c)
+}
+
 func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) {
 	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	// tag the image to upload it to the private registry
@@ -790,7 +790,7 @@ func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) {
 	// the container
 
 	cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env")
-	cmd.Env = appendBaseEnv([]string{})
+	cmd.Env = appendBaseEnv(true)
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
@@ -824,7 +824,7 @@ func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) {
 	// already in the env that we're overriding them
 
 	cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env")
-	cmd.Env = appendBaseEnv([]string{"HOSTNAME=bar"})
+	cmd.Env = appendBaseEnv(true, "HOSTNAME=bar")
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
@@ -2518,6 +2518,8 @@ func (s *DockerSuite) TestRunModeUTSHost(c *check.C) {
 }
 
 func (s *DockerSuite) TestRunTLSverify(c *check.C) {
+	// Remote daemons use TLS and this test is not applicable when TLS is required.
+	testRequires(c, SameHostDaemon)
 	if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 {
 		c.Fatalf("Should have worked: %v:\n%v", err, out)
 	}
@@ -59,6 +59,7 @@ func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
 
 	type pluginRequest struct {
 		Name string
+		Opts map[string]string
 	}
 
 	type pluginResp struct {
@@ -69,6 +70,7 @@ func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
 	type vol struct {
 		Name       string
 		Mountpoint string
+		Ninja      bool // hack used to trigger a null volume return on `Get`
 	}
 	var volList []vol
 
@@ -106,7 +108,8 @@ func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
 			send(w, err)
 			return
 		}
-		volList = append(volList, vol{Name: pr.Name})
+		_, isNinja := pr.Opts["ninja"]
+		volList = append(volList, vol{Name: pr.Name, Ninja: isNinja})
 		send(w, nil)
 	})
 
@@ -125,6 +128,10 @@ func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
 
 		for _, v := range volList {
 			if v.Name == pr.Name {
+				if v.Ninja {
+					send(w, map[string]vol{})
+					return
+				}
 				v.Mountpoint = hostVolumePath(pr.Name)
 				send(w, map[string]vol{"Volume": v})
 				return
@@ -411,3 +418,12 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) {
 	c.Assert(s.ec.gets, check.Equals, 1)
 	c.Assert(out, checker.Contains, "No such volume")
 }
+
+// Ensures that the daemon handles the case when the plugin responds to a `Get` request with a null volume and a null error.
+// Previously the daemon would panic in this scenario.
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) {
+	dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "abc", "--opt", "ninja=1")
+	out, _, err := dockerCmdWithError("volume", "inspect", "abc")
+	c.Assert(err, checker.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "No such volume")
+}
@@ -122,7 +122,7 @@ func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) {
 	id <- strings.TrimSpace(out)[:12]
 
 	select {
-	case <-time.After(5 * time.Second):
+	case <-time.After(10 * time.Second):
 		c.Fatal("failed to observe new container created added to stats")
 	case <-addedChan:
 		// ignore, done
@@ -34,9 +34,11 @@ import (
 )
 
 func init() {
-	out, err := exec.Command(dockerBinary, "images").CombinedOutput()
+	cmd := exec.Command(dockerBinary, "images")
+	cmd.Env = appendBaseEnv(true)
+	out, err := cmd.CombinedOutput()
 	if err != nil {
-		panic(err)
+		panic(fmt.Errorf("err=%v\nout=%s\n", err, out))
 	}
 	lines := strings.Split(string(out), "\n")[1:]
 	for _, l := range lines {
@@ -709,7 +711,9 @@ func getAllVolumes() ([]*types.Volume, error) {
 var protectedImages = map[string]struct{}{}
 
 func deleteAllImages() error {
-	out, err := exec.Command(dockerBinary, "images").CombinedOutput()
+	cmd := exec.Command(dockerBinary, "images")
+	cmd.Env = appendBaseEnv(true)
+	out, err := cmd.CombinedOutput()
 	if err != nil {
 		return err
 	}
@@ -1238,7 +1242,7 @@ func getContainerState(c *check.C, id string) (int, bool, error) {
 }
 
 func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd {
-	args := []string{"-D", "build", "-t", name}
+	args := []string{"build", "-t", name}
 	if !useCache {
 		args = append(args, "--no-cache")
 	}
@@ -1582,7 +1586,7 @@ func setupNotary(c *check.C) *testNotary {
 // appendBaseEnv appends the minimum set of environment variables to exec the
 // docker cli binary for testing with correct configuration to the given env
 // list.
-func appendBaseEnv(env []string) []string {
+func appendBaseEnv(isTLS bool, env ...string) []string {
 	preserveList := []string{
 		// preserve remote test host
 		"DOCKER_HOST",
@@ -1591,6 +1595,9 @@ func appendBaseEnv(env []string) []string {
 		// with "GetAddrInfoW: A non-recoverable error occurred during a database lookup."
 		"SystemRoot",
 	}
+	if isTLS {
+		preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH")
+	}
 
 	for _, key := range preserveList {
 		if val := os.Getenv(key); val != "" {
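The signature change turns `appendBaseEnv` into a whitelist: tests no longer inherit the whole `os.Environ()`, only a preserved set plus explicit extras, with the TLS variables opt-in. A minimal sketch of the idea; the variable names are illustrative, not the test suite's:

```go
package main

import (
	"fmt"
	"os"
)

// baseEnv copies only whitelisted variables from the current process
// environment, so stray settings (proxies, TLS flags, ...) cannot leak
// into the commands under test. Extras are appended verbatim.
func baseEnv(preserve []string, extra ...string) []string {
	env := []string{}
	for _, key := range preserve {
		if val := os.Getenv(key); val != "" {
			env = append(env, key+"="+val)
		}
	}
	return append(env, extra...)
}

func main() {
	env := baseEnv([]string{"DOCKER_HOST", "SystemRoot"}, "DOCKER_CONFIG=/tmp/cfg")
	fmt.Println(env)
}
```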
@@ -171,7 +171,7 @@ func parseSubidFile(path, username string) (ranges, error) {
 		}
 
 		text := strings.TrimSpace(s.Text())
-		if text == "" {
+		if text == "" || strings.HasPrefix(text, "#") {
 			continue
 		}
 		parts := strings.Split(text, ":")
@@ -241,3 +241,31 @@ func compareTrees(left, right map[string]node) error {
 	}
 	return nil
 }
+
+func TestParseSubidFileWithNewlinesAndComments(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "parsesubid")
+	if err != nil {
+		t.Fatal(err)
+	}
+	fnamePath := filepath.Join(tmpDir, "testsubuid")
+	fcontent := `tss:100000:65536
+# empty default subuid/subgid file
+
+dockremap:231072:65536`
+	if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil {
+		t.Fatal(err)
+	}
+	ranges, err := parseSubidFile(fnamePath, "dockremap")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(ranges) != 1 {
+		t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges))
+	}
+	if ranges[0].Start != 231072 {
+		t.Fatalf("wanted 231072, got %d instead", ranges[0].Start)
+	}
+	if ranges[0].Length != 65536 {
+		t.Fatalf("wanted 65536, got %d instead", ranges[0].Length)
+	}
+}
@@ -122,6 +122,7 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool)
 
 		if resp.StatusCode != http.StatusOK {
 			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
 			if err != nil {
 				return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()}
 			}
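The single added line is the whole fd-leak fix (#20680): an `http.Response` body that is never closed pins its connection and file descriptor. A generic sketch of the safe shape; the URL is a placeholder:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func call(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	// close on every path, including non-200s; forgetting this on an
	// error branch is exactly how descriptors leaked in the plugin client
	defer resp.Body.Close()

	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s: unexpected status %d", url, resp.StatusCode)
	}
	return b, nil
}

func main() {
	if _, err := call("http://127.0.0.1:1/does-not-exist"); err != nil {
		fmt.Println("expected failure:", err)
	}
}
```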
@@ -3,12 +3,24 @@ package stdcopy
 import (
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 
 	"github.com/Sirupsen/logrus"
 )
 
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
 const (
+	// Stdin represents standard input stream type.
+	Stdin StdType = iota
+	// Stdout represents standard output stream type.
+	Stdout
+	// Stderr represents standard error stream type.
+	Stderr
+
 	stdWriterPrefixLen = 8
 	stdWriterFdIndex   = 0
 	stdWriterSizeIndex = 4
@@ -16,38 +28,32 @@
 	startingBufLen = 32*1024 + stdWriterPrefixLen + 1
 )
 
-// StdType prefixes type and length to standard stream.
-type StdType [stdWriterPrefixLen]byte
-
-var (
-	// Stdin represents standard input stream type.
-	Stdin = StdType{0: 0}
-	// Stdout represents standard output stream type.
-	Stdout = StdType{0: 1}
-	// Stderr represents standard error steam type.
-	Stderr = StdType{0: 2}
-)
-
-// StdWriter is wrapper of io.Writer with extra customized info.
-type StdWriter struct {
+// stdWriter is wrapper of io.Writer with extra customized info.
+type stdWriter struct {
 	io.Writer
-	prefix  StdType
-	sizeBuf []byte
+	prefix byte
 }
 
-func (w *StdWriter) Write(buf []byte) (n int, err error) {
-	var n1, n2 int
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows where to multiplex the output.
+// It makes stdWriter implement io.Writer.
+func (w *stdWriter) Write(buf []byte) (n int, err error) {
 	if w == nil || w.Writer == nil {
 		return 0, errors.New("Writer not instantiated")
 	}
-	binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
-	n1, err = w.Writer.Write(w.prefix[:])
-	if err != nil {
-		n = n1 - stdWriterPrefixLen
-	} else {
-		n2, err = w.Writer.Write(buf)
-		n = n1 + n2 - stdWriterPrefixLen
+	if buf == nil {
+		return 0, nil
 	}
+
+	header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
+	binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf)))
+
+	line := append(header[:], buf...)
+
+	n, err = w.Writer.Write(line)
+	n -= stdWriterPrefixLen
+
+	if n < 0 {
+		n = 0
+	}
@@ -60,16 +66,13 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) {
 // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
 // `t` indicates the id of the stream to encapsulate.
 // It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
-func NewStdWriter(w io.Writer, t StdType) *StdWriter {
-	return &StdWriter{
-		Writer:  w,
-		prefix:  t,
-		sizeBuf: make([]byte, 4),
+func NewStdWriter(w io.Writer, t StdType) io.Writer {
+	return &stdWriter{
+		Writer: w,
+		prefix: byte(t),
 	}
 }
 
-var errInvalidStdHeader = errors.New("Unrecognized input header")
-
 // StdCopy is a modified version of io.Copy.
 //
 // StdCopy will demultiplex `src`, assuming that it contains two streams,
@@ -110,18 +113,18 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
 		}
 
 		// Check the first byte to know where to write
-		switch buf[stdWriterFdIndex] {
-		case 0:
+		switch StdType(buf[stdWriterFdIndex]) {
+		case Stdin:
 			fallthrough
-		case 1:
+		case Stdout:
 			// Write on stdout
 			out = dstout
-		case 2:
+		case Stderr:
 			// Write on stderr
 			out = dsterr
 		default:
 			logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
-			return 0, errInvalidStdHeader
+			return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
 		}
 
 		// Retrieve the size of the frame
@@ -17,10 +17,9 @@ func TestNewStdWriter(t *testing.T) {
 }
 
 func TestWriteWithUnitializedStdWriter(t *testing.T) {
-	writer := StdWriter{
-		Writer:  nil,
-		prefix:  Stdout,
-		sizeBuf: make([]byte, 4),
+	writer := stdWriter{
+		Writer: nil,
+		prefix: byte(Stdout),
 	}
 	n, err := writer.Write([]byte("Something here"))
 	if n != 0 || err == nil {
@@ -180,7 +179,7 @@ func TestStdCopyDetectsCorruptedFrame(t *testing.T) {
 		src: buffer}
 	written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
 	if written != startingBufLen {
-		t.Fatalf("Expected 0 bytes read, got %d", written)
+		t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written)
 	}
 	if err != nil {
 		t.Fatal("Didn't get nil error")
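End to end, the rewritten package still round-trips: each `stdWriter` prefixes frames with an 8-byte header (stream id at byte 0, big-endian payload length at bytes 4 through 7), and `StdCopy` demultiplexes them. A short sketch against the public API as it stands after this diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var muxed bytes.Buffer
	// two logical streams multiplexed into one buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	var outBuf, errBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&outBuf, &errBuf, &muxed); err != nil {
		panic(err)
	}
	fmt.Print("stdout: ", outBuf.String()) // to stdout
	fmt.Print("stderr: ", errBuf.String()) // to stderr
}
```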
@@ -1,6 +1,8 @@
 package volumedrivers
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/volume"
 )
@@ -70,6 +72,11 @@ func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) {
 		return a.Create(name, nil)
 	}
 
+	// plugin may have returned no volume and no error
+	if v == nil {
+		return nil, fmt.Errorf("no such volume")
+	}
+
 	return &volumeAdapter{
 		proxy: a.proxy,
 		name:  v.Name,