Merge pull request #9617 from tiborvass/bump_v1.3.3

Bump v1.3.3
Tibor Vass, 10 years ago
Parent commit: be8f9a2313

+ 14 - 0
CHANGELOG.md

@@ -1,5 +1,19 @@
 # Changelog
 
+## 1.3.3 (2014-12-11)
+
+#### Security
+- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
+- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
+- Validate image IDs (CVE-2014-9358)
+
+#### Runtime
+- Fix an issue when image archives are being read slowly
+
+#### Client
+- Fix a regression related to stdin redirection
+- Fix a regression with `docker cp` when destination is the current directory
+
 ## 1.3.2 (2014-11-20)
 
 #### Security

+ 1 - 1
Dockerfile

@@ -75,7 +75,7 @@ ENV	GOARM	5
 RUN	cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
 
 # Grab Go's cover tool for dead-simple code coverage testing
-RUN	go get code.google.com/p/go.tools/cmd/cover
+RUN	go get golang.org/x/tools/cmd/cover
 
 # TODO replace FPM with some very minimal debhelper stuff
 RUN	gem install --no-rdoc --no-ri fpm --version 1.3.2

+ 1 - 1
VERSION

@@ -1 +1 @@
-1.3.2
+1.3.3

+ 98 - 7
api/client/hijack.go

@@ -2,6 +2,7 @@ package client
 
 import (
 	"crypto/tls"
+	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -10,6 +11,7 @@ import (
 	"os"
 	"runtime"
 	"strings"
+	"time"
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/dockerversion"
@@ -19,9 +21,99 @@ import (
 	"github.com/docker/docker/pkg/term"
 )
 
+type tlsClientCon struct {
+	*tls.Conn
+	rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+	// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+	// on its underlying connection.
+	if cwc, ok := c.rawConn.(interface {
+		CloseWrite() error
+	}); ok {
+		return cwc.CloseWrite()
+	}
+	return nil
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	rawConn, err := dialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		c := *config
+		c.ServerName = hostname
+		config = &c
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is Docker's difference from the standard crypto/tls package: we
+	// return a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
 func (cli *DockerCli) dial() (net.Conn, error) {
 	if cli.tlsConfig != nil && cli.proto != "unix" {
-		return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
+		// Notice this isn't Go standard's tls.Dial function
+		return tlsDial(cli.proto, cli.addr, cli.tlsConfig)
 	}
 	return net.Dial(cli.proto, cli.addr)
 }
@@ -109,12 +201,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 			io.Copy(rwc, in)
 			log.Debugf("[hijack] End of stdin")
 		}
-		if tcpc, ok := rwc.(*net.TCPConn); ok {
-			if err := tcpc.CloseWrite(); err != nil {
-				log.Debugf("Couldn't send EOF: %s", err)
-			}
-		} else if unixc, ok := rwc.(*net.UnixConn); ok {
-			if err := unixc.CloseWrite(); err != nil {
+
+		if conn, ok := rwc.(interface {
+			CloseWrite() error
+		}); ok {
+			if err := conn.CloseWrite(); err != nil {
 				log.Debugf("Couldn't send EOF: %s", err)
 			}
 		}
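
The hunk above replaces two concrete type assertions (*net.TCPConn, *net.UnixConn) with a single assertion against an anonymous interface, so anything exposing CloseWrite() error, including the new tlsClientCon wrapper, can signal end-of-stdin. A minimal sketch of the same pattern outside the commit (names here are illustrative, not from this diff):

    package example

    import (
        "fmt"
        "net"
    )

    // closeWriter is the minimal capability both *net.TCPConn and *net.UnixConn
    // already satisfy, and that the tlsClientCon wrapper above now satisfies too.
    type closeWriter interface {
        CloseWrite() error
    }

    // sendEOF half-closes the write side of conn when its concrete type supports
    // it, mirroring the duck-typed check the hunk adds to hijack().
    func sendEOF(conn net.Conn) error {
        if cw, ok := conn.(closeWriter); ok {
            return cw.CloseWrite()
        }
        return fmt.Errorf("connection of type %T cannot half-close its write side", conn)
    }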

+ 1 - 1
builder/parser/testfiles/docker/Dockerfile

@@ -76,7 +76,7 @@ ENV	GOARM	5
 RUN	cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
 
 # Grab Go's cover tool for dead-simple code coverage testing
-RUN	go get code.google.com/p/go.tools/cmd/cover
+RUN	go get golang.org/x/tools/cmd/cover
 
 # TODO replace FPM with some very minimal debhelper stuff
 RUN	gem install --no-rdoc --no-ri fpm --version 1.0.2

+ 1 - 1
builder/parser/testfiles/docker/result

@@ -11,7 +11,7 @@
 (env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm 	darwin/amd64 darwin/386 	freebsd/amd64 freebsd/386 freebsd/arm")
 (env "GOARM" "5")
 (run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
-(run "go get code.google.com/p/go.tools/cmd/cover")
+(run "go get golang.org/x/tools/cmd/cover")
 (run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
 (run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
 (run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")

+ 34 - 0
docs/sources/release-notes.md

@@ -4,6 +4,40 @@ understanding, release
 
 #Release Notes
 
+##Version 1.3.3
+(2014-12-11)
+ 
+This release fixes several security issues. In order to encourage immediate
+upgrading, this release also patches some critical bugs. All users are highly
+encouraged to upgrade as soon as possible.
+ 
+*Security fixes*
+ 
+Patches and changes were made to address the following vulnerabilities:
+ 
+* CVE-2014-9356: Path traversal during processing of absolute symlinks.
+Absolute symlinks were not adequately checked for traversal, which created a
+vulnerability via image extraction and/or volume mounts.
+* CVE-2014-9357: Escalation of privileges during decompression of LZMA (.xz)
+archives. Docker 1.3.2 added `chroot` for archive extraction. This created a
+vulnerability that could allow malicious images or builds to write files to the
+host system and escape containerization, leading to privilege escalation.
+* CVE-2014-9358: Path traversal and spoofing opportunities via image
+identifiers. Image IDs passed either via `docker load` or registry communications
+were not sufficiently validated. This created a vulnerability to path traversal
+attacks wherein malicious images or repository spoofing could lead to graph
+corruption and manipulation.
+ 
+*Runtime fixes*
+ 
+* Fixed an issue that caused image archives to be read slowly.
+ 
+*Client fixes*
+ 
+* Fixed a regression related to STDIN redirection.
+* Fixed a regression involving `docker cp` when the current directory is the
+destination.
+
 ##Version 1.3.2
 (2014-11-24)
 

+ 5 - 0
graph/load.go

@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/utils"
 )
 
 // Loads a set of images into the repository. This is the complementary of ImageExport.
@@ -112,6 +113,10 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string
 			log.Debugf("Error unmarshalling json", err)
 			return err
 		}
+		if err := utils.ValidateID(img.ID); err != nil {
+			log.Debugf("Error validating ID: %s", err)
+			return err
+		}
 		if img.Parent != "" {
 			if !s.graph.Exists(img.Parent) {
 				if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
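
graph/load.go now refuses to load an image whose ID fails utils.ValidateID before that ID is ever used as a path component (CVE-2014-9358). The helper's body is not shown in this diff; as an assumption, an equivalent check rejects anything that is not a plain 64-character lowercase hex string, roughly:

    package example

    import (
        "fmt"
        "regexp"
    )

    // validHex is an assumption for illustration: image IDs of this era are
    // 64-character lowercase hexadecimal strings.
    var validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)

    // validateID sketches the kind of check utils.ValidateID is expected to
    // perform; the real helper lives outside this diff.
    func validateID(id string) error {
        if !validHex.MatchString(id) {
            return fmt.Errorf("image ID %q is invalid", id)
        }
        return nil
    }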

+ 1 - 1
graph/tags_unit_test.go

@@ -16,7 +16,7 @@ import (
 
 const (
 	testImageName = "myapp"
-	testImageID   = "foo"
+	testImageID   = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234"
 )
 
 func fakeTar() (io.Reader, error) {

+ 322 - 0
integration-cli/docker_cli_build_test.go

@@ -1167,6 +1167,133 @@ func TestBuildCopyDisallowRemote(t *testing.T) {
 	logDone("build - copy - disallow copy from remote")
 }
 
+func TestBuildAddBadLinks(t *testing.T) {
+	const (
+		dockerfile = `
+			FROM scratch
+			ADD links.tar /
+			ADD foo.txt /symlink/
+			`
+		targetFile = "foo.txt"
+	)
+	var (
+		name = "test-link-absolute"
+	)
+	defer deleteImages(name)
+	ctx, err := fakeContext(dockerfile, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+
+	tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
+	if err != nil {
+		t.Fatalf("failed to create temporary directory: %s", tempDir)
+	}
+	defer os.RemoveAll(tempDir)
+
+	symlinkTarget := fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
+	tarPath := filepath.Join(ctx.Dir, "links.tar")
+	nonExistingFile := filepath.Join(tempDir, targetFile)
+	fooPath := filepath.Join(ctx.Dir, targetFile)
+
+	tarOut, err := os.Create(tarPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tarWriter := tar.NewWriter(tarOut)
+
+	header := &tar.Header{
+		Name:     "symlink",
+		Typeflag: tar.TypeSymlink,
+		Linkname: symlinkTarget,
+		Mode:     0755,
+		Uid:      0,
+		Gid:      0,
+	}
+
+	err = tarWriter.WriteHeader(header)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tarWriter.Close()
+	tarOut.Close()
+
+	foo, err := os.Create(fooPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer foo.Close()
+
+	if _, err := foo.WriteString("test"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
+		t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
+	}
+
+	logDone("build - ADD must add files in container")
+}
+
+func TestBuildAddBadLinksVolume(t *testing.T) {
+	const (
+		dockerfileTemplate = `
+		FROM busybox
+		RUN ln -s /../../../../../../../../%s /x
+		VOLUME /x
+		ADD foo.txt /x/`
+		targetFile = "foo.txt"
+	)
+	var (
+		name       = "test-link-absolute-volume"
+		dockerfile = ""
+	)
+	defer deleteImages(name)
+
+	tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
+	if err != nil {
+		t.Fatalf("failed to create temporary directory: %s", tempDir)
+	}
+	defer os.RemoveAll(tempDir)
+
+	dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
+	nonExistingFile := filepath.Join(tempDir, targetFile)
+
+	ctx, err := fakeContext(dockerfile, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+	fooPath := filepath.Join(ctx.Dir, targetFile)
+
+	foo, err := os.Create(fooPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer foo.Close()
+
+	if _, err := foo.WriteString("test"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
+		t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
+	}
+
+	logDone("build - ADD should add files in volume")
+}
+
 // Issue #5270 - ensure we throw a better error than "unexpected EOF"
 // when we can't access files in the context.
 func TestBuildWithInaccessibleFilesInContext(t *testing.T) {
@@ -2867,6 +2994,118 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
 	logDone("build - ADD tar")
 }
 
+func TestBuildAddTarXz(t *testing.T) {
+	name := "testbuildaddtarxz"
+	defer deleteImages(name)
+
+	ctx := func() *FakeContext {
+		dockerfile := `
+			FROM busybox
+			ADD test.tar.xz /
+			RUN cat /test/foo | grep Hi`
+		tmpDir, err := ioutil.TempDir("", "fake-context")
+		testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
+		if err != nil {
+			t.Fatalf("failed to create test.tar archive: %v", err)
+		}
+		defer testTar.Close()
+
+		tw := tar.NewWriter(testTar)
+
+		if err := tw.WriteHeader(&tar.Header{
+			Name: "test/foo",
+			Size: 2,
+		}); err != nil {
+			t.Fatalf("failed to write tar file header: %v", err)
+		}
+		if _, err := tw.Write([]byte("Hi")); err != nil {
+			t.Fatalf("failed to write tar file content: %v", err)
+		}
+		if err := tw.Close(); err != nil {
+			t.Fatalf("failed to close tar archive: %v", err)
+		}
+		xzCompressCmd := exec.Command("xz", "test.tar")
+		xzCompressCmd.Dir = tmpDir
+		out, _, err := runCommandWithOutput(xzCompressCmd)
+		if err != nil {
+			t.Fatal(err, out)
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
+			t.Fatalf("failed to open destination dockerfile: %v", err)
+		}
+		return &FakeContext{Dir: tmpDir}
+	}()
+
+	defer ctx.Close()
+
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
+	}
+
+	logDone("build - ADD tar.xz")
+}
+
+func TestBuildAddTarXzGz(t *testing.T) {
+	name := "testbuildaddtarxzgz"
+	defer deleteImages(name)
+
+	ctx := func() *FakeContext {
+		dockerfile := `
+			FROM busybox
+			ADD test.tar.xz.gz /
+			RUN ls /test.tar.xz.gz`
+		tmpDir, err := ioutil.TempDir("", "fake-context")
+		testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
+		if err != nil {
+			t.Fatalf("failed to create test.tar archive: %v", err)
+		}
+		defer testTar.Close()
+
+		tw := tar.NewWriter(testTar)
+
+		if err := tw.WriteHeader(&tar.Header{
+			Name: "test/foo",
+			Size: 2,
+		}); err != nil {
+			t.Fatalf("failed to write tar file header: %v", err)
+		}
+		if _, err := tw.Write([]byte("Hi")); err != nil {
+			t.Fatalf("failed to write tar file content: %v", err)
+		}
+		if err := tw.Close(); err != nil {
+			t.Fatalf("failed to close tar archive: %v", err)
+		}
+
+		xzCompressCmd := exec.Command("xz", "test.tar")
+		xzCompressCmd.Dir = tmpDir
+		out, _, err := runCommandWithOutput(xzCompressCmd)
+		if err != nil {
+			t.Fatal(err, out)
+		}
+
+		gzipCompressCmd := exec.Command("gzip", "test.tar.xz")
+		gzipCompressCmd.Dir = tmpDir
+		out, _, err = runCommandWithOutput(gzipCompressCmd)
+		if err != nil {
+			t.Fatal(err, out)
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
+			t.Fatalf("failed to open destination dockerfile: %v", err)
+		}
+		return &FakeContext{Dir: tmpDir}
+	}()
+
+	defer ctx.Close()
+
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatalf("build failed to complete for TestBuildAddTarXzGz: %v", err)
+	}
+
+	logDone("build - ADD tar.xz.gz")
+}
+
 func TestBuildFromGIT(t *testing.T) {
 	name := "testbuildfromgit"
 	defer deleteImages(name)
@@ -3175,3 +3414,86 @@ func TestBuildExoticShellInterpolation(t *testing.T) {
 
 	logDone("build - exotic shell interpolation")
 }
+
+func TestBuildSymlinkBreakout(t *testing.T) {
+	name := "testbuildsymlinkbreakout"
+	tmpdir, err := ioutil.TempDir("", name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	ctx := filepath.Join(tmpdir, "context")
+	if err := os.MkdirAll(ctx, 0755); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
+	from busybox
+	add symlink.tar /
+	add inject /symlink/
+	`), 0644); err != nil {
+		t.Fatal(err)
+	}
+	inject := filepath.Join(ctx, "inject")
+	if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
+		t.Fatal(err)
+	}
+	f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	w := tar.NewWriter(f)
+	w.WriteHeader(&tar.Header{
+		Name:     "symlink2",
+		Typeflag: tar.TypeSymlink,
+		Linkname: "/../../../../../../../../../../../../../../",
+		Uid:      os.Getuid(),
+		Gid:      os.Getgid(),
+	})
+	w.WriteHeader(&tar.Header{
+		Name:     "symlink",
+		Typeflag: tar.TypeSymlink,
+		Linkname: filepath.Join("symlink2", tmpdir),
+		Uid:      os.Getuid(),
+		Gid:      os.Getgid(),
+	})
+	w.Close()
+	f.Close()
+	if _, err := buildImageFromContext(name, &FakeContext{Dir: ctx}, false); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
+		t.Fatal("symlink breakout - inject")
+	} else if !os.IsNotExist(err) {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	logDone("build - symlink breakout")
+}
+
+func TestBuildXZHost(t *testing.T) {
+	name := "testbuildxzhost"
+	defer deleteImages(name)
+
+	ctx, err := fakeContext(`
+FROM busybox
+ADD xz /usr/local/sbin/
+RUN chmod 755 /usr/local/sbin/xz
+ADD test.xz /
+RUN [ ! -e /injected ]`,
+		map[string]string{
+			"test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" +
+				"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" +
+				"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21",
+			"xz": "#!/bin/sh\ntouch /injected",
+		})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatal(err)
+	}
+
+	logDone("build - xz host is being used")
+}

+ 38 - 0
integration-cli/docker_cli_cp_test.go

@@ -371,3 +371,41 @@ func TestCpUnprivilegedUser(t *testing.T) {
 
 	logDone("cp - unprivileged user")
 }
+
+func TestCpToDot(t *testing.T) {
+	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to create a container", out, err)
+	}
+
+	cleanedContainerID := stripTrailingCharacters(out)
+	defer deleteContainer(cleanedContainerID)
+
+	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
+	if err != nil || stripTrailingCharacters(out) != "0" {
+		t.Fatal("failed to set up container", out, err)
+	}
+
+	tmpdir, err := ioutil.TempDir("", "docker-integration")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	cwd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Chdir(cwd)
+	if err := os.Chdir(tmpdir); err != nil {
+		t.Fatal(err)
+	}
+	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", ".")
+	if err != nil {
+		t.Fatalf("couldn't docker cp to \".\" path: %s", err)
+	}
+	content, err := ioutil.ReadFile("./test")
+	if string(content) != "lololol\n" {
+		t.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n")
+	}
+	logDone("cp - to dot path")
+}

+ 134 - 0
integration-cli/docker_cli_save_load_test.go

@@ -62,6 +62,140 @@ func TestSaveAndLoadRepoStdout(t *testing.T) {
 	logDone("load - load a repo using stdout")
 }
 
+// save a repo using xz compression and try to load it using stdout
+func TestSaveXzAndLoadRepoStdout(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz")
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatalf("failed to create a container: %v %v", out, err)
+	}
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	repoName := "foobar-save-load-test-xz-gz"
+
+	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+	out, _, err = runCommandWithOutput(inspectCmd)
+	if err != nil {
+		t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
+	}
+
+	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	out, _, err = runCommandWithOutput(commitCmd)
+	if err != nil {
+		t.Fatalf("failed to commit container: %v %v", out, err)
+	}
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	before, _, err := runCommandWithOutput(inspectCmd)
+	if err != nil {
+		t.Fatalf("the repo should exist before saving it: %v %v", before, err)
+	}
+
+	saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s`
+	saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	if err != nil {
+		t.Fatalf("failed to save repo: %v %v", out, err)
+	}
+
+	deleteImages(repoName)
+
+	loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath)
+	loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+	out, _, err = runCommandWithOutput(loadCmd)
+	if err == nil {
+		t.Fatalf("expected error, but succeeded with no error and output: %v", out)
+	}
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	after, _, err := runCommandWithOutput(inspectCmd)
+	if err == nil {
+		t.Fatalf("the repo should not exist: %v", after)
+	}
+
+	deleteContainer(cleanedContainerID)
+	deleteImages(repoName)
+
+	logDone("load - save a repo with xz compression & load it using stdout")
+}
+
+// save a repo using xz+gz compression and try to load it using stdout
+func TestSaveXzGzAndLoadRepoStdout(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz")
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatalf("failed to create a container: %v %v", out, err)
+	}
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	repoName := "foobar-save-load-test-xz-gz"
+
+	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+	out, _, err = runCommandWithOutput(inspectCmd)
+	if err != nil {
+		t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
+	}
+
+	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	out, _, err = runCommandWithOutput(commitCmd)
+	if err != nil {
+		t.Fatalf("failed to commit container: %v %v", out, err)
+	}
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	before, _, err := runCommandWithOutput(inspectCmd)
+	if err != nil {
+		t.Fatalf("the repo should exist before saving it: %v %v", before, err)
+	}
+
+	saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s`
+	saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	if err != nil {
+		t.Fatalf("failed to save repo: %v %v", out, err)
+	}
+
+	deleteImages(repoName)
+
+	loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath)
+	loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+	out, _, err = runCommandWithOutput(loadCmd)
+	if err == nil {
+		t.Fatalf("expected error, but succeeded with no error and output: %v", out)
+	}
+
+	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	after, _, err := runCommandWithOutput(inspectCmd)
+	if err == nil {
+		t.Fatalf("the repo should not exist: %v", after)
+	}
+
+	deleteContainer(cleanedContainerID)
+	deleteImages(repoName)
+
+	logDone("load - save a repo with xz+gz compression & load it using stdout")
+}
+
 func TestSaveSingleTag(t *testing.T) {
 	repoName := "foobar-save-single-tag-test"
 

+ 31 - 30
pkg/archive/archive.go

@@ -432,32 +432,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 	return pipeReader, nil
 }
 
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `path`.
-// The archive may be compressed with one of the following algorithms:
-//  identity (uncompressed), gzip, bzip2, xz.
-// FIXME: specify behavior when target path exists vs. doesn't exist.
-func Untar(archive io.Reader, dest string, options *TarOptions) error {
-	dest = filepath.Clean(dest)
-
-	if options == nil {
-		options = &TarOptions{}
-	}
-
-	if archive == nil {
-		return fmt.Errorf("Empty archive")
-	}
-
-	if options.Excludes == nil {
-		options.Excludes = []string{}
-	}
-
-	decompressedArchive, err := DecompressStream(archive)
-	if err != nil {
-		return err
-	}
-	defer decompressedArchive.Close()
-
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
 	tr := tar.NewReader(decompressedArchive)
 	trBuf := pools.BufioReader32KPool.Get(nil)
 	defer pools.BufioReader32KPool.Put(trBuf)
@@ -498,10 +473,13 @@ loop:
 			}
 		}
 
-		// Prevent symlink breakout
 		path := filepath.Join(dest, hdr.Name)
-		if !strings.HasPrefix(path, dest) {
-			return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(rel, "..") {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
 		}
 
 		// If path exits we almost always just want to remove and replace it
@@ -537,10 +515,33 @@ loop:
 			return err
 		}
 	}
-
 	return nil
 }
 
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+//  identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(archive io.Reader, dest string, options *TarOptions) error {
+	if archive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	dest = filepath.Clean(dest)
+	if options == nil {
+		options = &TarOptions{}
+	}
+	if options.Excludes == nil {
+		options.Excludes = []string{}
+	}
+	decompressedArchive, err := DecompressStream(archive)
+	if err != nil {
+		return err
+	}
+	defer decompressedArchive.Close()
+	return Unpack(decompressedArchive, dest, options)
+}
+
 func (archiver *Archiver) TarUntar(src, dst string) error {
 	log.Debugf("TarUntar(%s %s)", src, dst)
 	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
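
The old breakout guard compared the joined path against dest with a raw string prefix, which a crafted entry name can defeat: Join("/tmp/dest", "../dest-evil") still carries the prefix "/tmp/dest". Computing the path relative to dest and rejecting anything beginning with ".." closes that hole. A small sketch of the difference, with illustrative paths:

    package example

    import (
        "path/filepath"
        "strings"
    )

    // insideDest reports whether a tar entry name stays inside dest, using the
    // same filepath.Rel technique the hunk above adopts.
    func insideDest(dest, name string) (bool, error) {
        joined := filepath.Join(dest, name)
        rel, err := filepath.Rel(dest, joined)
        if err != nil {
            return false, err
        }
        return !strings.HasPrefix(rel, ".."), nil
    }

    // insideDest("/tmp/dest", "a/b")          -> true  (rel is "a/b")
    // insideDest("/tmp/dest", "../dest-evil") -> false (rel is "../dest-evil"),
    // whereas strings.HasPrefix("/tmp/dest-evil", "/tmp/dest") is true, so the
    // old prefix check on the joined string would have let it through.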

+ 23 - 20
pkg/archive/diff.go

@@ -21,20 +21,7 @@ func mkdev(major int64, minor int64) uint32 {
 	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
 }
 
-// ApplyLayer parses a diff in the standard layer format from `layer`, and
-// applies it to the directory `dest`.
-func ApplyLayer(dest string, layer ArchiveReader) error {
-	dest = filepath.Clean(dest)
-
-	// We need to be able to set any perms
-	oldmask := syscall.Umask(0)
-	defer syscall.Umask(oldmask)
-
-	layer, err := DecompressStream(layer)
-	if err != nil {
-		return err
-	}
-
+func UnpackLayer(dest string, layer ArchiveReader) error {
 	tr := tar.NewReader(layer)
 	trBuf := pools.BufioReader32KPool.Get(tr)
 	defer pools.BufioReader32KPool.Put(trBuf)
@@ -94,12 +81,14 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
 		}
 
 		path := filepath.Join(dest, hdr.Name)
-		base := filepath.Base(path)
-
-		// Prevent symlink breakout
-		if !strings.HasPrefix(path, dest) {
-			return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
 		}
+		if strings.HasPrefix(rel, "..") {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
 
 		if strings.HasPrefix(base, ".wh.") {
 			originalBase := base[len(".wh."):]
@@ -159,6 +148,20 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
 			return err
 		}
 	}
-
 	return nil
 }
+
+// ApplyLayer parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`.
+func ApplyLayer(dest string, layer ArchiveReader) error {
+	dest = filepath.Clean(dest)
+	// We need to be able to set any perms
+	oldmask := syscall.Umask(0)
+	defer syscall.Umask(oldmask)
+
+	layer, err := DecompressStream(layer)
+	if err != nil {
+		return err
+	}
+	return UnpackLayer(dest, layer)
+}

+ 37 - 16
pkg/chrootarchive/archive.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"path/filepath"
 	"runtime"
 	"strings"
 	"syscall"
@@ -15,34 +16,48 @@ import (
 	"github.com/docker/docker/pkg/reexec"
 )
 
+var chrootArchiver = &archive.Archiver{Untar}
+
+func chroot(path string) error {
+	if err := syscall.Chroot(path); err != nil {
+		return err
+	}
+	return syscall.Chdir("/")
+}
+
 func untar() {
 	runtime.LockOSThread()
 	flag.Parse()
-
-	if err := syscall.Chroot(flag.Arg(0)); err != nil {
-		fatal(err)
-	}
-	if err := syscall.Chdir("/"); err != nil {
+	if err := chroot(flag.Arg(0)); err != nil {
 		fatal(err)
 	}
-	options := new(archive.TarOptions)
-	dec := json.NewDecoder(strings.NewReader(flag.Arg(1)))
-	if err := dec.Decode(options); err != nil {
+	var options *archive.TarOptions
+	if err := json.NewDecoder(strings.NewReader(flag.Arg(1))).Decode(&options); err != nil {
 		fatal(err)
 	}
-	if err := archive.Untar(os.Stdin, "/", options); err != nil {
+	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
 		fatal(err)
 	}
+	// fully consume stdin in case it is zero padded
+	flush(os.Stdin)
 	os.Exit(0)
 }
 
-var (
-	chrootArchiver = &archive.Archiver{Untar}
-)
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	if tarArchive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	if options.Excludes == nil {
+		options.Excludes = []string{}
+	}
 
-func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
-	var buf bytes.Buffer
-	enc := json.NewEncoder(&buf)
+	var (
+		buf bytes.Buffer
+		enc = json.NewEncoder(&buf)
+	)
 	if err := enc.Encode(options); err != nil {
 		return fmt.Errorf("Untar json encode: %v", err)
 	}
@@ -51,9 +66,15 @@ func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
 			return err
 		}
 	}
+	dest = filepath.Clean(dest)
+	decompressedArchive, err := archive.DecompressStream(tarArchive)
+	if err != nil {
+		return err
+	}
+	defer decompressedArchive.Close()
 
 	cmd := reexec.Command("docker-untar", dest, buf.String())
-	cmd.Stdin = archive
+	cmd.Stdin = decompressedArchive
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("Untar %s %s", err, out)
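
With this change, decompression happens in the daemon's own environment, and the re-executed docker-untar child, already chrooted into the destination, only ever sees a plain tar stream; the host's xz/gzip are used rather than anything shipped inside untrusted layer content (the root of CVE-2014-9357, and what TestBuildXZHost exercises). A minimal sketch of the call site, assuming a caller that already holds an open archive stream:

    package example

    import (
        "os"

        "github.com/docker/docker/pkg/archive"
        "github.com/docker/docker/pkg/chrootarchive"
    )

    // extract unpacks a possibly compressed tar into dest. Decompression runs
    // in this process; unpacking runs in the "docker-untar" reexec child inside
    // a chroot of dest, per the Untar implementation above.
    func extract(dest string, f *os.File) error {
        return chrootarchive.Untar(f, dest, &archive.TarOptions{})
    }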

+ 57 - 0
pkg/chrootarchive/archive_test.go

@@ -1,10 +1,12 @@
 package chrootarchive
 
 import (
+	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"testing"
+	"time"
 
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/reexec"
@@ -42,3 +44,58 @@ func TestChrootTarUntar(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+type slowEmptyTarReader struct {
+	size      int
+	offset    int
+	chunkSize int
+}
+
+// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null")
+func (s *slowEmptyTarReader) Read(p []byte) (int, error) {
+	time.Sleep(100 * time.Millisecond)
+	count := s.chunkSize
+	if len(p) < s.chunkSize {
+		count = len(p)
+	}
+	for i := 0; i < count; i++ {
+		p[i] = 0
+	}
+	s.offset += count
+	if s.offset > s.size {
+		return count, io.EOF
+	}
+	return count, nil
+}
+
+func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	dest := filepath.Join(tmpdir, "dest")
+	if err := os.MkdirAll(dest, 0700); err != nil {
+		t.Fatal(err)
+	}
+	stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+	if err := Untar(stream, dest, nil); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	dest := filepath.Join(tmpdir, "dest")
+	if err := os.MkdirAll(dest, 0700); err != nil {
+		t.Fatal(err)
+	}
+	stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+	if err := ApplyLayer(dest, stream); err != nil {
+		t.Fatal(err)
+	}
+}

+ 21 - 7
pkg/chrootarchive/diff.go

@@ -3,8 +3,10 @@ package chrootarchive
 import (
 	"flag"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"runtime"
 	"syscall"
 
@@ -16,28 +18,40 @@ func applyLayer() {
 	runtime.LockOSThread()
 	flag.Parse()
 
-	if err := syscall.Chroot(flag.Arg(0)); err != nil {
-		fatal(err)
-	}
-	if err := syscall.Chdir("/"); err != nil {
+	if err := chroot(flag.Arg(0)); err != nil {
 		fatal(err)
 	}
+	// We need to be able to set any perms
+	oldmask := syscall.Umask(0)
+	defer syscall.Umask(oldmask)
 	tmpDir, err := ioutil.TempDir("/", "temp-docker-extract")
 	if err != nil {
 		fatal(err)
 	}
 	os.Setenv("TMPDIR", tmpDir)
-	if err := archive.ApplyLayer("/", os.Stdin); err != nil {
-		os.RemoveAll(tmpDir)
+	err = archive.UnpackLayer("/", os.Stdin)
+	os.RemoveAll(tmpDir)
+	if err != nil {
 		fatal(err)
 	}
 	os.RemoveAll(tmpDir)
+	flush(os.Stdin)
 	os.Exit(0)
 }
 
 func ApplyLayer(dest string, layer archive.ArchiveReader) error {
+	dest = filepath.Clean(dest)
+	decompressed, err := archive.DecompressStream(layer)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if c, ok := decompressed.(io.Closer); ok {
+			c.Close()
+		}
+	}()
 	cmd := reexec.Command("docker-applyLayer", dest)
 	cmd := reexec.Command("docker-applyLayer", dest)
+	cmd.Stdin = decompressed
 	out, err := cmd.CombinedOutput()
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("ApplyLayer %s %s", err, out)
+ 8 - 0
pkg/chrootarchive/init.go

@@ -2,6 +2,8 @@ package chrootarchive
 
 import (
 	"fmt"
+	"io"
+	"io/ioutil"
 	"os"
 
 	"github.com/docker/docker/pkg/reexec"
@@ -16,3 +18,9 @@ func fatal(err error) {
 	fmt.Fprint(os.Stderr, err)
 	os.Exit(1)
 }
+
+// flush consumes all the bytes from the reader discarding
+// any errors
+func flush(r io.Reader) {
+	io.Copy(ioutil.Discard, r)
+}

+ 191 - 0
pkg/symlink/LICENSE.APACHE

@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 27 - 0
pkg/symlink/LICENSE.BSD

@@ -0,0 +1,27 @@
+Copyright (c) 2014 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 3 - 2
pkg/symlink/MAINTAINERS

@@ -1,2 +1,3 @@
-Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-Victor Vieux <vieux@docker.com> (@vieux)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tianon Gravi <admwiggin@gmail.com> (@tianon)

+ 5 - 0
pkg/symlink/README.md

@@ -0,0 +1,5 @@
+Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.

+ 107 - 77
pkg/symlink/fs.go

@@ -1,101 +1,131 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
 package symlink
 
 import (
-	"fmt"
+	"bytes"
+	"errors"
 	"os"
-	"path"
 	"path/filepath"
 	"strings"
 )
 
-const maxLoopCounter = 100
-
-// FollowSymlink will follow an existing link and scope it to the root
-// path provided.
-// The role of this function is to return an absolute path in the root
-// or normalize to the root if the symlink leads to a path which is
-// outside of the root.
-// Errors encountered while attempting to follow the symlink in path
-// will be reported.
-// Normalizations to the root don't constitute errors.
-func FollowSymlinkInScope(link, root string) (string, error) {
-	root, err := filepath.Abs(root)
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path
+func FollowSymlinkInScope(path, root string) (string, error) {
+	path, err := filepath.Abs(path)
 	if err != nil {
 		return "", err
 	}
-
-	link, err = filepath.Abs(link)
+	root, err = filepath.Abs(root)
 	if err != nil {
 		return "", err
 	}
+	return evalSymlinksInScope(path, root)
+}
 
-	if link == root {
-		return root, nil
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+//   If /foo/bar -> /outside,
+//   FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created and not to create subsequently, additional symlinks that could potentially make a
+// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+	root = filepath.Clean(root)
+	if path == root {
+		return path, nil
 	}
-
-	if !strings.HasPrefix(filepath.Dir(link), root) {
-		return "", fmt.Errorf("%s is not within %s", link, root)
+	if !strings.HasPrefix(path, root) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
 	}
+	const maxIter = 255
+	originalPath := path
+	// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+	path = path[len(root):]
+	if root == string(filepath.Separator) {
+		path = string(filepath.Separator) + path
+	}
+	if !strings.HasPrefix(path, string(filepath.Separator)) {
+		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+	}
+	path = filepath.Clean(path)
+	// consume path by taking each frontmost path element,
+	// expanding it if it's a symlink, and appending it to b
+	var b bytes.Buffer
+	// b here will always be considered to be the "current absolute path inside
+	// root" when we append paths to it, we also append a slash and use
+	// filepath.Clean after the loop to trim the trailing slash
+	for n := 0; path != ""; n++ {
+		if n > maxIter {
+			return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+		}
 
-	prev := "/"
-
-	for _, p := range strings.Split(link, "/") {
-		prev = filepath.Join(prev, p)
-
-		loopCounter := 0
-		for {
-			loopCounter++
-
-			if loopCounter >= maxLoopCounter {
-				return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter)
-			}
-
-			if !strings.HasPrefix(prev, root) {
-				// Don't resolve symlinks outside of root. For example,
-				// we don't have to check /home in the below.
-				//
-				//   /home -> usr/home
-				//   FollowSymlinkInScope("/home/bob/foo/bar", "/home/bob/foo")
-				break
-			}
-
-			stat, err := os.Lstat(prev)
-			if err != nil {
-				if os.IsNotExist(err) {
-					break
-				}
-				return "", err
-			}
-
-			// let's break if we're not dealing with a symlink
-			if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
-				break
-			}
+		// find next path component, p
+		i := strings.IndexRune(path, filepath.Separator)
+		var p string
+		if i == -1 {
+			p, path = path, ""
+		} else {
+			p, path = path[:i], path[i+1:]
+		}
 
 
-			// process the symlink
-			dest, err := os.Readlink(prev)
-			if err != nil {
-				return "", err
-			}
+		if p == "" {
+			continue
+		}
 
 
-			if path.IsAbs(dest) {
-				prev = filepath.Join(root, dest)
-			} else {
-				prev, _ = filepath.Abs(prev)
+		// this takes a b.String() like "b/../" and a p like "c" and turns it
+		// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+		// root gets prepended and we Clean again (to remove any trailing slash
+		// if the first Clean gave us just "/")
+		cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+		if cleanP == string(filepath.Separator) {
+			// never Lstat "/" itself
+			b.Reset()
+			continue
+		}
+		fullP := filepath.Clean(root + cleanP)
+
+		fi, err := os.Lstat(fullP)
+		if os.IsNotExist(err) {
+			// if p does not exist, accept it
+			b.WriteString(p)
+			b.WriteRune(filepath.Separator)
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+		if fi.Mode()&os.ModeSymlink == 0 {
+			b.WriteString(p + string(filepath.Separator))
+			continue
+		}
 
 
-				dir := filepath.Dir(prev)
-				prev = filepath.Join(dir, dest)
-				if dir == root && !strings.HasPrefix(prev, root) {
-					prev = root
-				}
-				if len(prev) < len(root) || (len(prev) == len(root) && prev != root) {
-					prev = filepath.Join(root, filepath.Base(dest))
-				}
-			}
+		// it's a symlink, put it at the front of path
+		dest, err := os.Readlink(fullP)
+		if err != nil {
+			return "", err
 		}
 		}
+		if filepath.IsAbs(dest) {
+			b.Reset()
+		}
+		path = dest + string(filepath.Separator) + path
 	}
 	}
-	if prev == "/" {
-		prev = root
-	}
-	return prev, nil
+
+	// see note above on "fullP := ..." for why this is double-cleaned and
+	// what's happening here
+	return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
 }
 }
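
A minimal usage sketch of the exported `FollowSymlinkInScope`, mirroring the example in the doc comment above (the directory layout and paths are illustrative assumptions):

```go
// Usage sketch only: assumes /tmp/scope exists and /tmp/scope/foo is a
// symlink whose target is the absolute path "/outside".
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Per the semantics documented above, the absolute target "/outside" is
	// re-rooted under the scope, so the result is "/tmp/scope/outside"
	// rather than the literal "/outside".
	resolved, err := symlink.FollowSymlinkInScope("/tmp/scope/foo", "/tmp/scope")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved)
}
```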

+ 313 - 159
pkg/symlink/fs_test.go

@@ -1,248 +1,402 @@
+// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE
+
 package symlink
 package symlink
 
 
 import (
 import (
+	"fmt"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
 	"testing"
 	"testing"
 )
 )
 
 
-func abs(t *testing.T, p string) string {
-	o, err := filepath.Abs(p)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return o
+type dirOrLink struct {
+	path   string
+	target string
 }
 }
 
 
-func TestFollowSymLinkNormal(t *testing.T) {
-	link := "testdata/fs/a/d/c/data"
+func makeFs(tmpdir string, fs []dirOrLink) error {
+	for _, s := range fs {
+		s.path = filepath.Join(tmpdir, s.path)
+		if s.target == "" {
+			if err := os.MkdirAll(s.path, 0755); err != nil {
+				return err
+			}
+			continue
+		}
+		if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil {
+			return err
+		}
+		if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) {
+			return err
+		}
+	}
+	return nil
+}
 
 
-	rewrite, err := FollowSymlinkInScope(link, "testdata")
+func testSymlink(tmpdir, path, expected, scope string) error {
+	rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope))
 	if err != nil {
 	if err != nil {
-		t.Fatal(err)
+		return err
 	}
 	}
-
-	if expected := abs(t, "testdata/b/c/data"); expected != rewrite {
-		t.Fatalf("Expected %s got %s", expected, rewrite)
+	expected, err = filepath.Abs(filepath.Join(tmpdir, expected))
+	if err != nil {
+		return err
+	}
+	if expected != rewrite {
+		return fmt.Errorf("Expected %q got %q", expected, rewrite)
 	}
 	}
+	return nil
 }
 }
 
 
-func TestFollowSymLinkRelativePath(t *testing.T) {
-	link := "testdata/fs/i"
-
-	rewrite, err := FollowSymlinkInScope(link, "testdata")
+func TestFollowSymlinkAbsolute(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-
-	if expected := abs(t, "testdata/fs/a"); expected != rewrite {
-		t.Fatalf("Expected %s got %s", expected, rewrite)
+	defer os.RemoveAll(tmpdir)
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil {
+		t.Fatal(err)
 	}
 	}
 }
 }
 
 
-func TestFollowSymLinkUnderLinkedDir(t *testing.T) {
-	dir, err := ioutil.TempDir("", "docker-fs-test")
+func TestFollowSymlinkRelativePath(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	defer os.RemoveAll(dir)
-
-	os.Mkdir(filepath.Join(dir, "realdir"), 0700)
-	os.Symlink("realdir", filepath.Join(dir, "linkdir"))
-
-	linkDir := filepath.Join(dir, "linkdir", "foo")
-	dirUnderLinkDir := filepath.Join(dir, "linkdir", "foo", "bar")
-	os.MkdirAll(dirUnderLinkDir, 0700)
+	defer os.RemoveAll(tmpdir)
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-	rewrite, err := FollowSymlinkInScope(dirUnderLinkDir, linkDir)
+func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-
-	if rewrite != dirUnderLinkDir {
-		t.Fatalf("Expected %s got %s", dirUnderLinkDir, rewrite)
+	defer os.RemoveAll(tmpdir)
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "linkdir", target: "realdir"},
+		{path: "linkdir/foo/bar"},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil {
+		t.Fatal(err)
 	}
 	}
 }
 }
 
 
-func TestFollowSymLinkRandomString(t *testing.T) {
+func TestFollowSymlinkInvalidScopePathPair(t *testing.T) {
 	if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil {
 	if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil {
-		t.Fatal("Random string should fail but didn't")
+		t.Fatal("expected an error")
 	}
 	}
 }
 }
 
 
-func TestFollowSymLinkLastLink(t *testing.T) {
-	link := "testdata/fs/a/d"
-
-	rewrite, err := FollowSymlinkInScope(link, "testdata")
+func TestFollowSymlinkLastLink(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-
-	if expected := abs(t, "testdata/b"); expected != rewrite {
-		t.Fatalf("Expected %s got %s", expected, rewrite)
+	defer os.RemoveAll(tmpdir)
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil {
+		t.Fatal(err)
 	}
 	}
 }
 }
 
 
-func TestFollowSymLinkRelativeLink(t *testing.T) {
-	link := "testdata/fs/a/e/c/data"
-
-	rewrite, err := FollowSymlinkInScope(link, "testdata")
+func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-
-	if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite {
-		t.Fatalf("Expected %s got %s", expected, rewrite)
+	defer os.RemoveAll(tmpdir)
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
+		t.Fatal(err)
+	}
+	// avoid letting symlink e lead us to ../b
+	// normalize to "testdata/fs/a"
+	if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
+		t.Fatal(err)
 	}
 	}
 }
 }
 
 
-func TestFollowSymLinkRelativeLinkScope(t *testing.T) {
+func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
+		t.Fatal(err)
+	}
 	// avoid letting symlink f lead us out of the "testdata" scope
 	// avoid letting symlink f lead us out of the "testdata" scope
 	// we don't normalize because symlink f is in scope and there is no
 	// we don't normalize because symlink f is in scope and there is no
 	// information leak
 	// information leak
-	{
-		link := "testdata/fs/a/f"
-
-		rewrite, err := FollowSymlinkInScope(link, "testdata")
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if expected := abs(t, "testdata/test"); expected != rewrite {
-			t.Fatalf("Expected %s got %s", expected, rewrite)
-		}
+	if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
+		t.Fatal(err)
 	}
 	}
-
 	// avoid letting symlink f lead us out of the "testdata/fs" scope
 	// avoid letting symlink f lead us out of the "testdata/fs" scope
 	// we don't normalize because symlink f is in scope and there is no
 	// we don't normalize because symlink f is in scope and there is no
 	// information leak
 	// information leak
-	{
-		link := "testdata/fs/a/f"
-
-		rewrite, err := FollowSymlinkInScope(link, "testdata/fs")
-		if err != nil {
-			t.Fatal(err)
-		}
+	if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-		if expected := abs(t, "testdata/fs/test"); expected != rewrite {
-			t.Fatalf("Expected %s got %s", expected, rewrite)
-		}
+func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
+	if err != nil {
+		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(tmpdir)
 
 
 	// avoid letting symlink g (pointed at by symlink h) take out of scope
 	// avoid letting symlink g (pointed at by symlink h) take out of scope
 	// TODO: we should probably normalize to scope here because ../[....]/root
 	// TODO: we should probably normalize to scope here because ../[....]/root
 	// is out of scope and we leak information
 	// is out of scope and we leak information
-	{
-		link := "testdata/fs/b/h"
-
-		rewrite, err := FollowSymlinkInScope(link, "testdata")
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if expected := abs(t, "testdata/root"); expected != rewrite {
-			t.Fatalf("Expected %s got %s", expected, rewrite)
-		}
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "testdata/fs/b/h", target: "../g"},
+		{path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
+	}); err != nil {
+		t.Fatal(err)
 	}
 	}
+	if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-	// avoid letting allowing symlink e lead us to ../b
-	// normalize to the "testdata/fs/a"
-	{
-		link := "testdata/fs/a/e"
-
-		rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a")
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if expected := abs(t, "testdata/fs/a"); expected != rewrite {
-			t.Fatalf("Expected %s got %s", expected, rewrite)
-		}
+func TestFollowSymlinkBreakoutPath(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
+	if err != nil {
+		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(tmpdir)
 
 
 	// avoid letting symlink -> ../directory/file escape from scope
 	// avoid letting symlink -> ../directory/file escape from scope
 	// normalize to "testdata/fs/j"
 	// normalize to "testdata/fs/j"
-	{
-		link := "testdata/fs/j/k"
-
-		rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j")
-		if err != nil {
-			t.Fatal(err)
-		}
+	if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-		if expected := abs(t, "testdata/fs/j"); expected != rewrite {
-			t.Fatalf("Expected %s got %s", expected, rewrite)
-		}
+func TestFollowSymlinkToRoot(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
+	if err != nil {
+		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(tmpdir)
 
 
 	// make sure we don't allow escaping to /
 	// make sure we don't allow escaping to /
 	// normalize to dir
 	// normalize to dir
-	{
-		dir, err := ioutil.TempDir("", "docker-fs-test")
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer os.RemoveAll(dir)
-
-		linkFile := filepath.Join(dir, "foo")
-		os.Mkdir(filepath.Join(dir, ""), 0700)
-		os.Symlink("/", linkFile)
-
-		rewrite, err := FollowSymlinkInScope(linkFile, dir)
-		if err != nil {
-			t.Fatal(err)
-		}
+	if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-		if rewrite != dir {
-			t.Fatalf("Expected %s got %s", dir, rewrite)
-		}
+func TestFollowSymlinkSlashDotdot(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
+	if err != nil {
+		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(tmpdir)
+	tmpdir = filepath.Join(tmpdir, "dir", "subdir")
 
 
 	// make sure we don't allow escaping to /
 	// make sure we don't allow escaping to /
 	// normalize to dir
 	// normalize to dir
-	{
-		dir, err := ioutil.TempDir("", "docker-fs-test")
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer os.RemoveAll(dir)
-
-		linkFile := filepath.Join(dir, "foo")
-		os.Mkdir(filepath.Join(dir, ""), 0700)
-		os.Symlink("/../../", linkFile)
-
-		rewrite, err := FollowSymlinkInScope(linkFile, dir)
-		if err != nil {
-			t.Fatal(err)
-		}
+	if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-		if rewrite != dir {
-			t.Fatalf("Expected %s got %s", dir, rewrite)
-		}
+func TestFollowSymlinkDotdot(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot")
+	if err != nil {
+		t.Fatal(err)
 	}
 	}
+	defer os.RemoveAll(tmpdir)
+	tmpdir = filepath.Join(tmpdir, "dir", "subdir")
 
 
 	// make sure we stay in scope without leaking information
 	// make sure we stay in scope without leaking information
 	// this also checks for escaping to /
 	// this also checks for escaping to /
 	// normalize to dir
 	// normalize to dir
-	{
-		dir, err := ioutil.TempDir("", "docker-fs-test")
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer os.RemoveAll(dir)
+	if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-		linkFile := filepath.Join(dir, "foo")
-		os.Mkdir(filepath.Join(dir, ""), 0700)
-		os.Symlink("../../", linkFile)
+func TestFollowSymlinkRelativePath2(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
 
 
-		rewrite, err := FollowSymlinkInScope(linkFile, dir)
-		if err != nil {
-			t.Fatal(err)
-		}
+	if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil {
+		t.Fatal(err)
+	}
+}
 
 
-		if rewrite != dir {
-			t.Fatalf("Expected %s got %s", dir, rewrite)
-		}
+func TestFollowSymlinkScopeLink(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "root2"},
+		{path: "root", target: "root2"},
+		{path: "root2/foo", target: "../bar"},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkRootScope(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	expected, err := filepath.EvalSymlinks(tmpdir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	rewrite, err := FollowSymlinkInScope(tmpdir, "/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if rewrite != expected {
+		t.Fatalf("expected %q got %q", expected, rewrite)
+	}
+}
+
+func TestFollowSymlinkEmpty(t *testing.T) {
+	res, err := FollowSymlinkInScope("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res != wd {
+		t.Fatalf("expected %q got %q", wd, res)
+	}
+}
+
+func TestFollowSymlinkCircular(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
+		t.Fatal("expected an error for foo -> foo")
+	}
+
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "root/bar", target: "baz"},
+		{path: "root/baz", target: "../bak"},
+		{path: "root/bak", target: "/bar"},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
+		t.Fatal("expected an error for bar -> baz -> bak -> bar")
+	}
+}
+
+func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "root2"},
+		{path: "root", target: "root2"},
+		{path: "root/a", target: "r/s"},
+		{path: "root/r", target: "../root/t"},
+		{path: "root/root/t/s/b", target: "/../u"},
+		{path: "root/u/c", target: "."},
+		{path: "root/u/x/y", target: "../v"},
+		{path: "root/u/v", target: "/../w"},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkBreakoutNonExistent(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "root/slash", target: "/"},
+		{path: "root/sym", target: "/idontexist/../slash"},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFollowSymlinkNoLexicalCleaning(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	if err := makeFs(tmpdir, []dirOrLink{
+		{path: "root/sym", target: "/foo/bar"},
+		{path: "root/hello", target: "/sym/../baz"},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil {
+		t.Fatal(err)
 	}
 	}
 }
 }

+ 0 - 1
pkg/symlink/testdata/fs/a/d

@@ -1 +0,0 @@
-/b

+ 0 - 1
pkg/symlink/testdata/fs/a/e

@@ -1 +0,0 @@
-../b

+ 0 - 1
pkg/symlink/testdata/fs/a/f

@@ -1 +0,0 @@
-../../../../test

+ 0 - 1
pkg/symlink/testdata/fs/b/h

@@ -1 +0,0 @@
-../g

+ 0 - 1
pkg/symlink/testdata/fs/g

@@ -1 +0,0 @@
-../../../../../../../../../../../../root

+ 0 - 1
pkg/symlink/testdata/fs/i

@@ -1 +0,0 @@
-a

+ 0 - 1
pkg/symlink/testdata/fs/j/k

@@ -1 +0,0 @@
-../i/a

+ 2 - 2
registry/registry.go

@@ -23,7 +23,6 @@ var (
 	ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
 	ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
 	ErrDoesNotExist          = errors.New("Image does not exist")
 	ErrDoesNotExist          = errors.New("Image does not exist")
 	errLoginRequired         = errors.New("Authentication is required.")
 	errLoginRequired         = errors.New("Authentication is required.")
-	validHex                 = regexp.MustCompile(`^([a-f0-9]{64})$`)
 	validNamespace           = regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
 	validNamespace           = regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
 	validRepo                = regexp.MustCompile(`^([a-z0-9-_.]+)$`)
 	validRepo                = regexp.MustCompile(`^([a-z0-9-_.]+)$`)
 )
 )
@@ -177,7 +176,8 @@ func validateRepositoryName(repositoryName string) error {
 		namespace = "library"
 		namespace = "library"
 		name = nameParts[0]
 		name = nameParts[0]
 
 
-		if validHex.MatchString(name) {
+		// the repository name must not be a valid image ID
+		if err := utils.ValidateID(name); err == nil {
 			return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
 			return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
 		}
 		}
 	} else {
 	} else {

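In effect, a single-component name in the implicit "library" namespace is now rejected exactly when it would also pass the image-ID check, i.e. when `utils.ValidateID` returns nil. A standalone sketch of that inverted use of the validator (the sample names are illustrative assumptions):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/utils"
)

func main() {
	for _, name := range []string{
		"ubuntu",                // ordinary repository name: ValidateID fails, so the name is allowed
		strings.Repeat("a", 64), // 64 hex characters: ValidateID succeeds, so the name is rejected
	} {
		if err := utils.ValidateID(name); err == nil {
			fmt.Printf("%q rejected: cannot specify 64-byte hexadecimal strings\n", name)
		} else {
			fmt.Printf("%q accepted\n", name)
		}
	}
}
```
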
+ 7 - 5
utils/utils.go

@@ -31,6 +31,10 @@ type KeyValuePair struct {
 	Value string
 	Value string
 }
 }
 
 
+var (
+	validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+)
+
 // Request a given URL and return an io.Reader
 // Request a given URL and return an io.Reader
 func Download(url string) (resp *http.Response, err error) {
 func Download(url string) (resp *http.Response, err error) {
 	if resp, err = http.Get(url); err != nil {
 	if resp, err = http.Get(url); err != nil {
@@ -190,11 +194,9 @@ func GenerateRandomID() string {
 }
 }
 
 
 func ValidateID(id string) error {
 func ValidateID(id string) error {
-	if id == "" {
-		return fmt.Errorf("Id can't be empty")
-	}
-	if strings.Contains(id, ":") {
-		return fmt.Errorf("Invalid character in id: ':'")
+	if !validHex.MatchString(id) {
+		return fmt.Errorf("image ID '%s' is invalid", id)
 	}
 	}
 	return nil
 	return nil
 }
 }
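
`ValidateID` now accepts only full 64-character lowercase hexadecimal IDs; the earlier ad-hoc checks (non-empty, no ':') are subsumed by the stricter regexp. A small sketch of the resulting behaviour, reusing the same pattern (the sample IDs are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as the validHex variable added above.
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)

func main() {
	samples := []struct {
		id   string
		note string
	}{
		{strings.Repeat("a", 64), "64 lowercase hex characters: valid"},
		{"", "empty: invalid (also rejected before this change)"},
		{"abc:def", "contains ':': invalid (also rejected before)"},
		{strings.Repeat("a", 63), "63 characters: invalid (newly rejected)"},
		{strings.Repeat("A", 64), "uppercase hex: invalid (newly rejected)"},
	}
	for _, s := range samples {
		fmt.Printf("valid=%-5v %s\n", validHex.MatchString(s.id), s.note)
	}
}
```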